Skip to content
GitHub · Discord

Generators

Input generators for creating dynamic test data and simulating user interactions.


Simulate user behavior in multi-turn conversations or workflows.

Module: giskard.checks.generators.user

UserSimulator uses an LLM to simulate realistic user inputs based on instructions and conversation context. This is particularly useful for testing conversational AI agents or chatbots with dynamic, realistic user interactions.

| Attribute | Type | Default | Description |
|---|---|---|---|
| `instructions` | `str` | required | Instructions defining the user's behavior and goals |
| `max_steps` | `int` | `10` | Maximum number of interaction steps to generate |
| `generator` | `BaseGenerator \| None` | `None` | LLM generator for simulating user behavior |
from giskard.checks.generators.user import UserSimulator
from giskard.agents.generators import Generator
# Create a user simulator
user_sim = UserSimulator(
instructions="""
You are a customer trying to book a flight.
- Start by asking about available flights to Paris
- Ask about prices
- Request to book if the price is reasonable
- Be polite and patient
""",
max_steps=5,
generator=Generator(model="openai/gpt-4")
)
from giskard.checks import scenario, from_fn
# Create a scenario with simulated user
test_scenario = (
scenario("booking_flow")
.interact(
inputs=user_sim, # Use simulator for inputs
outputs=lambda inputs: booking_agent(inputs)
)
.check(from_fn(
lambda trace: "booked" in trace.last.outputs.lower(),
name="booking_completed"
))
)
result = await test_scenario.run()
from giskard.checks import scenario, Equals
# Simulate a complete customer support conversation
support_sim = UserSimulator(
instructions="""
You are a frustrated customer whose order is late.
- Express your frustration politely
- Provide order number when asked: #12345
- Accept a solution if offered (refund or replacement)
""",
max_steps=10,
generator=Generator(model="openai/gpt-4")
)
test_scenario = (
scenario("support_escalation")
.interact(
inputs=support_sim,
outputs=lambda inputs: support_agent(inputs)
)
# Check that issue was resolved
.check(from_fn(
lambda trace: any(
keyword in str(trace.last.metadata.get("resolution", "")).lower()
for keyword in ["refund", "replacement", "resolved"]
),
name="issue_resolved"
))
)
result = await test_scenario.run()
# Test if the user reaches their goal
goal_oriented_sim = UserSimulator(
instructions="""
Your goal: Find and purchase a red t-shirt in size M under $30.
- Browse available products
- Filter by color, size, and price
- Add to cart if you find a matching item
- Complete checkout
""",
max_steps=15,
generator=Generator(model="openai/gpt-4")
)
test_scenario = (
scenario("product_search")
.interact(
inputs=goal_oriented_sim,
outputs=lambda inputs: ecommerce_agent(inputs)
)
)
result = await test_scenario.run()
# Check if goal was reached
output = result.trace.last.metadata.get("simulator_output")
if isinstance(output, UserSimulatorOutput):
print(f"Goal reached: {output.goal_reached}")
print(f"Message: {output.message}")

Output format for user simulator results.

Module: giskard.checks.generators.user

| Attribute | Type | Description |
|---|---|---|
| `goal_reached` | `bool` | Whether the simulator achieved its goal |
| `message` | `str \| None` | Optional message about the simulation outcome |
# After running a scenario with UserSimulator
result = await test_scenario.run()
# Get simulator output from metadata
last_interaction = result.trace.last
if "simulator_output" in last_interaction.metadata:
output = last_interaction.metadata["simulator_output"]
if output.goal_reached:
print("✓ User goal was achieved")
print(f"Details: {output.message}")
else:
print("✗ User goal was not achieved")
print(f"Reason: {output.message}")

Base class for creating custom input generators.

Module: giskard.checks.core.input_generator

InputGenerator is the abstract base class for all input generators. Extend it to create custom generators for dynamic test data generation.

from giskard.checks.core.input_generator import InputGenerator
from giskard.checks import scenario
class SequentialInputGenerator(InputGenerator):
    """Generate inputs one at a time from a fixed list.

    Yields each element of ``inputs_list`` in order; once the list is
    exhausted, ``generate`` returns ``None`` to signal that no further
    inputs are available.
    """

    def __init__(self, inputs_list: list[str]):
        # Items are served in list order; ``index`` tracks the next one.
        self.inputs_list = inputs_list
        self.index = 0

    async def generate(self, trace):
        """Return the next input in the sequence, or ``None`` when done.

        Args:
            trace: Conversation trace so far (unused by this generator).
        """
        if self.index >= len(self.inputs_list):
            return None  # No more inputs — ends the interaction loop.
        input_value = self.inputs_list[self.index]
        self.index += 1
        return input_value
# Feed a fixed three-message script to the chatbot.
scripted = SequentialInputGenerator([
    "Hello",
    "How are you?",
    "Goodbye",
])

test_scenario = (
    scenario("sequential_test")
    .interact(
        inputs=scripted,
        outputs=lambda inputs: chatbot(inputs),
    )
)
import random
from giskard.checks.core.input_generator import InputGenerator
class RandomQuestionGenerator(InputGenerator):
    """Generate inputs by sampling uniformly from a pool of questions."""

    def __init__(self, question_pool: list[str]):
        # Pool of candidate questions; assumed non-empty —
        # random.choice raises IndexError on an empty sequence.
        self.question_pool = question_pool

    async def generate(self, trace):
        """Return a random question from the pool.

        Args:
            trace: Conversation trace so far (unused by this generator).
        """
        return random.choice(self.question_pool)
# Exercise the assistant with questions sampled from a small bank.
question_bank = [
    "What's the weather like?",
    "Tell me a joke",
    "What time is it?",
    "How can you help me?",
]
random_inputs = RandomQuestionGenerator(question_bank)

test_scenario = (
    scenario("random_qa")
    .interact(inputs=random_inputs, outputs=lambda inputs: assistant(inputs))
    .check(
        from_fn(
            lambda trace: len(trace.last.outputs) > 0,
            name="has_response",
        )
    )
)
from giskard.checks.core.input_generator import InputGenerator
class ContextAwareGenerator(InputGenerator):
    """Generate inputs that react to the previous agent output.

    Args:
        strategy: Name of the reply strategy; only ``"follow_up"`` has
            dedicated handling here.
    """

    def __init__(self, strategy: str = "follow_up"):
        self.strategy = strategy

    async def generate(self, trace):
        """Return an input chosen from the last output in *trace*.

        Opens the conversation when the trace is empty; otherwise picks a
        canned follow-up based on keywords in the last agent response.
        """
        if not trace.last:
            return "Hello, I need help"
        last_output = trace.last.outputs
        if self.strategy == "follow_up":
            if "question" in last_output.lower():
                return "Yes, please tell me more"
            elif "help" in last_output.lower():
                return "I need assistance with my account"
        # NOTE(review): indentation was lost in the source; the fallback
        # reply is assumed to apply to every strategy — confirm intent.
        return "Thank you"
# Drive the agent with context-dependent follow-up messages.
follow_up_gen = ContextAwareGenerator(strategy="follow_up")
test_scenario = (
    scenario("context_test")
    .interact(inputs=follow_up_gen, outputs=lambda inputs: agent(inputs))
)

from giskard.checks import scenario, from_fn
from giskard.checks.generators.user import UserSimulator
from giskard.agents.generators import Generator
# Simulate a customer service conversation
customer_sim = UserSimulator(
instructions="""
You are a customer with a defective product.
- Explain the issue: the product stopped working after 2 days
- Provide details when asked
- Accept a solution if reasonable
""",
max_steps=8,
generator=Generator(model="openai/gpt-4")
)
test_flow = (
scenario("customer_service")
.interact(
inputs=customer_sim,
outputs=lambda inputs: customer_service_bot(inputs)
)
.check(from_fn(
lambda trace: "solution" in str(trace.last.metadata).lower() or
"replacement" in str(trace.last.outputs).lower(),
name="solution_offered"
))
)
result = await test_flow.run()
# Test different user personas
personas = [
{
"name": "impatient_user",
"instructions": "You are impatient and want quick answers. Be brief.",
},
{
"name": "detailed_user",
"instructions": "You like detailed explanations. Ask follow-up questions.",
},
{
"name": "confused_user",
"instructions": "You are confused and need things explained simply.",
},
]
results = {}
for persona in personas:
sim = UserSimulator(
instructions=persona["instructions"],
max_steps=5,
generator=Generator(model="openai/gpt-4")
)
test_scenario = (
scenario(persona["name"])
.interact(inputs=sim, outputs=lambda inputs: agent(inputs))
)
results[persona["name"]] = await test_scenario.run()
# Compare results
for name, result in results.items():
print(f"{name}: {result.status}")
# Test agent with rapid-fire questions
rapid_fire_sim = UserSimulator(
instructions="""
Ask 10 different questions rapidly:
- Weather questions
- Time questions
- Math problems
- General knowledge
Be concise and move quickly between topics.
""",
max_steps=10,
generator=Generator(model="openai/gpt-4")
)
stress_test = (
scenario("rapid_fire")
.interact(inputs=rapid_fire_sim, outputs=lambda inputs: agent(inputs))
.check(from_fn(
lambda trace: len(trace.interactions) >= 5,
name="handled_multiple_questions"
))
)
result = await stress_test.run()

  • Core API - Trace, Interaction, and InteractionSpec
  • Scenarios - Building multi-step test workflows
  • Built-in Checks - Validation checks for generated interactions