@@ -19,9 +19,9 @@
 import pytest


-def test_gen_user_scenarios(client):
-    """Tests that generate_user_scenarios() correctly calls the API and parses the response."""
-    eval_dataset = client.evals.generate_user_scenarios(
+def test_gen_conversation_scenarios(client):
+    """Tests that generate_conversation_scenarios() correctly calls the API and parses the response."""
+    eval_dataset = client.evals.generate_conversation_scenarios(
         agent_info=types.evals.AgentInfo(
             agents={
                 "booking-agent": types.evals.AgentConfig(
@@ -43,13 +43,13 @@ def test_gen_user_scenarios(client):
             },
             root_agent_id="booking-agent",
         ),
-        user_scenario_generation_config=types.evals.UserScenarioGenerationConfig(
-            user_scenario_count=2,
-            simulation_instruction=(
+        config=types.evals.UserScenarioGenerationConfig(
+            count=2,
+            generation_instruction=(
                 "Generate scenarios where the user tries to book a flight but"
                 " changes their mind about the destination."
             ),
-            environment_data="Today is Monday. Flights to Paris are available.",
+            environment_context="Today is Monday. Flights to Paris are available.",
             model_name="gemini-2.5-flash",
         ),
     )
@@ -63,9 +63,9 @@ def test_gen_user_scenarios(client):


 @pytest.mark.asyncio
-async def test_gen_user_scenarios_async(client):
-    """Tests that generate_user_scenarios() async correctly calls the API and parses the response."""
-    eval_dataset = await client.aio.evals.generate_user_scenarios(
+async def test_gen_conversation_scenarios_async(client):
+    """Tests that generate_conversation_scenarios() async correctly calls the API and parses the response."""
+    eval_dataset = await client.aio.evals.generate_conversation_scenarios(
         agent_info=types.evals.AgentInfo(
             agents={
                 "booking-agent": types.evals.AgentConfig(
@@ -87,13 +87,13 @@ async def test_gen_user_scenarios_async(client):
             },
             root_agent_id="booking-agent",
         ),
-        user_scenario_generation_config=types.evals.UserScenarioGenerationConfig(
-            user_scenario_count=2,
-            simulation_instruction=(
+        config=types.evals.UserScenarioGenerationConfig(
+            count=2,
+            generation_instruction=(
                 "Generate scenarios where the user tries to book a flight but"
                 " changes their mind about the destination."
             ),
-            environment_data="Today is Monday. Flights to Paris are available.",
+            environment_context="Today is Monday. Flights to Paris are available.",
             model_name="gemini-2.5-flash",
         ),
     )
@@ -106,5 +106,5 @@ async def test_gen_user_scenarios_async(client):
 pytestmark = pytest_helper.setup(
     file=__file__,
     globals_for_file=globals(),
-    test_method="evals.generate_user_scenarios",
+    test_method="evals.generate_conversation_scenarios",
 )
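
For quick reference, here is the renamed replay-test call in one piece, with the old public names from this diff noted in comments. This is a sketch: the client fixture and the types import come from the surrounding replay test file, and the AgentConfig fields are elided in the diff above.

eval_dataset = client.evals.generate_conversation_scenarios(  # was generate_user_scenarios
    agent_info=types.evals.AgentInfo(
        agents={
            "booking-agent": types.evals.AgentConfig(),  # fields elided in the diff above
        },
        root_agent_id="booking-agent",
    ),
    config=types.evals.UserScenarioGenerationConfig(  # keyword was user_scenario_generation_config
        count=2,  # was user_scenario_count
        generation_instruction="...",  # was simulation_instruction
        environment_context="Today is Monday. Flights to Paris are available.",  # was environment_data
        model_name="gemini-2.5-flash",
    ),
)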
tests/unit/vertexai/genai/test_evals.py (18 additions & 12 deletions)

@@ -6187,8 +6187,8 @@ def read_file_contents_side_effect(src: str) -> str:
 )


-class TestEvalsGenerateUserScenarios(unittest.TestCase):
-    """Unit tests for the Evals generate_user_scenarios method."""
+class TestEvalsGenerateConversationScenarios(unittest.TestCase):
+    """Unit tests for the Evals generate_conversation_scenarios method."""

     def setUp(self):
         self.addCleanup(mock.patch.stopall)
@@ -6208,13 +6208,16 @@ def setUp(self):
         )
         self.mock_api_client.request.return_value = self.mock_response

-    def test_generate_user_scenarios(self):
-        """Tests that generate_user_scenarios correctly calls the API and parses the response."""
+    def test_generate_conversation_scenarios(self):
+        """Tests that generate_conversation_scenarios correctly calls the API and parses the response."""
         evals_module = evals.Evals(api_client_=self.mock_api_client)

-        eval_dataset = evals_module.generate_user_scenarios(
-            agent_info={"agents": {"agent_1": {}}, "root_agent_id": "agent_1"},
-            user_scenario_generation_config={"user_scenario_count": 2},
+        eval_dataset = evals_module.generate_conversation_scenarios(
+            agent_info=vertexai_genai_types.evals.AgentInfo(
+                agents={"agent_1": {}},
+                root_agent_id="agent_1",
+            ),
+            config={"count": 2},
         )
         assert isinstance(eval_dataset, vertexai_genai_types.EvaluationDataset)
         assert len(eval_dataset.eval_cases) == 2
@@ -6233,17 +6236,20 @@ def test_generate_user_scenarios(self):
         self.mock_api_client.request.assert_called_once()

     @pytest.mark.asyncio
-    async def test_async_generate_user_scenarios(self):
-        """Tests that async generate_user_scenarios correctly calls the API and parses the response."""
+    async def test_async_generate_conversation_scenarios(self):
+        """Tests that async generate_conversation_scenarios correctly calls the API and parses the response."""

         self.mock_api_client.async_request = mock.AsyncMock(
             return_value=self.mock_response
         )
         async_evals_module = evals.AsyncEvals(api_client_=self.mock_api_client)

-        eval_dataset = await async_evals_module.generate_user_scenarios(
-            agent_info={"agents": {"agent_1": {}}, "root_agent_id": "agent_1"},
-            user_scenario_generation_config={"user_scenario_count": 2},
+        eval_dataset = await async_evals_module.generate_conversation_scenarios(
+            agent_info=vertexai_genai_types.evals.AgentInfo(
+                agents={"agent_1": {}},
+                root_agent_id="agent_1",
+            ),
+            config={"count": 2},
        )
         assert isinstance(eval_dataset, vertexai_genai_types.EvaluationDataset)
         assert len(eval_dataset.eval_cases) == 2
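
Taken together, the updated unit tests show that agent_info is now passed as a typed AgentInfo while config still accepts a plain dict, and that the sync and async modules share the same signature. A minimal sketch under those assumptions (mock client setup elided; vertexai_genai_types is the alias used by this test file):

agent_info = vertexai_genai_types.evals.AgentInfo(
    agents={"agent_1": {}},
    root_agent_id="agent_1",
)

# Sync entry point.
evals_module = evals.Evals(api_client_=mock_api_client)
eval_dataset = evals_module.generate_conversation_scenarios(
    agent_info=agent_info,
    config={"count": 2},  # a plain dict is still accepted for config
)

# The async entry point takes the same arguments:
#   await evals.AsyncEvals(api_client_=mock_api_client).generate_conversation_scenarios(...)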
vertexai/_genai/_transformers.py (21 additions & 0 deletions)

@@ -125,6 +125,27 @@ def t_metric_sources(metrics: list[Any]) -> list[dict[str, Any]]:
     return sources_payload


+def t_user_scenario_generation_config(
+    config: "types.evals.UserScenarioGenerationConfigOrDict",
+) -> dict[str, Any]:
+    """Transforms UserScenarioGenerationConfig to Vertex AI format."""
+    payload: dict[str, Any] = {}
+    config_dict = config if isinstance(config, dict) else config.model_dump()
+
+    if getv(config_dict, ["count"]) is not None:
+        payload["user_scenario_count"] = getv(config_dict, ["count"])
+    if getv(config_dict, ["generation_instruction"]) is not None:
+        payload["simulation_instruction"] = getv(
+            config_dict, ["generation_instruction"]
+        )
+    if getv(config_dict, ["environment_context"]) is not None:
+        payload["environment_data"] = getv(config_dict, ["environment_context"])
+    if getv(config_dict, ["model_name"]) is not None:
+        payload["model_name"] = getv(config_dict, ["model_name"])
+
+    return payload
+
+
 def t_metric_for_registry(
     metric: "types.Metric",
 ) -> dict[str, Any]:
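
The new transformer maps the renamed public fields back to the names the Vertex AI backend expects. A small illustration of that mapping, derived directly from the function body above (importing the private _transformers module is for illustration only, and the import path is an assumption based on the file path shown):

from vertexai._genai import _transformers  # private module; illustration only

payload = _transformers.t_user_scenario_generation_config({
    "count": 2,
    "generation_instruction": "Vary the destination mid-conversation.",
    "environment_context": "Today is Monday. Flights to Paris are available.",
    "model_name": "gemini-2.5-flash",
})
# Public field names are rewritten to the backend's names; model_name passes through.
assert payload == {
    "user_scenario_count": 2,
    "simulation_instruction": "Vary the destination mid-conversation.",
    "environment_data": "Today is Monday. Flights to Paris are available.",
    "model_name": "gemini-2.5-flash",
}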