From 1b4c325b1f54256b09b8e0dfb4117ae26b3a2838 Mon Sep 17 00:00:00 2001
From: A Vertex SDK engineer
Date: Thu, 19 Mar 2026 18:08:59 -0700
Subject: [PATCH] feat: GenAI Client(evals): Rename generate_user_scenarios to
 generate_conversation_scenarios

PiperOrigin-RevId: 886476114
---
 ...> test_generate_conversation_scenarios.py} |  30 ++--
 tests/unit/vertexai/genai/test_evals.py       |  30 ++--
 vertexai/_genai/_transformers.py              |  21 +++
 vertexai/_genai/evals.py                      | 168 +++++++++---------
 vertexai/_genai/types/__init__.py             |  14 +-
 vertexai/_genai/types/common.py               | 156 ++++++++--------
 vertexai/_genai/types/evals.py                |  32 ++--
 7 files changed, 240 insertions(+), 211 deletions(-)
 rename tests/unit/vertexai/genai/replays/{test_generate_user_scenarios.py => test_generate_conversation_scenarios.py} (79%)
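Reviewer note: a minimal sketch of the renamed surface end to end, assembled from the tests in this patch. The Client construction (project/location) is illustrative rather than prescribed, and the empty AgentConfig stands in for the agent fields elided by the hunks below; the config values mirror the replay test.

    import vertexai
    from vertexai._genai import types

    # Hypothetical client setup; any configured Vertex AI GenAI client works.
    client = vertexai.Client(project="my-project", location="us-central1")

    eval_dataset = client.evals.generate_conversation_scenarios(
        agent_info=types.evals.AgentInfo(
            # Agent fields are elided in the hunks below; fill in per your agent.
            agents={"booking-agent": types.evals.AgentConfig()},
            root_agent_id="booking-agent",
        ),
        config=types.evals.UserScenarioGenerationConfig(
            count=2,
            generation_instruction=(
                "Generate scenarios where the user tries to book a flight but"
                " changes their mind about the destination."
            ),
            environment_context="Today is Monday. Flights to Paris are available.",
            model_name="gemini-2.5-flash",
        ),
    )
    # The result is an EvaluationDataset; each generated scenario becomes an eval case.
    print(len(eval_dataset.eval_cases))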
Flights to Paris are available.", + environment_context="Today is Monday. Flights to Paris are available.", model_name="gemini-2.5-flash", ), ) @@ -106,5 +106,5 @@ async def test_gen_user_scenarios_async(client): pytestmark = pytest_helper.setup( file=__file__, globals_for_file=globals(), - test_method="evals.generate_user_scenarios", + test_method="evals.generate_conversation_scenarios", ) diff --git a/tests/unit/vertexai/genai/test_evals.py b/tests/unit/vertexai/genai/test_evals.py index c9fd9ed635..9ad33e62ac 100644 --- a/tests/unit/vertexai/genai/test_evals.py +++ b/tests/unit/vertexai/genai/test_evals.py @@ -6187,8 +6187,8 @@ def read_file_contents_side_effect(src: str) -> str: ) -class TestEvalsGenerateUserScenarios(unittest.TestCase): - """Unit tests for the Evals generate_user_scenarios method.""" +class TestEvalsGenerateConversationScenarios(unittest.TestCase): + """Unit tests for the Evals generate_conversation_scenarios method.""" def setUp(self): self.addCleanup(mock.patch.stopall) @@ -6208,13 +6208,16 @@ def setUp(self): ) self.mock_api_client.request.return_value = self.mock_response - def test_generate_user_scenarios(self): - """Tests that generate_user_scenarios correctly calls the API and parses the response.""" + def test_generate_conversation_scenarios(self): + """Tests that generate_conversation_scenarios correctly calls the API and parses the response.""" evals_module = evals.Evals(api_client_=self.mock_api_client) - eval_dataset = evals_module.generate_user_scenarios( - agent_info={"agents": {"agent_1": {}}, "root_agent_id": "agent_1"}, - user_scenario_generation_config={"user_scenario_count": 2}, + eval_dataset = evals_module.generate_conversation_scenarios( + agent_info=vertexai_genai_types.evals.AgentInfo( + agents={"agent_1": {}}, + root_agent_id="agent_1", + ), + config={"count": 2}, ) assert isinstance(eval_dataset, vertexai_genai_types.EvaluationDataset) assert len(eval_dataset.eval_cases) == 2 @@ -6233,17 +6236,20 @@ def test_generate_user_scenarios(self): self.mock_api_client.request.assert_called_once() @pytest.mark.asyncio - async def test_async_generate_user_scenarios(self): - """Tests that async generate_user_scenarios correctly calls the API and parses the response.""" + async def test_async_generate_conversation_scenarios(self): + """Tests that async generate_conversation_scenarios correctly calls the API and parses the response.""" self.mock_api_client.async_request = mock.AsyncMock( return_value=self.mock_response ) async_evals_module = evals.AsyncEvals(api_client_=self.mock_api_client) - eval_dataset = await async_evals_module.generate_user_scenarios( - agent_info={"agents": {"agent_1": {}}, "root_agent_id": "agent_1"}, - user_scenario_generation_config={"user_scenario_count": 2}, + eval_dataset = await async_evals_module.generate_conversation_scenarios( + agent_info=vertexai_genai_types.evals.AgentInfo( + agents={"agent_1": {}}, + root_agent_id="agent_1", + ), + config={"count": 2}, ) assert isinstance(eval_dataset, vertexai_genai_types.EvaluationDataset) assert len(eval_dataset.eval_cases) == 2 diff --git a/vertexai/_genai/_transformers.py b/vertexai/_genai/_transformers.py index f3e56e0be8..345a9d66bd 100644 --- a/vertexai/_genai/_transformers.py +++ b/vertexai/_genai/_transformers.py @@ -125,6 +125,27 @@ def t_metric_sources(metrics: list[Any]) -> list[dict[str, Any]]: return sources_payload +def t_user_scenario_generation_config( + config: "types.evals.UserScenarioGenerationConfigOrDict", +) -> dict[str, Any]: + """Transforms 
UserScenarioGenerationConfig to Vertex AI format.""" + payload: dict[str, Any] = {} + config_dict = config if isinstance(config, dict) else config.model_dump() + + if getv(config_dict, ["count"]) is not None: + payload["user_scenario_count"] = getv(config_dict, ["count"]) + if getv(config_dict, ["generation_instruction"]) is not None: + payload["simulation_instruction"] = getv( + config_dict, ["generation_instruction"] + ) + if getv(config_dict, ["environment_context"]) is not None: + payload["environment_data"] = getv(config_dict, ["environment_context"]) + if getv(config_dict, ["model_name"]) is not None: + payload["model_name"] = getv(config_dict, ["model_name"]) + + return payload + + def t_metric_for_registry( metric: "types.Metric", ) -> dict[str, Any]: diff --git a/vertexai/_genai/evals.py b/vertexai/_genai/evals.py index 6c80482227..1d2deb22a4 100644 --- a/vertexai/_genai/evals.py +++ b/vertexai/_genai/evals.py @@ -613,7 +613,9 @@ def _GenerateUserScenariosParameters_to_vertex( setv( to_object, ["userScenarioGenerationConfig"], - getv(from_object, ["user_scenario_generation_config"]), + t.t_user_scenario_generation_config( + getv(from_object, ["user_scenario_generation_config"]) + ), ) if getv(from_object, ["config"]) is not None: @@ -1205,39 +1207,39 @@ def _evaluate_instances( self._api_client._verify_response(return_value) return return_value - def _generate_rubrics( + def _generate_user_scenarios( self, *, - contents: list[genai_types.ContentOrDict], - predefined_rubric_generation_spec: Optional[ - genai_types.PredefinedMetricSpecOrDict + location: Optional[str] = None, + agents: Optional[dict[str, evals_types.AgentConfigOrDict]] = None, + root_agent_id: Optional[str] = None, + user_scenario_generation_config: Optional[ + evals_types.UserScenarioGenerationConfigOrDict ] = None, - rubric_generation_spec: Optional[genai_types.RubricGenerationSpecOrDict] = None, - config: Optional[types.RubricGenerationConfigOrDict] = None, - metric_resource_name: Optional[str] = None, - ) -> types.GenerateInstanceRubricsResponse: + config: Optional[types.GenerateUserScenariosConfigOrDict] = None, + ) -> types.GenerateUserScenariosResponse: """ - Generates rubrics for a given prompt. + Generates user scenarios for agent evaluation. 
""" - parameter_model = types._GenerateInstanceRubricsRequest( - contents=contents, - predefined_rubric_generation_spec=predefined_rubric_generation_spec, - rubric_generation_spec=rubric_generation_spec, + parameter_model = types._GenerateUserScenariosParameters( + location=location, + agents=agents, + root_agent_id=root_agent_id, + user_scenario_generation_config=user_scenario_generation_config, config=config, - metric_resource_name=metric_resource_name, ) request_url_dict: Optional[dict[str, str]] if not self._api_client.vertexai: raise ValueError("This method is only supported in the Vertex AI client.") else: - request_dict = _GenerateInstanceRubricsRequest_to_vertex(parameter_model) + request_dict = _GenerateUserScenariosParameters_to_vertex(parameter_model) request_url_dict = request_dict.get("_url") if request_url_dict: - path = ":generateInstanceRubrics".format_map(request_url_dict) + path = ":generateUserScenarios".format_map(request_url_dict) else: - path = ":generateInstanceRubrics" + path = ":generateUserScenarios" query_params = request_dict.get("_query") if query_params: @@ -1259,46 +1261,46 @@ def _generate_rubrics( response_dict = {} if not response.body else json.loads(response.body) - return_value = types.GenerateInstanceRubricsResponse._from_response( + return_value = types.GenerateUserScenariosResponse._from_response( response=response_dict, kwargs=parameter_model.model_dump() ) self._api_client._verify_response(return_value) return return_value - def _generate_user_scenarios( + def _generate_rubrics( self, *, - location: Optional[str] = None, - agents: Optional[dict[str, evals_types.AgentConfigOrDict]] = None, - root_agent_id: Optional[str] = None, - user_scenario_generation_config: Optional[ - evals_types.UserScenarioGenerationConfigOrDict + contents: list[genai_types.ContentOrDict], + predefined_rubric_generation_spec: Optional[ + genai_types.PredefinedMetricSpecOrDict ] = None, - config: Optional[types.GenerateUserScenariosConfigOrDict] = None, - ) -> types.GenerateUserScenariosResponse: + rubric_generation_spec: Optional[genai_types.RubricGenerationSpecOrDict] = None, + config: Optional[types.RubricGenerationConfigOrDict] = None, + metric_resource_name: Optional[str] = None, + ) -> types.GenerateInstanceRubricsResponse: """ - Generates user scenarios for agent evaluation. + Generates rubrics for a given prompt. 
""" - parameter_model = types._GenerateUserScenariosParameters( - location=location, - agents=agents, - root_agent_id=root_agent_id, - user_scenario_generation_config=user_scenario_generation_config, + parameter_model = types._GenerateInstanceRubricsRequest( + contents=contents, + predefined_rubric_generation_spec=predefined_rubric_generation_spec, + rubric_generation_spec=rubric_generation_spec, config=config, + metric_resource_name=metric_resource_name, ) request_url_dict: Optional[dict[str, str]] if not self._api_client.vertexai: raise ValueError("This method is only supported in the Vertex AI client.") else: - request_dict = _GenerateUserScenariosParameters_to_vertex(parameter_model) + request_dict = _GenerateInstanceRubricsRequest_to_vertex(parameter_model) request_url_dict = request_dict.get("_url") if request_url_dict: - path = ":generateUserScenarios".format_map(request_url_dict) + path = ":generateInstanceRubrics".format_map(request_url_dict) else: - path = ":generateUserScenarios" + path = ":generateInstanceRubrics" query_params = request_dict.get("_query") if query_params: @@ -1320,7 +1322,7 @@ def _generate_user_scenarios( response_dict = {} if not response.body else json.loads(response.body) - return_value = types.GenerateUserScenariosResponse._from_response( + return_value = types.GenerateInstanceRubricsResponse._from_response( response=response_dict, kwargs=parameter_model.model_dump() ) @@ -2328,14 +2330,14 @@ def create_evaluation_set( ) @_common.experimental_warning( - "The Vertex SDK GenAI evals.generate_user_scenarios module is experimental, " + "The Vertex SDK GenAI evals.generate_conversation_scenarios module is experimental, " "and may change in future versions." ) - def generate_user_scenarios( + def generate_conversation_scenarios( self, *, agent_info: evals_types.AgentInfoOrDict, - user_scenario_generation_config: evals_types.UserScenarioGenerationConfigOrDict, + config: evals_types.UserScenarioGenerationConfigOrDict, ) -> types.EvaluationDataset: """Generates an evaluation dataset with user scenarios, which helps to generate conversations between a simulated user @@ -2343,7 +2345,7 @@ def generate_user_scenarios( Args: agent_info: The agent info to generate user scenarios for. - user_scenario_generation_config: Configuration for generating user scenarios. + config: Configuration for generating user scenarios. Returns: An EvaluationDataset containing the generated user scenarios. 
@@ -2356,7 +2358,7 @@ def generate_user_scenarios( response = self._generate_user_scenarios( agents=parsed_agent_info.agents, root_agent_id=parsed_agent_info.root_agent_id, - user_scenario_generation_config=user_scenario_generation_config, + user_scenario_generation_config=config, ) return _evals_utils._postprocess_user_scenarios_response(response) @@ -2765,39 +2767,39 @@ async def _evaluate_instances( self._api_client._verify_response(return_value) return return_value - async def _generate_rubrics( + async def _generate_user_scenarios( self, *, - contents: list[genai_types.ContentOrDict], - predefined_rubric_generation_spec: Optional[ - genai_types.PredefinedMetricSpecOrDict + location: Optional[str] = None, + agents: Optional[dict[str, evals_types.AgentConfigOrDict]] = None, + root_agent_id: Optional[str] = None, + user_scenario_generation_config: Optional[ + evals_types.UserScenarioGenerationConfigOrDict ] = None, - rubric_generation_spec: Optional[genai_types.RubricGenerationSpecOrDict] = None, - config: Optional[types.RubricGenerationConfigOrDict] = None, - metric_resource_name: Optional[str] = None, - ) -> types.GenerateInstanceRubricsResponse: + config: Optional[types.GenerateUserScenariosConfigOrDict] = None, + ) -> types.GenerateUserScenariosResponse: """ - Generates rubrics for a given prompt. + Generates user scenarios for agent evaluation. """ - parameter_model = types._GenerateInstanceRubricsRequest( - contents=contents, - predefined_rubric_generation_spec=predefined_rubric_generation_spec, - rubric_generation_spec=rubric_generation_spec, + parameter_model = types._GenerateUserScenariosParameters( + location=location, + agents=agents, + root_agent_id=root_agent_id, + user_scenario_generation_config=user_scenario_generation_config, config=config, - metric_resource_name=metric_resource_name, ) request_url_dict: Optional[dict[str, str]] if not self._api_client.vertexai: raise ValueError("This method is only supported in the Vertex AI client.") else: - request_dict = _GenerateInstanceRubricsRequest_to_vertex(parameter_model) + request_dict = _GenerateUserScenariosParameters_to_vertex(parameter_model) request_url_dict = request_dict.get("_url") if request_url_dict: - path = ":generateInstanceRubrics".format_map(request_url_dict) + path = ":generateUserScenarios".format_map(request_url_dict) else: - path = ":generateInstanceRubrics" + path = ":generateUserScenarios" query_params = request_dict.get("_query") if query_params: @@ -2821,46 +2823,46 @@ async def _generate_rubrics( response_dict = {} if not response.body else json.loads(response.body) - return_value = types.GenerateInstanceRubricsResponse._from_response( + return_value = types.GenerateUserScenariosResponse._from_response( response=response_dict, kwargs=parameter_model.model_dump() ) self._api_client._verify_response(return_value) return return_value - async def _generate_user_scenarios( + async def _generate_rubrics( self, *, - location: Optional[str] = None, - agents: Optional[dict[str, evals_types.AgentConfigOrDict]] = None, - root_agent_id: Optional[str] = None, - user_scenario_generation_config: Optional[ - evals_types.UserScenarioGenerationConfigOrDict + contents: list[genai_types.ContentOrDict], + predefined_rubric_generation_spec: Optional[ + genai_types.PredefinedMetricSpecOrDict ] = None, - config: Optional[types.GenerateUserScenariosConfigOrDict] = None, - ) -> types.GenerateUserScenariosResponse: + rubric_generation_spec: Optional[genai_types.RubricGenerationSpecOrDict] = None, + config: 
+        metric_resource_name: Optional[str] = None,
+    ) -> types.GenerateInstanceRubricsResponse:
         """
-        Generates user scenarios for agent evaluation.
+        Generates rubrics for a given prompt.
         """
-        parameter_model = types._GenerateUserScenariosParameters(
-            location=location,
-            agents=agents,
-            root_agent_id=root_agent_id,
-            user_scenario_generation_config=user_scenario_generation_config,
+        parameter_model = types._GenerateInstanceRubricsRequest(
+            contents=contents,
+            predefined_rubric_generation_spec=predefined_rubric_generation_spec,
+            rubric_generation_spec=rubric_generation_spec,
             config=config,
+            metric_resource_name=metric_resource_name,
         )
 
         request_url_dict: Optional[dict[str, str]]
         if not self._api_client.vertexai:
             raise ValueError("This method is only supported in the Vertex AI client.")
         else:
-            request_dict = _GenerateUserScenariosParameters_to_vertex(parameter_model)
+            request_dict = _GenerateInstanceRubricsRequest_to_vertex(parameter_model)
             request_url_dict = request_dict.get("_url")
             if request_url_dict:
-                path = ":generateUserScenarios".format_map(request_url_dict)
+                path = ":generateInstanceRubrics".format_map(request_url_dict)
             else:
-                path = ":generateUserScenarios"
+                path = ":generateInstanceRubrics"
 
         query_params = request_dict.get("_query")
         if query_params:
@@ -2884,7 +2886,7 @@
 
         response_dict = {} if not response.body else json.loads(response.body)
 
-        return_value = types.GenerateUserScenariosResponse._from_response(
+        return_value = types.GenerateInstanceRubricsResponse._from_response(
             response=response_dict, kwargs=parameter_model.model_dump()
         )
 
@@ -3541,14 +3543,14 @@ async def create_evaluation_set(
         return result
 
     @_common.experimental_warning(
-        "The Vertex SDK GenAI evals.generate_user_scenarios module is experimental, "
+        "The Vertex SDK GenAI evals.generate_conversation_scenarios module is experimental, "
         "and may change in future versions."
     )
-    async def generate_user_scenarios(
+    async def generate_conversation_scenarios(
         self,
         *,
         agent_info: evals_types.AgentInfoOrDict,
-        user_scenario_generation_config: evals_types.UserScenarioGenerationConfigOrDict,
+        config: evals_types.UserScenarioGenerationConfigOrDict,
     ) -> types.EvaluationDataset:
         """Generates an evaluation dataset with user
         scenarios, which helps to generate conversations between a simulated user
@@ -3556,7 +3558,7 @@
 
         Args:
            agent_info: The agent info to generate user scenarios for.
-            user_scenario_generation_config: Configuration for generating user scenarios.
+            config: Configuration for generating user scenarios.
 
         Returns:
             An EvaluationDataset containing the generated user scenarios.
@@ -3569,7 +3571,7 @@
         response = await self._generate_user_scenarios(
             agents=parsed_agent_info.agents,
             root_agent_id=parsed_agent_info.root_agent_id,
-            user_scenario_generation_config=user_scenario_generation_config,
+            user_scenario_generation_config=config,
         )
 
         return _evals_utils._postprocess_user_scenarios_response(response)
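Reviewer note: the async surface mirrors the sync one. A sketch of the awaitable call, reusing the assumed `client` and `types` import from the first note:

    import asyncio

    async def main() -> None:
        eval_dataset = await client.aio.evals.generate_conversation_scenarios(
            agent_info=types.evals.AgentInfo(
                agents={"booking-agent": types.evals.AgentConfig()},
                root_agent_id="booking-agent",
            ),
            config=types.evals.UserScenarioGenerationConfig(count=2),
        )
        print(len(eval_dataset.eval_cases))

    asyncio.run(main())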
diff --git a/vertexai/_genai/types/__init__.py b/vertexai/_genai/types/__init__.py
index c383bb5d49..19be171061 100644
--- a/vertexai/_genai/types/__init__.py
+++ b/vertexai/_genai/types/__init__.py
@@ -1459,18 +1459,18 @@
     "EvaluateInstancesResponse",
     "EvaluateInstancesResponseDict",
     "EvaluateInstancesResponseOrDict",
-    "RubricGenerationConfig",
-    "RubricGenerationConfigDict",
-    "RubricGenerationConfigOrDict",
-    "GenerateInstanceRubricsResponse",
-    "GenerateInstanceRubricsResponseDict",
-    "GenerateInstanceRubricsResponseOrDict",
     "GenerateUserScenariosConfig",
     "GenerateUserScenariosConfigDict",
     "GenerateUserScenariosConfigOrDict",
     "GenerateUserScenariosResponse",
     "GenerateUserScenariosResponseDict",
     "GenerateUserScenariosResponseOrDict",
+    "RubricGenerationConfig",
+    "RubricGenerationConfigDict",
+    "RubricGenerationConfigOrDict",
+    "GenerateInstanceRubricsResponse",
+    "GenerateInstanceRubricsResponseDict",
+    "GenerateInstanceRubricsResponseOrDict",
     "GetEvaluationMetricConfig",
     "GetEvaluationMetricConfigDict",
     "GetEvaluationMetricConfigOrDict",
@@ -2174,8 +2174,8 @@
     "_CreateEvaluationRunParameters",
     "_CreateEvaluationSetParameters",
     "_EvaluateInstancesRequestParameters",
-    "_GenerateInstanceRubricsRequest",
     "_GenerateUserScenariosParameters",
+    "_GenerateInstanceRubricsRequest",
     "_GetEvaluationMetricParameters",
     "_GetEvaluationRunParameters",
     "_GetEvaluationSetParameters",
diff --git a/vertexai/_genai/types/common.py b/vertexai/_genai/types/common.py
index 39ab9573b4..277e5e67f1 100644
--- a/vertexai/_genai/types/common.py
+++ b/vertexai/_genai/types/common.py
@@ -4571,6 +4571,84 @@
 ]
 
 
+class GenerateUserScenariosConfig(_common.BaseModel):
+
+    http_options: Optional[genai_types.HttpOptions] = Field(
+        default=None, description="""Used to override HTTP request options."""
+    )
+
+
+class GenerateUserScenariosConfigDict(TypedDict, total=False):
+
+    http_options: Optional[genai_types.HttpOptionsDict]
+    """Used to override HTTP request options."""
+
+
+GenerateUserScenariosConfigOrDict = Union[
+    GenerateUserScenariosConfig, GenerateUserScenariosConfigDict
+]
+
+
+class _GenerateUserScenariosParameters(_common.BaseModel):
+    """Parameters for GenerateUserScenarios."""
+
+    location: Optional[str] = Field(default=None, description="""""")
+    agents: Optional[dict[str, evals_types.AgentConfig]] = Field(
+        default=None, description=""""""
+    )
+    root_agent_id: Optional[str] = Field(default=None, description="""""")
+    user_scenario_generation_config: Optional[
+        evals_types.UserScenarioGenerationConfig
+    ] = Field(default=None, description="""""")
+    config: Optional[GenerateUserScenariosConfig] = Field(
+        default=None, description=""""""
+    )
+
+
+class _GenerateUserScenariosParametersDict(TypedDict, total=False):
+    """Parameters for GenerateUserScenarios."""
+
+    location: Optional[str]
+    """"""
+
+    agents: Optional[dict[str, evals_types.AgentConfig]]
+    """"""
+
+    root_agent_id: Optional[str]
+    """"""
+
+    user_scenario_generation_config: Optional[evals_types.UserScenarioGenerationConfig]
+    """"""
+
+    config: Optional[GenerateUserScenariosConfigDict]
+    """"""
+
+
+_GenerateUserScenariosParametersOrDict = Union[
+    _GenerateUserScenariosParameters, _GenerateUserScenariosParametersDict
+]
+
+
+class GenerateUserScenariosResponse(_common.BaseModel):
+    """Response message for DataFoundryService.GenerateUserScenarios."""
+
+    user_scenarios: Optional[list[evals_types.UserScenario]] = Field(
+        default=None, description=""""""
+    )
+
+
+class GenerateUserScenariosResponseDict(TypedDict, total=False):
+    """Response message for DataFoundryService.GenerateUserScenarios."""
+
+    user_scenarios: Optional[list[evals_types.UserScenario]]
+    """"""
+
+
+GenerateUserScenariosResponseOrDict = Union[
+    GenerateUserScenariosResponse, GenerateUserScenariosResponseDict
+]
+
+
 class RubricGenerationConfig(_common.BaseModel):
     """Config for generating rubrics."""
 
@@ -4667,84 +4745,6 @@ class GenerateInstanceRubricsResponseDict(TypedDict, total=False):
 ]
 
 
-class GenerateUserScenariosConfig(_common.BaseModel):
-
-    http_options: Optional[genai_types.HttpOptions] = Field(
-        default=None, description="""Used to override HTTP request options."""
-    )
-
-
-class GenerateUserScenariosConfigDict(TypedDict, total=False):
-
-    http_options: Optional[genai_types.HttpOptionsDict]
-    """Used to override HTTP request options."""
-
-
-GenerateUserScenariosConfigOrDict = Union[
-    GenerateUserScenariosConfig, GenerateUserScenariosConfigDict
-]
-
-
-class _GenerateUserScenariosParameters(_common.BaseModel):
-    """Parameters for GenerateUserScenarios."""
-
-    location: Optional[str] = Field(default=None, description="""""")
-    agents: Optional[dict[str, evals_types.AgentConfig]] = Field(
-        default=None, description=""""""
-    )
-    root_agent_id: Optional[str] = Field(default=None, description="""""")
-    user_scenario_generation_config: Optional[
-        evals_types.UserScenarioGenerationConfig
-    ] = Field(default=None, description="""""")
-    config: Optional[GenerateUserScenariosConfig] = Field(
-        default=None, description=""""""
-    )
-
-
-class _GenerateUserScenariosParametersDict(TypedDict, total=False):
-    """Parameters for GenerateUserScenarios."""
-
-    location: Optional[str]
-    """"""
-
-    agents: Optional[dict[str, evals_types.AgentConfig]]
-    """"""
-
-    root_agent_id: Optional[str]
-    """"""
-
-    user_scenario_generation_config: Optional[evals_types.UserScenarioGenerationConfig]
-    """"""
-
-    config: Optional[GenerateUserScenariosConfigDict]
-    """"""
-
-
-_GenerateUserScenariosParametersOrDict = Union[
-    _GenerateUserScenariosParameters, _GenerateUserScenariosParametersDict
-]
-
-
-class GenerateUserScenariosResponse(_common.BaseModel):
-    """Response message for DataFoundryService.GenerateUserScenarios."""
-
-    user_scenarios: Optional[list[evals_types.UserScenario]] = Field(
-        default=None, description=""""""
-    )
-
-
-class GenerateUserScenariosResponseDict(TypedDict, total=False):
-    """Response message for DataFoundryService.GenerateUserScenarios."""
-
-    user_scenarios: Optional[list[evals_types.UserScenario]]
-    """"""
-
-
-GenerateUserScenariosResponseOrDict = Union[
-    GenerateUserScenariosResponse, GenerateUserScenariosResponseDict
-]
-
-
 class GetEvaluationMetricConfig(_common.BaseModel):
     """Config for getting an evaluation metric."""
 
diff --git a/vertexai/_genai/types/evals.py b/vertexai/_genai/types/evals.py
index ec6a9cd092..3419584a67 100644
--- a/vertexai/_genai/types/evals.py
+++ b/vertexai/_genai/types/evals.py
@@ -526,38 +526,38 @@ class UserScenarioDict(TypedDict, total=False):
 class UserScenarioGenerationConfig(_common.BaseModel):
     """User scenario generation configuration."""
 
-    user_scenario_count: Optional[int] = Field(
+    model_name: Optional[str] = Field(
         default=None,
-        description="""The number of user scenarios to generate. The maximum number of scenarios that can be generated is 100.""",
+        description="""The model name to use for user scenario generation.""",
     )
-    simulation_instruction: Optional[str] = Field(
+    count: Optional[int] = Field(
         default=None,
-        description="""Simulation instruction to guide the user scenario generation.""",
+        description="""The number of user scenarios to generate. The maximum number of scenarios that can be generated is 100.""",
     )
-    environment_data: Optional[str] = Field(
+    generation_instruction: Optional[str] = Field(
         default=None,
-        description="""Environment data to drive simulation. For example, for a QA agent, this could be the docs queried by the tools.""",
+        description="""Instruction to guide the conversation scenario generation.""",
     )
-    model_name: Optional[str] = Field(
+    environment_context: Optional[str] = Field(
         default=None,
-        description="""The model name to use for user scenario generation.""",
+        description="""Environment context to drive simulation. For example, for a QA agent, this could be the docs queried by the tools.""",
     )
 
 
 class UserScenarioGenerationConfigDict(TypedDict, total=False):
     """User scenario generation configuration."""
 
-    user_scenario_count: Optional[int]
-    """The number of user scenarios to generate. The maximum number of scenarios that can be generated is 100."""
+    model_name: Optional[str]
+    """The model name to use for user scenario generation."""
 
-    simulation_instruction: Optional[str]
-    """Simulation instruction to guide the user scenario generation."""
+    count: Optional[int]
+    """The number of user scenarios to generate. The maximum number of scenarios that can be generated is 100."""
 
-    environment_data: Optional[str]
-    """Environment data to drive simulation. For example, for a QA agent, this could be the docs queried by the tools."""
+    generation_instruction: Optional[str]
+    """Instruction to guide the conversation scenario generation."""
 
-    model_name: Optional[str]
-    """The model name to use for user scenario generation."""
+    environment_context: Optional[str]
+    """Environment context to drive simulation. For example, for a QA agent, this could be the docs queried by the tools."""
 
 
 UserScenarioGenerationConfigOrDict = Union[