diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index ce7a08b7f..27760e9e2 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -271,9 +271,15 @@ async def _fetch_response(
         should_omit_model = prompt is not None and not self._model_is_explicit
         model_param: str | ChatModel | Omit = self.model if not should_omit_model else omit
         should_omit_tools = prompt is not None and len(converted_tools_payload) == 0
+        # In prompt-managed tool flows without a local tools payload, omit only named tool
+        # choices that must match an explicit tool list. Keep control literals like "none"/"required".
+        should_omit_tool_choice = should_omit_tools and isinstance(tool_choice, dict)
         tools_param: list[ToolParam] | Omit = (
             converted_tools_payload if not should_omit_tools else omit
         )
+        tool_choice_param: response_create_params.ToolChoice | Omit = (
+            tool_choice if not should_omit_tool_choice else omit
+        )

         include_set: set[str] = set(converted_tools.includes)
         if model_settings.response_include is not None:
@@ -300,7 +306,7 @@
                 f"{input_json}\n"
                 f"Tools:\n{tools_json}\n"
                 f"Stream: {stream}\n"
-                f"Tool choice: {tool_choice}\n"
+                f"Tool choice: {tool_choice_param}\n"
                 f"Response format: {response_format}\n"
                 f"Previous response id: {previous_response_id}\n"
                 f"Conversation id: {conversation_id}\n"
@@ -330,7 +336,7 @@
             top_p=self._non_null_or_omit(model_settings.top_p),
             truncation=self._non_null_or_omit(model_settings.truncation),
             max_output_tokens=self._non_null_or_omit(model_settings.max_tokens),
-            tool_choice=tool_choice,
+            tool_choice=tool_choice_param,
             parallel_tool_calls=parallel_tool_calls,
             stream=cast(Any, stream_param),
             extra_headers=self._merge_headers(model_settings),
diff --git a/tests/test_openai_responses.py b/tests/test_openai_responses.py
index ecd509ac6..b896ea819 100644
--- a/tests/test_openai_responses.py
+++ b/tests/test_openai_responses.py
@@ -135,3 +135,76 @@ def __init__(self):
     )

     assert called_kwargs["tools"] is omit
+
+
+@pytest.mark.allow_call_model_methods
+@pytest.mark.asyncio
+async def test_prompt_id_omits_tool_choice_when_no_tools_configured():
+    called_kwargs: dict[str, Any] = {}
+
+    class DummyResponses:
+        async def create(self, **kwargs):
+            nonlocal called_kwargs
+            called_kwargs = kwargs
+            return get_response_obj([])
+
+    class DummyResponsesClient:
+        def __init__(self):
+            self.responses = DummyResponses()
+
+    model = OpenAIResponsesModel(
+        model="gpt-4",
+        openai_client=DummyResponsesClient(),  # type: ignore[arg-type]
+        model_is_explicit=False,
+    )
+
+    await model.get_response(
+        system_instructions=None,
+        input="hi",
+        model_settings=ModelSettings(tool_choice="web_search_preview"),
+        tools=[],
+        output_schema=None,
+        handoffs=[],
+        tracing=ModelTracing.DISABLED,
+        prompt={"id": "pmpt_123"},
+    )
+
+    assert called_kwargs["tools"] is omit
+    assert called_kwargs["tool_choice"] is omit
+
+
+@pytest.mark.allow_call_model_methods
+@pytest.mark.asyncio
+@pytest.mark.parametrize("tool_choice", ["none", "required"])
+async def test_prompt_id_keeps_literal_tool_choice_without_local_tools(tool_choice: str):
+    called_kwargs: dict[str, Any] = {}
+
+    class DummyResponses:
+        async def create(self, **kwargs):
+            nonlocal called_kwargs
+            called_kwargs = kwargs
+            return get_response_obj([])
+
+    class DummyResponsesClient:
+        def __init__(self):
+            self.responses = DummyResponses()
+
+    model = OpenAIResponsesModel(
+        model="gpt-4",
+        openai_client=DummyResponsesClient(),  # type: ignore[arg-type]
+        model_is_explicit=False,
+    )
+
+    await model.get_response(
+        system_instructions=None,
+        input="hi",
+        model_settings=ModelSettings(tool_choice=tool_choice),
+        tools=[],
+        output_schema=None,
+        handoffs=[],
+        tracing=ModelTracing.DISABLED,
+        prompt={"id": "pmpt_123"},
+    )
+
+    assert called_kwargs["tools"] is omit
+    assert called_kwargs["tool_choice"] == tool_choice
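
For illustration only, a minimal self-contained sketch of the omission rule the first hunk adds. `OMIT` and `resolve_tool_choice` are hypothetical stand-ins for the SDK's `omit` sentinel and the inline logic in `_fetch_response`; they are not part of this patch:

from __future__ import annotations

from typing import Any

OMIT = object()  # hypothetical stand-in for the SDK's `omit` sentinel


def resolve_tool_choice(
    prompt: dict[str, Any] | None,
    converted_tools_payload: list[Any],
    tool_choice: Any,
) -> Any:
    # Mirrors the diff: the tools param is omitted when a server-managed
    # prompt is set and no local tools were converted.
    should_omit_tools = prompt is not None and len(converted_tools_payload) == 0
    # Named tool choices arrive as dicts (e.g. {"type": "web_search_preview"})
    # and must reference an explicit tools list, so they are dropped too;
    # control literals like "none"/"required" are valid on their own.
    should_omit_tool_choice = should_omit_tools and isinstance(tool_choice, dict)
    return OMIT if should_omit_tool_choice else tool_choice


# With a prompt id and no local tools, a named choice is omitted...
assert resolve_tool_choice({"id": "pmpt_123"}, [], {"type": "web_search_preview"}) is OMIT
# ...while a control literal is forwarded unchanged.
assert resolve_tool_choice({"id": "pmpt_123"}, [], "required") == "required"

The same split is what the two new tests assert against the kwargs captured from the stubbed create(...) call.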