Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 8 additions & 2 deletions src/agents/models/openai_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -271,9 +271,15 @@ async def _fetch_response(
should_omit_model = prompt is not None and not self._model_is_explicit
model_param: str | ChatModel | Omit = self.model if not should_omit_model else omit
should_omit_tools = prompt is not None and len(converted_tools_payload) == 0
# In prompt-managed tool flows without local tools payload, omit only named tool choices
# that must match an explicit tool list. Keep control literals like "none"/"required".
should_omit_tool_choice = should_omit_tools and isinstance(tool_choice, dict)
tools_param: list[ToolParam] | Omit = (
converted_tools_payload if not should_omit_tools else omit
)
tool_choice_param: response_create_params.ToolChoice | Omit = (
tool_choice if not should_omit_tool_choice else omit
)

include_set: set[str] = set(converted_tools.includes)
if model_settings.response_include is not None:
Expand All @@ -300,7 +306,7 @@ async def _fetch_response(
f"{input_json}\n"
f"Tools:\n{tools_json}\n"
f"Stream: {stream}\n"
f"Tool choice: {tool_choice}\n"
f"Tool choice: {tool_choice_param}\n"
f"Response format: {response_format}\n"
f"Previous response id: {previous_response_id}\n"
f"Conversation id: {conversation_id}\n"
Expand Down Expand Up @@ -330,7 +336,7 @@ async def _fetch_response(
top_p=self._non_null_or_omit(model_settings.top_p),
truncation=self._non_null_or_omit(model_settings.truncation),
max_output_tokens=self._non_null_or_omit(model_settings.max_tokens),
tool_choice=tool_choice,
tool_choice=tool_choice_param,
parallel_tool_calls=parallel_tool_calls,
stream=cast(Any, stream_param),
extra_headers=self._merge_headers(model_settings),
Expand Down
73 changes: 73 additions & 0 deletions tests/test_openai_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -135,3 +135,76 @@ def __init__(self):
)

assert called_kwargs["tools"] is omit


@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_prompt_id_omits_tool_choice_when_no_tools_configured():
    """A prompt-managed run with no local tools must drop a named tool_choice.

    With a `prompt` id and an empty tools list, the request payload should
    omit both `tools` and the (dict-converted) named `tool_choice`, since a
    named choice is only valid alongside an explicit tool list.
    """
    recorded: dict[str, Any] = {}

    class _RecordingResponses:
        async def create(self, **kwargs):
            # Capture the exact kwargs the SDK client would receive.
            recorded.update(kwargs)
            return get_response_obj([])

    class _FakeResponsesClient:
        def __init__(self):
            self.responses = _RecordingResponses()

    model = OpenAIResponsesModel(
        model="gpt-4",
        openai_client=_FakeResponsesClient(),  # type: ignore[arg-type]
        model_is_explicit=False,
    )

    await model.get_response(
        system_instructions=None,
        input="hi",
        # "web_search_preview" is converted to a dict-shaped named choice.
        model_settings=ModelSettings(tool_choice="web_search_preview"),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        prompt={"id": "pmpt_123"},
    )

    assert recorded["tools"] is omit
    assert recorded["tool_choice"] is omit


@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
@pytest.mark.parametrize("tool_choice", ["none", "required"])
async def test_prompt_id_keeps_literal_tool_choice_without_local_tools(tool_choice: str):
    """Control literals like "none"/"required" survive prompt-managed runs.

    Even when `tools` is omitted because a `prompt` id supplies the tool
    configuration, a string control literal for `tool_choice` must still be
    forwarded unchanged — only dict-shaped named choices get dropped.
    """
    recorded: dict[str, Any] = {}

    class _RecordingResponses:
        async def create(self, **kwargs):
            # Capture the exact kwargs the SDK client would receive.
            recorded.update(kwargs)
            return get_response_obj([])

    class _FakeResponsesClient:
        def __init__(self):
            self.responses = _RecordingResponses()

    model = OpenAIResponsesModel(
        model="gpt-4",
        openai_client=_FakeResponsesClient(),  # type: ignore[arg-type]
        model_is_explicit=False,
    )

    await model.get_response(
        system_instructions=None,
        input="hi",
        model_settings=ModelSettings(tool_choice=tool_choice),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        prompt={"id": "pmpt_123"},
    )

    assert recorded["tools"] is omit
    assert recorded["tool_choice"] == tool_choice