diff --git a/src/strands/models/openai.py b/src/strands/models/openai.py
index 73484e924..07c019a90 100644
--- a/src/strands/models/openai.py
+++ b/src/strands/models/openai.py
@@ -440,6 +440,12 @@ def format_request_messages(
 
         return [message for message in formatted_messages if "content" in message or "tool_calls" in message]
 
+    def _default_stop_tokens(self) -> list[str] | None:
+        model_id = str(self.config.get("model_id", "")).lower()
+        if "gpt-oss" in model_id:
+            return ["<|call|>", "<|return|>", "<|end|>"]
+        return None
+
     def format_request(
         self,
         messages: Messages,
@@ -467,6 +473,12 @@ def format_request(
             TypeError: If a message contains a content block type that cannot be
                 converted to an OpenAI-compatible format.
         """
+        params = dict(cast(dict[str, Any], self.config.get("params", {})))
+        if "stop" not in params:
+            default_stop = self._default_stop_tokens()
+            if default_stop:
+                params["stop"] = default_stop
+
         return {
             "messages": self.format_request_messages(
                 messages, system_prompt, system_prompt_content=system_prompt_content
@@ -486,7 +498,7 @@ def format_request(
                 for tool_spec in tool_specs or []
             ],
             **(self._format_request_tool_choice(tool_choice)),
-            **cast(dict[str, Any], self.config.get("params", {})),
+            **params,
         }
 
     def format_chunk(self, event: dict[str, Any], **kwargs: Any) -> StreamEvent:
diff --git a/tests/strands/models/test_openai.py b/tests/strands/models/test_openai.py
index 747e1123a..1a338371a 100644
--- a/tests/strands/models/test_openai.py
+++ b/tests/strands/models/test_openai.py
@@ -627,6 +627,25 @@ def test_format_request(model, messages, tool_specs, system_prompt):
     assert tru_request == exp_request
 
 
+def test_format_request_adds_gpt_oss_stop_tokens(messages, tool_specs, system_prompt):
+    model = OpenAIModel(model_id="openai/gpt-oss-120b", params={"max_tokens": 1})
+
+    tru_request = model.format_request(messages, tool_specs, system_prompt)
+
+    assert tru_request["stop"] == ["<|call|>", "<|return|>", "<|end|>"]
+
+
+def test_format_request_preserves_explicit_stop_tokens(messages, tool_specs, system_prompt):
+    model = OpenAIModel(
+        model_id="openai/gpt-oss-120b",
+        params={"max_tokens": 1, "stop": ["<|end|>"]},
+    )
+
+    tru_request = model.format_request(messages, tool_specs, system_prompt)
+
+    assert tru_request["stop"] == ["<|end|>"]
+
+
 def test_format_request_with_tool_choice_auto(model, messages, tool_specs, system_prompt):
     tool_choice = {"auto": {}}
     tru_request = model.format_request(messages, tool_specs, system_prompt, tool_choice)