Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 13 additions & 1 deletion src/strands/models/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -440,6 +440,12 @@ def format_request_messages(

return [message for message in formatted_messages if "content" in message or "tool_calls" in message]

def _default_stop_tokens(self) -> list[str] | None:
model_id = str(self.config.get("model_id", "")).lower()
if "gpt-oss" in model_id:
return ["<|call|>", "<|return|>", "<|end|>"]
return None

def format_request(
self,
messages: Messages,
Expand Down Expand Up @@ -467,6 +473,12 @@ def format_request(
TypeError: If a message contains a content block type that cannot be converted to an OpenAI-compatible
format.
"""
params = dict(cast(dict[str, Any], self.config.get("params", {})))
if "stop" not in params:
default_stop = self._default_stop_tokens()
if default_stop:
params["stop"] = default_stop

return {
"messages": self.format_request_messages(
messages, system_prompt, system_prompt_content=system_prompt_content
Expand All @@ -486,7 +498,7 @@ def format_request(
for tool_spec in tool_specs or []
],
**(self._format_request_tool_choice(tool_choice)),
**cast(dict[str, Any], self.config.get("params", {})),
**params,
}

def format_chunk(self, event: dict[str, Any], **kwargs: Any) -> StreamEvent:
Expand Down
19 changes: 19 additions & 0 deletions tests/strands/models/test_openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -627,6 +627,25 @@ def test_format_request(model, messages, tool_specs, system_prompt):
assert tru_request == exp_request


def test_format_request_adds_gpt_oss_stop_tokens(messages, tool_specs, system_prompt):
    """A gpt-oss model with no explicit stop config gets the harmony stop tokens injected."""
    model = OpenAIModel(model_id="openai/gpt-oss-120b", params={"max_tokens": 1})

    request = model.format_request(messages, tool_specs, system_prompt)

    expected_stop = ["<|call|>", "<|return|>", "<|end|>"]
    assert request["stop"] == expected_stop


def test_format_request_preserves_explicit_stop_tokens(messages, tool_specs, system_prompt):
    """User-supplied stop tokens in params win over the gpt-oss defaults."""
    explicit_params = {"max_tokens": 1, "stop": ["<|end|>"]}
    model = OpenAIModel(model_id="openai/gpt-oss-120b", params=explicit_params)

    request = model.format_request(messages, tool_specs, system_prompt)

    assert request["stop"] == ["<|end|>"]


def test_format_request_with_tool_choice_auto(model, messages, tool_specs, system_prompt):
tool_choice = {"auto": {}}
tru_request = model.format_request(messages, tool_specs, system_prompt, tool_choice)
Expand Down