Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
108 changes: 92 additions & 16 deletions astrbot/core/astr_agent_run_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,15 +24,77 @@ def _should_stop_agent(astr_event) -> bool:
return astr_event.is_stopped() or bool(astr_event.get_extra("agent_stop_requested"))


def _truncate_tool_result(text: str, limit: int = 70) -> str:
if limit <= 0:
return ""
if len(text) <= limit:
return text
if limit <= 3:
return text[:limit]
return f"{text[: limit - 3]}..."


def _extract_chain_json_data(msg_chain: MessageChain) -> dict | None:
    """Return the dict payload of the chain's leading ``Json`` component.

    Returns ``None`` when the chain is empty, when the first component is
    not a ``Json``, or when its ``data`` attribute is not a dict.
    """
    components = msg_chain.chain
    if not components:
        return None
    head = components[0]
    if not isinstance(head, Json):
        return None
    payload = head.data
    return payload if isinstance(payload, dict) else None


def _record_tool_call_name(
tool_info: dict | None, tool_name_by_call_id: dict[str, str]
) -> None:
if not isinstance(tool_info, dict):
return
tool_call_id = tool_info.get("id")
tool_name = tool_info.get("name")
if tool_call_id is None or tool_name is None:
return
tool_name_by_call_id[str(tool_call_id)] = str(tool_name)


def _build_tool_call_status_message(tool_info: dict | None) -> str:
if tool_info:
return f"🔨 调用工具: {tool_info.get('name', 'unknown')}"
return "🔨 调用工具..."


def _build_tool_result_status_message(
    msg_chain: MessageChain, tool_name_by_call_id: dict[str, str]
) -> str:
    """Compose the status line shown after a tool call returns a result.

    Resolves the tool name through ``tool_name_by_call_id`` (consuming the
    entry so stale ids do not accumulate) and appends a preview of the
    result, truncated to 70 characters. When the chain carries no
    structured JSON result, the chain's plain-text rendering is used as
    the preview instead.
    """
    name = "unknown"
    result_text = ""

    payload = _extract_chain_json_data(msg_chain)
    if payload:
        call_id = payload.get("id")
        if call_id is not None:
            # pop() consumes the mapping entry recorded at call time.
            name = tool_name_by_call_id.pop(str(call_id), "unknown")
        result_text = str(payload.get("result", ""))

    if not result_text:
        # No structured result — fall back to the textual rendering.
        result_text = msg_chain.get_plain_text(with_other_comps_mark=True)
    preview = _truncate_tool_result(result_text, 70)

    if not preview:
        return f"🔨 调用工具: {name}"
    return f"🔨 调用工具: {name}\n📎 返回结果: {preview}"


async def run_agent(
agent_runner: AgentRunner,
max_step: int = 30,
show_tool_use: bool = True,
show_tool_call_result: bool = False,
stream_to_general: bool = False,
show_reasoning: bool = False,
) -> AsyncGenerator[MessageChain | None, None]:
step_idx = 0
astr_event = agent_runner.run_context.context.event
tool_name_by_call_id: dict[str, str] = {}
while step_idx < max_step + 1:
step_idx += 1

Expand Down Expand Up @@ -90,32 +152,36 @@ async def run_agent(
continue
if astr_event.get_platform_id() == "webchat":
await astr_event.send(msg_chain)
elif show_tool_use and show_tool_call_result:
status_msg = _build_tool_result_status_message(
msg_chain, tool_name_by_call_id
)
await astr_event.send(
MessageChain(type="tool_call").message(status_msg)
)
# 对于其他情况,暂时先不处理
continue
elif resp.type == "tool_call":
if agent_runner.streaming:
# 用来标记流式响应需要分节
yield MessageChain(chain=[], type="break")

tool_info = None

if resp.data["chain"].chain:
json_comp = resp.data["chain"].chain[0]
if isinstance(json_comp, Json):
tool_info = json_comp.data
astr_event.trace.record(
"agent_tool_call",
tool_name=tool_info if tool_info else "unknown",
)
tool_info = _extract_chain_json_data(resp.data["chain"])
astr_event.trace.record(
"agent_tool_call",
tool_name=tool_info if tool_info else "unknown",
)
_record_tool_call_name(tool_info, tool_name_by_call_id)

if astr_event.get_platform_name() == "webchat":
await astr_event.send(resp.data["chain"])
elif show_tool_use:
if tool_info:
m = f"🔨 调用工具: {tool_info.get('name', 'unknown')}"
else:
m = "🔨 调用工具..."
chain = MessageChain(type="tool_call").message(m)
if show_tool_call_result and isinstance(tool_info, dict):
# Delay tool status notification until tool_call_result.
continue
Comment on lines +179 to +181
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

在开启 show_tool_call_result 时,代码选择跳过发送即时的“正在调用工具”状态消息(第 181 行 continue),直到工具执行完毕并返回结果时才统一发送。这在执行耗时较长的工具(如联网搜索、复杂数据处理等)时,会导致用户侧长时间没有任何反馈,可能让用户误以为机器人卡死或无响应。建议即使开启了结果显示,也保留即时的调用反馈,以提升交互的即时性。虽然这会导致在结果返回时多发一条消息,但对于长耗时任务的交互体验至关重要。

                        chain = MessageChain(type="tool_call").message(
                            _build_tool_call_status_message(tool_info)
                        )
                        await astr_event.send(chain)

chain = MessageChain(type="tool_call").message(
_build_tool_call_status_message(tool_info)
)
await astr_event.send(chain)
continue

Expand Down Expand Up @@ -202,6 +268,7 @@ async def run_live_agent(
tts_provider: TTSProvider | None = None,
max_step: int = 30,
show_tool_use: bool = True,
show_tool_call_result: bool = False,
show_reasoning: bool = False,
) -> AsyncGenerator[MessageChain | None, None]:
"""Live Mode 的 Agent 运行器,支持流式 TTS
Expand All @@ -211,6 +278,7 @@ async def run_live_agent(
tts_provider: TTS Provider 实例
max_step: 最大步数
show_tool_use: 是否显示工具使用
show_tool_call_result: 是否显示工具返回结果
show_reasoning: 是否显示推理过程

Yields:
Expand All @@ -222,6 +290,7 @@ async def run_live_agent(
agent_runner,
max_step=max_step,
show_tool_use=show_tool_use,
show_tool_call_result=show_tool_call_result,
stream_to_general=False,
show_reasoning=show_reasoning,
):
Expand Down Expand Up @@ -250,7 +319,12 @@ async def run_live_agent(
# 1. 启动 Agent Feeder 任务:负责运行 Agent 并将文本分句喂给 text_queue
feeder_task = asyncio.create_task(
_run_agent_feeder(
agent_runner, text_queue, max_step, show_tool_use, show_reasoning
agent_runner,
text_queue,
max_step,
show_tool_use,
show_tool_call_result,
show_reasoning,
)
)

Expand Down Expand Up @@ -336,6 +410,7 @@ async def _run_agent_feeder(
text_queue: asyncio.Queue,
max_step: int,
show_tool_use: bool,
show_tool_call_result: bool,
show_reasoning: bool,
) -> None:
"""运行 Agent 并将文本输出分句放入队列"""
Expand All @@ -345,6 +420,7 @@ async def _run_agent_feeder(
agent_runner,
max_step=max_step,
show_tool_use=show_tool_use,
show_tool_call_result=show_tool_call_result,
stream_to_general=False,
show_reasoning=show_reasoning,
):
Expand Down
13 changes: 13 additions & 0 deletions astrbot/core/config/default.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,7 @@
"dequeue_context_length": 1,
"streaming_response": False,
"show_tool_use_status": False,
"show_tool_call_result": False,
"sanitize_context_by_modalities": False,
"max_quoted_fallback_images": 20,
"quoted_message_parser": {
Expand Down Expand Up @@ -2306,6 +2307,9 @@ class ChatProviderTemplate(TypedDict):
"show_tool_use_status": {
"type": "bool",
},
"show_tool_call_result": {
"type": "bool",
},
"unsupported_streaming_strategy": {
"type": "string",
},
Expand Down Expand Up @@ -2994,6 +2998,15 @@ class ChatProviderTemplate(TypedDict):
"provider_settings.agent_runner_type": "local",
},
},
"provider_settings.show_tool_call_result": {
"description": "输出函数调用返回结果",
"type": "bool",
"hint": "仅在输出函数调用状态启用时生效,展示结果前 70 个字符。",
"condition": {
"provider_settings.agent_runner_type": "local",
"provider_settings.show_tool_use_status": True,
},
},
"provider_settings.sanitize_context_by_modalities": {
"description": "按模型能力清理历史上下文",
"type": "bool",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@ async def initialize(self, ctx: PipelineContext) -> None:
if isinstance(self.max_step, bool): # workaround: #2622
self.max_step = 30
self.show_tool_use: bool = settings.get("show_tool_use_status", True)
self.show_tool_call_result: bool = settings.get("show_tool_call_result", False)
self.show_reasoning = settings.get("display_reasoning_text", False)
self.sanitize_context_by_modalities: bool = settings.get(
"sanitize_context_by_modalities",
Expand Down Expand Up @@ -240,6 +241,7 @@ async def process(
tts_provider,
self.max_step,
self.show_tool_use,
self.show_tool_call_result,
show_reasoning=self.show_reasoning,
),
),
Expand Down Expand Up @@ -269,6 +271,7 @@ async def process(
agent_runner,
self.max_step,
self.show_tool_use,
self.show_tool_call_result,
show_reasoning=self.show_reasoning,
),
),
Expand Down Expand Up @@ -297,6 +300,7 @@ async def process(
agent_runner,
self.max_step,
self.show_tool_use,
self.show_tool_call_result,
stream_to_general,
show_reasoning=self.show_reasoning,
):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -251,6 +251,10 @@
"show_tool_use_status": {
"description": "Output Function Call Status"
},
"show_tool_call_result": {
"description": "Output Tool Call Results",
"hint": "Only takes effect when \"Output Function Call Status\" is enabled, and shows at most 70 characters."
},
"sanitize_context_by_modalities": {
"description": "Sanitize History by Modalities",
"hint": "When enabled, sanitizes contexts before each LLM request by removing image blocks and tool-call structures that the current provider's modalities do not support (this changes what the model sees)."
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -254,6 +254,10 @@
"show_tool_use_status": {
"description": "输出函数调用状态"
},
"show_tool_call_result": {
"description": "输出函数调用返回结果",
"hint": "仅在启用“输出函数调用状态”时生效,且最多展示 70 个字符。"
},
"sanitize_context_by_modalities": {
"description": "按模型能力清理历史上下文",
"hint": "开启后,在每次请求 LLM 前会按当前模型提供商中所选择的模型能力删除对话中不支持的图片/工具调用结构(会改变模型看到的历史)"
Expand Down