23 changes: 23 additions & 0 deletions agentrun/__init__.py
@@ -16,10 +16,12 @@
- Integration: 框架集成 / Framework integration
"""

import os
from typing import TYPE_CHECKING

__version__ = "0.0.16"


# Agent Runtime
from agentrun.agent_runtime import (
AgentRuntime,
@@ -114,6 +116,7 @@
ResourceAlreadyExistError,
ResourceNotExistError,
)
from agentrun.utils.log import logger
from agentrun.utils.model import Status

# Server - deferred import to avoid optional-dependency issues
@@ -360,3 +363,23 @@ def __getattr__(name: str):
raise

raise AttributeError(f"module '{__name__}' has no attribute '{name}'")


if not os.getenv("DISABLE_BREAKING_CHANGES_WARNING"):
Copilot AI Jan 27, 2026

DISABLE_BREAKING_CHANGES_WARNING is treated as “disabled if the variable exists with any value”. That means values like "0"/"false" will still disable the warning, which is inconsistent with how other env flags are parsed in this repo (e.g., AGENTRUN_SDK_DEBUG in agentrun/utils/log.py). Consider normalizing the value and only disabling when it’s truthy ("1", "true", "yes"), or reusing the same allow/deny list pattern used in log.py.

Suggested change
if not os.getenv("DISABLE_BREAKING_CHANGES_WARNING"):
_disable_warning_val = os.getenv("DISABLE_BREAKING_CHANGES_WARNING")
_disable_warning = (
_disable_warning_val is not None
and _disable_warning_val.strip().lower() in ("1", "true", "yes")
)
if not _disable_warning:

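For reference, a minimal sketch of the kind of reusable flag helper the comment alludes to (the allow-list pattern in agentrun/utils/log.py is not shown in this diff, so the helper name and usage below are illustrative, not the SDK's actual API):

import os

def _env_flag(name: str, default: bool = False) -> bool:
    """Treat only explicit truthy values ("1", "true", "yes") as enabled."""
    value = os.getenv(name)
    if value is None:
        return default
    return value.strip().lower() in ("1", "true", "yes")

# Hypothetical usage in agentrun/__init__.py:
# if not _env_flag("DISABLE_BREAKING_CHANGES_WARNING"):
#     logger.warning(...)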
Copilot AI Jan 27, 2026

The warning currently runs for all versions, but the message/PR description says it should display for “early versions”. To avoid accidentally warning on future stable releases, gate this by version (e.g., only when major version == 0, or when __version__ < 1.0.0), and keep the env var override as an escape hatch.

Suggested change
if not os.getenv("DISABLE_BREAKING_CHANGES_WARNING"):
def _is_early_version(version: str) -> bool:
"""Return True if the given version string represents an early (pre-1.0.0) version."""
parts = version.split(".")
if not parts:
# Conservatively treat unknown/malformed versions as early
return True
try:
major = int(parts[0])
except ValueError:
# Conservatively treat non-numeric major versions as early
return True
return major == 0
if _is_early_version(__version__) and not os.getenv(
"DISABLE_BREAKING_CHANGES_WARNING"
):

logger.warning(
f"当前您正在使用 AgentRun Python SDK 版本 {__version__}。"
"早期版本通常包含许多新功能,这些功能\033[1;33m 可能引入不兼容的变更"
" \033[0m。为避免潜在问题,我们强烈建议\033[1;32m 将依赖锁定为此版本"
" \033[0m。\nYou are currently using AgentRun Python SDK version"
f" {__version__}. Early versions often include many new features,"
" which\033[1;33m may introduce breaking changes\033[0m. To avoid"
" potential issues, we strongly recommend \033[1;32mpinning the"
" dependency to this version\033[0m.\n\033[2;3m pip install"
Comment on lines +371 to +377
Copilot AI Jan 27, 2026

The warning text embeds raw ANSI escape sequences (e.g., \033[...m) inside the log message. If users redirect logs to files/CI systems or replace the SDK’s formatter/handlers, these control codes can end up as unreadable output. Prefer keeping the message plain and letting the logger formatter handle coloring (or only add ANSI codes when output is a TTY).

f" 'agentrun-sdk=={__version__}' \033[0m\n\n增加\033[2;3m"
" DISABLE_BREAKING_CHANGES_WARNING=1"
" \033[0m到您的环境变量以关闭此警告。\nAdd\033[2;3m"
" DISABLE_BREAKING_CHANGES_WARNING=1 \033[0mto your environment"
" variables to disable this warning.\n\nReleases:\033[2;3m"
" https://github.com/Serverless-Devs/agentrun-sdk-python/releases"
" \033[0m"
)
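On the ANSI-escape comment above: one TTY-aware alternative is to apply colour codes only when the target stream is an interactive terminal. A minimal sketch (not part of this PR; it assumes the warning goes to stderr, which may differ from the SDK logger's actual handler):

import sys

def _maybe_color(text: str, code: str) -> str:
    """Wrap text in an ANSI style only when stderr is a real terminal."""
    if sys.stderr.isatty():
        return f"\033[{code}m{text}\033[0m"
    return text

# e.g. _maybe_color("may introduce breaking changes", "1;33")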
10 changes: 5 additions & 5 deletions agentrun/integration/langgraph/agent_converter.py
@@ -495,7 +495,7 @@ def _convert_stream_updates_event(
if tc_id:
# Emit a TOOL_CALL_CHUNK carrying the complete arguments
args_str = ""
if tc_args:
if tc_args is not None:
args_str = (
AgentRunConverter._safe_json_dumps(tc_args)
if isinstance(tc_args, dict)
@@ -570,7 +570,7 @@ def _convert_stream_values_event(
if tc_id:
# Emit a TOOL_CALL_CHUNK carrying the complete arguments
args_str = ""
if tc_args:
if tc_args is not None:
args_str = (
AgentRunConverter._safe_json_dumps(tc_args)
if isinstance(tc_args, dict)
@@ -694,7 +694,7 @@ def _convert_astream_events_event(
tool_name_to_call_ids[tc_name].append(tc_id)
# The first chunk carries the id and name
args_delta = ""
if tc_args:
if tc_args is not None:
args_delta = (
AgentRunConverter._safe_json_dumps(tc_args)
if isinstance(tc_args, (dict, list))
@@ -708,7 +708,7 @@
"args_delta": args_delta,
},
)
elif tc_args:
elif tc_args is not None:
# Subsequent chunks carry only args_delta
args_delta = (
AgentRunConverter._safe_json_dumps(tc_args)
@@ -765,7 +765,7 @@ def _convert_astream_events_event(
).append(tc_id)

args_delta = ""
if tc_args:
if tc_args is not None:
args_delta = (
AgentRunConverter._safe_json_dumps(
tc_args
5 changes: 4 additions & 1 deletion tests/unittests/integration/conftest.py
@@ -320,9 +320,12 @@ def shared_mock_server(monkeypatch: Any, respx_mock: Any) -> MockLLMServer:
"""提供共享的 Mock LLM Server
预配置了默认场景。
关键修复:传入 respx_mock fixture 给 MockLLMServer
- 确保 HTTP mock 在所有环境(本地/CI)中一致生效
"""
server = MockLLMServer(expect_tools=True, validate_tools=False)
server.install(monkeypatch)
server.install(monkeypatch, respx_mock)
server.add_default_scenarios()
return server

15 changes: 13 additions & 2 deletions tests/unittests/integration/langchain/test_agent_invoke_methods.py
@@ -400,7 +400,9 @@ def _normalize_agui_event(event: Dict[str, Any]) -> Dict[str, Any]:
},
{
"type": "TOOL_CALL_ARGS",
"delta": "",
# In LangGraph, empty arguments appear as "{}" (as in the Node.js SDK) or, depending on the conversion logic, possibly as an empty string
# but the current mock server returns "{}" and the converter preserves it
"delta": "{}",
"hasToolCallId": True,
},
{"type": "TOOL_CALL_END", "hasToolCallId": True},
@@ -551,6 +553,15 @@ def _normalize_openai_stream(
}],
"finish_reason": None,
},
{
"object": "chat.completion.chunk",
"tool_calls": [{
"name": None,
"arguments": "{}",
"has_id": False,
}],
"finish_reason": None,
},
{
"object": "chat.completion.chunk",
"delta_role": "assistant",
@@ -612,7 +623,7 @@ def _normalize_openai_nonstream(resp: Dict[str, Any]) -> Dict[str, Any]:
"content": "工具结果已收到: 2024-01-01 12:00:00",
"tool_calls": [{
"name": "get_time",
"arguments": "",
"arguments": "{}",
"has_id": True,
}],
"finish_reason": "tool_calls",
63 changes: 55 additions & 8 deletions tests/unittests/integration/mock_llm_server.py
@@ -43,7 +43,7 @@ class MockLLMServer:
Usage:
# Basic usage
server = MockLLMServer()
server.install(monkeypatch)
server.install(monkeypatch, respx_mock)  # respx_mock must be passed in
# Add custom scenarios
server.add_scenario(Scenarios.simple_chat("你好", "你好!"))
@@ -67,15 +67,22 @@ class MockLLMServer:
validate_tools: bool = True
"""是否验证工具格式(默认 True)"""

def install(self, monkeypatch: Any) -> "MockLLMServer":
_respx_router: Any = field(default=None, init=False, repr=False)
"""内部使用的 respx router 实例"""

def install(
self, monkeypatch: Any, respx_mock: Any = None
) -> "MockLLMServer":
"""安装所有 mock
Args:
monkeypatch: pytest monkeypatch fixture
respx_mock: pytest respx_mock fixture(必须传入以确保 mock 生效)
Returns:
self: 返回自身以便链式调用
"""
self._respx_router = respx_mock
self._patch_model_info(monkeypatch)
self._patch_litellm(monkeypatch)
self._setup_respx()
@@ -240,7 +247,20 @@ async def fake_acompletion(*args: Any, **kwargs: Any) -> ModelResponse:
pass # google.adk not installed

def _setup_respx(self):
"""设置 respx HTTP mock"""
"""设置 respx HTTP mock
关键修复:使用 pytest-respx fixture 提供的 router 而不是全局 respx
问题背景:
- 之前直接使用全局 respx.route() 在 CI 环境中不生效
- 全局 respx router 在某些环境中可能没有正确初始化
- 导致 HTTP 请求没有被拦截,Google ADK 发送真实请求
解决方案:
- 使用 pytest-respx 提供的 respx_mock fixture
- 通过 install() 方法传入 respx_mock
- 确保 mock 在所有环境中一致生效
"""

def extract_payload(request: Any) -> Dict[str, Any]:
try:
@@ -274,7 +294,10 @@ def build_response(request: Any, route: Any) -> respx.MockResponse:
)
return respx.MockResponse(status_code=200, json=response_json)

respx.route(url__startswith=self.base_url).mock(
# Key fix: use the injected respx_router instead of the global respx
# If no respx_router was passed in, fall back to the global respx (backward compatibility)
router = self._respx_router if self._respx_router is not None else respx
router.route(url__startswith=self.base_url).mock(
side_effect=build_response
)

@@ -304,6 +327,27 @@ def _build_response(
tools_payload is not None,
)

# Log each message in detail to help debug the framework's message format
for i, msg in enumerate(messages):
role = msg.get("role", "unknown")
content_preview = str(msg.get("content", ""))[:100]
logger.debug(
"Message[%d] role=%s, content_preview=%s",
i,
role,
content_preview,
)
if "tool_calls" in msg:
logger.debug(
"Message[%d] has tool_calls: %s", i, msg.get("tool_calls")
)
if "tool_call_id" in msg:
logger.debug(
"Message[%d] has tool_call_id: %s",
i,
msg.get("tool_call_id"),
)

# Validate the tool format
if self.validate_tools and self.expect_tools and tools_payload:
self._assert_tools(tools_payload)
@@ -319,16 +363,19 @@ def _build_response(
turn = scenario.get_response(messages)
return turn.to_response()

# Default logic: decide the response based on the last message
# Default logic: used when no scenario matches
return self._build_default_response(messages, tools_payload)

def _build_default_response(
self, messages: List[Dict], tools_payload: Optional[List]
) -> Dict[str, Any]:
"""构建默认响应(无场景匹配时使用)"""
last_role = messages[-1].get("role")
# Check whether the message history already contains tool results
# Key fix: inspect the entire history, not just the last message
has_tool_results = any(msg.get("role") == "tool" for msg in messages)

if last_role == "tool":
if has_tool_results:
# Tool results already exist, so return the final answer instead of calling the tool again
return {
"id": "chatcmpl-mock-final",
"object": "chat.completion",
@@ -349,7 +396,7 @@
},
}

# If tools are available, return a tool call
# If tools are available and have not been called yet, return a tool call
if tools_payload:
return {
"id": "chatcmpl-mock-tools",
51 changes: 39 additions & 12 deletions tests/unittests/integration/scenarios.py
@@ -113,12 +113,31 @@ def get_response(self, messages: List[Dict]) -> MockTurn:
- if the last message has role "tool", the tool has already run, so advance to the next turn
- otherwise return the current turn
"""
import logging

logger = logging.getLogger(__name__)

# Work out which turn should be returned now
tool_rounds = sum(1 for msg in messages if msg.get("role") == "tool")

logger.debug(
"Scenario '%s': Found %d tool messages, total turns: %d",
self.name,
tool_rounds,
len(self.turns),
)

# Determine the current turn from the number of tool messages
# Each tool response advances the scenario by one turn
current_idx = min(tool_rounds, len(self.turns) - 1)

logger.debug(
"Scenario '%s': Returning turn %d, has_tool_calls=%s",
self.name,
current_idx,
self.turns[current_idx].has_tool_calls(),
)

return self.turns[current_idx]

def reset(self):
Expand All @@ -145,12 +164,14 @@ def simple_chat(trigger: str, response: str) -> MockScenario:
"""

def trigger_fn(messages: List[Dict]) -> bool:
# Find the last user message
for msg in reversed(messages):
# Check every user message (a match on any one of them is enough)
# Fix: do not rely on only the last message, so extra messages inserted by the framework cannot break matching
for msg in messages:
if msg.get("role") == "user":
content = msg.get("content", "")
if isinstance(content, str):
return trigger in content
if trigger in content:
return True
elif isinstance(content, list):
# Handle the case where content is a list
for item in content:
Expand Down Expand Up @@ -188,11 +209,13 @@ def single_tool_call(
"""

def trigger_fn(messages: List[Dict]) -> bool:
for msg in reversed(messages):
# Check every user message (a match on any one of them is enough)
# Fix: prevent extra messages inserted by the framework from breaking matching
for msg in messages:
if msg.get("role") == "user":
content = msg.get("content", "")
if isinstance(content, str):
return trigger in content
if isinstance(content, str) and trigger in content:
return True
return False

return MockScenario(
Expand Down Expand Up @@ -230,11 +253,13 @@ def multi_tool_calls(
"""

def trigger_fn(messages: List[Dict]) -> bool:
for msg in reversed(messages):
# Check every user message (a match on any one of them is enough)
# Fix: prevent extra messages inserted by the framework from breaking matching
for msg in messages:
if msg.get("role") == "user":
content = msg.get("content", "")
if isinstance(content, str):
return trigger in content
if isinstance(content, str) and trigger in content:
return True
return False

return MockScenario(
Expand Down Expand Up @@ -273,11 +298,13 @@ def multi_round_tools(
"""

def trigger_fn(messages: List[Dict]) -> bool:
for msg in reversed(messages):
# Check every user message (a match on any one of them is enough)
# Fix: prevent extra messages inserted by the framework from breaking matching
for msg in messages:
if msg.get("role") == "user":
content = msg.get("content", "")
if isinstance(content, str):
return trigger in content
if isinstance(content, str) and trigger in content:
return True
return False

turns = []
8 changes: 6 additions & 2 deletions tests/unittests/integration/test_agentscope.py
@@ -124,9 +124,13 @@ class TestAgentScopeIntegration(AgentScopeTestMixin):

@pytest.fixture
def mock_server(self, monkeypatch: Any, respx_mock: Any) -> MockLLMServer:
"""创建并安装 Mock LLM Server"""
"""创建并安装 Mock LLM Server
关键修复:传入 respx_mock fixture 给 MockLLMServer
- 确保 HTTP mock 在所有环境(本地/CI)中一致生效
"""
server = MockLLMServer(expect_tools=True, validate_tools=False)
server.install(monkeypatch)
server.install(monkeypatch, respx_mock)
server.add_default_scenarios()
return server

8 changes: 6 additions & 2 deletions tests/unittests/integration/test_crewai.py
@@ -123,9 +123,13 @@ class TestCrewAIIntegration(CrewAITestMixin):

@pytest.fixture
def mock_server(self, monkeypatch: Any, respx_mock: Any) -> MockLLMServer:
"""创建并安装 Mock LLM Server"""
"""创建并安装 Mock LLM Server
关键修复:传入 respx_mock fixture 给 MockLLMServer
- 确保 HTTP mock 在所有环境(本地/CI)中一致生效
"""
server = MockLLMServer(expect_tools=True, validate_tools=False)
server.install(monkeypatch)
server.install(monkeypatch, respx_mock)
server.add_default_scenarios()
return server
