From 7d6a8d11f58eba3ce02c77d7ea43f1589b4fdee9 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 11 Mar 2026 20:03:36 -0700 Subject: [PATCH 1/8] make projects and openai models private, fix pylint and mypy --- .../agentserver/core/checkpoints/__init__.py | 2 +- .../core/checkpoints/client/__init__.py | 14 +-- .../core/checkpoints/client/_client.py | 1 + .../azure/ai/agentserver/core/logger.py | 96 ++++++++++++++----- .../ai/agentserver/core/models/__init__.py | 3 +- .../core/models/_create_response.py | 7 +- .../models/{openai => _openai}/__init__.py | 0 .../{projects => _projects}/__init__.py | 0 .../models/{projects => _projects}/_enums.py | 0 .../models/{projects => _projects}/_models.py | 0 .../models/{projects => _projects}/_patch.py | 0 .../_patch_evaluations.py | 0 .../_utils/__init__.py | 0 .../_utils/model_base.py | 0 .../_utils/serialization.py | 0 .../core/server/_response_metadata.py | 2 +- .../azure/ai/agentserver/core/server/base.py | 87 +++++++++-------- .../core/server/common/agent_run_context.py | 16 ++-- .../id_generator/foundry_id_generator.py | 86 +++++++++++++---- .../agentserver/core/tools/client/_client.py | 6 +- .../core/tools/runtime/_catalog.py | 2 +- .../core/tools/runtime/_starlette.py | 3 +- .../ai/agentserver/core/utils/_credential.py | 4 +- .../samples/bilingual_weekend_planner/main.py | 2 +- .../samples/mcp_simple/mcp_simple.py | 2 +- .../custom_mock_agent_test.py | 2 +- .../server/test_response_metadata.py | 2 +- .../tests/unit_tests/test_logger.py | 14 +-- 28 files changed, 229 insertions(+), 122 deletions(-) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{openai => _openai}/__init__.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/__init__.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/_enums.py (100%) rename 
sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/_models.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/_patch.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/_patch_evaluations.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/_utils/__init__.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/_utils/model_base.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/_utils/serialization.py (100%) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py index f9d6ed3d8aa8..0ca387146579 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- """Checkpoint storage module for Azure AI Agent Server.""" -from .client import FoundryCheckpointClient +from .client._client import FoundryCheckpointClient from .client._models import ( CheckpointItem, CheckpointItemId, diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py index 34f30f16c5d9..901cbb3d70a8 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py @@ -3,16 +3,4 @@ # 
--------------------------------------------------------- """Checkpoint client module for Azure AI Agent Server.""" -from ._client import FoundryCheckpointClient -from ._models import ( - CheckpointItem, - CheckpointItemId, - CheckpointSession, -) - -__all__ = [ - "CheckpointItem", - "CheckpointItemId", - "CheckpointSession", - "FoundryCheckpointClient", -] +__path__ = __import__("pkgutil").extend_path(__path__, __name__) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py index f7e178d758b4..fc2f45321968 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py @@ -2,6 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- # pylint: disable=client-method-missing-kwargs,client-accepts-api-version-keyword,missing-client-constructor-parameter-kwargs +# ^^^ azure-sdk pylint rules: internal client not intended as a public Azure SDK client """Asynchronous client for Azure AI Foundry checkpoint storage API.""" from typing import Any, AsyncContextManager, List, Optional diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py index f15e98986470..2b5f39e964b4 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py @@ -1,4 +1,3 @@ -# pylint: disable=broad-exception-caught,dangerous-default-value # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- @@ -12,13 +11,12 @@ from .constants import Constants def _get_default_log_config() -> dict[str, Any]: - """ - Build default log config with level from environment. - + """Build default log config with level from environment. + :return: A dictionary containing logging configuration. - :rtype: dict + :rtype: dict[str, Any] """ - log_level = get_log_level() + log_level = _get_log_level() return { "version": 1, "disable_existing_loggers": False, @@ -40,7 +38,14 @@ def _get_default_log_config() -> dict[str, Any]: } -def get_log_level(): +def _get_log_level() -> str: + """Read log level from the ``AGENT_LOG_LEVEL`` environment variable. + + Falls back to ``"INFO"`` if the variable is unset or contains an invalid value. + + :return: A valid Python logging level name. + :rtype: str + """ log_level = os.getenv(Constants.AGENT_LOG_LEVEL, "INFO").upper() valid_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] if log_level not in valid_levels: @@ -54,7 +59,12 @@ def get_log_level(): APPINSIGHT_CONNSTR_ENV_NAME = "APPLICATIONINSIGHTS_CONNECTION_STRING" -def get_dimensions(): +def _get_dimensions() -> dict[str, str]: + """Collect environment-based dimensions for structured logging. + + :return: A mapping of dimension keys to their runtime values. + :rtype: dict[str, str] + """ env_values = {name: value for name, value in vars(Constants).items() if not name.startswith("_")} res = {"azure.ai.agentserver.version": VERSION} for name, env_name in env_values.items(): @@ -65,11 +75,25 @@ def get_dimensions(): return res -def get_project_endpoint(logger=None): +def get_project_endpoint(logger: Optional[logging.Logger] = None) -> Optional[str]: + """Resolve the project endpoint from environment variables. + + Checks ``AZURE_AI_PROJECT_ENDPOINT`` first, then falls back to deriving + an endpoint from ``AGENT_PROJECT_NAME``. + + :param logger: Optional logger for diagnostic messages. 
+ :type logger: Optional[logging.Logger] + :return: The resolved project endpoint URL, or ``None`` if unavailable. + :rtype: Optional[str] + """ project_endpoint = os.environ.get(Constants.AZURE_AI_PROJECT_ENDPOINT) if project_endpoint: if logger: - logger.info(f"Using project endpoint from {Constants.AZURE_AI_PROJECT_ENDPOINT}: {project_endpoint}") + logger.info( + "Using project endpoint from %s: %s", + Constants.AZURE_AI_PROJECT_ENDPOINT, + project_endpoint, + ) return project_endpoint project_resource_id = os.environ.get(Constants.AGENT_PROJECT_RESOURCE_ID) if project_resource_id: @@ -78,18 +102,32 @@ def get_project_endpoint(logger=None): parts = last_part.split("@") if len(parts) < 2: if logger: - logger.warning(f"Invalid project resource id format: {project_resource_id}") + logger.warning("Invalid project resource id format: %s", project_resource_id) return None account = parts[0] project = parts[1] endpoint = f"https://{account}.services.ai.azure.com/api/projects/{project}" if logger: - logger.info(f"Using project endpoint derived from {Constants.AGENT_PROJECT_RESOURCE_ID}: {endpoint}") + logger.info( + "Using project endpoint derived from %s: %s", + Constants.AGENT_PROJECT_RESOURCE_ID, + endpoint, + ) return endpoint return None -def get_application_insights_connstr(logger=None): +def _get_application_insights_connstr(logger: Optional[logging.Logger] = None) -> Optional[str]: + """Retrieve or derive the Application Insights connection string. + + Looks in the ``APPLICATIONINSIGHTS_CONNECTION_STRING`` environment variable first, + then attempts to fetch it from the project endpoint. + + :param logger: Optional logger for diagnostic messages. + :type logger: Optional[logging.Logger] + :return: The connection string, or ``None`` if unavailable. 
+ :rtype: Optional[str] + """ try: conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) if not conn_str: @@ -101,22 +139,36 @@ def get_application_insights_connstr(logger=None): project_client = AIProjectClient(credential=DefaultAzureCredential(), endpoint=project_endpoint) conn_str = project_client.telemetry.get_application_insights_connection_string() if not conn_str and logger: - logger.info(f"No Application Insights connection found for project: {project_endpoint}") + logger.info( + "No Application Insights connection found for project: %s", + project_endpoint, + ) elif conn_str: os.environ[APPINSIGHT_CONNSTR_ENV_NAME] = conn_str elif logger: logger.info("Application Insights not configured, telemetry export disabled.") return conn_str - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught # bootstrap: many failure modes possible if logger: - logger.warning(f"Failed to get Application Insights connection string, telemetry export disabled: {e}") + logger.warning( + "Failed to get Application Insights connection string, telemetry export disabled: %s", + e, + ) return None class CustomDimensionsFilter(logging.Filter): - def filter(self, record): - # Add custom dimensions to every log record - dimensions = get_dimensions() + """Logging filter that attaches environment dimensions and request context to log records.""" + + def filter(self, record: logging.LogRecord) -> bool: + """Inject custom dimensions into *record* and allow it through. + + :param record: The log record to enrich. + :type record: logging.LogRecord + :return: Always ``True`` so the record is never discarded. 
+ :rtype: bool + """ + dimensions = _get_dimensions() for key, value in dimensions.items(): setattr(record, key, value) cur_request_context = request_context.get() @@ -140,7 +192,7 @@ def configure(log_config: Optional[dict[str, Any]] = None): config.dictConfig(log_config) app_logger = logging.getLogger("azure.ai.agentserver") - application_insights_connection_string = get_application_insights_connstr(logger=app_logger) + application_insights_connection_string = _get_application_insights_connstr(logger=app_logger) enable_application_insights_logger = ( os.environ.get(Constants.ENABLE_APPLICATION_INSIGHTS_LOGGER, "true").lower() == "true" ) @@ -169,10 +221,10 @@ def configure(log_config: Optional[dict[str, Any]] = None): handler.addFilter(custom_filter) # Only add to azure.ai.agentserver namespace to avoid infrastructure logs - app_logger.setLevel(get_log_level()) + app_logger.setLevel(_get_log_level()) app_logger.addHandler(handler) - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught print(f"Failed to configure logging: {e}") diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/__init__.py index d5622ebe7732..b6a1895a3868 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/__init__.py @@ -1,7 +1,8 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +# TypedDict module; __all__ cannot be statically typed because the list is built at runtime. 
from ._create_response import CreateResponse # type: ignore -from .projects import Response, ResponseStreamEvent +from ._projects import Response, ResponseStreamEvent __all__ = ["CreateResponse", "Response", "ResponseStreamEvent"] # type: ignore[var-annotated] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py index 820d54c6cea0..5ec72115734a 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py @@ -1,11 +1,12 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# pylint: disable=no-name-in-module +# pylint: disable=no-name-in-module # openai re-exports are dynamically generated from typing import Optional -from .openai import response_create_params # type: ignore -from . import projects as _azure_ai_projects_models +# ResponseCreateParamsBase is a TypedDict — mypy cannot verify total=False on mixed bases. +from ._openai import response_create_params # type: ignore +from . 
import _projects as _azure_ai_projects_models class CreateResponse(response_create_params.ResponseCreateParamsBase, total=False): # type: ignore agent: Optional[_azure_ai_projects_models.AgentReference] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/openai/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_openai/__init__.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/openai/__init__.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_openai/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/__init__.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/__init__.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_enums.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_enums.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_enums.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_enums.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_models.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_models.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_models.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_models.py diff --git 
a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_patch.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_patch.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_patch.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_patch.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_patch_evaluations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_patch_evaluations.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_patch_evaluations.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_patch_evaluations.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/__init__.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/__init__.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/model_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/model_base.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/model_base.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/model_base.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/serialization.py 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/serialization.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/serialization.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/serialization.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py index 352dfdc9d27b..9b13cfedd636 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py @@ -9,7 +9,7 @@ from ..application._metadata import get_current_app from ..models import Response as OpenAIResponse, ResponseStreamEvent -from ..models.projects import ( +from ..models._projects import ( ResponseCompletedEvent, ResponseCreatedEvent, ResponseInProgressEvent, diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 7a9f488227a7..994d9045dd38 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -1,9 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=broad-exception-caught,unused-argument,logging-fstring-interpolation,too-many-statements,too-many-return-statements -# mypy: ignore-errors -import asyncio # pylint: disable=C4763 +import asyncio # pylint: disable=C4763 # azure-sdk: async-client-bad-name (false positive on module) import contextlib import inspect import json @@ -13,6 +11,7 @@ from typing import Any, AsyncGenerator, Generator, Optional, Union import uvicorn +from openai import AsyncOpenAI from opentelemetry import context as otel_context, trace from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from starlette.applications import Starlette @@ -40,7 +39,11 @@ from .common.agent_run_context import AgentRunContext from ..constants import Constants from ..logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger, get_project_endpoint, request_context -from ..models import Response as OpenAIResponse, ResponseStreamEvent, projects as project_models +from ..models import ( + Response as OpenAIResponse, + ResponseStreamEvent, + _projects as project_models +) from ..tools import UserInfoContextMiddleware, create_tool_runtime from ..utils._credential import AsyncTokenCredentialAdapter @@ -53,19 +56,19 @@ def __init__(self, app: ASGIApp, agent: Optional['FoundryCBAgent'] = None): super().__init__(app) self.agent = agent - async def dispatch(self, request: Request, call_next): + async def dispatch(self, request: Request, call_next): # type: ignore[override] if request.url.path in ("/runs", "/responses"): try: self.set_request_id_to_context_var(request) payload = await request.json() - except Exception as e: - logger.error(f"Invalid JSON payload: {e}") + except Exception as e: # pylint: disable=broad-exception-caught # middleware catch-all for bad payload + logger.error("Invalid JSON payload: %s", e) return JSONResponse({"error": f"Invalid JSON payload: {e}"}, status_code=400) try: 
request.state.agent_run_context = AgentRunContext(payload) self.set_run_context_to_context_var(request.state.agent_run_context) - except Exception as e: - logger.error(f"Context build failed: {e}.", exc_info=True) + except Exception as e: # pylint: disable=broad-exception-caught # middleware catch-all for context build + logger.error("Context build failed: %s.", e, exc_info=True) return JSONResponse({"error": f"Context build failed: {e}"}, status_code=500) return await call_next(request) @@ -99,7 +102,8 @@ def set_run_context_to_context_var(self, run_context): class FoundryCBAgent: - def __init__(self, + def __init__( # pylint: disable=too-many-statements # Starlette app setup requires sequential route/middleware wiring + self, credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None, project_endpoint: Optional[str] = None) -> None: self.credentials = AsyncTokenCredentialAdapter(credentials) if credentials else AsyncDefaultTokenCredential() @@ -129,9 +133,9 @@ async def runs_endpoint(request): ex = None resp = await self.agent_run(context) - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught # top-level agent_run catch-all # TODO: extract status code from exception - logger.error(f"Error processing CreateResponse request: {e}", exc_info=True) + logger.error("Error processing CreateResponse request: %s", e, exc_info=True) ex = e if not context.stream: @@ -172,7 +176,7 @@ async def gen_async(ex): if self._should_store(context): logger.debug("Storing output to conversation.") await self._save_output_events_to_conversation(context, output_events) - except Exception as e: # noqa: BLE001 + except Exception as e: # noqa: BLE001 # pylint: disable=broad-exception-caught logger.error("Error in async generator: %s", e, exc_info=True) ex = e finally: @@ -207,12 +211,12 @@ async def readiness_endpoint(request): ] @contextlib.asynccontextmanager - async def _lifespan(app): + async def _lifespan(app): # pylint: 
disable=unused-argument import logging # Log server started successfully port = getattr(self, '_port', 'unknown') - logger.info(f"FoundryCBAgent server started successfully on port {port}") + logger.info("FoundryCBAgent server started successfully on port %s", port) # Attach App Insights handler to uvicorn loggers for handler in logger.handlers: @@ -234,9 +238,9 @@ async def _lifespan(app): allow_methods=["*"], allow_headers=["*"], ) - self.app.add_middleware(AgentRunContextMiddleware, agent=self) + self.app.add_middleware(AgentRunContextMiddleware, agent=self) # type: ignore[arg-type] - self.tracer = None + self.tracer: trace.Tracer = trace.get_tracer(__name__) def _should_store(self, context: AgentRunContext) -> bool: """Determine whether conversation artifacts should be persisted. @@ -246,7 +250,7 @@ def _should_store(self, context: AgentRunContext) -> bool: :return: ``True`` when storage is requested and the conversation is scoped to a project. :rtype: bool """ - return context.request.get("store", False) and context.conversation_id and self._project_endpoint + return bool(context.request.get("store", False) and context.conversation_id and self._project_endpoint) def _items_are_equal(self, item1: dict, item2: dict) -> bool: """Compare two conversation items for equality based on type and content. @@ -274,7 +278,7 @@ def _items_are_equal(self, item1: dict, item2: dict) -> bool: return text1 == text2 return content1 == content2 - async def _create_openai_client(self) -> "AsyncOpenAI": + async def _create_openai_client(self) -> AsyncOpenAI: """Create an AsyncOpenAI client for conversation operations. :return: Configured AsyncOpenAI client scoped to the Foundry project endpoint. 
@@ -303,7 +307,7 @@ async def _save_input_to_conversation(self, context: AgentRunContext) -> None: try: conversation_id = context.conversation_id input_items = context.request.get("input", []) - if not input_items: + if not input_items or not conversation_id: return # Handle string input as a single item @@ -349,19 +353,22 @@ async def _save_input_to_conversation(self, context: AgentRunContext) -> None: all_match = False break if all_match: - logger.debug(f"All {n} input items already exist in " + - f"conversation {conversation_id}, skipping save") + logger.debug( + "All %d input items already exist in conversation %s, skipping save", + n, + conversation_id, + ) return - except Exception as e: - logger.debug(f"Could not check for duplicates: {e}") + except Exception as e: # pylint: disable=broad-exception-caught # best-effort duplicate check + logger.debug("Could not check for duplicates: %s", e) await openai_client.conversations.items.create( conversation_id=conversation_id, items=items_to_save, ) - logger.debug(f"Saved {len(items_to_save)} input items to conversation {conversation_id}") - except Exception as e: - logger.warning(f"Failed to save input items to conversation: {e}", exc_info=True) + logger.debug("Saved %d input items to conversation %s", len(items_to_save), conversation_id) + except Exception as e: # pylint: disable=broad-exception-caught # best-effort conversation persistence + logger.warning("Failed to save input items to conversation: %s", e, exc_info=True) async def _save_output_to_conversation( self, context: AgentRunContext, response: project_models.Response) -> None: @@ -396,9 +403,9 @@ async def _save_output_to_conversation( conversation_id=conversation_id, items=items_to_save, ) - logger.debug(f"Saved {len(items_to_save)} output items to conversation {conversation_id}") - except Exception as e: - logger.warning(f"Failed to save output items to conversation: {e}", exc_info=True) + logger.debug("Saved %d output items to conversation %s", 
len(items_to_save), conversation_id) + except Exception as e: # pylint: disable=broad-exception-caught # best-effort conversation persistence + logger.warning("Failed to save output items to conversation: %s", e, exc_info=True) async def _save_output_events_to_conversation(self, context: AgentRunContext, events: list) -> None: """Persist streaming output events for later retrieval. @@ -433,9 +440,9 @@ async def _save_output_events_to_conversation(self, context: AgentRunContext, ev conversation_id=conversation_id, items=items_to_save, ) - logger.debug(f"Saved {len(items_to_save)} output items to conversation {conversation_id}") - except Exception as e: - logger.warning(f"Failed to save output items to conversation: {e}", exc_info=True) + logger.debug("Saved %d output items to conversation %s", len(items_to_save), conversation_id) + except Exception as e: # pylint: disable=broad-exception-caught # best-effort conversation persistence + logger.warning("Failed to save output items to conversation: %s", e, exc_info=True) @abstractmethod async def agent_run( @@ -557,10 +564,10 @@ async def respond_with_oauth_consent_astream(self, context, error) -> AsyncGener }) yield project_models.ResponseCompletedEvent(sequence_number=sequence_number, response=response) - async def agent_liveness(self, request) -> Union[Response, dict]: + async def agent_liveness(self, request) -> Union[Response, dict]: # pylint: disable=unused-argument return Response(status_code=200) - async def agent_readiness(self, request) -> Union[Response, dict]: + async def agent_readiness(self, request) -> Union[Response, dict]: # pylint: disable=unused-argument return {"status": "ready"} async def run_async( @@ -577,7 +584,7 @@ async def run_async( config = uvicorn.Config(self.app, host="0.0.0.0", port=port, loop="asyncio") server = uvicorn.Server(config) self._port = port - logger.info(f"Starting FoundryCBAgent server async on port {port}") + logger.info("Starting FoundryCBAgent server async on port %s", 
port) await server.serve() def run(self, port: int = int(os.environ.get("DEFAULT_AD_PORT", 8088))) -> None: @@ -593,7 +600,7 @@ def run(self, port: int = int(os.environ.get("DEFAULT_AD_PORT", 8088))) -> None: """ self.init_tracing() self._port = port - logger.info(f"Starting FoundryCBAgent server on port {port}") + logger.info("Starting FoundryCBAgent server on port %s", port) uvicorn.run(self.app, host="0.0.0.0", port=port) def init_tracing(self): @@ -618,7 +625,9 @@ def get_trace_attributes(self): "service.name": "azure.ai.agentserver", } - def init_tracing_internal(self, exporter_endpoint=None, app_insights_conn_str=None): + def init_tracing_internal( # pylint: disable=unused-argument # base class hook, params used by subclasses + self, exporter_endpoint=None, app_insights_conn_str=None + ): pass def setup_application_insights_exporter(self, connection_string, provider): @@ -638,7 +647,7 @@ def setup_otlp_exporter(self, endpoint, provider): exporter_instance = OTLPSpanExporter(endpoint=endpoint) processor = BatchSpanProcessor(exporter_instance) provider.add_span_processor(processor) - logger.info(f"Tracing setup with OTLP exporter: {endpoint}") + logger.info("Tracing setup with OTLP exporter: %s", endpoint) def create_response_headers(self) -> dict[str, str]: headers = {} diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py index 87c32926bde4..174685f652fe 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py @@ -7,7 +7,7 @@ from .id_generator.id_generator import IdGenerator from ...logger import get_logger from ...models import CreateResponse -from ...models.projects import AgentId, AgentReference, ResponseConversation1 +from 
...models._projects import AgentId, AgentReference, ResponseConversation1 logger = get_logger() @@ -48,10 +48,10 @@ def conversation_id(self) -> Optional[str]: def stream(self) -> bool: return self._stream - def get_agent_id_object(self) -> AgentId: + def get_agent_id_object(self) -> Optional[AgentId]: agent = self.request.get("agent") if not agent: - return None # type: ignore + return None return AgentId( { "type": agent.type, @@ -60,9 +60,9 @@ def get_agent_id_object(self) -> AgentId: } ) - def get_conversation_object(self) -> ResponseConversation1: + def get_conversation_object(self) -> Optional[ResponseConversation1]: if not self._conversation_id: - return None # type: ignore + return None return ResponseConversation1(id=self._conversation_id) @@ -75,11 +75,11 @@ def _deserialize_create_response(payload: dict) -> CreateResponse: tools = payload.get("tools") if tools: - _deserialized["tools"] = [tool for tool in tools] # pylint: disable=unnecessary-comprehension + _deserialized["tools"] = list(tools) return _deserialized -def _deserialize_agent_reference(payload: dict) -> AgentReference: +def _deserialize_agent_reference(payload: dict) -> Optional[AgentReference]: if not payload: - return None # type: ignore + return None return AgentReference(**payload) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py index 01ac72289e4e..4d9cc741ec81 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py @@ -1,4 +1,3 @@ -# pylint: disable=docstring-missing-return,docstring-missing-param,docstring-missing-rtype # --------------------------------------------------------- # 
Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- @@ -26,6 +25,13 @@ class FoundryIdGenerator(IdGenerator): """ def __init__(self, response_id: Optional[str], conversation_id: Optional[str]): + """Initialize the ID generator. + + :param response_id: An existing response ID, or ``None`` to generate one. + :type response_id: Optional[str] + :param conversation_id: An existing conversation ID, or ``None``. + :type conversation_id: Optional[str] + """ self.response_id = response_id or self._new_id("resp") self.conversation_id = conversation_id partition_source = self.conversation_id or self.response_id @@ -36,6 +42,13 @@ def __init__(self, response_id: Optional[str], conversation_id: Optional[str]): @classmethod def from_request(cls, payload: dict) -> "FoundryIdGenerator": + """Create a generator from an incoming request payload. + + :param payload: The raw request payload dictionary. + :type payload: dict + :return: A configured :class:`FoundryIdGenerator` instance. + :rtype: FoundryIdGenerator + """ response_id = payload.get("metadata", {}).get("response_id", None) conv_id_raw = payload.get("conversation", None) if isinstance(conv_id_raw, str): @@ -47,6 +60,13 @@ def from_request(cls, payload: dict) -> "FoundryIdGenerator": return cls(response_id, conv_id) def generate(self, category: Optional[str] = None) -> str: + """Generate a new unique ID for the given category. + + :param category: Optional prefix category (e.g. ``"msg"``, ``"func"``). Defaults to ``"id"``. + :type category: Optional[str] + :return: The generated unique identifier string. + :rtype: str + """ prefix = "id" if not category else category return self._new_id(prefix, partition_key=self._partition_id) @@ -63,12 +83,29 @@ def _new_id( partition_key: Optional[str] = None, partition_key_hint: str = "", ) -> str: - """ - Generates a new ID. 
- - Format matches the C# logic: - f"{prefix}{delimiter}{infix}{partitionKey}{entropy}" - (i.e., exactly one delimiter after prefix; no delimiter between entropy and partition key) + """Generate a new ID matching the C# FoundryIdGenerator format. + + Format: ``"{prefix}{delimiter}{infix}{partitionKey}{entropy}"`` + + :param prefix: The ID prefix (e.g. ``"resp"``, ``"msg"``). + :type prefix: str + :param string_length: Length of the random entropy portion. + :type string_length: int + :param partition_key_length: Length of the partition key. + :type partition_key_length: int + :param infix: Optional infix inserted between delimiter and partition key. + :type infix: Optional[str] + :param watermark: Optional alphanumeric watermark inserted mid-entropy. + :type watermark: str + :param delimiter: Delimiter between prefix and the rest of the ID. + :type delimiter: str + :param partition_key: Explicit partition key; if ``None``, derived or generated. + :type partition_key: Optional[str] + :param partition_key_hint: ID string to extract a partition key from. + :type partition_key_hint: str + :return: The generated ID string. + :rtype: str + :raises ValueError: If the watermark contains non-alphanumeric characters. """ entropy = FoundryIdGenerator._secure_entropy(string_length) @@ -96,10 +133,16 @@ def _new_id( @staticmethod def _secure_entropy(string_length: int) -> str: - """ - Generates a secure random alphanumeric string of exactly `string_length`. - Re-tries whole generation until the filtered base64 string is exactly the desired length, - matching the C# behavior. + """Generate a cryptographically secure alphanumeric string. + + Uses :func:`os.urandom` and base64 encoding, filtering to alphanumeric + characters and retrying until the exact length is reached. + + :param string_length: Desired length of the output string. + :type string_length: int + :return: A random alphanumeric string of exactly *string_length* characters. 
+ :rtype: str + :raises ValueError: If *string_length* is less than 1. """ if string_length < 1: raise ValueError("Must be greater than or equal to 1") @@ -120,11 +163,22 @@ def _extract_partition_id( partition_key_length: int = 18, delimiter: str = "_", ) -> str: - """ - Extracts partition key from an existing ID. - - Expected shape (per C# logic): "_" - We take the last `partition_key_length` characters from the *second* segment. + """Extract the partition key from an existing ID. + + Expected shape: ``"_"``. + Returns the first *partition_key_length* characters of the second segment. + + :param id_str: The ID string to extract from. + :type id_str: str + :param string_length: Expected entropy length used for validation. + :type string_length: int + :param partition_key_length: Number of characters to extract as partition key. + :type partition_key_length: int + :param delimiter: The delimiter separating ID segments. + :type delimiter: str + :return: The extracted partition key. + :rtype: str + :raises ValueError: If the ID format is invalid. """ if not id_str: raise ValueError("Id cannot be null or empty") diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py index 12b647d7adc7..0efcf1c6f20b 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -import asyncio # pylint: disable=C4763 +import asyncio # pylint: disable=C4763 # azure-sdk: async-client-bad-name import itertools from collections import defaultdict from typing import ( @@ -37,7 +37,7 @@ from .._exceptions import ToolInvocationError -class FoundryToolClient(AsyncContextManager["FoundryToolClient"]): # pylint: disable=C4748 +class FoundryToolClient(AsyncContextManager["FoundryToolClient"]): # pylint: disable=C4748 # azure-sdk: client-paging-methods-use-list """Asynchronous client for aggregating tools from Azure AI MCP and Tools APIs. This client provides access to tools from both MCP (Model Context Protocol) servers @@ -55,7 +55,7 @@ class FoundryToolClient(AsyncContextManager["FoundryToolClient"]): # pylint: di :type api_version: str or None """ - def __init__( # pylint: disable=C4718 + def __init__( # pylint: disable=C4718 # azure-sdk: client-method-name-no-double-underscore self, endpoint: str, credential: "AsyncTokenCredential", diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py index 2d50089fef8f..c75532f0d3e4 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -import asyncio # pylint: disable=C4763 +import asyncio # pylint: disable=C4763 # azure-sdk: async-client-bad-name from abc import ABC, abstractmethod from typing import Any, Awaitable, Collection, List, Mapping, MutableMapping, Optional, Union diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py index 80b25d78b20e..9604124cde9b 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py @@ -40,8 +40,9 @@ def install(cls, :type user_resolver: Optional[Callable[[Request], Awaitable[Optional[UserInfo]]]] """ + user_info_var : _UserContextType = user_context or ContextVarUserProvider.default_user_info_context app.add_middleware(UserInfoContextMiddleware, # type: ignore[arg-type] - user_info_var=user_context or ContextVarUserProvider.default_user_info_context, + user_info_var=user_info_var, user_resolver=user_resolver or cls._default_user_resolver) @staticmethod diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py index 398a8c46fd5d..0b6600de7d6a 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- from __future__ import annotations -import asyncio # pylint: disable=C4763 +import asyncio # pylint: disable=C4763 # azure-sdk: async-client-bad-name import inspect from types import TracebackType from typing import Any, Type, cast @@ -12,7 +12,7 @@ from 
azure.core.credentials_async import AsyncTokenCredential -async def _to_thread(func, *args, **kwargs): # pylint: disable=C4743 +async def _to_thread(func, *args, **kwargs): # pylint: disable=C4743 # azure-sdk: client-method-should-not-use-static-method """Compatibility wrapper for asyncio.to_thread (Python 3.8+). :param func: The function to run in a thread. diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py b/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py index 099d8dc45181..2cf533eb33fb 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py +++ b/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py @@ -33,7 +33,7 @@ CreateResponse, Response as OpenAIResponse, ) -from azure.ai.agentserver.core.models.projects import ( +from azure.ai.agentserver.core.models._projects import ( ItemContentOutputText, ResponseCompletedEvent, ResponseCreatedEvent, diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/mcp_simple/mcp_simple.py b/sdk/agentserver/azure-ai-agentserver-core/samples/mcp_simple/mcp_simple.py index af9812826941..3831f702564d 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/samples/mcp_simple/mcp_simple.py +++ b/sdk/agentserver/azure-ai-agentserver-core/samples/mcp_simple/mcp_simple.py @@ -29,7 +29,7 @@ from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent from azure.ai.agentserver.core.models import Response as OpenAIResponse -from azure.ai.agentserver.core.models.projects import ( +from azure.ai.agentserver.core.models._projects import ( ItemContentOutputText, MCPListToolsItemResource, MCPListToolsTool, diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py b/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py index f6d2c08bb0b9..f4298d21d39c 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py +++ b/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py @@ -3,7 +3,7 @@ from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent from azure.ai.agentserver.core.models import Response as OpenAIResponse -from azure.ai.agentserver.core.models.projects import ( +from azure.ai.agentserver.core.models._projects import ( ItemContentOutputText, ResponseCompletedEvent, ResponseCreatedEvent, diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py index c2e3bea53287..f01c4977cfb0 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py @@ -10,7 +10,7 @@ set_current_app, ) from azure.ai.agentserver.core.models import Response as OpenAIResponse -from azure.ai.agentserver.core.models.projects import ResponseCreatedEvent, ResponseErrorEvent +from azure.ai.agentserver.core.models._projects import ResponseCreatedEvent, ResponseErrorEvent from azure.ai.agentserver.core.server._response_metadata import ( METADATA_KEY, attach_foundry_metadata_to_response, diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/test_logger.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/test_logger.py index 771ca0a0eb0c..35639ea8ae2c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/test_logger.py +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/test_logger.py @@ -78,31 +78,31 @@ def test_logs_warning_for_invalid_resource_id(self): @pytest.mark.unit class TestGetApplicationInsightsConnstr: - """Tests for get_application_insights_connstr function.""" + """Tests for _get_application_insights_connstr 
function.""" def test_returns_connstr_from_env_var(self): """Test that connection string is returned from environment variable.""" - from azure.ai.agentserver.core.logger import get_application_insights_connstr + from azure.ai.agentserver.core.logger import _get_application_insights_connstr with patch.dict(os.environ, {"APPLICATIONINSIGHTS_CONNECTION_STRING": "InstrumentationKey=test123"}, clear=False): - result = get_application_insights_connstr() + result = _get_application_insights_connstr() assert result == "InstrumentationKey=test123" def test_returns_none_when_no_connstr_and_no_project(self): """Test that None is returned when no connection string and no project endpoint.""" - from azure.ai.agentserver.core.logger import get_application_insights_connstr + from azure.ai.agentserver.core.logger import _get_application_insights_connstr with patch.dict(os.environ, { "APPLICATIONINSIGHTS_CONNECTION_STRING": "", "AZURE_AI_PROJECT_ENDPOINT": "", "AGENT_PROJECT_RESOURCE_ID": "", }, clear=False): - result = get_application_insights_connstr() + result = _get_application_insights_connstr() assert result is None or result == "" def test_logs_debug_when_not_configured(self): """Test that debug message is logged when not configured.""" - from azure.ai.agentserver.core.logger import get_application_insights_connstr + from azure.ai.agentserver.core.logger import _get_application_insights_connstr mock_logger = MagicMock() @@ -111,7 +111,7 @@ def test_logs_debug_when_not_configured(self): "AZURE_AI_PROJECT_ENDPOINT": "", "AGENT_PROJECT_RESOURCE_ID": "", }, clear=False): - result = get_application_insights_connstr(logger=mock_logger) + result = _get_application_insights_connstr(logger=mock_logger) # Debug should be called when not configured, or result should be None assert mock_logger.debug.called or result is None or result == "" From f724547333cb2004bc8f0153bdf87e8a374d9550 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 11 Mar 2026 20:04:14 -0700 Subject: [PATCH 2/8] 
updated doc rst files --- ...ver.core.checkpoints.client.operations.rst | 7 + ...ai.agentserver.core.checkpoints.client.rst | 15 ++ .../azure.ai.agentserver.core.checkpoints.rst | 15 ++ ...zure.ai.agentserver.core.models.openai.rst | 8 -- ...re.ai.agentserver.core.models.projects.rst | 8 -- .../doc/azure.ai.agentserver.core.models.rst | 9 -- .../doc/azure.ai.agentserver.core.rst | 1 + ...azure.ai.agentserver.core.tools.client.rst | 2 +- .../doc/azure.ai.agentserver.core.tools.rst | 3 +- .../azure-ai-agentserver-core/doc/index.md | 136 ++++++++++++++++++ 10 files changed, 176 insertions(+), 28 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.operations.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.rst delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.openai.rst delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.projects.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/index.md diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.operations.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.operations.rst new file mode 100644 index 000000000000..3076ff010e1b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.operations.rst @@ -0,0 +1,7 @@ +azure.ai.agentserver.core.checkpoints.client.operations package +=============================================================== + +.. 
automodule:: azure.ai.agentserver.core.checkpoints.client.operations + :inherited-members: + :members: + :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.rst new file mode 100644 index 000000000000..cd6763335948 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.rst @@ -0,0 +1,15 @@ +azure.ai.agentserver.core.checkpoints.client package +==================================================== + +.. automodule:: azure.ai.agentserver.core.checkpoints.client + :inherited-members: + :members: + :undoc-members: + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + azure.ai.agentserver.core.checkpoints.client.operations diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.rst new file mode 100644 index 000000000000..99b9dfa2ef50 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.rst @@ -0,0 +1,15 @@ +azure.ai.agentserver.core.checkpoints package +============================================= + +.. automodule:: azure.ai.agentserver.core.checkpoints + :inherited-members: + :members: + :undoc-members: + +Subpackages +----------- + +.. 
toctree:: + :maxdepth: 4 + + azure.ai.agentserver.core.checkpoints.client diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.openai.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.openai.rst deleted file mode 100644 index dd1cce6eecca..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.openai.rst +++ /dev/null @@ -1,8 +0,0 @@ -azure.ai.agentserver.core.models.openai package -=============================================== - -.. automodule:: azure.ai.agentserver.core.models.openai - :inherited-members: - :members: - :undoc-members: - :ignore-module-all: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.projects.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.projects.rst deleted file mode 100644 index 38e0be4f331b..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.projects.rst +++ /dev/null @@ -1,8 +0,0 @@ -azure.ai.agentserver.core.models.projects package -================================================= - -.. automodule:: azure.ai.agentserver.core.models.projects - :inherited-members: - :members: - :undoc-members: - :ignore-module-all: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst index 008b280c64de..120b01cccc5a 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst @@ -6,12 +6,3 @@ azure.ai.agentserver.core.models package :members: :undoc-members: :ignore-module-all: - -Subpackages ------------ - -.. 
toctree:: - :maxdepth: 4 - - azure.ai.agentserver.core.models.openai - azure.ai.agentserver.core.models.projects diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst index b8f1dadf3a73..60005f2b04cc 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst @@ -13,6 +13,7 @@ Subpackages :maxdepth: 4 azure.ai.agentserver.core.application + azure.ai.agentserver.core.checkpoints azure.ai.agentserver.core.models azure.ai.agentserver.core.server azure.ai.agentserver.core.tools diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst index 8182914f69f9..14304731f5e7 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst @@ -2,6 +2,6 @@ azure.ai.agentserver.core.tools.client package ============================================== .. automodule:: azure.ai.agentserver.core.tools.client - :inherited-members: + :inherited-members: BaseModel :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst index c112ec2beabd..6b798851fed2 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst @@ -2,10 +2,9 @@ azure.ai.agentserver.core.tools package ======================================= .. 
automodule:: azure.ai.agentserver.core.tools - :inherited-members: + :inherited-members: BaseModel :members: :undoc-members: - :exclude-members: BaseModel,model_json_schema Subpackages ----------- diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/index.md b/sdk/agentserver/azure-ai-agentserver-core/doc/index.md new file mode 100644 index 000000000000..bfce99e40065 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/index.md @@ -0,0 +1,136 @@ +# Azure AI Agent Server Adapter for Python + + +## Getting started + +```bash +pip install azure-ai-agentserver-core +``` + +## Key concepts + +This is the core package for Azure AI Agent Server. It hosts your agent as a container on the cloud. + +You can talk to your agent using the azure-ai-projects SDK. + + +## Examples + +If your agent is not built using a supported framework such as LangGraph and Agent-framework, you can still make it compatible with Microsoft AI Foundry by manually implementing the predefined interface. + +```python +import datetime + +from azure.ai.agentserver.core import FoundryCBAgent +from azure.ai.agentserver.core.models import ( + CreateResponse, + Response as OpenAIResponse, +) +from azure.ai.agentserver.core.models._projects import ( + ItemContentOutputText, + ResponsesAssistantMessageItemResource, + ResponseTextDeltaEvent, + ResponseTextDoneEvent, +) + + +def stream_events(text: str): + assembled = "" + for i, token in enumerate(text.split(" ")): + piece = token if i == len(text.split(" ")) - 1 else token + " " + assembled += piece + yield ResponseTextDeltaEvent(delta=piece) + # Done with text + yield ResponseTextDoneEvent(text=assembled) + + +async def agent_run(request_body: CreateResponse): + agent = request_body.agent + print(f"agent:{agent}") + + if request_body.stream: + return stream_events("I am mock agent with no intelligence in stream mode.") + + # Build assistant output content + output_content = [ + ItemContentOutputText( + text="I am mock agent with no
intelligence.", + annotations=[], + ) + ] + + response = OpenAIResponse( + metadata={}, + temperature=0.0, + top_p=0.0, + user="me", + id="id", + created_at=datetime.datetime.now(), + output=[ + ResponsesAssistantMessageItemResource( + status="completed", + content=output_content, + ) + ], + ) + return response + + +my_agent = FoundryCBAgent() +my_agent.agent_run = agent_run + +if __name__ == "__main__": + my_agent.run() + +``` + +## Troubleshooting + +First run your agent with azure-ai-agentserver-core locally. + +If it works locally but fails on the cloud, check your logs in the Application Insights resource connected to your Azure AI Foundry project. + + +### Reporting issues + +To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-agentserver-core" in the title or content. + + +## Next steps + +Please visit the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/agentserver/azure-ai-agentserver-core/samples) folder. It contains several examples showing how to build your agent with azure-ai-agentserver. + + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, +see the Code of Conduct FAQ or contact opencode@microsoft.com with any +additional questions or comments.
+ + +## Indices and tables + +- {ref}`genindex` +- {ref}`modindex` +- {ref}`search` + +```{toctree} +:caption: Developer Documentation +:glob: true +:maxdepth: 5 + +azure.ai.agentserver.core.rst + +``` + From ce4b89ecabe37d43a71bcd424e2dbf9fa72078aa Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 11 Mar 2026 20:05:04 -0700 Subject: [PATCH 3/8] updated pyproject --- sdk/agentserver/azure-ai-agentserver-core/README.md | 2 +- sdk/agentserver/azure-ai-agentserver-core/pyproject.toml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/README.md b/sdk/agentserver/azure-ai-agentserver-core/README.md index ff60cf460196..cc420579e5fe 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/README.md +++ b/sdk/agentserver/azure-ai-agentserver-core/README.md @@ -26,7 +26,7 @@ from azure.ai.agentserver.core.models import ( CreateResponse, Response as OpenAIResponse, ) -from azure.ai.agentserver.core.models.projects import ( +from azure.ai.agentserver.core.models._projects import ( ItemContentOutputText, ResponsesAssistantMessageItemResource, ResponseTextDeltaEvent, diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index 3829a7356919..dca59dffef43 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -75,5 +75,5 @@ combine-as-imports = true breaking = false # incompatible python version pyright = false verifytypes = false -latestdependency = false -dependencies = false \ No newline at end of file +# latestdependency = false +# dependencies = false \ No newline at end of file From 9c9ea78d34279d825da4296510a1bcd0d49a5ee1 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 11 Mar 2026 21:30:53 -0700 Subject: [PATCH 4/8] fix -core related changes in af --- .../azure/ai/agentserver/agentframework/_agent_framework.py | 2 +- 
.../models/agent_framework_output_non_streaming_converter.py | 2 +- .../models/agent_framework_output_streaming_converter.py | 2 +- .../ai/agentserver/agentframework/models/agent_id_generator.py | 2 +- .../persistence/_foundry_checkpoint_repository.py | 2 +- .../agentframework/persistence/_foundry_checkpoint_storage.py | 2 +- .../tests/unit_tests/mocks/mock_checkpoint_client.py | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py index 837d1675ba05..92d243d46764 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py @@ -18,7 +18,7 @@ Response as OpenAIResponse, ResponseStreamEvent, ) -from azure.ai.agentserver.core.models.projects import ResponseErrorEvent, ResponseFailedEvent +from azure.ai.agentserver.core.models._projects import ResponseErrorEvent, ResponseFailedEvent from azure.ai.agentserver.core.tools import OAuthConsentRequiredError # pylint: disable=import-error from .models.agent_framework_output_streaming_converter import AgentFrameworkOutputStreamingConverter diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py index aac9b24c445c..86094a617991 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py +++ 
b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py @@ -12,7 +12,7 @@ from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import Response as OpenAIResponse -from azure.ai.agentserver.core.models.projects import ( +from azure.ai.agentserver.core.models._projects import ( ItemContentOutputText, ResponsesAssistantMessageItemResource, ) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 22f144a4b7a4..02d11958cf24 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -16,7 +16,7 @@ Response as OpenAIResponse, ResponseStreamEvent, ) -from azure.ai.agentserver.core.models.projects import ( +from azure.ai.agentserver.core.models._projects import ( FunctionToolCallItemResource, FunctionToolCallOutputItemResource, ItemContentOutputText, diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py index abd2dd2c02ef..ca429683a1be 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py @@ -8,7 +8,7 @@ from typing import 
Optional from azure.ai.agentserver.core import AgentRunContext -from azure.ai.agentserver.core.models import projects +from azure.ai.agentserver.core.models import _projects as projects def generate_agent_id(context: AgentRunContext) -> Optional[projects.AgentId]: diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py index acd89f1baef0..3e84763f4e68 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py @@ -8,7 +8,7 @@ from agent_framework import CheckpointStorage -from azure.ai.agentserver.core.checkpoints.client import ( +from azure.ai.agentserver.core.checkpoints import ( CheckpointSession, FoundryCheckpointClient, ) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py index 833c3647149a..63ba16dcd1ed 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py @@ -9,7 +9,7 @@ from agent_framework import WorkflowCheckpoint -from azure.ai.agentserver.core.checkpoints.client import ( +from azure.ai.agentserver.core.checkpoints import ( CheckpointItem, CheckpointItemId, FoundryCheckpointClient, diff --git 
a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py index ffc1e2fcc4c1..50a4458856ec 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py @@ -5,7 +5,7 @@ from typing import Any, Dict, List, Optional -from azure.ai.agentserver.core.checkpoints.client import ( +from azure.ai.agentserver.core.checkpoints import ( CheckpointItem, CheckpointItemId, CheckpointSession, From d6aded2f3ddb8f8ef02287294e4a8055467f2a3c Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 11 Mar 2026 22:16:45 -0700 Subject: [PATCH 5/8] fix langgraph unit tests --- .../langgraph/checkpointer/_foundry_checkpoint_saver.py | 2 +- .../agentserver/langgraph/models/human_in_the_loop_helper.py | 4 ++-- .../langgraph/models/human_in_the_loop_json_helper.py | 4 ++-- .../models/response_api_non_stream_response_converter.py | 2 +- .../langgraph/models/response_api_request_converter.py | 4 +++- .../models/response_event_generators/item_content_helpers.py | 2 +- .../models/response_event_generators/item_resource_helpers.py | 2 +- .../response_content_part_event_generator.py | 2 +- .../response_event_generators/response_event_generator.py | 2 +- .../response_function_call_argument_event_generator.py | 2 +- .../response_output_item_event_generator.py | 2 +- .../response_output_text_event_generator.py | 2 +- .../response_stream_event_generator.py | 2 +- .../tests/unit_tests/mocks/mock_checkpoint_client.py | 2 +- .../tests/unit_tests/test_langgraph_request_converter.py | 2 +- 15 files changed, 19 insertions(+), 17 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py 
b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py index 999b87dc8fe8..82215640d60c 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py @@ -21,7 +21,7 @@ from azure.core.credentials import TokenCredential from azure.core.credentials_async import AsyncTokenCredential -from azure.ai.agentserver.core.checkpoints.client import ( +from azure.ai.agentserver.core.checkpoints import ( CheckpointItem, CheckpointItemId, CheckpointSession, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py index 9f3c693800a1..bc8890339dca 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py @@ -11,8 +11,8 @@ ) from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.models.openai import (ResponseInputItemParam, ResponseInputParam) +from azure.ai.agentserver.core.models import _projects as project_models +from azure.ai.agentserver.core.models._openai import (ResponseInputItemParam, ResponseInputParam) from .._context import LanggraphRunContext INTERRUPT_NODE_NAME = "__interrupt__" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py index 
e1396ba90577..e3bf49a5ad4b 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py @@ -11,8 +11,8 @@ ) from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.models.openai import ( +from azure.ai.agentserver.core.models import _projects as project_models +from azure.ai.agentserver.core.models._openai import ( ResponseInputItemParam, ) from azure.ai.agentserver.core.server.common.constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py index 7ec8bdf14f1a..cb3ecefb60f5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py @@ -11,7 +11,7 @@ from langchain_core.messages import AnyMessage from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import _projects as project_models from .human_in_the_loop_helper import ( HumanInTheLoopHelper, INTERRUPT_NODE_NAME, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py index 486545ef078a..f718695dbc1e 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py @@ -17,7 +17,9 @@ from langchain_core.messages.tool import ToolCall from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.models import CreateResponse, openai as openai_models, projects as project_models +from azure.ai.agentserver.core.models import ( + CreateResponse, _openai as openai_models, _projects as project_models +) logger = get_logger() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_content_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_content_helpers.py index ae169d866ee5..807d759cb151 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_content_helpers.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_content_helpers.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import _projects as project_models class ItemContentHelper: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py index 8502ec13069b..9f5f1ac6d55b 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py @@ -6,7 +6,7 @@ from langgraph.types import Interrupt -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import _projects as project_models from ..human_in_the_loop_helper import HumanInTheLoopHelper from ..utils import extract_function_call diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py index 4823de4411ae..8b989bc20da3 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py @@ -7,7 +7,7 @@ from langchain_core import messages as langgraph_messages -from azure.ai.agentserver.core.models import projects as project_models +from 
azure.ai.agentserver.core.models import _projects as project_models from . import item_content_helpers from .response_event_generator import ResponseEventGenerator, StreamEventState diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py index cd161b99d152..843cf18fe2dc 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py @@ -7,7 +7,7 @@ from langchain_core.messages import AnyMessage -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import _projects as project_models from ..._context import LanggraphRunContext diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py index 56c3bde68632..3a556fb70e7b 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py @@ -9,7 +9,7 @@ from langchain_core.messages import AnyMessage from langgraph.types import Interrupt -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import 
_projects as project_models from . import ResponseEventGenerator, StreamEventState from ..human_in_the_loop_helper import HumanInTheLoopHelper from ..utils import extract_function_call diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py index 14eee3c571b2..181952077875 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py @@ -9,7 +9,7 @@ from langchain_core.messages import AnyMessage from langgraph.types import Interrupt -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import _projects as project_models from azure.ai.agentserver.core.server.common.id_generator.id_generator import IdGenerator from . 
import ResponseEventGenerator, StreamEventState, item_resource_helpers from .response_content_part_event_generator import ResponseContentPartEventGenerator diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py index 8d0e62650a2d..dc64f37733bc 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py @@ -5,7 +5,7 @@ # mypy: disable-error-code="return-value,assignment" from typing import List -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import _projects as project_models from .response_event_generator import ( ResponseEventGenerator, StreamEventState, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py index f19629eba94b..896e35829d98 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py @@ -8,7 +8,7 @@ from langchain_core import messages as langgraph_messages -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import 
_projects as project_models from .response_event_generator import ( ResponseEventGenerator, StreamEventState, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py index ffc1e2fcc4c1..50a4458856ec 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py @@ -5,7 +5,7 @@ from typing import Any, Dict, List, Optional -from azure.ai.agentserver.core.checkpoints.client import ( +from azure.ai.agentserver.core.checkpoints import ( CheckpointItem, CheckpointItemId, CheckpointSession, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py index b1894f7350d5..056780cc9903 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py @@ -2,7 +2,7 @@ from langchain_core import messages as langgraph_messages from azure.ai.agentserver.core import models -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import _projects as project_models from azure.ai.agentserver.langgraph.models.response_api_request_converter import ResponseAPIMessageRequestConverter From b4ce92df0aa7b88b44ccf736a6d3af0fc70deed9 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 12 Mar 2026 14:49:02 -0700 Subject: [PATCH 6/8] update to private python files --- .../models/human_in_the_loop_helper.py | 2 +- .../unit_tests/test_human_in_the_loop_helper.py | 2 +- .../azure/ai/agentserver/core/__init__.py | 4 ++-- 
.../ai/agentserver/core/server/{base.py => _base.py} | 2 +- .../{agent_run_context.py => _agent_run_context.py} | 4 ++-- .../server/common/{constants.py => _constants.py} | 0 ...ndry_id_generator.py => _foundry_id_generator.py} | 2 +- .../{id_generator.py => _id_generator.py} | 0 ...i.agentserver.core.server.common.id_generator.rst | 12 ++++++------ .../doc/azure.ai.agentserver.core.server.common.rst | 12 ++++++------ .../doc/azure.ai.agentserver.core.server.rst | 6 +++--- .../server/common/test_foundry_id_generator.py | 2 +- .../server/test_conversation_persistence.py | 2 +- .../azure/ai/agentserver/langgraph/langgraph.py | 2 +- .../models/human_in_the_loop_json_helper.py | 8 ++++---- .../response_output_item_event_generator.py | 2 +- .../tests/unit_tests/tools/conftest.py | 2 +- .../tests/unit_tests/tools/test_agent_integration.py | 2 +- 18 files changed, 33 insertions(+), 33 deletions(-) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/{base.py => _base.py} (99%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/{agent_run_context.py => _agent_run_context.py} (95%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/{constants.py => _constants.py} (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/{foundry_id_generator.py => _foundry_id_generator.py} (99%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/{id_generator.py => _id_generator.py} (100%) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py index 89bac4ca76c5..0b054dfe9e08 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py @@ -8,7 +8,7 @@ from agent_framework import Content, Message, WorkflowCheckpoint, WorkflowEvent from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.server.common.constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME +from azure.ai.agentserver.core.server.common._constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME logger = get_logger() diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_human_in_the_loop_helper.py index 4b59922cce9a..c26e716ed4dc 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_human_in_the_loop_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_human_in_the_loop_helper.py @@ -2,7 +2,7 @@ import pytest from agent_framework import Content, Message, WorkflowEvent -from azure.ai.agentserver.core.server.common.constants import ( +from azure.ai.agentserver.core.server.common._constants import ( HUMAN_IN_THE_LOOP_FUNCTION_NAME, ) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py index 88a13741bbac..39de11cefe55 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py @@ -5,8 +5,8 @@ from ._version import VERSION from .logger import configure as config_logging -from .server.base import FoundryCBAgent -from .server.common.agent_run_context import AgentRunContext +from .server._base import FoundryCBAgent +from 
.server.common._agent_run_context import AgentRunContext from .server._context import AgentServerContext config_logging() diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_base.py similarity index 99% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_base.py index 994d9045dd38..e1ce45188c34 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_base.py @@ -36,7 +36,7 @@ build_foundry_agents_metadata_headers, try_attach_foundry_metadata_to_event, ) -from .common.agent_run_context import AgentRunContext +from .common._agent_run_context import AgentRunContext from ..constants import Constants from ..logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger, get_project_endpoint, request_context from ..models import ( diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/_agent_run_context.py similarity index 95% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/_agent_run_context.py index 174685f652fe..750e4209d9e5 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/_agent_run_context.py @@ -3,8 +3,8 @@ # --------------------------------------------------------- from typing import Optional -from .id_generator.foundry_id_generator import FoundryIdGenerator 
-from .id_generator.id_generator import IdGenerator +from .id_generator._foundry_id_generator import FoundryIdGenerator +from .id_generator._id_generator import IdGenerator from ...logger import get_logger from ...models import CreateResponse from ...models._projects import AgentId, AgentReference, ResponseConversation1 diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/constants.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/_constants.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/constants.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/_constants.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/_foundry_id_generator.py similarity index 99% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/_foundry_id_generator.py index 4d9cc741ec81..0c0f91cbb36d 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/_foundry_id_generator.py @@ -8,7 +8,7 @@ import re from typing import Optional -from .id_generator import IdGenerator +from ._id_generator import IdGenerator _WATERMARK_RE = re.compile(r"^[A-Za-z0-9]*$") diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/id_generator.py 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/_id_generator.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/id_generator.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/_id_generator.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.id_generator.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.id_generator.rst index cf935aa1d1ed..68f155131f5c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.id_generator.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.id_generator.rst @@ -9,18 +9,18 @@ azure.ai.agentserver.core.server.common.id\_generator package Submodules ---------- -azure.ai.agentserver.core.server.common.id\_generator.foundry\_id\_generator module ------------------------------------------------------------------------------------ +azure.ai.agentserver.core.server.common.id\_generator.\_foundry\_id\_generator module +------------------------------------------------------------------------------------ -.. automodule:: azure.ai.agentserver.core.server.common.id_generator.foundry_id_generator +.. automodule:: azure.ai.agentserver.core.server.common.id_generator._foundry_id_generator :inherited-members: :members: :undoc-members: -azure.ai.agentserver.core.server.common.id\_generator.id\_generator module --------------------------------------------------------------------------- +azure.ai.agentserver.core.server.common.id\_generator.\_id\_generator module +--------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.core.server.common.id_generator.id_generator +.. 
automodule:: azure.ai.agentserver.core.server.common.id_generator._id_generator :inherited-members: :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst index 8fb5b52e4465..fd02e856642c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst @@ -17,18 +17,18 @@ Subpackages Submodules ---------- -azure.ai.agentserver.core.server.common.agent\_run\_context module ------------------------------------------------------------------- +azure.ai.agentserver.core.server.common.\_agent\_run\_context module +------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.core.server.common.agent_run_context +.. automodule:: azure.ai.agentserver.core.server.common._agent_run_context :inherited-members: :members: :undoc-members: -azure.ai.agentserver.core.server.common.constants module --------------------------------------------------------- +azure.ai.agentserver.core.server.common.\_constants module +---------------------------------------------------------- -.. automodule:: azure.ai.agentserver.core.server.common.constants +.. 
automodule:: azure.ai.agentserver.core.server.common._constants :inherited-members: :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.rst index b82fa765b839..8363ec9e32d8 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.rst @@ -17,10 +17,10 @@ Subpackages Submodules ---------- -azure.ai.agentserver.core.server.base module --------------------------------------------- +azure.ai.agentserver.core.server.\_base module +---------------------------------------------- -.. automodule:: azure.ai.agentserver.core.server.base +.. automodule:: azure.ai.agentserver.core.server._base :inherited-members: :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py index a46f45f7c739..fb6dc8858c86 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -from azure.ai.agentserver.core.server.common.id_generator.foundry_id_generator import FoundryIdGenerator +from azure.ai.agentserver.core.server.common.id_generator._foundry_id_generator import FoundryIdGenerator def test_conversation_id_none_uses_response_partition(): diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_conversation_persistence.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_conversation_persistence.py index a38871197cba..00137abecf15 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_conversation_persistence.py +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_conversation_persistence.py @@ -42,7 +42,7 @@ async def __anext__(self): def create_mock_agent(): """Create a mock FoundryCBAgent without calling __init__.""" - from azure.ai.agentserver.core.server.base import FoundryCBAgent + from azure.ai.agentserver.core.server._base import FoundryCBAgent # Create instance without calling __init__ agent = object.__new__(FoundryCBAgent) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 37f7080ba81f..14b268056a6a 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -11,7 +11,7 @@ from azure.ai.agentserver.core.constants import Constants from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.server.base import FoundryCBAgent +from azure.ai.agentserver.core.server._base import FoundryCBAgent from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.tools import OAuthConsentRequiredError # 
pylint:disable=import-error,no-name-in-module from ._context import LanggraphRunContext diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py index e3bf49a5ad4b..eed2be358235 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py @@ -15,7 +15,7 @@ from azure.ai.agentserver.core.models._openai import ( ResponseInputItemParam, ) -from azure.ai.agentserver.core.server.common.constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME +from azure.ai.agentserver.core.server.common._constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME from .human_in_the_loop_helper import HumanInTheLoopHelper @@ -43,7 +43,7 @@ def convert_interrupt(self, interrupt_info: Interrupt) -> Optional[project_model status="in_progress", ) - def interrupt_to_function_call(self, interrupt: Interrupt) : + def interrupt_to_function_call(self, interrupt: Interrupt) -> tuple[Optional[str], Optional[str], Optional[str]]: """ Convert an Interrupt to a function call tuple. @@ -51,7 +51,7 @@ def interrupt_to_function_call(self, interrupt: Interrupt) : :type interrupt: Interrupt :return: A tuple of (name, call_id, argument). 
- :rtype: tuple[str | None, str | None, str | None] + :rtype: tuple[Optional[str], Optional[str], Optional[str]] """ if isinstance(interrupt.value, str): arguments = interrupt.value @@ -63,7 +63,7 @@ def interrupt_to_function_call(self, interrupt: Interrupt) : arguments = str(interrupt.value) return HUMAN_IN_THE_LOOP_FUNCTION_NAME, interrupt.id, arguments - def convert_input_item_to_command(self, input_item: ResponseInputItemParam) -> Union[Command, None]: + def convert_input_item_to_command(self, input_item: ResponseInputItemParam) -> Optional[Command]: output_str = input_item.get("output") if not isinstance(output_str, str): logger.error("Invalid output type in function call output: %s", input_item) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py index 181952077875..5facf7206bda 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py @@ -10,7 +10,7 @@ from langgraph.types import Interrupt from azure.ai.agentserver.core.models import _projects as project_models -from azure.ai.agentserver.core.server.common.id_generator.id_generator import IdGenerator +from azure.ai.agentserver.core.server.common.id_generator._id_generator import IdGenerator from . 
import ResponseEventGenerator, StreamEventState, item_resource_helpers from .response_content_part_event_generator import ResponseContentPartEventGenerator from .response_function_call_argument_event_generator import ResponseFunctionCallArgumentEventGenerator diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/conftest.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/conftest.py index 7efc298559c1..f4e962d9d4c5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/conftest.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/conftest.py @@ -21,7 +21,7 @@ SchemaProperty, SchemaType, ) -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext +from azure.ai.agentserver.core.server.common._agent_run_context import AgentRunContext from azure.ai.agentserver.langgraph._context import LanggraphRunContext from azure.ai.agentserver.langgraph.tools._context import FoundryToolContext from azure.ai.agentserver.langgraph.tools._resolver import ResolvedTools diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py index eea917e54fd4..5f9115db66cf 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py @@ -23,7 +23,7 @@ SchemaProperty, SchemaType, ) -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext +from azure.ai.agentserver.core.server.common._agent_run_context import AgentRunContext from azure.ai.agentserver.langgraph._context import LanggraphRunContext from azure.ai.agentserver.langgraph.tools import use_foundry_tools from azure.ai.agentserver.langgraph.tools._context import FoundryToolContext From 
57768dccd54518747dc29706ab2b27fea1e5955d Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 12 Mar 2026 16:53:55 -0700 Subject: [PATCH 7/8] update private file path --- .../ai/agentserver/langgraph/__init__.py | 4 +- .../langgraph/{langgraph.py => _langgraph.py} | 6 +-- .../agentserver/langgraph/models/__init__.py | 17 +++++++ ...helper.py => _human_in_the_loop_helper.py} | 0 ...r.py => _human_in_the_loop_json_helper.py} | 2 +- ...onverter.py => _response_api_converter.py} | 0 ....py => _response_api_default_converter.py} | 14 +++--- ...onse_api_non_stream_response_converter.py} | 4 +- ....py => _response_api_request_converter.py} | 0 ...response_api_stream_response_converter.py} | 2 +- .../langgraph/models/{utils.py => _utils.py} | 0 .../response_event_generators/__init__.py | 4 +- ...nt_helpers.py => _item_content_helpers.py} | 0 ...e_helpers.py => _item_resource_helpers.py} | 4 +- ..._response_content_part_event_generator.py} | 6 +-- ...erator.py => _response_event_generator.py} | 0 ...function_call_argument_event_generator.py} | 4 +- ... _response_output_item_event_generator.py} | 8 ++-- ... 
_response_output_text_event_generator.py} | 2 +- ...py => _response_stream_event_generator.py} | 4 +- ...graph.models.response_event_generators.rst | 48 +++++++++---------- .../azure.ai.agentserver.langgraph.models.rst | 48 +++++++++---------- .../doc/azure.ai.agentserver.langgraph.rst | 6 +-- .../samples/custom_state/main.py | 3 +- .../test_conversation_id_optional.py | 2 +- .../unit_tests/test_historical_items_fetch.py | 12 ++--- .../test_langgraph_request_converter.py | 2 +- 27 files changed, 109 insertions(+), 93 deletions(-) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/{langgraph.py => _langgraph.py} (97%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{human_in_the_loop_helper.py => _human_in_the_loop_helper.py} (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{human_in_the_loop_json_helper.py => _human_in_the_loop_json_helper.py} (98%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{response_api_converter.py => _response_api_converter.py} (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{response_api_default_converter.py => _response_api_default_converter.py} (97%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{response_api_non_stream_response_converter.py => _response_api_non_stream_response_converter.py} (99%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{response_api_request_converter.py => _response_api_request_converter.py} (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{response_api_stream_response_converter.py => _response_api_stream_response_converter.py} (98%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{utils.py => _utils.py} (100%) 
rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{item_content_helpers.py => _item_content_helpers.py} (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{item_resource_helpers.py => _item_resource_helpers.py} (97%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{response_content_part_event_generator.py => _response_content_part_event_generator.py} (96%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{response_event_generator.py => _response_event_generator.py} (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{response_function_call_argument_event_generator.py => _response_function_call_argument_event_generator.py} (98%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{response_output_item_event_generator.py => _response_output_item_event_generator.py} (95%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{response_output_text_event_generator.py => _response_output_text_event_generator.py} (99%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{response_stream_event_generator.py => _response_stream_event_generator.py} (97%) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index 18e4d6bfbdc2..fc9aaf43880c 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ 
b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -10,11 +10,11 @@ from ._context import LanggraphRunContext from ._version import VERSION -from .langgraph import LangGraphAdapter +from ._langgraph import LangGraphAdapter if TYPE_CHECKING: # pragma: no cover from langgraph.graph.state import CompiledStateGraph - from .models.response_api_converter import ResponseAPIConverter + from .models._response_api_converter import ResponseAPIConverter from azure.core.credentials_async import AsyncTokenCredential from azure.core.credentials import TokenCredential diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py similarity index 97% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py index 14b268056a6a..c05185692972 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py @@ -15,9 +15,9 @@ from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.tools import OAuthConsentRequiredError # pylint:disable=import-error,no-name-in-module from ._context import LanggraphRunContext -from .models.response_api_converter import GraphInputArguments, ResponseAPIConverter -from .models.response_api_default_converter import ResponseAPIDefaultConverter -from .models.utils import is_state_schema_valid +from .models._response_api_converter import GraphInputArguments, ResponseAPIConverter +from .models._response_api_default_converter import ResponseAPIDefaultConverter +from .models._utils import is_state_schema_valid from .tools._context import FoundryToolContext from .tools._resolver 
import FoundryLangChainToolResolver diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py index d540fd20468c..c4a276af508a 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py @@ -1,3 +1,20 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- + +from ._response_api_converter import GraphInputArguments, ResponseAPIConverter +from ._response_api_default_converter import ResponseAPIDefaultConverter +from ._response_api_request_converter import ( + ResponseAPIMessageRequestConverter, + ResponseAPIRequestConverter, + convert_item_resource_to_message, +) + +__all__ = [ + "ResponseAPIConverter", + "GraphInputArguments", + "ResponseAPIDefaultConverter", + "ResponseAPIRequestConverter", + "ResponseAPIMessageRequestConverter", + "convert_item_resource_to_message", +] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_helper.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_helper.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py similarity index 98% 
rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py index eed2be358235..ea32d6683232 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py @@ -17,7 +17,7 @@ ) from azure.ai.agentserver.core.server.common._constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME -from .human_in_the_loop_helper import HumanInTheLoopHelper +from ._human_in_the_loop_helper import HumanInTheLoopHelper logger = get_logger() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_converter.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_converter.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py similarity index 97% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py index b64afc900f9d..448374038f86 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py @@ -15,17 +15,17 @@ from azure.ai.agentserver.core.logger import get_logger, get_project_endpoint from azure.ai.agentserver.core.models import Response, ResponseStreamEvent -from .human_in_the_loop_helper import HumanInTheLoopHelper -from .human_in_the_loop_json_helper import HumanInTheLoopJsonHelper -from .response_api_converter import GraphInputArguments, ResponseAPIConverter -from .response_api_non_stream_response_converter import (ResponseAPIMessagesNonStreamResponseConverter, - ResponseAPINonStreamResponseConverter) -from .response_api_request_converter import ( +from ._human_in_the_loop_helper import HumanInTheLoopHelper +from ._human_in_the_loop_json_helper import HumanInTheLoopJsonHelper +from ._response_api_converter import GraphInputArguments, ResponseAPIConverter +from ._response_api_non_stream_response_converter import (ResponseAPIMessagesNonStreamResponseConverter, + ResponseAPINonStreamResponseConverter) +from ._response_api_request_converter import ( ResponseAPIMessageRequestConverter, ResponseAPIRequestConverter, convert_item_resource_to_message, ) -from .response_api_stream_response_converter import ResponseAPIMessagesStreamResponseConverter +from ._response_api_stream_response_converter import ResponseAPIMessagesStreamResponseConverter from .._context import LanggraphRunContext logger = get_logger() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py similarity index 99% rename from 
sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py index cb3ecefb60f5..5e688e42664c 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py @@ -12,11 +12,11 @@ from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import _projects as project_models -from .human_in_the_loop_helper import ( +from ._human_in_the_loop_helper import ( HumanInTheLoopHelper, INTERRUPT_NODE_NAME, ) -from .utils import extract_function_call +from ._utils import extract_function_call from .._context import LanggraphRunContext logger = get_logger() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py similarity index 98% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py rename to 
sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py index 02f79c589a96..0c6f772dece5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py @@ -10,7 +10,7 @@ from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import ResponseStreamEvent -from .human_in_the_loop_helper import HumanInTheLoopHelper +from ._human_in_the_loop_helper import HumanInTheLoopHelper from .response_event_generators import ( ResponseEventGenerator, ResponseStreamEventGenerator, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/utils.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_utils.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/utils.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_utils.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/__init__.py index 7b9f0362e4ba..83c9590a3b58 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/__init__.py @@ -1,8 +1,8 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -from .response_event_generator import ResponseEventGenerator, StreamEventState -from .response_stream_event_generator import ResponseStreamEventGenerator +from ._response_event_generator import ResponseEventGenerator, StreamEventState +from ._response_stream_event_generator import ResponseStreamEventGenerator __all__ = [ "ResponseEventGenerator", diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_content_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_content_helpers.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_content_helpers.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_content_helpers.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py similarity index 97% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py index 9f5f1ac6d55b..7c97d7adee0a 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py @@ -8,8 +8,8 @@ from azure.ai.agentserver.core.models 
import _projects as project_models -from ..human_in_the_loop_helper import HumanInTheLoopHelper -from ..utils import extract_function_call +from .._human_in_the_loop_helper import HumanInTheLoopHelper +from .._utils import extract_function_call class ItemResourceHelper: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py similarity index 96% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py index 8b989bc20da3..213a9f78e348 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py @@ -9,9 +9,9 @@ from azure.ai.agentserver.core.models import _projects as project_models -from . import item_content_helpers -from .response_event_generator import ResponseEventGenerator, StreamEventState -from .response_output_text_event_generator import ResponseOutputTextEventGenerator +from . 
import _item_content_helpers as item_content_helpers +from ._response_event_generator import ResponseEventGenerator, StreamEventState +from ._response_output_text_event_generator import ResponseOutputTextEventGenerator class ResponseContentPartEventGenerator(ResponseEventGenerator): diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py similarity index 98% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py index 3a556fb70e7b..0f2e93fe0ef1 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py @@ -11,8 
+11,8 @@ from azure.ai.agentserver.core.models import _projects as project_models from . import ResponseEventGenerator, StreamEventState -from ..human_in_the_loop_helper import HumanInTheLoopHelper -from ..utils import extract_function_call +from .._human_in_the_loop_helper import HumanInTheLoopHelper +from .._utils import extract_function_call from ..._context import LanggraphRunContext diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py similarity index 95% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py index 5facf7206bda..4c98b4b1a18d 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py @@ -11,10 +11,10 @@ from azure.ai.agentserver.core.models import _projects as project_models from azure.ai.agentserver.core.server.common.id_generator._id_generator import IdGenerator -from . import ResponseEventGenerator, StreamEventState, item_resource_helpers -from .response_content_part_event_generator import ResponseContentPartEventGenerator -from .response_function_call_argument_event_generator import ResponseFunctionCallArgumentEventGenerator -from ..human_in_the_loop_helper import HumanInTheLoopHelper +from . 
import ResponseEventGenerator, StreamEventState, _item_resource_helpers as item_resource_helpers +from ._response_content_part_event_generator import ResponseContentPartEventGenerator +from ._response_function_call_argument_event_generator import ResponseFunctionCallArgumentEventGenerator +from .._human_in_the_loop_helper import HumanInTheLoopHelper from ..._context import LanggraphRunContext diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py similarity index 99% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py index dc64f37733bc..21772abd1ea7 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py @@ -6,7 +6,7 @@ from typing import List from azure.ai.agentserver.core.models import _projects as project_models -from .response_event_generator import ( +from ._response_event_generator import ( ResponseEventGenerator, StreamEventState, ) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py similarity index 97% rename from 
sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py index 896e35829d98..bb361639601d 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py @@ -9,11 +9,11 @@ from langchain_core import messages as langgraph_messages from azure.ai.agentserver.core.models import _projects as project_models -from .response_event_generator import ( +from ._response_event_generator import ( ResponseEventGenerator, StreamEventState, ) -from .response_output_item_event_generator import ResponseOutputItemEventGenerator +from ._response_output_item_event_generator import ResponseOutputItemEventGenerator from ..._context import LanggraphRunContext diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst index af7cc69bd859..bf53fee7e8fa 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst @@ -9,66 +9,66 @@ azure.ai.agentserver.langgraph.models.response\_event\_generators package Submodules ---------- -azure.ai.agentserver.langgraph.models.response\_event\_generators.item\_content\_helpers module ------------------------------------------------------------------------------------------------ 
+azure.ai.agentserver.langgraph.models.response\_event\_generators._item\_content\_helpers module +------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.item_content_helpers +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._item_content_helpers :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_event\_generators.item\_resource\_helpers module ------------------------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models.response\_event\_generators._item\_resource\_helpers module +-------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.item_resource_helpers +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._item_resource_helpers :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_content\_part\_event\_generator module ------------------------------------------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models.response\_event\_generators._response\_content\_part\_event\_generator module +-------------------------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_content_part_event_generator +.. 
automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._response_content_part_event_generator :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_event\_generator module ---------------------------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models.response\_event\_generators._response\_event\_generator module +----------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_event_generator +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._response_event_generator :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_function\_call\_argument\_event\_generator module ------------------------------------------------------------------------------------------------------------------------------ +azure.ai.agentserver.langgraph.models.response\_event\_generators._response\_function\_call\_argument\_event\_generator module +------------------------------------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_function_call_argument_event_generator +.. 
automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._response_function_call_argument_event_generator :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_output\_item\_event\_generator module ------------------------------------------------------------------------------------------------------------------ +azure.ai.agentserver.langgraph.models.response\_event\_generators._response\_output\_item\_event\_generator module +------------------------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_output_item_event_generator +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._response_output_item_event_generator :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_output\_text\_event\_generator module ------------------------------------------------------------------------------------------------------------------ +azure.ai.agentserver.langgraph.models.response\_event\_generators._response\_output\_text\_event\_generator module +------------------------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_output_text_event_generator +.. 
automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._response_output_text_event_generator :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_stream\_event\_generator module ------------------------------------------------------------------------------------------------------------ +azure.ai.agentserver.langgraph.models.response\_event\_generators._response\_stream\_event\_generator module +------------------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_stream_event_generator +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._response_stream_event_generator :inherited-members: :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst index aba857c3b64a..e5d72b41cd42 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst @@ -17,66 +17,66 @@ Subpackages Submodules ---------- -azure.ai.agentserver.langgraph.models.human\_in\_the\_loop\_helper module -------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models._human\_in\_the\_loop\_helper module +------------------------------------------------------------------------------ -.. automodule:: azure.ai.agentserver.langgraph.models.human_in_the_loop_helper +.. 
automodule:: azure.ai.agentserver.langgraph.models._human_in_the_loop_helper :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.human\_in\_the\_loop\_json\_helper module -------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models._human\_in\_the\_loop\_json\_helper module +--------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.human_in_the_loop_json_helper +.. automodule:: azure.ai.agentserver.langgraph.models._human_in_the_loop_json_helper :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_api\_converter module ---------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models._response\_api\_converter module +----------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_api_converter +.. automodule:: azure.ai.agentserver.langgraph.models._response_api_converter :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_api\_default\_converter module ------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models._response\_api\_default\_converter module +-------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_api_default_converter +.. 
automodule:: azure.ai.agentserver.langgraph.models._response_api_default_converter :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_api\_non\_stream\_response\_converter module --------------------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models._response\_api\_non\_stream\_response\_converter module +---------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_api_non_stream_response_converter +.. automodule:: azure.ai.agentserver.langgraph.models._response_api_non_stream_response_converter :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_api\_request\_converter module ------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models._response\_api\_request\_converter module +-------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_api_request_converter +.. automodule:: azure.ai.agentserver.langgraph.models._response_api_request_converter :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_api\_stream\_response\_converter module ---------------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models._response\_api\_stream\_response\_converter module +----------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_api_stream_response_converter +.. 
automodule:: azure.ai.agentserver.langgraph.models._response_api_stream_response_converter :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.utils module --------------------------------------------------- +azure.ai.agentserver.langgraph.models._utils module +--------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.utils +.. automodule:: azure.ai.agentserver.langgraph.models._utils :inherited-members: :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst index deefeb67fa96..1ca9d33660a9 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst @@ -18,10 +18,10 @@ Subpackages Submodules ---------- -azure.ai.agentserver.langgraph.langgraph module ------------------------------------------------ +azure.ai.agentserver.langgraph._langgraph module +------------------------------------------------ -.. automodule:: azure.ai.agentserver.langgraph.langgraph +.. 
automodule:: azure.ai.agentserver.langgraph._langgraph :inherited-members: :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py index ec45dceccfc8..5a5b22993a19 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py @@ -12,8 +12,7 @@ from azure.ai.agentserver.core.models import Response, ResponseStreamEvent from azure.ai.agentserver.langgraph import LanggraphRunContext, from_langgraph -from azure.ai.agentserver.langgraph.models.response_api_default_converter import ResponseAPIDefaultConverter -from azure.ai.agentserver.langgraph.models.response_api_request_converter import ResponseAPIRequestConverter +from azure.ai.agentserver.langgraph.models import ResponseAPIDefaultConverter, ResponseAPIRequestConverter load_dotenv() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py index 6f8ff173e6c3..727b501b9b3d 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py @@ -5,7 +5,7 @@ import pytest -from azure.ai.agentserver.langgraph.models.response_api_default_converter import ResponseAPIDefaultConverter +from azure.ai.agentserver.langgraph.models import ResponseAPIDefaultConverter class DummyGraphState: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_historical_items_fetch.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_historical_items_fetch.py index bc7cfbee93e4..2b28ae158c4f 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_historical_items_fetch.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_historical_items_fetch.py @@ -4,7 +4,7 @@ from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage -from azure.ai.agentserver.langgraph.models.response_api_request_converter import convert_item_resource_to_message +from azure.ai.agentserver.langgraph.models import convert_item_resource_to_message @pytest.mark.unit @@ -113,7 +113,7 @@ def test_convert_message_with_empty_content_list(self): def _create_converter(): """Helper to create a ResponseAPIDefaultConverter with mocked graph.""" - from azure.ai.agentserver.langgraph.models.response_api_default_converter import ( + from azure.ai.agentserver.langgraph.models import ( ResponseAPIDefaultConverter, ) @@ -122,7 +122,7 @@ def _create_converter(): mock_graph.checkpointer = None with patch( - "azure.ai.agentserver.langgraph.models.utils.is_state_schema_valid", + "azure.ai.agentserver.langgraph.models._utils.is_state_schema_valid", return_value=True, ): return ResponseAPIDefaultConverter(graph=mock_graph) @@ -381,7 +381,7 @@ async def test_fetch_returns_empty_when_no_endpoint(self): converter = _create_converter() with patch( - "azure.ai.agentserver.langgraph.models.response_api_default_converter.get_project_endpoint", + "azure.ai.agentserver.langgraph.models._response_api_default_converter.get_project_endpoint", return_value=None, ): result = await converter._fetch_historical_items("conv_123") @@ -393,7 +393,7 @@ async def test_fetch_returns_empty_on_import_error(self): converter = _create_converter() with patch( - "azure.ai.agentserver.langgraph.models.response_api_default_converter.get_project_endpoint", + "azure.ai.agentserver.langgraph.models._response_api_default_converter.get_project_endpoint", return_value="https://test.endpoint.com", ): with patch.dict("sys.modules", {"openai": None}): @@ -421,7 +421,7 @@ async 
def mock_list(*args, **kwargs): mock_client.conversations.items.list = mock_list with patch( - "azure.ai.agentserver.langgraph.models.response_api_default_converter.get_project_endpoint", + "azure.ai.agentserver.langgraph.models._response_api_default_converter.get_project_endpoint", return_value="https://test.endpoint.com", ): with patch( diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py index 056780cc9903..a68e5be13f13 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py @@ -3,7 +3,7 @@ from azure.ai.agentserver.core import models from azure.ai.agentserver.core.models import _projects as project_models -from azure.ai.agentserver.langgraph.models.response_api_request_converter import ResponseAPIMessageRequestConverter +from azure.ai.agentserver.langgraph.models import ResponseAPIMessageRequestConverter @pytest.mark.unit From b0ea657a972060bf375b840875d36c80123cb6a5 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 12 Mar 2026 20:02:26 -0700 Subject: [PATCH 8/8] fix pylint --- .../ai/agentserver/langgraph/_langgraph.py | 29 ++++----- .../models/_human_in_the_loop_json_helper.py | 6 +- .../models/_response_api_converter.py | 1 - .../models/_response_api_default_converter.py | 64 ++++++++++++------- ...ponse_api_non_stream_response_converter.py | 21 +++--- .../models/_response_api_request_converter.py | 32 +++++----- ..._response_api_stream_response_converter.py | 29 +++++---- .../_item_resource_helpers.py | 19 +++--- .../_response_content_part_event_generator.py | 26 ++++---- .../_response_event_generator.py | 42 +++++++----- ..._function_call_argument_event_generator.py | 23 +++---- .../_response_output_item_event_generator.py | 18 +++--- 
.../_response_output_text_event_generator.py | 22 +++---- .../_response_stream_event_generator.py | 22 ++++--- 14 files changed, 193 insertions(+), 161 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py index c05185692972..6e57f3037cdf 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py @@ -1,11 +1,9 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation,broad-exception-caught,no-member -# mypy: disable-error-code="assignment,arg-type" import os import re -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, List, Optional, cast from langgraph.graph.state import CompiledStateGraph @@ -49,7 +47,7 @@ def __init__( :param converter: custom response converter. 
:type converter: Optional[ResponseAPIConverter] """ - super().__init__(credentials=credentials) # pylint: disable=unexpected-keyword-arg + super().__init__(credentials=credentials) # pylint: disable=unexpected-keyword-arg self._graph = graph self._tool_resolver = FoundryLangChainToolResolver() self.azure_ai_tracer = None @@ -103,8 +101,8 @@ def init_tracing_internal(self, exporter_endpoint=None, app_insights_conn_str=No name=self.get_agent_identifier(), ) logger.info("AzureAIOpenTelemetryTracer initialized successfully.") - except Exception as e: - logger.error(f"Failed to import AzureAIOpenTelemetryTracer, ignore: {e}") + except Exception as error: # pylint: disable=broad-except + logger.error("Failed to initialize AzureAIOpenTelemetryTracer, ignore: %s", error) def setup_otlp_exporter(self, endpoint, provider): endpoint = self.format_otlp_endpoint(endpoint) @@ -129,8 +127,8 @@ async def agent_run_non_stream(self, input_arguments: GraphInputArguments): result = await self._graph.ainvoke(**input_arguments) output = await self.converter.convert_response_non_stream(result, input_arguments["context"]) return output - except Exception as e: - logger.error(f"Error during agent run: {e}", exc_info=True) + except Exception as e: # pylint: disable=broad-except + logger.error("Error during agent run: %s", e, exc_info=True) raise e async def agent_run_astream(self, @@ -145,14 +143,15 @@ async def agent_run_astream(self, :rtype: AsyncGenerator[dict] """ try: - logger.info(f"Starting streaming agent run {input_arguments['context'].agent_run.response_id}") + logger.info("Starting streaming agent run %s", input_arguments["context"].agent_run.response_id) stream = self._graph.astream(**input_arguments) async for output_event in self.converter.convert_response_stream( - stream, - input_arguments["context"]): + stream, + input_arguments["context"], + ): yield output_event - except Exception as e: - logger.error(f"Error during streaming agent run: {e}", exc_info=True) + except 
Exception as e: # pylint: disable=broad-except + logger.error("Error during streaming agent run: %s", e, exc_info=True) raise e def ensure_runnable_config(self, input_arguments: GraphInputArguments, context: LanggraphRunContext): @@ -171,11 +170,11 @@ def ensure_runnable_config(self, input_arguments: GraphInputArguments, context: configurable["thread_id"] = thread_id else: configurable["thread_id"] = f"langgraph-{input_arguments['context'].agent_run.response_id}" - logger.debug(f"Conversation ID not provided, generate one: thread_id={configurable['thread_id']}") + logger.debug("Conversation ID not provided, generate one: thread_id=%s", configurable["thread_id"]) config["configurable"] = configurable context.attach_to_config(config) - callbacks = config.get("callbacks", []) # mypy: ignore-errors + callbacks = cast(List[object], config.get("callbacks") or []) if self.azure_ai_tracer and self.azure_ai_tracer not in callbacks: callbacks.append(self.azure_ai_tracer) config["callbacks"] = callbacks diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py index ea32d6683232..a4e868bee152 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- import json -from typing import Optional, Union +from typing import Optional from langgraph.types import ( Command, @@ -58,8 +58,8 @@ def interrupt_to_function_call(self, interrupt: Interrupt) -> tuple[Optional[str else: try: arguments = json.dumps(interrupt.value) - except Exception as e: # pragma: no cover - fallback # pylint: disable=broad-exception-caught - 
logger.error("Failed to serialize interrupt value to JSON: %s, error: %s", interrupt.value, e) + except (TypeError, ValueError) as error: # pragma: no cover - fallback + logger.error("Failed to serialize interrupt value to JSON: %s, error: %s", interrupt.value, error) arguments = str(interrupt.value) return HUMAN_IN_THE_LOOP_FUNCTION_NAME, interrupt.id, arguments diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_converter.py index 32cbf93a4bfb..caf3c95d5994 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_converter.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# mypy: disable-error-code="call-overload,override" """Base interface for converting between LangGraph internal state and OpenAI-style responses. A ResponseAPIConverter implementation bridges: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py index 448374038f86..d18cde162fd8 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation from __future__ import annotations import time @@ -180,7 +179,7 @@ async def _convert_request_input_with_history( prev_state, context.agent_run.request.get("input") ) if command is not None: - logger.info(f"HITL command detected for conversation {conversation_id}") + logger.info("HITL command detected for conversation %s", conversation_id) return command # Convert current request input @@ -190,7 +189,7 @@ async def _convert_request_input_with_history( # Check if checkpoint exists has_checkpoint = prev_state is not None and prev_state.values is not None and len(prev_state.values) > 0 if has_checkpoint: - logger.info(f"Checkpoint found for conversation {conversation_id}, using existing state") + logger.info("Checkpoint found for conversation %s, using existing state", conversation_id) return current_input # No checkpoint - try to fetch historical items from AIProjectClient @@ -198,11 +197,11 @@ async def _convert_request_input_with_history( logger.debug("No conversation_id provided, skipping historical items fetch") return current_input - logger.info(f"No checkpoint found for conversation {conversation_id}, fetching historical items") + logger.info("No checkpoint found for conversation %s, fetching historical items", conversation_id) historical_messages = await self._fetch_historical_items(conversation_id) if not historical_messages: - logger.info(f"No historical items found for conversation {conversation_id}") + logger.info("No historical items found for conversation %s", conversation_id) return current_input # Merge historical messages with current input, avoiding duplicates @@ -238,7 +237,7 @@ async def _fetch_historical_items(self, conversation_id: str) -> List[AnyMessage from openai import AsyncOpenAI from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider - logger.debug(f"Creating AsyncOpenAI client for endpoint: 
{endpoint}/openai") + logger.debug("Creating AsyncOpenAI client for endpoint: %s/openai", endpoint) credential = DefaultAzureCredential() token_provider = get_bearer_token_provider(credential, "https://ai.azure.com/.default") @@ -252,7 +251,7 @@ async def _fetch_historical_items(self, conversation_id: str) -> List[AnyMessage items.append(item) items.reverse() - logger.info(f"Fetched {len(items)} historical items from conversation {conversation_id}") + logger.info("Fetched %s historical items from conversation %s", len(items), conversation_id) # Convert items to LangGraph messages messages = [] @@ -267,11 +266,20 @@ async def _fetch_historical_items(self, conversation_id: str) -> List[AnyMessage return messages - except ImportError as e: - logger.warning(f"OpenAI or Azure Identity not available, cannot fetch historical items: {e}", exc_info=True) + except ImportError as error: + logger.warning( + "OpenAI or Azure Identity not available, cannot fetch historical items: %s", + error, + exc_info=True, + ) return [] - except Exception as e: # pylint: disable=broad-except - logger.warning(f"Failed to fetch historical items for conversation {conversation_id}: {e}", exc_info=True) + except Exception as error: # pylint: disable=broad-except + logger.warning( + "Failed to fetch historical items for conversation %s: %s", + conversation_id, + error, + exc_info=True, + ) return [] def _merge_messages_without_duplicates( @@ -298,8 +306,10 @@ def _merge_messages_without_duplicates( if not current_messages or not historical_messages: merged = list(historical_messages) + list(current_messages) logger.info( - f"Merged {len(historical_messages)} historical items with {len(current_messages)} " - f"current items for conversation {conversation_id}" + "Merged %s historical items with %s current items for conversation %s", + len(historical_messages), + len(current_messages), + conversation_id, ) return merged @@ -322,31 +332,37 @@ def _merge_messages_without_duplicates( curr_content = 
self._normalize_content(curr_msg.content if hasattr(curr_msg, 'content') else "") logger.debug( - f"Comparing message {i}: historical({hist_type}, '{hist_content}') " - f"vs current({curr_type}, '{curr_content}')" + "Comparing message %s: historical(%s, '%s') vs current(%s, '%s')", + i, + hist_type, + hist_content, + curr_type, + curr_content, ) # Compare type and content if hist_type != curr_type: - logger.debug(f"Message {i} type mismatch: {hist_type} != {curr_type}") + logger.debug("Message %s type mismatch: %s != %s", i, hist_type, curr_type) all_match = False break if hist_content != curr_content: - logger.debug(f"Message {i} content mismatch") + logger.debug("Message %s content mismatch", i) all_match = False break if all_match: # Remove the last N historical messages (they're duplicates) filtered_historical = filtered_historical[:-n] - logger.info(f"Filtered {n} duplicate items from end of historical items") + logger.info("Filtered %s duplicate items from end of historical items", n) # Prepend historical messages to current messages merged = filtered_historical + list(current_messages) logger.info( - f"Merged {len(filtered_historical)} historical items with {len(current_messages)} " - f"current items for conversation {conversation_id}" + "Merged %s historical items with %s current items for conversation %s", + len(filtered_historical), + len(current_messages), + conversation_id, ) return merged @@ -360,12 +376,12 @@ async def _aget_state(self, context: LanggraphRunContext) -> Optional[StateSnaps configurable={"thread_id": thread_id}, ) if self._graph.checkpointer: - logger.debug(f"Checking for existing checkpoint for conversation {thread_id}") + logger.debug("Checking for existing checkpoint for conversation %s", thread_id) state = await self._graph.aget_state(config=config) if state and state.values: - logger.debug(f"Checkpoint state retrieved for conversation {thread_id}") + logger.debug("Checkpoint state retrieved for conversation %s", thread_id) else: - 
logger.debug(f"No checkpoint state found for conversation {thread_id}") + logger.debug("No checkpoint state found for conversation %s", thread_id) return state logger.debug("No checkpointer configured for graph, skipping checkpoint lookup") return None @@ -420,7 +436,7 @@ def _filter_incomplete_tool_calls(self, messages: List[AnyMessage]) -> List[AnyM result.append(msg) if removed_count > 0: - logger.info(f"Filtered {removed_count} messages with incomplete tool call sequences") + logger.info("Filtered %s messages with incomplete tool call sequences", removed_count) return result diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py index 5e688e42664c..9a050a57a9f1 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py @@ -1,11 +1,9 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation,broad-exception-caught,logging-not-lazy -# mypy: disable-error-code="valid-type,call-overload,attr-defined" import copy from abc import ABC, abstractmethod -from typing import Any, Collection, Iterable, List, Union +from typing import Any, Collection, Iterable, List, Optional, Union from langchain_core import messages from langchain_core.messages import AnyMessage @@ -54,7 +52,7 @@ def __init__(self, def convert(self, output: Union[dict[str, Any], Any]) -> list[project_models.ItemResource]: res: list[project_models.ItemResource] = [] if not isinstance(output, list): - logger.error(f"Expected output to be a list, got {type(output)}: {output}") + logger.error("Expected output to be a list, got %s: %s", type(output), output) raise ValueError(f"Invalid output format. Expected a list, got {type(output)}.") for step in output: for node_name, node_output in step.items(): @@ -70,7 +68,7 @@ def _convert_node_output( else: message_arr = node_output.get("messages") if not message_arr or not isinstance(message_arr, Collection): - logger.warning(f"No messages found in node {node_name} output: {node_output}") + logger.warning("No messages found in node %s output: %s", node_name, node_output) return for message in message_arr: @@ -78,10 +76,10 @@ def _convert_node_output( converted = self.convert_output_message(message) if converted: yield converted - except Exception as e: - logger.error(f"Error converting message {message}: {e}") + except (AttributeError, TypeError, ValueError) as error: + logger.error("Error converting message %s: %s", message, error) - def convert_output_message(self, output_message: AnyMessage): # pylint: disable=inconsistent-return-statements + def convert_output_message(self, output_message: AnyMessage) -> Optional[project_models.ItemResource]: # Implement the conversion logic for inner inputs if isinstance(output_message, messages.HumanMessage): 
return project_models.ResponsesUserMessageItemResource( @@ -104,8 +102,8 @@ def convert_output_message(self, output_message: AnyMessage): # pylint: disable # If there are tool calls, we assume there is only ONE function call if len(output_message.tool_calls) > 1: logger.warning( - f"There are {len(output_message.tool_calls)} tool calls found. " - + "Only the first one will be processed." + "There are %s tool calls found. Only the first one will be processed.", + len(output_message.tool_calls), ) tool_call = output_message.tool_calls[0] name, call_id, argument = extract_function_call(tool_call) @@ -129,7 +127,8 @@ def convert_output_message(self, output_message: AnyMessage): # pylint: disable output=output_message.content, id=self.context.agent_run.id_generator.generate_function_output_id(), ) - logger.warning(f"Unsupported message type: {type(output_message)}, {output_message}") + logger.warning("Unsupported message type: %s, %s", type(output_message), output_message) + return None def convert_MessageContent( self, content, role: project_models.ResponsesMessageRole diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py index f718695dbc1e..1cf94210f543 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py @@ -1,11 +1,9 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation -# mypy: ignore-errors from abc import ABC, abstractmethod import json -from typing import Dict, List +from typing import Dict, List, Optional from langchain_core.messages import ( AIMessage, @@ -40,7 +38,7 @@ } -def convert_item_resource_to_message(item: Dict) -> AnyMessage: +def convert_item_resource_to_message(item: Dict) -> Optional[AnyMessage]: """ Convert an ItemResource (from AIProjectClient conversation items) to a LangGraph message. @@ -69,13 +67,11 @@ def convert_item_resource_to_message(item: Dict) -> AnyMessage: # Fallback: try to get any text field text_content = content[0].get("text", "") content = text_content - elif isinstance(content, str): - pass # content is already a string - else: + elif not isinstance(content, str): content = str(content) if content else "" if role not in role_mapping: - logger.warning(f"Unknown role '{role}' in item resource, defaulting to USER") + logger.warning("Unknown role '%s' in item resource, defaulting to USER", role) role = project_models.ResponsesMessageRole.USER return role_mapping[role](content=content) @@ -102,8 +98,8 @@ def convert_item_resource_to_message(item: Dict) -> AnyMessage: output = " ".join(text_parts) return ToolMessage(content=output, tool_call_id=call_id) - logger.warning(f"Unsupported item type '{item_type}' in item resource, skipping") - return None # type: ignore + logger.warning("Unsupported item type '%s' in item resource, skipping", item_type) + return None class ResponseAPIRequestConverter(ABC): @@ -189,17 +185,19 @@ def convert_function_call(self, item: dict) -> AnyMessage: item = openai_models.ResponseFunctionToolCallParam(**item) argument = item.get("arguments", None) args = json.loads(argument) if argument else {} - except json.JSONDecodeError as e: - raise ValueError(f"Invalid JSON in function call arguments: {item}") from e - except Exception as e: - raise ValueError(f"Invalid 
function call item: {item}") from e + except json.JSONDecodeError as error: + raise ValueError(f"Invalid JSON in function call arguments: {item}") from error + except (TypeError, ValueError) as error: + raise ValueError(f"Invalid function call item: {item}") from error return AIMessage(tool_calls=[ToolCall(id=item.get("call_id"), name=item.get("name"), args=args)], content="") def convert_function_call_output(self, item: dict) -> ToolMessage: try: - item = openai_models.response_input_item_param.FunctionCallOutput(**item) # pylint: disable=no-member - except Exception as e: - raise ValueError(f"Invalid function call output item: {item}") from e + item_namespace = getattr(openai_models, "response_input_item_param") + function_call_output = getattr(item_namespace, "FunctionCallOutput") + item = function_call_output(**item) + except (AttributeError, TypeError, ValueError) as error: + raise ValueError(f"Invalid function call output item: {item}") from error output = item.get("output", None) if isinstance(output, str): diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py index 0c6f772dece5..71b4c7fbceaa 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py @@ -1,10 +1,9 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation,C4751 -# mypy: disable-error-code="assignment,valid-type" +# pylint: disable=C4751 from abc import ABC, abstractmethod -from typing import Any, List, Union +from typing import Any, List, Optional, Union from langchain_core.messages import AnyMessage @@ -57,20 +56,20 @@ def __init__(self, context: LanggraphRunContext, *, hitl_helper: HumanInTheLoopH self.hitl_helper = hitl_helper self.stream_state = StreamEventState() - self.current_generator: ResponseEventGenerator = None + self.current_generator: Optional[ResponseEventGenerator] = None def convert(self, event: Union[AnyMessage, dict, Any, None]): try: if self.current_generator is None: self.current_generator = ResponseStreamEventGenerator(logger, None, hitl_helper=self.hitl_helper) - if event is None or not hasattr(event, '__getitem__'): + if event is None or not hasattr(event, "__getitem__"): raise ValueError(f"Event is not indexable: {event}") message = event[0] # expect a tuple converted = self.try_process_message(message, self.context) return converted - except Exception as e: - logger.error(f"Error converting message {event}: {e}") - raise ValueError(f"Error converting message {event}") from e + except (IndexError, KeyError, TypeError, ValueError) as error: + logger.error("Error converting message %s: %s", event, error) + raise ValueError(f"Error converting message {event}") from error def finalize(self, graph_state=None): logger.info("Stream ended, finalizing response.") @@ -91,6 +90,9 @@ def try_process_message( if event and not self.current_generator: self.current_generator = ResponseStreamEventGenerator(logger, None, hitl_helper=self.hitl_helper) + if self.current_generator is None: + return [] + is_processed = False next_processor = self.current_generator returned_events = [] @@ -101,14 +103,17 @@ def try_process_message( returned_events.extend(processed_events) if not is_processed and 
next_processor == self.current_generator: logger.warning( - f"Message can not be processed by current generator {type(self.current_generator).__name__}:" - + f" {type(event)}: {event}" + "Message can not be processed by current generator %s: %s: %s", + type(self.current_generator).__name__, + type(event), + event, ) break if next_processor != self.current_generator: logger.info( - f"Switching processor from {type(self.current_generator).__name__} " - + f"to {type(next_processor).__name__}" + "Switching processor from %s to %s", + type(self.current_generator).__name__, + type(next_processor).__name__ if next_processor is not None else "NoneType", ) self.current_generator = next_processor return returned_events diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py index 7c97d7adee0a..8e95d241e23c 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# mypy: disable-error-code="assignment" +from abc import ABC, abstractmethod from typing import Optional from langgraph.types import Interrupt @@ -12,19 +12,22 @@ from .._utils import extract_function_call -class ItemResourceHelper: +class ItemResourceHelper(ABC): def __init__(self, item_type: str, item_id: Optional[str] = None): self.item_type = item_type self.item_id = item_id + @abstractmethod def create_item_resource(self, is_done: bool): - pass + raise NotImplementedError + @abstractmethod def add_aggregate_content(self, item): - pass + raise NotImplementedError + @abstractmethod def get_aggregated_content(self): - pass + raise NotImplementedError class FunctionCallItemResourceHelper(ItemResourceHelper): @@ -77,12 +80,12 @@ def create_item_resource(self, is_done: bool): return None item_resource = self.hitl_helper.convert_interrupt(self.interrupt) if item_resource is not None and not is_done: - if hasattr(item_resource, 'arguments'): - item_resource.arguments = "" # type: ignore[union-attr] + if getattr(item_resource, "arguments", None) is not None: + item_resource.arguments = "" return item_resource def add_aggregate_content(self, item): - pass + return None def get_aggregated_content(self): return self.create_item_resource(is_done=True) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py index 213a9f78e348..432fac54a4a3 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py +++ 
b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py @@ -1,8 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# pylint: disable=unused-argument,consider-using-in,consider-merging-isinstance -# mypy: ignore-errors from typing import List from langchain_core import messages as langgraph_messages @@ -41,7 +39,7 @@ def try_process_message( if not self.item_content_helper: if not self.try_create_item_content_helper(message): # cannot create item content, skip this message - self.logger.warning(f"Cannot create item content helper for message: {message}") + self.logger.warning("Cannot create item content helper for message: %s", message) return True, self, [] if self.item_content_helper and not self.started: self.started, start_events = self.on_start(message, context, stream_state) @@ -63,8 +61,8 @@ def try_process_message( return is_processed, next_processor, events - def on_start( # mypy: ignore[override] - self, event, run_details, stream_state: StreamEventState + def on_start( + self, _event, _run_details, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if self.started: return False, [] @@ -82,8 +80,8 @@ def on_start( # mypy: ignore[override] return True, [start_event] def on_end( - self, message, context, stream_state: StreamEventState - ) -> List[project_models.ResponseStreamEvent]: # mypy: ignore[override] + self, _message, _context, stream_state: StreamEventState + ) -> List[project_models.ResponseStreamEvent]: aggregated_content = self.item_content_helper.create_item_content() done_event = project_models.ResponseContentPartDoneEvent( item_id=self.item_id, @@ -98,13 +96,11 @@ def on_end( return [done_event] def try_create_item_content_helper(self, message): - if isinstance(message, 
langgraph_messages.AIMessage) or isinstance(message, langgraph_messages.ToolMessage): + if isinstance(message, (langgraph_messages.AIMessage, langgraph_messages.ToolMessage)): if self.is_text_content(message.content): self.item_content_helper = item_content_helpers.OutputTextItemContentHelper() return True - if isinstance(message, langgraph_messages.HumanMessage) or isinstance( - message, langgraph_messages.SystemMessage - ): + if isinstance(message, (langgraph_messages.HumanMessage, langgraph_messages.SystemMessage)): if self.is_text_content(message.content): self.item_content_helper = item_content_helpers.InputTextItemContentHelper() return True @@ -120,10 +116,10 @@ def is_text_content(self, content): return True return False - def create_child_processor(self, message) -> ResponseEventGenerator: - if ( - self.item_content_helper.content_type == project_models.ItemContentType.INPUT_TEXT - or self.item_content_helper.content_type == project_models.ItemContentType.OUTPUT_TEXT + def create_child_processor(self, _message) -> ResponseEventGenerator: + if self.item_content_helper.content_type in ( + project_models.ItemContentType.INPUT_TEXT, + project_models.ItemContentType.OUTPUT_TEXT, ): return ResponseOutputTextEventGenerator( logger=self.logger, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py index 843cf18fe2dc..649664f357bd 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py @@ -1,8 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft 
Corporation. All rights reserved. # --------------------------------------------------------- -# pylint: disable=unused-argument,unnecessary-pass -# mypy: disable-error-code="valid-type" +from abc import ABC, abstractmethod from typing import List from langchain_core.messages import AnyMessage @@ -20,7 +19,7 @@ class StreamEventState: sequence_number: int = 0 -class ResponseEventGenerator: +class ResponseEventGenerator(ABC): """ :meta private: Abstract base class for response event generators. @@ -32,12 +31,13 @@ def __init__(self, logger, parent): self.logger = logger self.parent = parent # parent generator + @abstractmethod def try_process_message( self, - message: AnyMessage, # mypy: ignore[valid-type] + message: AnyMessage, context: LanggraphRunContext, stream_state: StreamEventState, - ): # mypy: ignore[empty-body] + ) -> tuple[bool, "ResponseEventGenerator | None", List[project_models.ResponseStreamEvent]]: """ Try to process the incoming message. @@ -51,30 +51,42 @@ def try_process_message( :return: tuple of (is_processed, next_processor, events) :rtype: tuple[bool, ResponseEventGenerator, List[ResponseStreamEvent]] """ - pass + raise NotImplementedError - def on_start(self) -> tuple[bool, List[project_models.ResponseStreamEvent]]: + def on_start( + self, + _message: AnyMessage, + _context: LanggraphRunContext, + _stream_state: StreamEventState, + ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: """ Generate the starting events for this layer. + :param _message: The incoming message to process. + :type _message: AnyMessage + :param _context: The agent run context. + :type _context: LanggraphRunContext + :param _stream_state: The current stream event state. 
+ :type _stream_state: StreamEventState + :return: tuple of (started, events) :rtype: tuple[bool, List[ResponseStreamEvent]] """ return False, [] def on_end( - self, message: AnyMessage, context: LanggraphRunContext, stream_state: StreamEventState + self, _message: AnyMessage, _context: LanggraphRunContext, _stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: """ Generate the ending events for this layer. TODO: handle different end conditions, e.g. normal end, error end, etc. - :param message: The incoming message to process. - :type message: AnyMessage - :param context: The agent run context. - :type context: LanggraphRunContext - :param stream_state: The current stream event state. - :type stream_state: StreamEventState + :param _message: The incoming message to process. + :type _message: AnyMessage + :param _context: The agent run context. + :type _context: LanggraphRunContext + :param _stream_state: The current stream event state. + :type _stream_state: StreamEventState :return: tuple of (started, events) :rtype: tuple[bool, List[ResponseStreamEvent]] @@ -89,4 +101,4 @@ def aggregate_content(self): :return: content from child processor :rtype: str | dict """ - pass + return None diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py index 0f2e93fe0ef1..685074e668c4 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py @@ -1,8 +1,6 @@ # 
--------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# pylint: disable=unused-argument,name-too-long -# mypy: ignore-errors from typing import List, Union from langchain_core import messages as langgraph_messages @@ -16,7 +14,7 @@ from ..._context import LanggraphRunContext -class ResponseFunctionCallArgumentEventGenerator(ResponseEventGenerator): +class ResponseFunctionCallArgumentEventGenerator(ResponseEventGenerator): # pylint: disable=C4751 def __init__( self, logger, @@ -45,7 +43,7 @@ def try_process_message( is_processed, next_processor, processed_events = self.process(message, context, stream_state) if not is_processed: - self.logger.warning(f"FunctionCallArgumentEventGenerator did not process message: {message}") + self.logger.warning("FunctionCallArgumentEventGenerator did not process message: %s", message) events.extend(processed_events) if self.should_end(message): @@ -58,7 +56,7 @@ def try_process_message( return is_processed, next_processor, events def on_start( - self, event: AnyMessage, run_details, stream_state: StreamEventState + self, _event: AnyMessage, _run_details, _stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if self.started: return True, [] @@ -66,7 +64,10 @@ def on_start( return True, [] def process( - self, message: Union[langgraph_messages.AnyMessage, Interrupt], run_details, stream_state: StreamEventState + self, + message: Union[langgraph_messages.AnyMessage, Interrupt], + _run_details, + stream_state: StreamEventState, ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: if self.should_end(message): return False, self, [] @@ -115,7 +116,7 @@ def should_end(self, event: AnyMessage) -> bool: return False def on_end( - self, message: AnyMessage, context: LanggraphRunContext, stream_state: StreamEventState + self, message: AnyMessage, 
context: LanggraphRunContext, stream_state: StreamEventState # pylint: disable=unused-argument ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: done_event = project_models.ResponseFunctionCallArgumentsDoneEvent( item_id=self.item_id, @@ -132,16 +133,16 @@ def get_tool_call_info(self, message: Union[langgraph_messages.AnyMessage, Inter if message.tool_call_chunks: if len(message.tool_call_chunks) > 1: self.logger.warning( - f"There are {len(message.tool_call_chunks)} tool calls found. " - + "Only the first one will be processed." + "There are %s tool calls found. Only the first one will be processed.", + len(message.tool_call_chunks), ) return message.tool_call_chunks[0] elif isinstance(message, langgraph_messages.AIMessage): if message.tool_calls: if len(message.tool_calls) > 1: self.logger.warning( - f"There are {len(message.tool_calls)} tool calls found. " - + "Only the first one will be processed." + "There are %s tool calls found. Only the first one will be processed.", + len(message.tool_calls), ) return message.tool_calls[0] return None diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py index 4c98b4b1a18d..ddc61c6e43e8 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py @@ -1,8 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=unused-argument -# mypy: ignore-errors from typing import List, Union from langchain_core import messages as langgraph_messages @@ -37,14 +35,14 @@ def try_process_message( if self.item_resource_helper is None: if not self.try_create_item_resource_helper(message, context.agent_run.id_generator): # cannot create item resource, skip this message - self.logger.warning(f"Cannot create item resource helper for message: {message}, skipping.") + self.logger.warning("Cannot create item resource helper for message: %s, skipping.", message) return True, self, [] if self.item_resource_helper and not self.started: self.started, start_events = self.on_start(message, context, stream_state) if not self.started: # could not start processing, skip this message - self.logger.warning(f"Cannot create start events for message: {message}, skipping.") + self.logger.warning("Cannot create start events for message: %s, skipping.", message) return True, self, [] events.extend(start_events) @@ -58,7 +56,7 @@ def try_process_message( child_processor = self.create_child_processor(message) if child_processor: - self.logger.info(f"Created child processor: {child_processor}") + self.logger.info("Created child processor: %s", child_processor) return False, child_processor, events if message: @@ -69,7 +67,7 @@ def try_process_message( return is_processed, next_processor, events def on_start( - self, event: Union[AnyMessage, Interrupt], context: LanggraphRunContext, stream_state: StreamEventState + self, _event: Union[AnyMessage, Interrupt], _context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if self.started: return True, [] @@ -77,7 +75,7 @@ def on_start( item_resource = self.item_resource_helper.create_item_resource(is_done=False) if item_resource is None: # cannot know what item resource to create - return False, None + return False, [] 
item_added_event = project_models.ResponseOutputItemAddedEvent( output_index=self.output_index, sequence_number=stream_state.sequence_number, @@ -96,8 +94,8 @@ def should_end(self, event: Union[AnyMessage, Interrupt]) -> bool: return False def on_end( - self, message: Union[AnyMessage, Interrupt], context: LanggraphRunContext, stream_state: StreamEventState - ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: + self, message: Union[AnyMessage, Interrupt], context: LanggraphRunContext, stream_state: StreamEventState # pylint: disable=unused-argument + ) -> List[project_models.ResponseStreamEvent]: if not self.started: # should not happen return [] @@ -112,7 +110,7 @@ def on_end( self.parent.aggregate_content(item_resource) # pass aggregated content to parent return [done_event] - def aggregate_content(self, content): + def aggregate_content(self, content) -> None: # aggregate content from child processor self.item_resource_helper.add_aggregate_content(content) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py index 21772abd1ea7..61f5652a6d46 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py @@ -1,10 +1,10 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=unused-argument -# mypy: disable-error-code="return-value,assignment" from typing import List +from langchain_core.messages import AnyMessage + from azure.ai.agentserver.core.models import _projects as project_models from ._response_event_generator import ( ResponseEventGenerator, @@ -31,7 +31,7 @@ def __init__( self.aggregated_content = "" def try_process_message( - self, message, context, stream_state: StreamEventState + self, message: AnyMessage, _context, stream_state: StreamEventState ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: is_processed = False events = [] @@ -40,27 +40,27 @@ def try_process_message( self.started = True if message: - is_processed, next_processor, processed_events = self.process(message, context, stream_state) + is_processed, next_processor, processed_events = self.process(message, stream_state) if not is_processed: - self.logger.warning(f"OutputTextEventGenerator did not process message: {message}") + self.logger.warning("OutputTextEventGenerator did not process message: %s", message) events.extend(processed_events) if self.should_end(message): - is_processed, complete_events = self.on_end(message, context, stream_state) + is_processed, complete_events = self.on_end(message, _context, stream_state) events.extend(complete_events) next_processor = self.parent return is_processed, next_processor, events def process( - self, message, run_details, stream_state: StreamEventState + self, message: AnyMessage, stream_state: StreamEventState ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: if message and message.content: content = [message.content] if isinstance(message.content, str) else message.content res = [] for item in content: if not isinstance(item, str): - self.logger.warning(f"Skipping non-string content item: {item}") + self.logger.warning("Skipping non-string content item: %s", item) 
continue # create an event for each content item chunk_event = project_models.ResponseTextDeltaEvent( @@ -73,7 +73,7 @@ def process( self.aggregated_content += item stream_state.sequence_number += 1 res.append(chunk_event) - return True, self, res # mypy: ignore[return-value] + return True, self, res return False, self, [] def has_finish_reason(self, message) -> bool: @@ -91,8 +91,8 @@ def should_end(self, message) -> bool: return True return False - def on_end( # mypy: ignore[override] - self, message, context: LanggraphRunContext, stream_state: StreamEventState + def on_end( + self, message, context: LanggraphRunContext, stream_state: StreamEventState # pylint: disable=unused-argument ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if not self.started: return False, [] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py index bb361639601d..c186dbd329e6 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py @@ -1,10 +1,8 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=unused-argument -# mypy: ignore-errors import time -from typing import List +from typing import List, Optional, Union from langchain_core import messages as langgraph_messages @@ -76,8 +74,11 @@ def should_complete(self, event: langgraph_messages.AnyMessage) -> bool: return False def try_process_message( - self, message: langgraph_messages.AnyMessage, context: LanggraphRunContext, stream_state: StreamEventState - ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: + self, + message: Optional[langgraph_messages.AnyMessage], + context: LanggraphRunContext, + stream_state: StreamEventState, + ) -> tuple[bool, Optional[ResponseEventGenerator], List[project_models.ResponseStreamEvent]]: is_processed = False next_processor = self events = [] @@ -108,8 +109,12 @@ def should_end(self, event: langgraph_messages.AnyMessage) -> bool: return True return False - def on_end(self, message: langgraph_messages.AnyMessage, context: LanggraphRunContext, - stream_state: StreamEventState): + def on_end( + self, + _message: Optional[langgraph_messages.AnyMessage], + context: LanggraphRunContext, + stream_state: StreamEventState, + ) -> List[project_models.ResponseStreamEvent]: agent_id = context.agent_run.get_agent_id_object() conversation = context.agent_run.get_conversation_object() response_dict = { @@ -130,11 +135,12 @@ def on_end(self, message: langgraph_messages.AnyMessage, context: LanggraphRunCo self.parent.aggregate_content(self.aggregated_contents) return [done_event] - def aggregate_content(self, content): + def aggregate_content(self, content: Union[List[project_models.ItemResource], project_models.ItemResource]) -> None: # aggregate content from children if isinstance(content, list): for c in content: self.aggregate_content(c) + return if isinstance(content, project_models.ItemResource): self.aggregated_contents.append(content) else: