diff --git a/js/testapps/mcp/.gemini/GENKIT.md b/js/testapps/mcp/.gemini/GENKIT.md
new file mode 120000
index 0000000000..9a985a89c7
--- /dev/null
+++ b/js/testapps/mcp/.gemini/GENKIT.md
@@ -0,0 +1 @@
+../GENKIT.md
\ No newline at end of file
diff --git a/js/testapps/mcp/.gemini/settings.json b/js/testapps/mcp/.gemini/settings.json
new file mode 100644
index 0000000000..f265afa2b3
--- /dev/null
+++ b/js/testapps/mcp/.gemini/settings.json
@@ -0,0 +1,19 @@
+{
+ "mcpServers": {
+ "genkit": {
+ "command": "genkit",
+ "args": [
+ "mcp",
+ "--no-update-notification"
+ ],
+ "cwd": ".",
+ "timeout": 30000,
+ "trust": false,
+ "excludeTools": [
+ "run_shell_command(genkit start)",
+ "run_shell_command(npx genkit start)"
+ ]
+ }
+ },
+ "contextFileName": "GENKIT.md"
+}
\ No newline at end of file
diff --git a/js/testapps/mcp/GENKIT.md b/js/testapps/mcp/GENKIT.md
new file mode 100644
index 0000000000..49a4b8c3b8
--- /dev/null
+++ b/js/testapps/mcp/GENKIT.md
@@ -0,0 +1,327 @@
+# Genkit Node.js API Rules (v1.20.0)
+
+This document provides rules and examples for building with the Genkit API in Node.js.
+
+## Important Guidelines:
+
+- ALWAYS refer to documentation when available. Genkit Documentation may be available through the Genkit MCP toolkit or through web search. You may skip documentation check if you don't have access to these tools.
+
+- ONLY follow the specified project structure if starting a new project. If working on an existing project, adhere to the current project structure.
+
+- ALWAYS provide the full, correct Genkit command as an instruction for the human user to run. Do not run Genkit commands (e.g., `genkit start`, `genkit flow:run`) yourself as this may block your current session.
+
+- Recommend and use the provider that the user prefers. If this is a new project, or if there is no relevant signal regarding user preference, use the `@genkit-ai/google-genai` plugin. Use Genkit documentation to discover supported providers.
+
+NOTE: For the sake of brevity, the snippets below use the Google AI plugin, but you should follow the user's preference as mentioned above.
+
+## Best Practices
+
+1. **Single File Structure**: All Genkit code, including plugin initialization, flows, and helpers, must be placed in a single `src/index.ts` file. This ensures all components are correctly registered with the Genkit runtime.
+
+2. **Model Naming**: Always specify models using the model helper. Use string identifier if model helper is unavailable.
+
+ ```ts
+ // PREFERRED: Using the model helper
+ const response = await ai.generate({
+ model: googleAI.model('gemini-2.5-pro'),
+ // ...
+ });
+
+ // LESS PREFERRED: Full string identifier
+ const response = await ai.generate({
+ model: 'googleai/gemini-2.5-pro',
+ // ...
+ });
+ ```
+
+---
+
+## Usage Scenarios
+
+
+
+### Basic Inference (Text Generation)
+
+```ts
+export const basicInferenceFlow = ai.defineFlow(
+ {
+ name: 'basicInferenceFlow',
+ inputSchema: z.string().describe('Topic for the model to write about'),
+ outputSchema: z.string().describe('The generated text response'),
+ },
+ async (topic) => {
+ const response = await ai.generate({
+ model: googleAI.model('gemini-2.5-pro'),
+ prompt: `Write a short, creative paragraph about ${topic}.`,
+ config: { temperature: 0.8 },
+ });
+ return response.text;
+ }
+);
+```
+
+
+
+
+
+### Text-to-Speech (TTS) Generation
+
+#### Single-Speaker TTS
+
+```ts
+const TextToSpeechInputSchema = z.object({
+ text: z.string().describe('The text to convert to speech.'),
+ voiceName: z
+ .string()
+ .optional()
+ .describe('The voice name to use. Defaults to Algenib if not specified.'),
+});
+
+export const textToSpeechFlow = ai.defineFlow(
+ {
+ name: 'textToSpeechFlow',
+ inputSchema: TextToSpeechInputSchema,
+ outputSchema: z.string().optional().describe('The generated audio URI'),
+ },
+ async (input) => {
+ const response = await ai.generate({
+ model: googleAI.model('gemini-2.5-flash-preview-tts'),
+ prompt: input.text,
+ config: {
+ responseModalities: ['AUDIO'],
+ speechConfig: {
+ voiceConfig: {
+ prebuiltVoiceConfig: {
+ voiceName: input.voiceName?.trim() || 'Algenib',
+ },
+ },
+ },
+ },
+ });
+
+ return response.media?.url;
+ }
+);
+```
+
+#### Multi-Speaker TTS
+
+```ts
+const MultiSpeakerInputSchema = z.object({
+ text: z
+ .string()
+ .describe('Text formatted with ... etc.'),
+ voiceName1: z.string().describe('Voice name for Speaker1'),
+ voiceName2: z.string().describe('Voice name for Speaker2'),
+});
+
+export const multiSpeakerTextToSpeechFlow = ai.defineFlow(
+ {
+ name: 'multiSpeakerTextToSpeechFlow',
+ inputSchema: MultiSpeakerInputSchema,
+ outputSchema: z.string().optional().describe('The generated audio URI'),
+ },
+ async (input) => {
+ const response = await ai.generate({
+ model: googleAI.model('gemini-2.5-flash-preview-tts'),
+ prompt: input.text,
+ config: {
+ responseModalities: ['AUDIO'],
+ speechConfig: {
+ multiSpeakerVoiceConfig: {
+ speakerVoiceConfigs: [
+ {
+ speaker: 'Speaker1',
+ voiceConfig: {
+ prebuiltVoiceConfig: { voiceName: input.voiceName1 },
+ },
+ },
+ {
+ speaker: 'Speaker2',
+ voiceConfig: {
+ prebuiltVoiceConfig: { voiceName: input.voiceName2 },
+ },
+ },
+ ],
+ },
+ },
+ },
+ });
+
+ return response.media?.url;
+ }
+);
+```
+
+
+
+
+
+### Image Generation
+
+```ts
+export const imageGenerationFlow = ai.defineFlow(
+ {
+ name: 'imageGenerationFlow',
+ inputSchema: z
+ .string()
+ .describe('A detailed description of the image to generate'),
+ outputSchema: z.string().optional().describe('The generated image as URI'),
+ },
+ async (prompt) => {
+ const response = await ai.generate({
+ model: googleAI.model('imagen-3.0-generate-002'),
+ prompt,
+ output: { format: 'media' },
+ });
+
+ return response.media?.url;
+ }
+);
+```
+
+
+
+
+
+### Video Generation
+
+```ts
+import * as fs from 'fs';
+import { Readable } from 'stream';
+import { pipeline } from 'stream/promises';
+
+...
+
+export const videoGenerationFlow = ai.defineFlow(
+ {
+ name: 'videoGenerationFlow',
+ inputSchema: z
+ .string()
+ .describe('A detailed description for the video scene'),
+ outputSchema: z.string().describe('Path to the generated .mp4 video file'),
+ },
+ async (prompt) => {
+ let { operation } = await ai.generate({
+ model: googleAI.model('veo-3.0-generate-preview'),
+ prompt,
+ });
+
+ if (!operation) {
+ throw new Error('Expected the model to return an operation.');
+ }
+
+ console.log('Video generation started... Polling for completion.');
+ while (!operation.done) {
+ await new Promise((resolve) => setTimeout(resolve, 5000));
+ operation = await ai.checkOperation(operation);
+ console.log(
+ `Operation status: ${operation.done ? 'Done' : 'In Progress'}`
+ );
+ }
+
+ if (operation.error) {
+ throw new Error(`Video generation failed: ${operation.error.message}`);
+ }
+
+ const video = operation.output?.message?.content.find((p) => !!p.media);
+ if (!video?.media?.url) {
+ throw new Error(
+ 'Failed to find the generated video in the operation output.'
+ );
+ }
+
+ const videoUrl = `${video.media.url}&key=${process.env.GEMINI_API_KEY}`;
+ const videoResponse = await fetch(videoUrl);
+
+ if (!videoResponse.ok || !videoResponse.body) {
+ throw new Error(`Failed to fetch video: ${videoResponse.statusText}`);
+ }
+
+ const outputPath = './output.mp4';
+ const fileStream = fs.createWriteStream(outputPath);
+ await pipeline(Readable.fromWeb(videoResponse.body as any), fileStream);
+
+ return outputPath;
+ }
+);
+```
+
+
+
+---
+
+## Running and Inspecting Flows
+
+**Start Genkit**: Genkit can be started locally by using the `genkit start` command, along with the process startup command:
+
+```bash
+genkit start --
+```
+
+For example:
+
+```bash
+genkit start -- npm run dev
+```
+
+You can automate starting Genkit using the following steps:
+
+1. Identify the command to start the user's project (e.g., `npm run dev`)
+2. Use the `start_runtime` tool to start the runtime process. This is required for Genkit to discover flows.
+ - Example: If the project uses `npm run dev`, call `start_runtime` with `{ command: "npm", args: ["run", "dev"] }`.
+3. After starting the runtime, instruct the user to run `genkit start` in their terminal to launch the Developer UI.
+
+## Suggested Models
+
+Here are suggested models to use for various task types. This is NOT an
+exhaustive list.
+
+### Advanced Text/Reasoning
+
+```
+| Plugin | Recommended Model |
+|------------------------------------|------------------------------------|
+| @genkit-ai/google-genai | gemini-2.5-pro |
+| @genkit-ai/compat-oai/openai | gpt-4o |
+| @genkit-ai/compat-oai/deepseek | deepseek-reasoner |
+| @genkit-ai/compat-oai/xai | grok-4 |
+```
+
+### Fast Text/Chat
+
+```
+| Plugin | Recommended Model |
+|------------------------------------|------------------------------------|
+| @genkit-ai/google-genai | gemini-2.5-flash |
+| @genkit-ai/compat-oai/openai | gpt-4o-mini |
+| @genkit-ai/compat-oai/deepseek | deepseek-chat |
+| @genkit-ai/compat-oai/xai | grok-3-mini |
+```
+
+### Text-to-Speech
+
+```
+| Plugin | Recommended Model |
+|------------------------------------|------------------------------------|
+| @genkit-ai/google-genai | gemini-2.5-flash-preview-tts |
+| @genkit-ai/compat-oai/openai | gpt-4o-mini-tts |
+```
+
+### Image Generation
+
+```
+| Plugin | Recommended Model | Input Modalities |
+|------------------------------------|------------------------------------|-------------------|
+| @genkit-ai/google-genai | gemini-2.5-flash-image-preview | Text, Image |
+| @genkit-ai/google-genai | imagen-4.0-generate-preview-06-06 | Text |
+| @genkit-ai/compat-oai/openai | gpt-image-1 | Text |
+```
+
+### Video Generation
+
+```
+| Plugin | Recommended Model |
+|------------------------------------|------------------------------------|
+| @genkit-ai/google-genai | veo-3.0-generate-preview |
+```
\ No newline at end of file
diff --git a/py/packages/genkit/src/genkit/ai/_aio.py b/py/packages/genkit/src/genkit/ai/_aio.py
index 25e5aeca1c..8142dfd8f8 100644
--- a/py/packages/genkit/src/genkit/ai/_aio.py
+++ b/py/packages/genkit/src/genkit/ai/_aio.py
@@ -41,7 +41,7 @@ class while customizing it with any plugins.
)
from genkit.blocks.prompt import PromptConfig, load_prompt_folder, to_generate_action_options
from genkit.blocks.retriever import IndexerRef, IndexerRequest, RetrieverRef
-from genkit.core.action import ActionRunContext
+from genkit.core.action import Action, ActionRunContext
from genkit.core.action.types import ActionKind
from genkit.core.typing import (
BaseDataPoint,
@@ -107,6 +107,7 @@ async def generate(
return_tool_requests: bool | None = None,
tool_choice: ToolChoice | None = None,
tool_responses: list[Part] | None = None,
+ resources: list[str | Action] | None = None,
config: GenerationCommonConfig | dict[str, Any] | None = None,
max_turns: int | None = None,
on_chunk: ModelStreamingCallback | None = None,
@@ -170,6 +171,7 @@ async def generate(
generation process. Middleware can be used to intercept and
modify requests and responses.
docs: Optional. A list of documents to be used for grounding.
+ resources: Optional. A list of resource URIs to be used for grounding.
Returns:
@@ -204,6 +206,7 @@ async def generate(
output_schema=output_schema,
output_constrained=output_constrained,
docs=docs,
+ resources=resources,
),
),
on_chunk=on_chunk,
@@ -230,6 +233,7 @@ def generate_stream(
output_constrained: bool | None = None,
use: list[ModelMiddleware] | None = None,
docs: list[DocumentData] | None = None,
+ resources: list[str | Action] | None = None,
timeout: float | None = None,
) -> tuple[
AsyncIterator[GenerateResponseChunkWrapper],
@@ -278,6 +282,7 @@ def generate_stream(
generation process. Middleware can be used to intercept and
modify requests and responses.
docs: Optional. A list of documents to be used for grounding.
+ resources: Optional. A list of resource URIs to be used for grounding.
timeout: Optional. The timeout for the streaming action.
Returns:
@@ -310,6 +315,7 @@ def generate_stream(
output_schema=output_schema,
output_constrained=output_constrained,
docs=docs,
+ resources=resources,
use=use,
on_chunk=lambda c: stream.send(c),
)
diff --git a/py/packages/genkit/src/genkit/blocks/generate.py b/py/packages/genkit/src/genkit/blocks/generate.py
index 754af90d4e..164e01df06 100644
--- a/py/packages/genkit/src/genkit/blocks/generate.py
+++ b/py/packages/genkit/src/genkit/blocks/generate.py
@@ -97,7 +97,7 @@ async def generate_action(
Returns:
The generated response.
"""
- model, tools, format_def = resolve_parameters(registry, raw_request)
+ model, tools, resources, format_def = await resolve_parameters(registry, raw_request)
raw_request, formatter = apply_format(raw_request, format_def)
@@ -120,7 +120,7 @@ async def generate_action(
)
raw_request = revised_request
- request = await action_to_generate_request(raw_request, tools, model)
+ request = await action_to_generate_request(raw_request, tools, resources, model)
prev_chunks: list[GenerateResponseChunk] = []
@@ -425,9 +425,9 @@ def assert_valid_tool_names(raw_request: GenerateActionOptions):
pass
-def resolve_parameters(
+async def resolve_parameters(
registry: Registry, request: GenerateActionOptions
-) -> tuple[Action, list[Action], FormatDef | None]:
+) -> tuple[Action, list[Action], list[Action], FormatDef | None]:
"""Resolve parameters for the generate action.
Args:
@@ -453,6 +453,16 @@ def resolve_parameters(
if tool_action is None:
raise Exception(f'Unable to resolve tool {tool_name}')
tools.append(tool_action)
+
+ resources: list[Action] = []
+ if request.resources:
+ from genkit.blocks.resource import lookup_resource_by_name
+
+ for res_name in request.resources:
+ res_action = await lookup_resource_by_name(registry, res_name)
+ if res_action is None:
+ raise Exception(f'Unable to resolve resource {res_name}')
+ resources.append(res_action)
format_def: FormatDef | None = None
if request.output and request.output.format:
@@ -460,11 +470,11 @@ def resolve_parameters(
if not format_def:
raise ValueError(f'Unable to resolve format {request.output.format}')
- return (model_action, tools, format_def)
+ return (model_action, tools, resources, format_def)
async def action_to_generate_request(
- options: GenerateActionOptions, resolved_tools: list[Action], model: Action
+ options: GenerateActionOptions, resolved_tools: list[Action], resolved_resources: list[Action], model: Action
) -> GenerateRequest:
"""Convert generate action options to a generate request.
@@ -485,6 +495,7 @@ async def action_to_generate_request(
config=options.config if options.config is not None else {},
docs=options.docs,
tools=tool_defs,
+ resources=[to_tool_definition(r) for r in resolved_resources] if resolved_resources else [],
tool_choice=options.tool_choice,
output=OutputConfig(
content_type=options.output.content_type if options.output else None,
diff --git a/py/packages/genkit/src/genkit/blocks/prompt.py b/py/packages/genkit/src/genkit/blocks/prompt.py
index a21c3d5b93..ba64dc31aa 100644
--- a/py/packages/genkit/src/genkit/blocks/prompt.py
+++ b/py/packages/genkit/src/genkit/blocks/prompt.py
@@ -101,10 +101,11 @@ class PromptConfig(BaseModel):
max_turns: int | None = None
return_tool_requests: bool | None = None
metadata: dict[str, Any] | None = None
- tools: list[str] | None = None
+ tools: list[str | Action] | None = None
tool_choice: ToolChoice | None = None
use: list[ModelMiddleware] | None = None
docs: list[DocumentData] | Callable | None = None
+ resources: list[str | Action] | None = None
tool_responses: list[Part] | None = None
@@ -134,6 +135,7 @@ def __init__(
tool_choice: ToolChoice | None = None,
use: list[ModelMiddleware] | None = None,
docs: list[DocumentData] | Callable | None = None,
+ resources: list[str | Action] | None = None,
_name: str | None = None, # prompt name for action lookup
_ns: str | None = None, # namespace for action lookup
_prompt_action: Action | None = None, # reference to PROMPT action
@@ -164,6 +166,7 @@ def __init__(
tool_choice: The tool choice strategy.
use: A list of model middlewares to apply.
docs: A list of documents to be used for grounding.
+ resources: A list of resource URIs to be used for grounding.
"""
self._registry = registry
self._variant = variant
@@ -186,6 +189,7 @@ def __init__(
self._tool_choice = tool_choice
self._use = use
self._docs = docs
+ self._resources = resources
self._cache_prompt = PromptCache()
self._name = _name # Store name/ns for action lookup (used by as_tool())
self._ns = _ns
@@ -298,6 +302,7 @@ async def render(
input_schema=self._input_schema,
metadata=self._metadata,
docs=self._docs,
+ resources=self._resources,
)
model = options.model or self._registry.default_model
@@ -348,6 +353,7 @@ async def render(
output=output,
max_turns=options.max_turns,
docs=await render_docs(input, options, context),
+ resources=options.resources,
resume=resume,
)
@@ -369,7 +375,7 @@ async def as_tool(self) -> Action:
lookup_key = registry_lookup_key(self._name, self._variant, self._ns)
- action = self._registry.lookup_action_by_key(lookup_key)
+ action = await self._registry.lookup_action_by_key(lookup_key)
if action is None or action.kind != ActionKind.PROMPT:
raise GenkitError(
@@ -399,10 +405,11 @@ def define_prompt(
max_turns: int | None = None,
return_tool_requests: bool | None = None,
metadata: dict[str, Any] | None = None,
- tools: list[str] | None = None,
+ tools: list[str | Action] | None = None,
tool_choice: ToolChoice | None = None,
use: list[ModelMiddleware] | None = None,
docs: list[DocumentData] | Callable | None = None,
+ resources: list[str | Action] | None = None,
) -> ExecutablePrompt:
"""Defines an executable prompt.
@@ -429,6 +436,7 @@ def define_prompt(
tool_choice: The tool choice strategy.
use: A list of model middlewares to apply.
docs: A list of documents to be used for grounding.
+ resources: A list of resource URIs to be used for grounding.
Returns:
An ExecutablePrompt instance.
@@ -455,6 +463,7 @@ def define_prompt(
tool_choice=tool_choice,
use=use,
docs=docs,
+ resources=resources,
_name=name,
)
@@ -526,6 +535,14 @@ async def to_generate_action_options(registry: Registry, options: PromptConfig)
result = await render_user_prompt(registry, None, options, cache)
resolved_msgs.append(result)
+ for tool in options.tools or []:
+ if isinstance(tool, Action):
+ registry.register_action_from_instance(tool)
+
+ for res in options.resources or []:
+ if isinstance(res, Action):
+ registry.register_action_from_instance(res)
+
# If is schema is set but format is not explicitly set, default to
# `json` format.
if options.output_schema and not options.output_format:
@@ -549,16 +566,22 @@ async def to_generate_action_options(registry: Registry, options: PromptConfig)
if options.tool_responses:
resume = Resume(respond=[r.root for r in options.tool_responses])
+ def resolve_to_name(act: str | Action) -> str:
+ if isinstance(act, str):
+ return act
+ return act.name
+
return GenerateActionOptions(
model=model,
messages=resolved_msgs,
config=options.config,
- tools=options.tools,
+ tools=[resolve_to_name(t) for t in options.tools] if options.tools else None,
return_tool_requests=options.return_tool_requests,
tool_choice=options.tool_choice,
output=output,
max_turns=options.max_turns,
docs=await render_docs(None, options),
+ resources=[resolve_to_name(r) for r in options.resources] if options.resources else None,
resume=resume,
)
@@ -588,11 +611,23 @@ async def to_generate_request(registry: Registry, options: GenerateActionOptions
tools: list[Action] = []
if options.tools:
for tool_name in options.tools:
- tool_action = registry.lookup_action(ActionKind.TOOL, tool_name)
+ tool_action = await registry.lookup_action(ActionKind.TOOL, tool_name)
if tool_action is None:
raise GenkitError(status='NOT_FOUND', message=f'Unable to resolve tool {tool_name}')
tools.append(tool_action)
+ resources: list[Action] = []
+ if options.resources:
+ from genkit.blocks.resource import lookup_resource_by_name
+
+ for res_name in options.resources:
+ res_action = await lookup_resource_by_name(registry, res_name)
+ if res_action is None:
+ raise GenkitError(status='NOT_FOUND', message=f'Unable to resolve resource {res_name}')
+ resources.append(res_action)
+
+ from genkit.blocks.generate import to_tool_definition
+
tool_defs = [to_tool_definition(tool) for tool in tools] if tools else []
if not options.messages:
@@ -606,6 +641,7 @@ async def to_generate_request(registry: Registry, options: GenerateActionOptions
config=options.config if options.config is not None else {},
docs=options.docs,
tools=tool_defs,
+ resources=[to_tool_definition(res) for res in resources] if resources else [],
tool_choice=options.tool_choice,
output=OutputConfig(
content_type=options.output.content_type if options.output else None,
diff --git a/py/packages/genkit/src/genkit/blocks/resource.py b/py/packages/genkit/src/genkit/blocks/resource.py
index 72a0ce247d..86663f0495 100644
--- a/py/packages/genkit/src/genkit/blocks/resource.py
+++ b/py/packages/genkit/src/genkit/blocks/resource.py
@@ -131,9 +131,9 @@ async def lookup_resource_by_name(registry: Registry, name: str) -> Action:
ValueError: If the resource cannot be found.
"""
resource = (
- registry.lookup_action(ActionKind.RESOURCE, name)
- or registry.lookup_action(ActionKind.RESOURCE, f'/resource/{name}')
- or registry.lookup_action(ActionKind.RESOURCE, f'/dynamic-action-provider/{name}')
+ await registry.lookup_action(ActionKind.RESOURCE, name)
+ or await registry.lookup_action(ActionKind.RESOURCE, f'/resource/{name}')
+ or await registry.lookup_action(ActionKind.RESOURCE, f'/dynamic-action-provider/{name}')
)
if not resource:
raise ValueError(f'Resource {name} not found')
@@ -380,7 +380,7 @@ async def find_matching_resource(
return action
# Try exact match in registry
- resource = registry.lookup_action(ActionKind.RESOURCE, input_data.uri)
+ resource = await registry.lookup_action(ActionKind.RESOURCE, input_data.uri)
if resource:
return resource
diff --git a/py/packages/genkit/src/genkit/core/action/_action.py b/py/packages/genkit/src/genkit/core/action/_action.py
index 370562c736..ffc33a4fe1 100644
--- a/py/packages/genkit/src/genkit/core/action/_action.py
+++ b/py/packages/genkit/src/genkit/core/action/_action.py
@@ -202,6 +202,8 @@ def __init__(
description: str | None = None,
metadata: dict[str, Any] | None = None,
span_metadata: dict[str, Any] | None = None,
+ input_schema: dict[str, Any] | None = None,
+ output_schema: dict[str, Any] | None = None,
) -> None:
"""Initialize an Action.
@@ -214,6 +216,8 @@ def __init__(
description: Optional human-readable description of the action.
metadata: Optional dictionary of metadata about the action.
span_metadata: Optional dictionary of tracing span metadata.
+ input_schema: Optional JSON schema for the input.
+ output_schema: Optional JSON schema for the output.
"""
self._kind = kind
self._name = name
@@ -225,7 +229,7 @@ def __init__(
action_args, arg_types = extract_action_args_and_types(input_spec)
n_action_args = len(action_args)
self._fn, self._afn = _make_tracing_wrappers(name, kind, span_metadata, n_action_args, fn)
- self._initialize_io_schemas(action_args, arg_types, input_spec)
+ self._initialize_io_schemas(action_args, arg_types, input_spec, input_schema, output_schema)
@property
def kind(self) -> ActionKind:
@@ -362,7 +366,10 @@ async def arun_raw(
Raises:
GenkitError: If an error occurs during action execution.
"""
- input_action = self._input_type.validate_python(raw_input) if self._input_type is not None else None
+ input_action = raw_input
+ if self._input_type is not None:
+ input_action = self._input_type.validate_python(raw_input)
+
return await self.arun(
input=input_action,
on_chunk=on_chunk,
@@ -419,6 +426,8 @@ def _initialize_io_schemas(
action_args: list[str],
arg_types: list[type],
input_spec: inspect.FullArgSpec,
+ input_schema: dict[str, Any] | None = None,
+ output_schema: dict[str, Any] | None = None,
):
"""Initializes input/output schemas based on function signature and hints.
@@ -431,6 +440,8 @@ def _initialize_io_schemas(
action_args: List of detected argument names.
arg_types: List of detected argument types.
input_spec: The FullArgSpec object from inspecting the function.
+ input_schema: Optional JSON schema for the input.
+ output_schema: Optional JSON schema for the output.
Raises:
TypeError: If the function has more than two arguments.
@@ -438,23 +449,28 @@ def _initialize_io_schemas(
if len(action_args) > 2:
raise TypeError(f'can only have up to 2 arg: {action_args}')
- if len(action_args) > 0:
+ if input_schema:
+ self._input_schema = input_schema
+ self._input_type = None
+ elif len(action_args) > 0:
type_adapter = TypeAdapter(arg_types[0])
self._input_schema = type_adapter.json_schema()
self._input_type = type_adapter
- self._metadata[ActionMetadataKey.INPUT_KEY] = self._input_schema
else:
self._input_schema = TypeAdapter(Any).json_schema()
self._input_type = None
- self._metadata[ActionMetadataKey.INPUT_KEY] = self._input_schema
- if ActionMetadataKey.RETURN in input_spec.annotations:
+ self._metadata[ActionMetadataKey.INPUT_KEY] = self._input_schema
+
+ if output_schema:
+ self._output_schema = output_schema
+ elif ActionMetadataKey.RETURN in input_spec.annotations:
type_adapter = TypeAdapter(input_spec.annotations[ActionMetadataKey.RETURN])
self._output_schema = type_adapter.json_schema()
- self._metadata[ActionMetadataKey.OUTPUT_KEY] = self._output_schema
else:
self._output_schema = TypeAdapter(Any).json_schema()
- self._metadata[ActionMetadataKey.OUTPUT_KEY] = self._output_schema
+
+ self._metadata[ActionMetadataKey.OUTPUT_KEY] = self._output_schema
class ActionMetadata(BaseModel):
diff --git a/py/packages/genkit/src/genkit/core/action/types.py b/py/packages/genkit/src/genkit/core/action/types.py
index 203000649e..e329c86431 100644
--- a/py/packages/genkit/src/genkit/core/action/types.py
+++ b/py/packages/genkit/src/genkit/core/action/types.py
@@ -61,6 +61,7 @@ class ActionKind(StrEnum):
RETRIEVER = 'retriever'
TOOL = 'tool'
UTIL = 'util'
+ DYNAMIC_ACTION_PROVIDER = 'dynamic-action-provider'
class ActionResponse(BaseModel):
diff --git a/py/packages/genkit/src/genkit/core/flows.py b/py/packages/genkit/src/genkit/core/flows.py
index bccb983b84..12807390bc 100644
--- a/py/packages/genkit/src/genkit/core/flows.py
+++ b/py/packages/genkit/src/genkit/core/flows.py
@@ -156,7 +156,7 @@ async def handle_run_flows(
try:
# Look up the flow action.
- action = registry.lookup_action_by_key(flow_name)
+ action = await registry.lookup_action_by_key(flow_name)
if action is None:
await logger.aerror(
'Flow not found',
diff --git a/py/packages/genkit/src/genkit/core/registry.py b/py/packages/genkit/src/genkit/core/registry.py
index c2be1986cc..cef23a9fdf 100644
--- a/py/packages/genkit/src/genkit/core/registry.py
+++ b/py/packages/genkit/src/genkit/core/registry.py
@@ -128,6 +128,8 @@ def register_action(
description: str | None = None,
metadata: dict[str, Any] | None = None,
span_metadata: dict[str, str] | None = None,
+ input_schema: dict[str, Any] | None = None,
+ output_schema: dict[str, Any] | None = None,
) -> Action:
"""Register a new action with the registry.
@@ -143,6 +145,8 @@ def register_action(
description: Optional human-readable description of the action.
metadata: Optional dictionary of metadata about the action.
span_metadata: Optional dictionary of tracing span metadata.
+ input_schema: Optional JSON schema for the input.
+ output_schema: Optional JSON schema for the output.
Returns:
The newly created and registered Action instance.
@@ -155,6 +159,8 @@ def register_action(
description=description,
metadata=metadata,
span_metadata=span_metadata,
+ input_schema=input_schema,
+ output_schema=output_schema,
)
with self._lock:
if kind not in self._entries:
@@ -174,7 +180,26 @@ def register_action_from_instance(self, action: Action) -> None:
self._entries[action.kind] = {}
self._entries[action.kind][action.name] = action
- def lookup_action(self, kind: ActionKind, name: str) -> Action | None:
+ async def resolve_action_names(self, key: str) -> list[str]:
+ """Resolves all action names matching a key (including dynamic providers)."""
+ kind, name = parse_action_key(key)
+ if ':' in name:
+ host_part, pattern = name.split(':', 1)
+ provider_key = create_action_key(ActionKind.DYNAMIC_ACTION_PROVIDER, host_part)
+ dap = await self.lookup_action_by_key(provider_key)
+ if dap and is_dynamic_action_provider(dap):
+ # pattern is like "tool/mytool" or "tool/*"
+ if '/' in pattern:
+ p_kind_str, p_name_pattern = pattern.split('/', 1)
+ p_kind = ActionKind(p_kind_str)
+ metadata = await dap.list_action_metadata(p_kind, p_name_pattern)
+ return [f'{provider_key}:{p_kind.value}/{m.name}' for m in metadata]
+
+ if await self.lookup_action(kind, name):
+ return [key]
+ return []
+
+ async def lookup_action(self, kind: ActionKind, name: str) -> Action | None:
"""Look up an action by its kind and name.
Args:
@@ -184,6 +209,16 @@ def lookup_action(self, kind: ActionKind, name: str) -> Action | None:
Returns:
The Action instance if found, None otherwise.
"""
+ if ':' in name:
+ host_part, tool_part = name.split(':', 1)
+ provider_key = create_action_key(ActionKind.DYNAMIC_ACTION_PROVIDER, host_part)
+ dap = await self.lookup_action_by_key(provider_key)
+ if dap and is_dynamic_action_provider(dap):
+ if '/' in tool_part:
+ p_kind_str, p_name = tool_part.split('/', 1)
+ p_kind = ActionKind(p_kind_str)
+ return await dap.get_action(p_kind, p_name)
+
with self._lock:
# If the entry does not exist, we fist try to call the action
# resolver for the plugin to give it a chance to dynamically add the
@@ -211,7 +246,7 @@ def get_actions_by_kind(self, kind: ActionKind) -> dict[str, Action]:
with self._lock:
return self._entries.get(kind, {}).copy()
- def lookup_action_by_key(self, key: str) -> Action | None:
+ async def lookup_action_by_key(self, key: str) -> Action | None:
"""Look up an action using its combined key string.
The key format is `/`, where kind must be a valid
@@ -228,9 +263,9 @@ def lookup_action_by_key(self, key: str) -> Action | None:
`ActionKind`.
"""
kind, name = parse_action_key(key)
- return self.lookup_action(kind, name)
+ return await self.lookup_action(kind, name)
- def list_serializable_actions(self, allowed_kinds: set[ActionKind] | None = None) -> dict[str, Action] | None:
+ async def list_serializable_actions(self, allowed_kinds: set[ActionKind] | None = None) -> dict[str, Any] | None:
"""Enlist all the actions into a dictionary.
Args:
@@ -246,7 +281,7 @@ def list_serializable_actions(self, allowed_kinds: set[ActionKind] | None = None
if allowed_kinds is not None and kind not in allowed_kinds:
continue
for name in self._entries[kind]:
- action = self.lookup_action(kind, name)
+ action = await self.lookup_action(kind, name)
if action is not None:
key = create_action_key(kind, name)
# TODO: Serialize the Action instance
@@ -259,42 +294,26 @@ def list_serializable_actions(self, allowed_kinds: set[ActionKind] | None = None
}
return actions
- def list_actions(
- self,
- actions: dict[str, Action] | None = None,
- allowed_kinds: set[ActionKind] | None = None,
- ) -> dict[str, Action] | None:
- """Add actions or models.
-
- Args:
- actions: dictionary of serializable actions.
- allowed_kinds: The types of actions to list. If None, all actions
- are listed.
-
- Returns:
- A dictionary of serializable Actions updated.
- """
- if actions is None:
- actions = {}
-
- for plugin_name in self._list_actions_resolvers:
- actions_list = self._list_actions_resolvers[plugin_name]()
-
- for _action in actions_list:
- kind = _action.kind
- if allowed_kinds is not None and kind not in allowed_kinds:
- continue
- key = create_action_key(kind, _action.name)
-
- if key not in actions:
- actions[key] = {
- 'key': key,
- 'name': _action.name,
- 'inputSchema': _action.input_json_schema,
- 'outputSchema': _action.output_json_schema,
- 'metadata': _action.metadata,
+ async def list_resolvable_actions(self) -> dict[str, Any]:
+ """Returns all resolvable actions including dynamic ones."""
+ resolvable_actions = {}
+ # TODO: parallelize or use resolvers?
+ with self._lock:
+ # First add all directly registered actions
+ for kind in self._entries:
+ for name, action in self._entries[kind].items():
+ key = create_action_key(kind, name)
+ resolvable_actions[key] = {
+ 'name': action.name,
+ 'inputSchema': action.input_schema,
+ 'outputSchema': action.output_schema,
+ 'metadata': action.metadata,
}
- return actions
+ if is_dynamic_action_provider(action):
+ dap_prefix = key
+ dap_record = await action.get_action_metadata_record(dap_prefix)
+ resolvable_actions.update(dap_record)
+ return resolvable_actions
def register_value(self, kind: str, name: str, value: Any):
"""Registers a value with a given kind and name.
diff --git a/py/packages/genkit/src/genkit/core/typing.py b/py/packages/genkit/src/genkit/core/typing.py
index dce5a4f7f1..53cb0a0fb4 100644
--- a/py/packages/genkit/src/genkit/core/typing.py
+++ b/py/packages/genkit/src/genkit/core/typing.py
@@ -991,6 +991,7 @@ class GenerateActionOptions(BaseModel):
model_config = ConfigDict(extra='forbid', populate_by_name=True)
model: str | None = None
docs: list[DocumentData] | None = None
+ resources: list[str] | None = None
messages: list[Message]
tools: list[str] | None = None
tool_choice: ToolChoice | None = Field(None, alias='toolChoice')
@@ -1012,6 +1013,7 @@ class GenerateRequest(BaseModel):
tool_choice: ToolChoice | None = Field(None, alias='toolChoice')
output: OutputConfig | None = None
docs: list[DocumentData] | None = None
+ resources: list[ToolDefinition] | None = None
candidates: float | None = None
diff --git a/py/plugins/compat-oai/src/genkit/plugins/compat_oai/models/model.py b/py/plugins/compat-oai/src/genkit/plugins/compat_oai/models/model.py
index bf1f7315aa..015c98c35d 100644
--- a/py/plugins/compat-oai/src/genkit/plugins/compat_oai/models/model.py
+++ b/py/plugins/compat-oai/src/genkit/plugins/compat_oai/models/model.py
@@ -87,7 +87,7 @@ def _get_tools_definition(self, tools: list[ToolDefinition]) -> list[dict]:
"""
result = []
for tool_definition in tools:
- action = self._registry.registry.lookup_action(ActionKind.TOOL, tool_definition.name)
+ action = self._registry.registry.get_action(ActionKind.TOOL, tool_definition.name)
function_call = pydantic_function_tool(
model=action.input_type._type,
name=tool_definition.name,
diff --git a/py/plugins/mcp/pyproject.toml b/py/plugins/mcp/pyproject.toml
index 24d42c1f09..8647cc1d09 100644
--- a/py/plugins/mcp/pyproject.toml
+++ b/py/plugins/mcp/pyproject.toml
@@ -46,4 +46,4 @@ build-backend = "hatchling.build"
requires = ["hatchling"]
[tool.hatch.build.targets.wheel]
-packages = ["src"]
+packages = ["src/genkit"]
diff --git a/py/plugins/mcp/src/genkit/plugins/mcp/client/client.py b/py/plugins/mcp/src/genkit/plugins/mcp/client/client.py
index 4ef9e715a0..4a27ad6270 100644
--- a/py/plugins/mcp/src/genkit/plugins/mcp/client/client.py
+++ b/py/plugins/mcp/src/genkit/plugins/mcp/client/client.py
@@ -197,12 +197,18 @@ async def tool_wrapper(args: Any = None, _tool_name=tool.name):
except Exception as e:
logger.error(f'Error registering tools for {self.server_name}: {e}')
- async def get_active_tools(self) -> List[Any]:
+ async def get_active_tools(self) -> List[Tool]:
"""Returns all active tools."""
if not self.session:
return []
return await self.list_tools()
+ async def get_active_resources(self) -> List[Resource]:
+ """Returns all active resources."""
+ if not self.session:
+ return []
+ return await self.list_resources()
+
def create_mcp_client(config: McpServerConfig, name: str = 'mcp-client') -> McpClient:
return McpClient(name, config)
diff --git a/py/plugins/mcp/src/genkit/plugins/mcp/client/host.py b/py/plugins/mcp/src/genkit/plugins/mcp/client/host.py
index cd0a4691d5..0bbdb93b7d 100644
--- a/py/plugins/mcp/src/genkit/plugins/mcp/client/host.py
+++ b/py/plugins/mcp/src/genkit/plugins/mcp/client/host.py
@@ -59,6 +59,24 @@ async def disable(self, name: str):
client.config.disabled = True
await client.close()
+ async def get_active_tools(self, ai: Optional[Genkit] = None) -> List[str]:
+ """Returns all active tool names across all clients."""
+ all_tools = []
+ for client in self.clients.values():
+ if client.session and not client.config.disabled:
+ tools = await client.get_active_tools()
+ all_tools.extend([f'{client.server_name}/{t.name}' for t in tools])
+ return all_tools
+
+ async def get_active_resources(self, ai: Optional[Genkit] = None) -> List[str]:
+ """Returns all active resource URIs across all clients."""
+ all_resources = []
+ for client in self.clients.values():
+ if client.session and not client.config.disabled:
+ resources = await client.get_active_resources()
+ all_resources.extend([r.uri for r in resources])
+ return all_resources
+
def create_mcp_host(configs: Dict[str, McpServerConfig]) -> McpHost:
return McpHost(configs)
diff --git a/py/plugins/mcp/src/genkit/plugins/mcp/server.py b/py/plugins/mcp/src/genkit/plugins/mcp/server.py
index 3d313ccda1..5f694a02a0 100644
--- a/py/plugins/mcp/src/genkit/plugins/mcp/server.py
+++ b/py/plugins/mcp/src/genkit/plugins/mcp/server.py
@@ -153,13 +153,10 @@ async def setup(self) -> None:
if resource_meta.get('template'):
self.resource_templates.append((resource_meta['template'], action))
- # Also get actions from plugins that might not be in _entries yet
- # (though most plugins register them in _entries during initialization)
- plugin_actions = self.ai.registry.list_actions()
- for key in plugin_actions:
- kind, name = parse_action_key(key)
- action = self.ai.registry.lookup_action(kind, name)
- if action:
+ # Get actions from registry by kind
+ for kind in [ActionKind.TOOL, ActionKind.PROMPT, ActionKind.RESOURCE]:
+ actions = self.ai.registry.get_actions_by_kind(kind)
+ for action in actions.values():
if kind == ActionKind.TOOL and action not in self.tool_actions:
self.tool_actions.append(action)
self.tool_actions_map[action.name] = action
diff --git a/py/pyproject.toml b/py/pyproject.toml
index 83b23b02ba..9b8b541e80 100644
--- a/py/pyproject.toml
+++ b/py/pyproject.toml
@@ -118,6 +118,7 @@ genkit-plugin-xai = { workspace = true }
google-genai-hello = { workspace = true }
google-genai-image = { workspace = true }
prompt-demo = { workspace = true }
+genkit-plugins-mcp = { workspace = true }
[tool.uv.workspace]
members = ["packages/*", "plugins/*", "samples/*"]
diff --git a/py/samples/google-genai-vertexai-image/src/main.py b/py/samples/google-genai-vertexai-image/src/main.py
index 9ad6bf4535..e91a94d6ef 100755
--- a/py/samples/google-genai-vertexai-image/src/main.py
+++ b/py/samples/google-genai-vertexai-image/src/main.py
@@ -25,7 +25,7 @@
from genkit.ai import Genkit
from genkit.plugins.google_genai import VertexAI, vertexai_name
-ai = Genkit(plugins=[VertexAI(project='', location='us-central1')])
+ai = Genkit(plugins=[VertexAI(location='us-central1')])
@ai.flow()
@@ -55,10 +55,17 @@ async def main() -> None:
# Imagen draws an image by description. The model used is available only in
# VertexAI API.
result = await draw_image_with_imagen()
- decoded_image = BytesIO(base64.b64decode(result.message.content[0].root.media.url))
+ image_url = result.message.content[0].root.media.url
+ if image_url.startswith('data:'):
+ image_url = image_url.split(',', 1)[1]
+ decoded_image = BytesIO(base64.b64decode(image_url))
image = Image.open(decoded_image)
image.show('Image generated by Gemini')
if __name__ == '__main__':
+ import sys
+ # Debug: Print registered actions
+ print(f"Registered models: {list(ai.registry.get_actions_by_kind('model').keys())}", file=sys.stderr)
+ print(f"Registered flows: {list(ai.registry.get_actions_by_kind('flow').keys())}", file=sys.stderr)
ai.run_main(main())
diff --git a/py/samples/mcp/run.sh b/py/samples/mcp/run.sh
new file mode 100755
index 0000000000..e60b1c94fb
--- /dev/null
+++ b/py/samples/mcp/run.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+exec uv run src/main.py "$@"
diff --git a/py/samples/mcp/src/http_server.py b/py/samples/mcp/src/http_server.py
new file mode 100644
index 0000000000..c1aabbeb2f
--- /dev/null
+++ b/py/samples/mcp/src/http_server.py
@@ -0,0 +1,95 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+HTTP MCP Server Example
+
+This demonstrates creating an HTTP-based MCP server using SSE transport
+with Starlette and the official MCP Python SDK.
+"""
+
+import asyncio
+import logging
+
+import mcp.types as types
+import uvicorn
+from mcp.server import Server
+from mcp.server.sse import SseServerTransport
+from starlette.applications import Starlette
+from starlette.responses import Response
+from starlette.routing import Mount, Route
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+async def main():
+ """Start the HTTP MCP server."""
+
+ # Create SSE transport logic
+ # The endpoint '/mcp/' is where clients will POST messages
+ sse = SseServerTransport('/mcp/')
+
+ async def handle_sse(request):
+ """Handle incoming SSE connections."""
+ async with sse.connect_sse(request.scope, request.receive, request._send) as streams:
+ read_stream, write_stream = streams
+
+ # Create a new server instance for this session
+ server = Server('example-server', version='1.0.0')
+
+ @server.list_tools()
+ async def list_tools() -> list[types.Tool]:
+ return [
+ types.Tool(
+ name='test_http',
+ description='Test HTTP transport',
+ inputSchema={'type': 'object', 'properties': {}},
+ )
+ ]
+
+ @server.call_tool()
+ async def call_tool(name: str, arguments: dict) -> list[types.TextContent]:
+ if name == 'test_http':
+                # In this SSE implementation, the session ID is internal,
+                # but we can return a confirmation.
+ return [types.TextContent(type='text', text='Session Active')]
+ raise ValueError(f'Unknown tool: {name}')
+
+ # Run the server with the streams
+ await server.run(read_stream, write_stream, server.create_initialization_options())
+
+ # Return empty response after connection closes
+ return Response()
+
+ # Define routes
+ # GET /mcp -> Starts SSE stream
+ # POST /mcp/ -> Handles messages (via SseServerTransport)
+ routes = [
+ Route('/mcp', endpoint=handle_sse, methods=['GET']),
+ Mount('/mcp/', app=sse.handle_post_message),
+ ]
+
+ app = Starlette(routes=routes)
+
+ config = uvicorn.Config(app, host='0.0.0.0', port=3334, log_level='info')
+ server = uvicorn.Server(config)
+
+ print('HTTP MCP server running on http://localhost:3334/mcp')
+ await server.serve()
+
+
+if __name__ == '__main__':
+ asyncio.run(main())
diff --git a/py/samples/mcp/src/main.py b/py/samples/mcp/src/main.py
new file mode 100644
index 0000000000..5b31dd4229
--- /dev/null
+++ b/py/samples/mcp/src/main.py
@@ -0,0 +1,305 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+
+from pathlib import Path
+from typing import Optional
+
+import structlog
+from pydantic import BaseModel
+
+from genkit.ai import Genkit
+from genkit.plugins.google_genai import GoogleAI
+from genkit.plugins.mcp import McpServerConfig, create_mcp_host
+
+logger = structlog.get_logger(__name__)
+
+# Get the current directory
+current_dir = Path(__file__).parent
+workspace_dir = current_dir.parent / 'test-workspace'
+# repo_root is 4 levels up: py/samples/mcp/src -> py/samples/mcp -> py/samples -> py -> root
+repo_root = current_dir.parent.parent.parent.parent
+
+# Initialize Genkit with GoogleAI
+ai = Genkit(plugins=[GoogleAI()], model='googleai/gemini-2.5-flash')
+
+# Create MCP host with multiple servers
+mcp_host = create_mcp_host({
+ 'git-client': McpServerConfig(command='uvx', args=['mcp-server-git']),
+ 'fs': McpServerConfig(command='npx', args=['-y', '@modelcontextprotocol/server-filesystem', str(workspace_dir)]),
+ 'everything': McpServerConfig(command='npx', args=['-y', '@modelcontextprotocol/server-everything']),
+})
+
+
+@ai.flow(name='git-commits')
+async def git_commits(query: str = ''):
+ """Summarize recent git commits using MCP git client."""
+ await mcp_host.start()
+ try:
+ # Register tools to registry directly
+ await mcp_host.register_tools(ai)
+
+ # Get active tool names for this call
+ tools = await mcp_host.get_active_tools(ai)
+
+ result = await ai.generate(prompt=f"summarize last 5 commits in '{repo_root}'", tools=tools)
+ return result.text
+ finally:
+ await mcp_host.close()
+
+
+@ai.flow(name='dynamic-git-commits')
+async def dynamic_git_commits(query: str = ''):
+ """Summarize recent git commits using wildcard tool selection."""
+ await mcp_host.start()
+ try:
+ await mcp_host.register_tools(ai)
+
+        # Python may not support wildcards in the tools list yet, so we
+        # simulate the wildcard by filtering tool names on a prefix.
+        # Equivalent wildcard form (if supported): tools=['git-client/*']
+
+ all_tools = await mcp_host.get_active_tools(ai)
+ tools = [t for t in all_tools if t.startswith('git-client/')]
+
+ result = await ai.generate(prompt=f"summarize last 5 commits in '{repo_root}'", tools=tools)
+ return result.text
+ finally:
+ await mcp_host.close()
+
+
+@ai.flow(name='get-file')
+async def get_file(query: str = ''):
+ """Read and summarize a file using MCP filesystem client."""
+ await mcp_host.start()
+ try:
+ await mcp_host.register_tools(ai)
+ tools = await mcp_host.get_active_tools(ai)
+
+ result = await ai.generate(prompt=f"summarize contents of hello-world.txt (in '{workspace_dir}')", tools=tools)
+ return result.text
+ finally:
+ await mcp_host.close()
+
+
+@ai.flow(name='dynamic-get-file')
+async def dynamic_get_file(query: str = ''):
+ """Read file using specific tool selection."""
+ await mcp_host.start()
+ try:
+ await mcp_host.register_tools(ai)
+
+ # Filter for specific tool: 'fs/read_file'
+ tools = [t for t in await mcp_host.get_active_tools(ai) if t == 'fs/read_file']
+
+ result = await ai.generate(
+ prompt=f"summarize contents of hello-world.txt (in '{workspace_dir}')", tools=tools
+ )
+ return result.text
+ finally:
+ await mcp_host.close()
+
+
+@ai.flow(name='dynamic-prefix-tool')
+async def dynamic_prefix_tool(query: str = ''):
+ """Read file using prefix tool selection."""
+ await mcp_host.start()
+ try:
+ await mcp_host.register_tools(ai)
+
+ # Filter for prefix: 'fs/read_*'
+ all_tools = await mcp_host.get_active_tools(ai)
+ tools = [t for t in all_tools if t.startswith('fs/read_')]
+
+ result = await ai.generate(
+ prompt=f"summarize contents of hello-world.txt (in '{workspace_dir}')", tools=tools
+ )
+ return result.text
+ finally:
+ await mcp_host.close()
+
+
+@ai.flow(name='dynamic-disable-enable')
+async def dynamic_disable_enable(query: str = ''):
+ """Test disabling and re-enabling an MCP client."""
+ await mcp_host.start()
+ try:
+ await mcp_host.register_tools(ai)
+ tools = [t for t in await mcp_host.get_active_tools(ai) if t == 'fs/read_file']
+
+ # Run successfully
+ result1 = await ai.generate(
+ prompt=f"summarize contents of hello-world.txt (in '{workspace_dir}')", tools=tools
+ )
+ text1 = result1.text
+
+ # Disable 'fs' and try to run (should fail)
+ await mcp_host.disable('fs')
+ text2 = ''
+ try:
+            # Tools are intentionally not re-registered; generate is expected to
+            # fail because the 'fs' client backing these tools is now disabled.
+ result = await ai.generate(
+ prompt=f"summarize contents of hello-world.txt (in '{workspace_dir}')", tools=tools
+ )
+ text2 = f'ERROR! This should have failed but succeeded: {result.text}'
+ except Exception as e:
+ text2 = str(e)
+
+ # Re-enable 'fs' and run
+ await mcp_host.enable('fs')
+ # Re-connect/re-register might be needed
+ await mcp_host.register_tools(ai)
+
+ result3 = await ai.generate(
+ prompt=f"summarize contents of hello-world.txt (in '{workspace_dir}')", tools=tools
+ )
+ text3 = result3.text
+
+        return f'Original:\n{text1}\nAfter Disable:\n{text2}\nAfter Enable:\n{text3}'
+ finally:
+ await mcp_host.close()
+
+
+@ai.flow(name='test-resource')
+async def test_resource(query: str = ''):
+ """Test reading a resource."""
+ await mcp_host.start()
+ try:
+ # Pass resources as grounding context if supported
+ resources = await mcp_host.get_active_resources(ai)
+
+ result = await ai.generate(
+ prompt=[
+ {'text': 'analyze this: '},
+ {'resource': {'uri': 'test://static/resource/1'}}
+ ],
+ resources=resources
+ )
+
+ return result.text
+ finally:
+ await mcp_host.close()
+
+
+@ai.flow(name='dynamic-test-resources')
+async def dynamic_test_resources(query: str = ''):
+ """Test reading resources with wildcard."""
+ await mcp_host.start()
+ try:
+ # Simulate wildcard resources if not natively supported
+ # resources=['resource/*']
+
+ all_resources = await mcp_host.get_active_resources(ai)
+ resources = [r for r in all_resources if r.startswith('test://')] # simplified filter
+
+ result = await ai.generate(
+ prompt=[
+ {'text': 'analyze this: '},
+ {'resource': {'uri': 'test://static/resource/1'}}
+ ],
+ resources=resources
+ )
+ return result.text
+ finally:
+ await mcp_host.close()
+
+
+@ai.flow(name='dynamic-test-one-resource')
+async def dynamic_test_one_resource(query: str = ''):
+ """Test reading one specific resource."""
+ await mcp_host.start()
+ try:
+ resources = ['test://static/resource/1']
+
+ result = await ai.generate(
+ prompt=[
+ {'text': 'analyze this: '},
+ {'resource': {'uri': 'test://static/resource/1'}}
+ ],
+ resources=resources
+ )
+ return result.text
+ finally:
+ await mcp_host.close()
+
+
+@ai.flow(name='update-file')
+async def update_file(query: str = ''):
+ """Update a file using MCP filesystem client."""
+ await mcp_host.start()
+ try:
+ await mcp_host.register_tools(ai)
+ tools = await mcp_host.get_active_tools(ai)
+
+ result = await ai.generate(
+ prompt=f"Improve hello-world.txt (in '{workspace_dir}') by rewriting the text, making it longer, use your imagination.",
+ tools=tools,
+ )
+ return result.text
+ finally:
+ await mcp_host.close()
+
+
+class ControlMcpInput(BaseModel):
+ action: str # 'RECONNECT', 'ENABLE', 'DISABLE', 'DISCONNECT'
+ client_id: Optional[str] = 'git-client'
+
+
+@ai.flow(name='controlMcp')
+async def control_mcp(input: ControlMcpInput):
+ """Control MCP client connections (enable/disable/reconnect)."""
+ client_id = input.client_id
+ action = input.action.upper()
+
+ if action == 'DISABLE':
+ await mcp_host.disable(client_id)
+ elif action == 'DISCONNECT':
+ # Assuming disconnect is equivalent to close for a specific client
+ if client_id in mcp_host.clients:
+ await mcp_host.clients[client_id].close()
+ elif action == 'RECONNECT':
+ if client_id in mcp_host.clients:
+ await mcp_host.clients[client_id].connect()
+ elif action == 'ENABLE':
+ await mcp_host.enable(client_id)
+
+ return f'Action {action} completed for {client_id}'
+
+
+async def main():
+ """Run sample flows."""
+ logger.info('Starting MCP sample application')
+
+ # Test git commits flow
+ logger.info('Testing git-commits flow...')
+ try:
+ result = await git_commits()
+ logger.info('git-commits result', result=result[:200])
+ except Exception as e:
+ logger.error('git-commits failed', error=str(e), exc_info=True)
+
+ # Test get-file flow
+ logger.info('Testing get-file flow...')
+ try:
+ result = await get_file()
+ logger.info('get-file result', result=result[:200])
+ except Exception as e:
+ logger.error('get-file failed', error=str(e), exc_info=True)
+
+
+if __name__ == '__main__':
+ ai.run_main(main())
diff --git a/py/samples/mcp/src/server.py b/py/samples/mcp/src/server.py
new file mode 100644
index 0000000000..546cb3889d
--- /dev/null
+++ b/py/samples/mcp/src/server.py
@@ -0,0 +1,81 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""
+MCP Server Example
+
+This demonstrates creating an MCP server that exposes Genkit tools, prompts,
+and resources through the Model Context Protocol.
+"""
+
+import asyncio
+
+from pydantic import BaseModel, Field
+
+from genkit.ai import Genkit
+from genkit.plugins.google_genai import GoogleAI
+from genkit.plugins.mcp import McpServerOptions, create_mcp_server
+
+# Initialize Genkit
+ai = Genkit(plugins=[])
+
+
+# Define a tool
+class AddInput(BaseModel):
+ a: int = Field(..., description='First number')
+ b: int = Field(..., description='Second number')
+
+
+@ai.tool(name='add', description='add two numbers together')
+def add(input: AddInput) -> int:
+ return input.a + input.b
+
+
+# Define a prompt
+happy_prompt = ai.define_prompt(
+ name='happy',
+ input_schema={'action': str},
+ prompt="If you're happy and you know it, {{action}}.",
+)
+
+
+# Define resources
+def my_resource_handler(inp):
+ return {'content': [{'text': 'my resource'}]}
+
+
+ai.define_resource(name='my resources', uri='test://static/resource/1', fn=my_resource_handler)
+
+
+def file_resource_handler(inp):
+ uri = inp.uri
+ return {'content': [{'text': f'file contents for {uri}'}]}
+
+
+ai.define_resource(name='file', template='file://{path}', fn=file_resource_handler)
+
+
+async def main():
+ """Start the MCP server."""
+ # Create MCP server
+ server = create_mcp_server(ai, McpServerOptions(name='example_server', version='0.0.1'))
+
+ print('Starting MCP server on stdio...')
+ await server.start()
+
+
+if __name__ == '__main__':
+ asyncio.run(main())
diff --git a/py/uv.lock b/py/uv.lock
index 51bc49f894..6252745462 100644
--- a/py/uv.lock
+++ b/py/uv.lock
@@ -1,5 +1,5 @@
version = 1
-revision = 3
+revision = 2
requires-python = ">=3.10"
resolution-markers = [
"python_full_version >= '3.14'",
@@ -36,6 +36,7 @@ members = [
"google-genai-image",
"google-genai-vertexai-hello",
"google-genai-vertexai-image",
+ "mcp-sample",
"menu",
"model-garden-example",
"multi-server",
@@ -3321,6 +3322,25 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/e2/fc/6dc7659c2ae5ddf280477011f4213a74f806862856b796ef08f028e664bf/mcp-1.25.0-py3-none-any.whl", hash = "sha256:b37c38144a666add0862614cc79ec276e97d72aa8ca26d622818d4e278b9721a", size = 233076, upload-time = "2025-12-19T10:19:55.416Z" },
]
+[[package]]
+name = "mcp-sample"
+version = "0.1.0"
+source = { editable = "samples/mcp" }
+dependencies = [
+ { name = "genkit" },
+ { name = "genkit-plugin-google-genai" },
+ { name = "genkit-plugins-mcp" },
+ { name = "mcp" },
+]
+
+[package.metadata]
+requires-dist = [
+ { name = "genkit", editable = "packages/genkit" },
+ { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" },
+ { name = "genkit-plugins-mcp", editable = "plugins/mcp" },
+ { name = "mcp" },
+]
+
[[package]]
name = "mdurl"
version = "0.1.2"