From 3c02aefe444d5834efcd3c9c769d4e60e5d785c6 Mon Sep 17 00:00:00 2001 From: sreedharsreeram <141047751+sreedharsreeram@users.noreply.github.com> Date: Fri, 24 Apr 2026 23:43:16 +0000 Subject: [PATCH 1/6] withSupermemory config object signature (#876) --- packages/tools/src/vercel/index.ts | 30 +-- packages/tools/test/ai-sdk-test.ts | 3 +- .../tools/test/chatapp/app/api/chat/route.ts | 3 +- .../test/chatapp/app/api/stream/route.ts | 7 +- .../test/with-supermemory/integration.test.ts | 213 +++++++----------- .../tools/test/with-supermemory/unit.test.ts | 19 +- 6 files changed, 119 insertions(+), 156 deletions(-) diff --git a/packages/tools/src/vercel/index.ts b/packages/tools/src/vercel/index.ts index ffd446035..582b46af4 100644 --- a/packages/tools/src/vercel/index.ts +++ b/packages/tools/src/vercel/index.ts @@ -15,6 +15,8 @@ import type { PromptTemplate, MemoryPromptData } from "./memory-prompt" const DEFAULT_MEMORY_RETRIEVAL_TIMEOUT_MS = 5000 interface WrapVercelLanguageModelOptions { + /** The container tag/identifier for memory search (e.g., user ID, project ID) */ + containerTag: string /** Optional conversation ID to group messages for contextual memory generation */ conversationId?: string /** Enable detailed logging of memory search and injection */ @@ -73,8 +75,8 @@ interface WrapVercelLanguageModelOptions { * detection of `model.specificationVersion`. * * @param model - The language model to wrap with supermemory capabilities (V2 or V3) - * @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID) - * @param options - Optional configuration options for the middleware + * @param options - Configuration options for Supermemory integration + * @param options.containerTag - Required. The container tag/identifier for memory search (e.g., user ID, project ID) * @param options.conversationId - Optional conversation ID to group messages into a single document for contextual memory generation * @param options.verbose - Optional flag to enable detailed logging of memory search and injection process (default: false) * @param options.mode - Optional mode for memory search: "profile", "query", or "full" (default: "profile") @@ -90,7 +92,8 @@ interface WrapVercelLanguageModelOptions { * import { withSupermemory } from "@supermemory/tools/ai-sdk" * import { openai } from "@ai-sdk/openai" * - * const modelWithMemory = withSupermemory(openai("gpt-4"), "user-123", { + * const modelWithMemory = withSupermemory(openai("gpt-4"), { + * containerTag: "user-123", * conversationId: "conversation-456", * mode: "full", * addMemory: "always" @@ -107,10 +110,9 @@ interface WrapVercelLanguageModelOptions { */ const wrapVercelLanguageModel = ( model: T, - containerTag: string, - options?: WrapVercelLanguageModelOptions, + options: WrapVercelLanguageModelOptions, ): T => { - const providedApiKey = options?.apiKey ?? process.env.SUPERMEMORY_API_KEY + const providedApiKey = options.apiKey ?? process.env.SUPERMEMORY_API_KEY if (!providedApiKey) { throw new Error( @@ -119,18 +121,18 @@ const wrapVercelLanguageModel = ( } const ctx = createSupermemoryContext({ - containerTag, + containerTag: options.containerTag, apiKey: providedApiKey, - conversationId: options?.conversationId, - verbose: options?.verbose ?? false, - mode: options?.mode ?? "profile", - addMemory: options?.addMemory ?? "never", - baseUrl: options?.baseUrl, - promptTemplate: options?.promptTemplate, + conversationId: options.conversationId, + verbose: options.verbose ?? false, + mode: options.mode ?? 
"profile", + addMemory: options.addMemory ?? "never", + baseUrl: options.baseUrl, + promptTemplate: options.promptTemplate, memoryRetrievalTimeoutMs: DEFAULT_MEMORY_RETRIEVAL_TIMEOUT_MS, }) - const skipMemoryOnError = options?.skipMemoryOnError ?? true + const skipMemoryOnError = options.skipMemoryOnError ?? true // Proxy keeps prototype/getter fields (e.g. provider, modelId) that `{ ...model }` drops. return new Proxy(model, { diff --git a/packages/tools/test/ai-sdk-test.ts b/packages/tools/test/ai-sdk-test.ts index 3d9f13ea6..19bb272da 100644 --- a/packages/tools/test/ai-sdk-test.ts +++ b/packages/tools/test/ai-sdk-test.ts @@ -2,7 +2,8 @@ import { generateText } from "ai" import { withSupermemory } from "../src/ai-sdk" import { openai } from "@ai-sdk/openai" -const modelWithMemory = withSupermemory(openai("gpt-5"), "user_id_life", { +const modelWithMemory = withSupermemory(openai("gpt-5"), { + containerTag: "user_id_life", verbose: true, mode: "query", // options are profile, query, full (default is profile) addMemory: "always", // options are always, never (default is never) diff --git a/packages/tools/test/chatapp/app/api/chat/route.ts b/packages/tools/test/chatapp/app/api/chat/route.ts index c8f7a8bea..ded727454 100644 --- a/packages/tools/test/chatapp/app/api/chat/route.ts +++ b/packages/tools/test/chatapp/app/api/chat/route.ts @@ -1,7 +1,8 @@ import { gateway, streamText, type ModelMessage } from "ai" import { withSupermemory } from "@supermemory/tools/ai-sdk" -const model = withSupermemory(gateway("google/gemini-2.5-flash"), "user-1", { +const model = withSupermemory(gateway("google/gemini-2.5-flash"), { + containerTag: "user-1", apiKey: process.env.SUPERMEMORY_API_KEY ?? "", mode: "full", addMemory: "always", diff --git a/packages/tools/test/chatapp/app/api/stream/route.ts b/packages/tools/test/chatapp/app/api/stream/route.ts index 5889a5496..a86c39419 100644 --- a/packages/tools/test/chatapp/app/api/stream/route.ts +++ b/packages/tools/test/chatapp/app/api/stream/route.ts @@ -8,6 +8,7 @@ const SUPERMEMORY_USER_ID = "user-1" const gatewayModel = gateway("google/gemini-2.5-flash") const supermemoryOptions = { + containerTag: SUPERMEMORY_USER_ID, apiKey: process.env.SUPERMEMORY_API_KEY ?? 
"", mode: "full" as const, addMemory: "always" as const, @@ -31,11 +32,7 @@ export async function POST(req: Request) { }) : gatewayModel - const model = withSupermemory( - innerModel, - SUPERMEMORY_USER_ID, - supermemoryOptions, - ) + const model = withSupermemory(innerModel, supermemoryOptions) const result = streamText({ model, diff --git a/packages/tools/test/with-supermemory/integration.test.ts b/packages/tools/test/with-supermemory/integration.test.ts index 74263c19b..13ea59c67 100644 --- a/packages/tools/test/with-supermemory/integration.test.ts +++ b/packages/tools/test/with-supermemory/integration.test.ts @@ -96,14 +96,11 @@ describe.skipIf(!shouldRunIntegration)( const { model, getCapturedGenerateParams } = createIntegrationMockModel() - const wrapped = withSupermemory( - model, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - mode: "profile", - }, - ) + const wrapped = withSupermemory(model, { + containerTag: INTEGRATION_CONFIG.containerTag, + apiKey: INTEGRATION_CONFIG.apiKey, + mode: "profile", + }) await wrapped.doGenerate({ prompt: [ @@ -127,16 +124,13 @@ describe.skipIf(!shouldRunIntegration)( const conversationId = `test-generate-${Date.now()}` - const wrapped = withSupermemory( - model, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - mode: "profile", - addMemory: "always", - conversationId, - }, - ) + const wrapped = withSupermemory(model, { + containerTag: INTEGRATION_CONFIG.containerTag, + apiKey: INTEGRATION_CONFIG.apiKey, + mode: "profile", + addMemory: "always", + conversationId, + }) await wrapped.doGenerate({ prompt: [ @@ -172,15 +166,12 @@ describe.skipIf(!shouldRunIntegration)( const conversationId = `test-conversation-${Date.now()}` - const wrapped = withSupermemory( - model, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - mode: "profile", - conversationId, - }, - ) + const wrapped = withSupermemory(model, { + containerTag: INTEGRATION_CONFIG.containerTag, + apiKey: INTEGRATION_CONFIG.apiKey, + mode: "profile", + conversationId, + }) await wrapped.doGenerate({ prompt: [ @@ -203,14 +194,11 @@ describe.skipIf(!shouldRunIntegration)( it("should fetch memories and stream response", async () => { const { model, getCapturedStreamParams } = createIntegrationMockModel() - const wrapped = withSupermemory( - model, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - mode: "profile", - }, - ) + const wrapped = withSupermemory(model, { + containerTag: INTEGRATION_CONFIG.containerTag, + apiKey: INTEGRATION_CONFIG.apiKey, + mode: "profile", + }) const { stream } = await wrapped.doStream({ prompt: [ @@ -242,16 +230,13 @@ describe.skipIf(!shouldRunIntegration)( const conversationId = `test-stream-${Date.now()}` - const wrapped = withSupermemory( - model, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - mode: "profile", - addMemory: "always", - conversationId, - }, - ) + const wrapped = withSupermemory(model, { + containerTag: INTEGRATION_CONFIG.containerTag, + apiKey: INTEGRATION_CONFIG.apiKey, + mode: "profile", + addMemory: "always", + conversationId, + }) const { stream } = await wrapped.doStream({ prompt: [ @@ -286,14 +271,11 @@ describe.skipIf(!shouldRunIntegration)( it("should handle text-delta chunks correctly", async () => { const { model } = createIntegrationMockModel() - const wrapped = withSupermemory( - model, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - mode: "profile", - }, - ) + const 
wrapped = withSupermemory(model, { + containerTag: INTEGRATION_CONFIG.containerTag, + apiKey: INTEGRATION_CONFIG.apiKey, + mode: "profile", + }) const { stream } = await wrapped.doStream({ prompt: [ @@ -327,14 +309,11 @@ describe.skipIf(!shouldRunIntegration)( const { model } = createIntegrationMockModel() const fetchSpy = vi.spyOn(globalThis, "fetch") - const wrapped = withSupermemory( - model, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - mode: "profile", - }, - ) + const wrapped = withSupermemory(model, { + containerTag: INTEGRATION_CONFIG.containerTag, + apiKey: INTEGRATION_CONFIG.apiKey, + mode: "profile", + }) await wrapped.doGenerate({ prompt: [ @@ -368,14 +347,11 @@ describe.skipIf(!shouldRunIntegration)( const { model } = createIntegrationMockModel() const fetchSpy = vi.spyOn(globalThis, "fetch") - const wrapped = withSupermemory( - model, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - mode: "query", - }, - ) + const wrapped = withSupermemory(model, { + containerTag: INTEGRATION_CONFIG.containerTag, + apiKey: INTEGRATION_CONFIG.apiKey, + mode: "query", + }) await wrapped.doGenerate({ prompt: [ @@ -409,14 +385,11 @@ describe.skipIf(!shouldRunIntegration)( const { model } = createIntegrationMockModel() const fetchSpy = vi.spyOn(globalThis, "fetch") - const wrapped = withSupermemory( - model, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - mode: "full", - }, - ) + const wrapped = withSupermemory(model, { + containerTag: INTEGRATION_CONFIG.containerTag, + apiKey: INTEGRATION_CONFIG.apiKey, + mode: "full", + }) await wrapped.doGenerate({ prompt: [ @@ -456,15 +429,12 @@ describe.skipIf(!shouldRunIntegration)( generalSearchMemories: string }) => `${data.userMemories}` - const wrapped = withSupermemory( - model, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - mode: "profile", - promptTemplate: customTemplate, - }, - ) + const wrapped = withSupermemory(model, { + containerTag: INTEGRATION_CONFIG.containerTag, + apiKey: INTEGRATION_CONFIG.apiKey, + mode: "profile", + promptTemplate: customTemplate, + }) await wrapped.doGenerate({ prompt: [ @@ -485,15 +455,12 @@ describe.skipIf(!shouldRunIntegration)( const { model, getCapturedGenerateParams } = createIntegrationMockModel() - const wrapped = withSupermemory( - model, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - mode: "profile", - verbose: true, - }, - ) + const wrapped = withSupermemory(model, { + containerTag: INTEGRATION_CONFIG.containerTag, + apiKey: INTEGRATION_CONFIG.apiKey, + mode: "profile", + verbose: true, + }) await wrapped.doGenerate({ prompt: [ @@ -514,15 +481,12 @@ describe.skipIf(!shouldRunIntegration)( const fetchSpy = vi.spyOn(globalThis, "fetch") // Use the configured base URL (or default) - const wrapped = withSupermemory( - model, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - mode: "profile", - baseUrl: INTEGRATION_CONFIG.baseUrl, - }, - ) + const wrapped = withSupermemory(model, { + containerTag: INTEGRATION_CONFIG.containerTag, + apiKey: INTEGRATION_CONFIG.apiKey, + mode: "profile", + baseUrl: INTEGRATION_CONFIG.baseUrl, + }) await wrapped.doGenerate({ prompt: [ @@ -556,14 +520,11 @@ describe.skipIf(!shouldRunIntegration)( new Error("Model error"), ) - const wrapped = withSupermemory( - model, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - mode: "profile", - }, - ) + const wrapped = withSupermemory(model, 
{ + containerTag: INTEGRATION_CONFIG.containerTag, + apiKey: INTEGRATION_CONFIG.apiKey, + mode: "profile", + }) await expect( wrapped.doGenerate({ @@ -581,14 +542,11 @@ describe.skipIf(!shouldRunIntegration)( const { model, getCapturedGenerateParams } = createIntegrationMockModel() - const wrapped = withSupermemory( - model, - INTEGRATION_CONFIG.containerTag, - { - apiKey: "invalid-api-key-12345", - mode: "profile", - }, - ) + const wrapped = withSupermemory(model, { + containerTag: INTEGRATION_CONFIG.containerTag, + apiKey: "invalid-api-key-12345", + mode: "profile", + }) await wrapped.doGenerate({ prompt: [ @@ -606,15 +564,12 @@ describe.skipIf(!shouldRunIntegration)( it("should reject on invalid API key when skipMemoryOnError is false", async () => { const { model } = createIntegrationMockModel() - const wrapped = withSupermemory( - model, - INTEGRATION_CONFIG.containerTag, - { - apiKey: "invalid-api-key-12345", - mode: "profile", - skipMemoryOnError: false, - }, - ) + const wrapped = withSupermemory(model, { + containerTag: INTEGRATION_CONFIG.containerTag, + apiKey: "invalid-api-key-12345", + mode: "profile", + skipMemoryOnError: false, + }) await expect( wrapped.doGenerate({ diff --git a/packages/tools/test/with-supermemory/unit.test.ts b/packages/tools/test/with-supermemory/unit.test.ts index dcefc54e8..8461fb97b 100644 --- a/packages/tools/test/with-supermemory/unit.test.ts +++ b/packages/tools/test/with-supermemory/unit.test.ts @@ -73,7 +73,7 @@ describe("Unit: withSupermemory", () => { const mockModel = createMockLanguageModel() expect(() => { - withSupermemory(mockModel, TEST_CONFIG.containerTag) + withSupermemory(mockModel, { containerTag: TEST_CONFIG.containerTag }) }).toThrow("SUPERMEMORY_API_KEY is not set") }) @@ -81,7 +81,9 @@ describe("Unit: withSupermemory", () => { process.env.SUPERMEMORY_API_KEY = "test-key" const mockModel = createMockLanguageModel() - const wrappedModel = withSupermemory(mockModel, TEST_CONFIG.containerTag) + const wrappedModel = withSupermemory(mockModel, { + containerTag: TEST_CONFIG.containerTag, + }) expect(wrappedModel).toBeDefined() expect(wrappedModel.specificationVersion).toBe("v2") @@ -99,7 +101,9 @@ describe("Unit: withSupermemory", () => { doStream: vi.fn(), } const inner = Object.create(proto) as LanguageModelV2 - const wrappedModel = withSupermemory(inner, TEST_CONFIG.containerTag) + const wrappedModel = withSupermemory(inner, { + containerTag: TEST_CONFIG.containerTag, + }) expect(wrappedModel.specificationVersion).toBe("v2") expect(wrappedModel.provider).toBe("gateway") @@ -414,7 +418,8 @@ describe("Unit: withSupermemory", () => { warnings: [], }) - const wrapped = withSupermemory(inner, TEST_CONFIG.containerTag, { + const wrapped = withSupermemory(inner, { + containerTag: TEST_CONFIG.containerTag, apiKey: "k", }) @@ -436,7 +441,8 @@ describe("Unit: withSupermemory", () => { }) const inner = createMockLanguageModel() - const wrapped = withSupermemory(inner, TEST_CONFIG.containerTag, { + const wrapped = withSupermemory(inner, { + containerTag: TEST_CONFIG.containerTag, apiKey: "k", skipMemoryOnError: false, }) @@ -475,7 +481,8 @@ describe("Unit: withSupermemory", () => { warnings: [], }) - const wrapped = withSupermemory(inner, TEST_CONFIG.containerTag, { + const wrapped = withSupermemory(inner, { + containerTag: TEST_CONFIG.containerTag, apiKey: "k", }) From c76d27f284c1885a736c4f9ad2b3f60da7ed807d Mon Sep 17 00:00:00 2001 From: sreedharsreeram <141047751+sreedharsreeram@users.noreply.github.com> Date: Fri, 24 Apr 2026 23:43:16 +0000 
Subject: [PATCH 2/6] custom id implementation (#877) --- README.md | 2 +- apps/docs/ai-sdk/overview.mdx | 11 ++- apps/docs/ai-sdk/user-profiles.mdx | 74 ++++++++++----- apps/docs/install.md | 5 +- apps/docs/integrations/ai-sdk.mdx | 35 ++++--- apps/docs/user-profiles.mdx | 5 +- apps/docs/user-profiles/examples.mdx | 7 +- apps/docs/vibe-coding.mdx | 5 +- .../docs-test/tests/integrations/ai-sdk.ts | 39 +++++--- packages/tools/README.md | 85 +++++++++-------- packages/tools/src/vercel/index.ts | 24 +++-- packages/tools/src/vercel/middleware.ts | 95 +++++-------------- packages/tools/test/ai-sdk-test.ts | 1 + .../tools/test/chatapp/app/api/chat/route.ts | 1 + .../test/chatapp/app/api/stream/route.ts | 1 + .../test/with-supermemory/integration.test.ts | 26 +++-- .../tools/test/with-supermemory/unit.test.ts | 40 +++++++- 17 files changed, 267 insertions(+), 189 deletions(-) diff --git a/README.md b/README.md index 9701d0e23..a134afad1 100644 --- a/README.md +++ b/README.md @@ -232,7 +232,7 @@ Drop-in wrappers for every major AI framework: ```typescript // Vercel AI SDK import { withSupermemory } from "@supermemory/tools/ai-sdk"; -const model = withSupermemory(openai("gpt-4o"), "user_123"); +const model = withSupermemory(openai("gpt-4o"), { containerTag: "user_123", customId: "conv-1" }); // Mastra import { withSupermemory } from "@supermemory/tools/mastra"; diff --git a/apps/docs/ai-sdk/overview.mdx b/apps/docs/ai-sdk/overview.mdx index 0c9a48f49..168be519a 100644 --- a/apps/docs/ai-sdk/overview.mdx +++ b/apps/docs/ai-sdk/overview.mdx @@ -26,7 +26,10 @@ import { withSupermemory } from "@supermemory/tools/ai-sdk" import { openai } from "@ai-sdk/openai" // Wrap your model with Supermemory - profiles are automatically injected -const modelWithMemory = withSupermemory(openai("gpt-5"), "user-123") +const modelWithMemory = withSupermemory(openai("gpt-5"), { + containerTag: "user-123", + customId: "conversation-456", +}) const result = await generateText({ model: modelWithMemory, @@ -39,8 +42,10 @@ const result = await generateText({ **Memory saving is disabled by default.** The middleware only retrieves existing memories. To automatically save new memories from conversations, enable it explicitly: ```typescript - const modelWithMemory = withSupermemory(openai("gpt-5"), "user-123", { - addMemory: "always" + const modelWithMemory = withSupermemory(openai("gpt-5"), { + containerTag: "user-123", + customId: "conversation-456", + addMemory: "always", }) ``` diff --git a/apps/docs/ai-sdk/user-profiles.mdx b/apps/docs/ai-sdk/user-profiles.mdx index df8c5430f..6d85772e3 100644 --- a/apps/docs/ai-sdk/user-profiles.mdx +++ b/apps/docs/ai-sdk/user-profiles.mdx @@ -20,10 +20,10 @@ import { withSupermemory } from "@supermemory/tools/ai-sdk" import { openai } from "@ai-sdk/openai" // Wrap any model with Supermemory middleware -const modelWithMemory = withSupermemory( - openai("gpt-4"), // Your base model - "user-123" // Container tag (user ID) -) +const modelWithMemory = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conversation-456", +}) // Use normally - profiles are automatically injected! const result = await generateText({ @@ -49,8 +49,10 @@ All of this happens transparently - you write code as if using a normal model, b **Memory saving is disabled by default.** The middleware only retrieves existing memories. 
To automatically save new memories from conversations, set `addMemory: "always"`: ```typescript - const model = withSupermemory(openai("gpt-5"), "user-123", { - addMemory: "always" + const model = withSupermemory(openai("gpt-5"), { + containerTag: "user-123", + customId: "conversation-456", + addMemory: "always", }) ``` @@ -65,11 +67,16 @@ Retrieves the user's complete profile without query-specific search. Best for ge ```typescript // Default behavior - profile mode -const model = withSupermemory(openai("gpt-4"), "user-123") +const model = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", +}) // Or explicitly specify -const model = withSupermemory(openai("gpt-4"), "user-123", { - mode: "profile" +const model = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", + mode: "profile", }) const result = await generateText({ @@ -84,8 +91,10 @@ const result = await generateText({ Searches memories based on the user's specific message. Best for finding relevant information. ```typescript -const model = withSupermemory(openai("gpt-4"), "user-123", { - mode: "query" +const model = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", + mode: "query", }) const result = await generateText({ @@ -103,8 +112,10 @@ const result = await generateText({ Combines profile AND query-based search for comprehensive context. Best for complex interactions. ```typescript -const model = withSupermemory(openai("gpt-4"), "user-123", { - mode: "full" +const model = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", + mode: "full", }) const result = await generateText({ @@ -137,9 +148,11 @@ ${data.generalSearchMemories} `.trim() -const model = withSupermemory(openai("gpt-4"), "user-123", { +const model = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", mode: "full", - promptTemplate: customPrompt + promptTemplate: customPrompt, }) const result = await generateText({ @@ -174,9 +187,11 @@ const claudePrompt = (data: MemoryPromptData) => ` Use the above context to provide personalized responses. `.trim() -const model = withSupermemory(anthropic("claude-3-sonnet"), "user-123", { +const model = withSupermemory(anthropic("claude-3-sonnet"), { + containerTag: "user-123", + customId: "conv-1", mode: "full", - promptTemplate: claudePrompt + promptTemplate: claudePrompt, }) ``` @@ -199,9 +214,11 @@ ${relevant.map((r) => `- ${r.memory}`).join("\n")} `.trim() } -const model = withSupermemory(openai("gpt-4"), "user-123", { +const model = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", mode: "full", - promptTemplate: selectivePrompt + promptTemplate: selectivePrompt, }) ``` @@ -222,8 +239,10 @@ ${data.generalSearchMemories} Use this information to provide personalized and contextually relevant responses. 
`.trim() -const model = withSupermemory(openai("gpt-4"), "user-123", { - promptTemplate: brandedPrompt +const model = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", + promptTemplate: brandedPrompt, }) ``` @@ -241,8 +260,10 @@ const defaultPrompt = (data: MemoryPromptData) => Enable detailed logging to see exactly what's happening: ```typescript -const model = withSupermemory(openai("gpt-4"), "user-123", { - verbose: true // Enable detailed logging +const model = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", + verbose: true, // Enable detailed logging }) const result = await generateText({ @@ -266,8 +287,11 @@ The AI SDK middleware abstracts away the complexity of manual profile management ```typescript - // One line setup - const model = withSupermemory(openai("gpt-4"), "user-123") + // Simple setup + const model = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", + }) // Use normally const result = await generateText({ diff --git a/apps/docs/install.md b/apps/docs/install.md index 665ccc16d..78830cc5d 100644 --- a/apps/docs/install.md +++ b/apps/docs/install.md @@ -106,7 +106,10 @@ const result = await streamText({ // Option 2: Profile middleware (automatic context injection) import { withSupermemory } from '@supermemory/tools/ai-sdk' -const modelWithMemory = withSupermemory(anthropic('claude-3-5-sonnet-20241022'), userId) +const modelWithMemory = withSupermemory(anthropic('claude-3-5-sonnet-20241022'), { + containerTag: userId, + customId: 'conversation-1', +}) const result = await generateText({ model: modelWithMemory, diff --git a/apps/docs/integrations/ai-sdk.mdx b/apps/docs/integrations/ai-sdk.mdx index 301420408..c71a1ec19 100644 --- a/apps/docs/integrations/ai-sdk.mdx +++ b/apps/docs/integrations/ai-sdk.mdx @@ -35,7 +35,10 @@ import { generateText } from "ai" import { withSupermemory } from "@supermemory/tools/ai-sdk" import { openai } from "@ai-sdk/openai" -const modelWithMemory = withSupermemory(openai("gpt-5"), "user-123") +const modelWithMemory = withSupermemory(openai("gpt-5"), { + containerTag: "user-123", + customId: "conversation-456", +}) const result = await generateText({ model: modelWithMemory, @@ -47,8 +50,10 @@ const result = await generateText({ **Memory saving is disabled by default.** The middleware only retrieves existing memories. 
To automatically save new memories: ```typescript - const modelWithMemory = withSupermemory(openai("gpt-5"), "user-123", { - addMemory: "always" + const modelWithMemory = withSupermemory(openai("gpt-5"), { + containerTag: "user-123", + customId: "conversation-456", + addMemory: "always", }) ``` @@ -58,19 +63,19 @@ const result = await generateText({ **Profile Mode (Default)** - Retrieves the user's complete profile: ```typescript -const model = withSupermemory(openai("gpt-4"), "user-123", { mode: "profile" }) +const model = withSupermemory(openai("gpt-4"), { containerTag: "user-123", customId: "conv-1", mode: "profile" }) ``` **Query Mode** - Searches memories based on the user's message: ```typescript -const model = withSupermemory(openai("gpt-4"), "user-123", { mode: "query" }) +const model = withSupermemory(openai("gpt-4"), { containerTag: "user-123", customId: "conv-1", mode: "query" }) ``` **Full Mode** - Combines profile AND query-based search: ```typescript -const model = withSupermemory(openai("gpt-4"), "user-123", { mode: "full" }) +const model = withSupermemory(openai("gpt-4"), { containerTag: "user-123", customId: "conv-1", mode: "full" }) ``` ### Custom Prompt Templates @@ -91,17 +96,21 @@ const claudePrompt = (data: MemoryPromptData) => ` `.trim() -const model = withSupermemory(anthropic("claude-3-sonnet"), "user-123", { +const model = withSupermemory(anthropic("claude-3-sonnet"), { + containerTag: "user-123", + customId: "conv-1", mode: "full", - promptTemplate: claudePrompt + promptTemplate: claudePrompt, }) ``` ### Verbose Logging ```typescript -const model = withSupermemory(openai("gpt-4"), "user-123", { - verbose: true +const model = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", + verbose: true, }) // Console output shows memory retrieval details ``` @@ -113,8 +122,10 @@ If the Supermemory API returns an error, is unreachable, or retrieval hits the i To **fail the call** when memory retrieval fails instead, set `skipMemoryOnError: false`: ```typescript -const model = withSupermemory(openai("gpt-5"), "user-123", { - skipMemoryOnError: false +const model = withSupermemory(openai("gpt-5"), { + containerTag: "user-123", + customId: "conv-1", + skipMemoryOnError: false, }) ``` diff --git a/apps/docs/user-profiles.mdx b/apps/docs/user-profiles.mdx index ec9ff57d2..e2b3ae461 100644 --- a/apps/docs/user-profiles.mdx +++ b/apps/docs/user-profiles.mdx @@ -228,7 +228,10 @@ ${result.searchResults?.results.map(m => m.memory).join('\n') || 'None'} import { openai } from "@ai-sdk/openai" // Profiles automatically injected - const model = withSupermemory(openai("gpt-4"), "user-123") + const model = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", + }) const result = await generateText({ model, diff --git a/apps/docs/user-profiles/examples.mdx b/apps/docs/user-profiles/examples.mdx index aa3b796b9..bd64b9da7 100644 --- a/apps/docs/user-profiles/examples.mdx +++ b/apps/docs/user-profiles/examples.mdx @@ -352,8 +352,11 @@ import { generateText } from "ai" import { withSupermemory } from "@supermemory/tools/ai-sdk" import { openai } from "@ai-sdk/openai" -// One line setup - profiles automatically injected -const model = withSupermemory(openai("gpt-4"), "user-123") +// Simple setup - profiles automatically injected +const model = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", +}) const result = await generateText({ model, diff --git a/apps/docs/vibe-coding.mdx 
b/apps/docs/vibe-coding.mdx index bb5cd0e98..def08fdf3 100644 --- a/apps/docs/vibe-coding.mdx +++ b/apps/docs/vibe-coding.mdx @@ -143,7 +143,10 @@ const result = await streamText({ // Option 2: Profile middleware (automatic context injection) import { withSupermemory } from '@supermemory/tools/ai-sdk' -const modelWithMemory = withSupermemory(anthropic('claude-3-5-sonnet-20241022'), userId) +const modelWithMemory = withSupermemory(anthropic('claude-3-5-sonnet-20241022'), { + containerTag: userId, + customId: 'conversation-1', +}) const result = await generateText({ model: modelWithMemory, diff --git a/packages/docs-test/tests/integrations/ai-sdk.ts b/packages/docs-test/tests/integrations/ai-sdk.ts index f6d4f1d2b..c431a2281 100644 --- a/packages/docs-test/tests/integrations/ai-sdk.ts +++ b/packages/docs-test/tests/integrations/ai-sdk.ts @@ -13,17 +13,24 @@ async function testMiddleware() { console.log("=== Middleware ===") // Basic wrapper - const model = withSupermemory(openai("gpt-4"), "user-123") + const model = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", + }) console.log("✓ withSupermemory basic") // With addMemory option - const modelWithAdd = withSupermemory(openai("gpt-4"), "user-123", { + const modelWithAdd = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", addMemory: "always", }) console.log("✓ withSupermemory with addMemory") // With verbose logging - const modelVerbose = withSupermemory(openai("gpt-4"), "user-123", { + const modelVerbose = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", verbose: true, }) console.log("✓ withSupermemory with verbose") @@ -32,17 +39,23 @@ async function testMiddleware() { async function testSearchModes() { console.log("\n=== Search Modes ===") - const profileModel = withSupermemory(openai("gpt-4"), "user-123", { + const profileModel = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", mode: "profile", }) console.log("✓ mode: profile") - const queryModel = withSupermemory(openai("gpt-4"), "user-123", { + const queryModel = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", mode: "query", }) console.log("✓ mode: query") - const fullModel = withSupermemory(openai("gpt-4"), "user-123", { + const fullModel = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conv-1", mode: "full", }) console.log("✓ mode: full") @@ -61,14 +74,12 @@ async function testCustomPrompt() { `.trim() - const model = withSupermemory( - anthropic("claude-3-sonnet-20240229"), - "user-123", - { - mode: "full", - promptTemplate: claudePrompt, - }, - ) + const model = withSupermemory(anthropic("claude-3-sonnet-20240229"), { + containerTag: "user-123", + customId: "conv-1", + mode: "full", + promptTemplate: claudePrompt, + }) console.log("✓ Custom prompt template") } diff --git a/packages/tools/README.md b/packages/tools/README.md index 839a5904b..cf09a4c4e 100644 --- a/packages/tools/README.md +++ b/packages/tools/README.md @@ -66,27 +66,9 @@ import { generateText } from "ai" import { withSupermemory } from "@supermemory/tools/ai-sdk" import { openai } from "@ai-sdk/openai" -const modelWithMemory = withSupermemory(openai("gpt-5"), "user_id_life") - -const result = await generateText({ - model: modelWithMemory, - messages: [{ role: "user", content: "where do i live?" 
}], -}) - -console.log(result.text) -``` - -#### Conversation Grouping - -Use the `conversationId` option to group messages into a single document for contextual memory generation: - -```typescript -import { generateText } from "ai" -import { withSupermemory } from "@supermemory/tools/ai-sdk" -import { openai } from "@ai-sdk/openai" - -const modelWithMemory = withSupermemory(openai("gpt-5"), "user_id_life", { - conversationId: "conversation-456" +const modelWithMemory = withSupermemory(openai("gpt-5"), { + containerTag: "user_id_life", + customId: "conversation-456", }) const result = await generateText({ @@ -106,8 +88,10 @@ import { generateText } from "ai" import { withSupermemory } from "@supermemory/tools/ai-sdk" import { openai } from "@ai-sdk/openai" -const modelWithMemory = withSupermemory(openai("gpt-5"), "user_id_life", { - verbose: true +const modelWithMemory = withSupermemory(openai("gpt-5"), { + containerTag: "user_id_life", + customId: "conversation-456", + verbose: true, }) const result = await generateText({ @@ -139,11 +123,16 @@ import { withSupermemory } from "@supermemory/tools/ai-sdk" import { openai } from "@ai-sdk/openai" // Uses profile mode by default - gets all user profile memories -const modelWithMemory = withSupermemory(openai("gpt-4"), "user-123") +const modelWithMemory = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conversation-456", +}) // Explicitly specify profile mode -const modelWithProfile = withSupermemory(openai("gpt-4"), "user-123", { - mode: "profile" +const modelWithProfile = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conversation-456", + mode: "profile", }) const result = await generateText({ @@ -158,8 +147,10 @@ import { generateText } from "ai" import { withSupermemory } from "@supermemory/tools/ai-sdk" import { openai } from "@ai-sdk/openai" -const modelWithQuery = withSupermemory(openai("gpt-4"), "user-123", { - mode: "query" +const modelWithQuery = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conversation-456", + mode: "query", }) const result = await generateText({ @@ -174,8 +165,10 @@ import { generateText } from "ai" import { withSupermemory } from "@supermemory/tools/ai-sdk" import { openai } from "@ai-sdk/openai" -const modelWithFull = withSupermemory(openai("gpt-4"), "user-123", { - mode: "full" +const modelWithFull = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conversation-456", + mode: "full", }) const result = await generateText({ @@ -194,8 +187,10 @@ import { generateText } from "ai" import { withSupermemory } from "@supermemory/tools/ai-sdk" import { openai } from "@ai-sdk/openai" -const modelWithAutoSave = withSupermemory(openai("gpt-4"), "user-123", { - addMemory: "always" +const modelWithAutoSave = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conversation-456", + addMemory: "always", }) const result = await generateText({ @@ -205,17 +200,23 @@ const result = await generateText({ // This message will be automatically saved as a memory ``` -**Never Save Memories (Default)** - Only retrieves memories without storing new ones: +**Never Save Memories** - Only retrieves memories without storing new ones: ```typescript -const modelWithNoSave = withSupermemory(openai("gpt-4"), "user-123") +const modelWithNoSave = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conversation-456", + addMemory: "never", // explicit since default is now "always" +}) ``` 
**Combined Options** - Use verbose logging with specific modes and memory storage: ```typescript -const modelWithOptions = withSupermemory(openai("gpt-4"), "user-123", { +const modelWithOptions = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conversation-456", mode: "profile", addMemory: "always", - verbose: true + verbose: true, }) ``` @@ -239,7 +240,9 @@ ${data.generalSearchMemories} `.trim() -const modelWithCustomPrompt = withSupermemory(openai("gpt-4"), "user-123", { +const modelWithCustomPrompt = withSupermemory(openai("gpt-4"), { + containerTag: "user-123", + customId: "conversation-456", mode: "full", promptTemplate: customPrompt, }) @@ -646,11 +649,12 @@ Without `strict: true`, optional fields like `includeFullDocs` and `limit` won't ### withSupermemory Middleware Options -The `withSupermemory` middleware accepts additional configuration options: +The `withSupermemory` middleware accepts a configuration object as the second argument: ```typescript interface WithSupermemoryOptions { - conversationId?: string + containerTag: string + customId: string verbose?: boolean mode?: "profile" | "query" | "full" addMemory?: "always" | "never" @@ -662,7 +666,8 @@ interface WithSupermemoryOptions { } ``` -- **conversationId**: Optional conversation ID to group messages into a single document for contextual memory generation +- **containerTag**: Required. The container tag/identifier for memory search (e.g., user ID, project ID) +- **customId**: Required. Custom ID to group messages into a single document for contextual memory generation - **verbose**: Enable detailed logging of memory search and injection process (default: false) - **mode**: Memory search mode - "profile" (default), "query", or "full" - **addMemory**: Automatic memory storage mode - "always" or "never" (default: "never") diff --git a/packages/tools/src/vercel/index.ts b/packages/tools/src/vercel/index.ts index 582b46af4..3dbdcccd3 100644 --- a/packages/tools/src/vercel/index.ts +++ b/packages/tools/src/vercel/index.ts @@ -17,8 +17,8 @@ const DEFAULT_MEMORY_RETRIEVAL_TIMEOUT_MS = 5000 interface WrapVercelLanguageModelOptions { /** The container tag/identifier for memory search (e.g., user ID, project ID) */ containerTag: string - /** Optional conversation ID to group messages for contextual memory generation */ - conversationId?: string + /** Custom ID to group messages into a single document. Required. */ + customId: string /** Enable detailed logging of memory search and injection */ verbose?: boolean /** @@ -77,10 +77,10 @@ interface WrapVercelLanguageModelOptions { * @param model - The language model to wrap with supermemory capabilities (V2 or V3) * @param options - Configuration options for Supermemory integration * @param options.containerTag - Required. The container tag/identifier for memory search (e.g., user ID, project ID) - * @param options.conversationId - Optional conversation ID to group messages into a single document for contextual memory generation + * @param options.customId - Required. 
Custom ID to group messages into a single document for contextual memory generation
 * @param options.verbose - Optional flag to enable detailed logging of memory search and injection process (default: false)
 * @param options.mode - Optional mode for memory search: "profile", "query", or "full" (default: "profile")
- * @param options.addMemory - Optional mode for memory search: "always", "never" (default: "never")
+ * @param options.addMemory - Optional mode for automatic memory saving: "always", "never" (default: "always")
 * @param options.apiKey - Optional Supermemory API key to use instead of the environment variable
 * @param options.baseUrl - Optional base URL for the Supermemory API (default: "https://api.supermemory.ai")
 * @param options.skipMemoryOnError - When memory retrieval fails or times out: `true` (default) continues without injected memories; `false` throws
@@ -94,7 +94,7 @@ interface WrapVercelLanguageModelOptions {
 *
 * const modelWithMemory = withSupermemory(openai("gpt-4"), {
 *   containerTag: "user-123",
- *   conversationId: "conversation-456",
+ *   customId: "conversation-456",
 *   mode: "full",
 *   addMemory: "always"
 * })
@@ -120,13 +120,19 @@ const wrapVercelLanguageModel = (
 		)
 	}
 
+	if (!options.customId) {
+		throw new Error(
+			"customId is required — provide a non-empty string to group messages into a single document",
+		)
+	}
+
 	const ctx = createSupermemoryContext({
 		containerTag: options.containerTag,
 		apiKey: providedApiKey,
-		conversationId: options.conversationId,
+		customId: options.customId,
 		verbose: options.verbose ?? false,
 		mode: options.mode ?? "profile",
-		addMemory: options.addMemory ?? "never",
+		addMemory: options.addMemory ?? "always",
 		baseUrl: options.baseUrl,
 		promptTemplate: options.promptTemplate,
 		memoryRetrievalTimeoutMs: DEFAULT_MEMORY_RETRIEVAL_TIMEOUT_MS,
 	})
@@ -181,7 +187,7 @@
 					saveMemoryAfterResponse(
 						ctx.client,
 						ctx.containerTag,
-						ctx.conversationId,
+						ctx.customId,
 						assistantResponseText,
 						params,
 						ctx.logger,
@@ -256,7 +262,7 @@
 				saveMemoryAfterResponse(
 					ctx.client,
 					ctx.containerTag,
-					ctx.conversationId,
+					ctx.customId,
 					generatedText,
 					params,
 					ctx.logger,
diff --git a/packages/tools/src/vercel/middleware.ts b/packages/tools/src/vercel/middleware.ts
index 38d506885..144bd7df7 100644
--- a/packages/tools/src/vercel/middleware.ts
+++ b/packages/tools/src/vercel/middleware.ts
@@ -12,32 +12,9 @@ import {
 	type PromptTemplate,
 	type MemoryMode,
 } from "../shared"
-import {
-	type LanguageModelCallOptions,
-	getLastUserMessage,
-	filterOutSupermemories,
-} from "./util"
+import { type LanguageModelCallOptions, getLastUserMessage } from "./util"
 import { extractQueryText, injectMemoriesIntoParams } from "./memory-prompt"
 
-const getConversationContent = (params: LanguageModelCallOptions) => {
-	return params.prompt
-		.filter((msg) => msg.role !== "system" && msg.role !== "tool")
-		.map((msg) => {
-			const role = msg.role === "user" ? "User" : "Assistant"
-
-			if (typeof msg.content === "string") {
-				return `${role}: ${filterOutSupermemories(msg.content)}`
-			}
-
-			const content = msg.content
-				.filter((c) => c.type === "text")
-				.map((c) => (c.type === "text" ? 
filterOutSupermemories(c.text) : "")) - .join(" ") - return `${role}: ${content}` - }) - .join("\n\n") -} - const convertToConversationMessages = ( params: LanguageModelCallOptions, assistantResponseText: string, @@ -99,58 +76,34 @@ const convertToConversationMessages = ( } export const saveMemoryAfterResponse = async ( - client: Supermemory, + _client: Supermemory, containerTag: string, - conversationId: string | undefined, + customId: string, assistantResponseText: string, params: LanguageModelCallOptions, logger: Logger, apiKey: string, baseUrl: string, ): Promise => { - const customId = conversationId ? `conversation:${conversationId}` : undefined - try { - if (customId && conversationId) { - const conversationMessages = convertToConversationMessages( - params, - assistantResponseText, - ) - - const response = await addConversation({ - conversationId, - messages: conversationMessages, - containerTags: [containerTag], - apiKey, - baseUrl, - }) - - logger.info("Conversation saved successfully via /v4/conversations", { - containerTag, - conversationId, - messageCount: conversationMessages.length, - responseId: response.id, - }) - return - } - - const userMessage = getLastUserMessage(params) - const content = conversationId - ? `${getConversationContent(params)} \n\n Assistant: ${assistantResponseText}` - : `User: ${userMessage} \n\n Assistant: ${assistantResponseText}` - - const response = await client.add({ - content, + const conversationMessages = convertToConversationMessages( + params, + assistantResponseText, + ) + + const response = await addConversation({ + conversationId: customId, + messages: conversationMessages, containerTags: [containerTag], - customId, + apiKey, + baseUrl, }) - logger.info("Memory saved successfully via /v3/documents", { + logger.info("Conversation saved successfully via /v4/conversations", { containerTag, customId, - content, - contentLength: content.length, - memoryId: response.id, + messageCount: conversationMessages.length, + responseId: response.id, }) } catch (error) { logger.error("Error saving memory", { @@ -167,8 +120,8 @@ interface SupermemoryMiddlewareOptions { containerTag: string /** Supermemory API key */ apiKey: string - /** Optional conversation ID to group messages for contextual memory generation */ - conversationId?: string + /** Custom ID to group messages into a single document. Required. 
*/ + customId: string /** Enable detailed logging of memory search and injection */ verbose?: boolean /** @@ -196,7 +149,7 @@ interface SupermemoryMiddlewareContext { client: Supermemory logger: Logger containerTag: string - conversationId?: string + customId: string mode: MemoryMode addMemory: "always" | "never" normalizedBaseUrl: string @@ -216,10 +169,10 @@ export const createSupermemoryContext = ( const { containerTag, apiKey, - conversationId, + customId, verbose = false, mode = "profile", - addMemory = "never", + addMemory = "always", baseUrl, promptTemplate, memoryRetrievalTimeoutMs, @@ -239,7 +192,7 @@ export const createSupermemoryContext = ( client, logger, containerTag, - conversationId, + customId, mode, addMemory, normalizedBaseUrl, @@ -262,7 +215,7 @@ const makeTurnKey = ( ): string => { return MemoryCache.makeTurnKey( ctx.containerTag, - ctx.conversationId, + ctx.customId, ctx.mode, userMessage, ) @@ -303,7 +256,7 @@ export const transformParamsWithMemory = async ( ctx.logger.info("Starting memory search", { containerTag: ctx.containerTag, - conversationId: ctx.conversationId, + customId: ctx.customId, mode: ctx.mode, isNewTurn, cacheHit: false, diff --git a/packages/tools/test/ai-sdk-test.ts b/packages/tools/test/ai-sdk-test.ts index 19bb272da..97d30d308 100644 --- a/packages/tools/test/ai-sdk-test.ts +++ b/packages/tools/test/ai-sdk-test.ts @@ -4,6 +4,7 @@ import { openai } from "@ai-sdk/openai" const modelWithMemory = withSupermemory(openai("gpt-5"), { containerTag: "user_id_life", + customId: "conversation-123", verbose: true, mode: "query", // options are profile, query, full (default is profile) addMemory: "always", // options are always, never (default is never) diff --git a/packages/tools/test/chatapp/app/api/chat/route.ts b/packages/tools/test/chatapp/app/api/chat/route.ts index ded727454..22c3b446f 100644 --- a/packages/tools/test/chatapp/app/api/chat/route.ts +++ b/packages/tools/test/chatapp/app/api/chat/route.ts @@ -3,6 +3,7 @@ import { withSupermemory } from "@supermemory/tools/ai-sdk" const model = withSupermemory(gateway("google/gemini-2.5-flash"), { containerTag: "user-1", + customId: "chat-session", apiKey: process.env.SUPERMEMORY_API_KEY ?? "", mode: "full", addMemory: "always", diff --git a/packages/tools/test/chatapp/app/api/stream/route.ts b/packages/tools/test/chatapp/app/api/stream/route.ts index a86c39419..fb784f917 100644 --- a/packages/tools/test/chatapp/app/api/stream/route.ts +++ b/packages/tools/test/chatapp/app/api/stream/route.ts @@ -9,6 +9,7 @@ const gatewayModel = gateway("google/gemini-2.5-flash") const supermemoryOptions = { containerTag: SUPERMEMORY_USER_ID, + customId: "stream-session", apiKey: process.env.SUPERMEMORY_API_KEY ?? 
"", mode: "full" as const, addMemory: "always" as const, diff --git a/packages/tools/test/with-supermemory/integration.test.ts b/packages/tools/test/with-supermemory/integration.test.ts index 13ea59c67..d2ffa84dc 100644 --- a/packages/tools/test/with-supermemory/integration.test.ts +++ b/packages/tools/test/with-supermemory/integration.test.ts @@ -98,6 +98,7 @@ describe.skipIf(!shouldRunIntegration)( const wrapped = withSupermemory(model, { containerTag: INTEGRATION_CONFIG.containerTag, + customId: "test-generate", apiKey: INTEGRATION_CONFIG.apiKey, mode: "profile", }) @@ -122,14 +123,14 @@ describe.skipIf(!shouldRunIntegration)( const { model } = createIntegrationMockModel() const fetchSpy = vi.spyOn(globalThis, "fetch") - const conversationId = `test-generate-${Date.now()}` + const customId = `test-generate-${Date.now()}` const wrapped = withSupermemory(model, { containerTag: INTEGRATION_CONFIG.containerTag, + customId, apiKey: INTEGRATION_CONFIG.apiKey, mode: "profile", addMemory: "always", - conversationId, }) await wrapped.doGenerate({ @@ -160,17 +161,17 @@ describe.skipIf(!shouldRunIntegration)( fetchSpy.mockRestore() }) - it("should work with conversationId for grouped memories", async () => { + it("should work with customId for grouped memories", async () => { const { model, getCapturedGenerateParams } = createIntegrationMockModel() - const conversationId = `test-conversation-${Date.now()}` + const customId = `test-conversation-${Date.now()}` const wrapped = withSupermemory(model, { containerTag: INTEGRATION_CONFIG.containerTag, + customId, apiKey: INTEGRATION_CONFIG.apiKey, mode: "profile", - conversationId, }) await wrapped.doGenerate({ @@ -196,6 +197,7 @@ describe.skipIf(!shouldRunIntegration)( const wrapped = withSupermemory(model, { containerTag: INTEGRATION_CONFIG.containerTag, + customId: "test-stream", apiKey: INTEGRATION_CONFIG.apiKey, mode: "profile", }) @@ -228,14 +230,14 @@ describe.skipIf(!shouldRunIntegration)( const { model } = createIntegrationMockModel() const fetchSpy = vi.spyOn(globalThis, "fetch") - const conversationId = `test-stream-${Date.now()}` + const customId = `test-stream-${Date.now()}` const wrapped = withSupermemory(model, { containerTag: INTEGRATION_CONFIG.containerTag, + customId, apiKey: INTEGRATION_CONFIG.apiKey, mode: "profile", addMemory: "always", - conversationId, }) const { stream } = await wrapped.doStream({ @@ -273,6 +275,7 @@ describe.skipIf(!shouldRunIntegration)( const wrapped = withSupermemory(model, { containerTag: INTEGRATION_CONFIG.containerTag, + customId: "test-chunks", apiKey: INTEGRATION_CONFIG.apiKey, mode: "profile", }) @@ -311,6 +314,7 @@ describe.skipIf(!shouldRunIntegration)( const wrapped = withSupermemory(model, { containerTag: INTEGRATION_CONFIG.containerTag, + customId: "test-profile", apiKey: INTEGRATION_CONFIG.apiKey, mode: "profile", }) @@ -349,6 +353,7 @@ describe.skipIf(!shouldRunIntegration)( const wrapped = withSupermemory(model, { containerTag: INTEGRATION_CONFIG.containerTag, + customId: "test-query", apiKey: INTEGRATION_CONFIG.apiKey, mode: "query", }) @@ -387,6 +392,7 @@ describe.skipIf(!shouldRunIntegration)( const wrapped = withSupermemory(model, { containerTag: INTEGRATION_CONFIG.containerTag, + customId: "test-full", apiKey: INTEGRATION_CONFIG.apiKey, mode: "full", }) @@ -431,6 +437,7 @@ describe.skipIf(!shouldRunIntegration)( const wrapped = withSupermemory(model, { containerTag: INTEGRATION_CONFIG.containerTag, + customId: "test-template", apiKey: INTEGRATION_CONFIG.apiKey, mode: "profile", 
promptTemplate: customTemplate, @@ -457,6 +464,7 @@ describe.skipIf(!shouldRunIntegration)( const wrapped = withSupermemory(model, { containerTag: INTEGRATION_CONFIG.containerTag, + customId: "test-verbose", apiKey: INTEGRATION_CONFIG.apiKey, mode: "profile", verbose: true, @@ -483,6 +491,7 @@ describe.skipIf(!shouldRunIntegration)( // Use the configured base URL (or default) const wrapped = withSupermemory(model, { containerTag: INTEGRATION_CONFIG.containerTag, + customId: "test-baseurl", apiKey: INTEGRATION_CONFIG.apiKey, mode: "profile", baseUrl: INTEGRATION_CONFIG.baseUrl, @@ -522,6 +531,7 @@ describe.skipIf(!shouldRunIntegration)( const wrapped = withSupermemory(model, { containerTag: INTEGRATION_CONFIG.containerTag, + customId: "test-error", apiKey: INTEGRATION_CONFIG.apiKey, mode: "profile", }) @@ -544,6 +554,7 @@ describe.skipIf(!shouldRunIntegration)( const wrapped = withSupermemory(model, { containerTag: INTEGRATION_CONFIG.containerTag, + customId: "test-invalid-key", apiKey: "invalid-api-key-12345", mode: "profile", }) @@ -566,6 +577,7 @@ describe.skipIf(!shouldRunIntegration)( const wrapped = withSupermemory(model, { containerTag: INTEGRATION_CONFIG.containerTag, + customId: "test-invalid-strict", apiKey: "invalid-api-key-12345", mode: "profile", skipMemoryOnError: false, diff --git a/packages/tools/test/with-supermemory/unit.test.ts b/packages/tools/test/with-supermemory/unit.test.ts index 8461fb97b..2653b3454 100644 --- a/packages/tools/test/with-supermemory/unit.test.ts +++ b/packages/tools/test/with-supermemory/unit.test.ts @@ -73,16 +73,41 @@ describe("Unit: withSupermemory", () => { const mockModel = createMockLanguageModel() expect(() => { - withSupermemory(mockModel, { containerTag: TEST_CONFIG.containerTag }) + withSupermemory(mockModel, { + containerTag: TEST_CONFIG.containerTag, + customId: "test-id", + }) }).toThrow("SUPERMEMORY_API_KEY is not set") }) + it("should throw error if customId is missing or empty", () => { + process.env.SUPERMEMORY_API_KEY = "test-key" + + const mockModel = createMockLanguageModel() + + // omitted customId (plain JS caller) + expect(() => { + withSupermemory(mockModel, { + containerTag: TEST_CONFIG.containerTag, + } as any) + }).toThrow("customId is required") + + // empty string + expect(() => { + withSupermemory(mockModel, { + containerTag: TEST_CONFIG.containerTag, + customId: "", + }) + }).toThrow("customId is required") + }) + it("should successfully create wrapped model with valid API key", () => { process.env.SUPERMEMORY_API_KEY = "test-key" const mockModel = createMockLanguageModel() const wrappedModel = withSupermemory(mockModel, { containerTag: TEST_CONFIG.containerTag, + customId: "test-id", }) expect(wrappedModel).toBeDefined() @@ -103,6 +128,7 @@ describe("Unit: withSupermemory", () => { const inner = Object.create(proto) as LanguageModelV2 const wrappedModel = withSupermemory(inner, { containerTag: TEST_CONFIG.containerTag, + customId: "test-id", }) expect(wrappedModel.specificationVersion).toBe("v2") @@ -129,6 +155,7 @@ describe("Unit: withSupermemory", () => { const ctx = createSupermemoryContext({ containerTag: TEST_CONFIG.containerTag, apiKey: TEST_CONFIG.apiKey, + customId: "test-id", mode: "profile", }) @@ -144,7 +171,7 @@ describe("Unit: withSupermemory", () => { await transformParamsWithMemory(params, ctx) expect(ctx.memoryCache).toBeDefined() - const turnKey = `${TEST_CONFIG.containerTag}::profile:Hello` + const turnKey = `${TEST_CONFIG.containerTag}:test-id:profile:Hello` const cachedMemories = 
ctx.memoryCache.get(turnKey)
			expect(cachedMemories).toBeDefined()
			expect(cachedMemories).toContain("Cached memory")
@@ -161,6 +188,7 @@ describe("Unit: withSupermemory", () => {
			const ctx = createSupermemoryContext({
				containerTag: TEST_CONFIG.containerTag,
				apiKey: TEST_CONFIG.apiKey,
+				customId: "test-id",
				mode: "profile",
			})
@@ -233,6 +261,7 @@ describe("Unit: withSupermemory", () => {
			const ctx = createSupermemoryContext({
				containerTag: TEST_CONFIG.containerTag,
				apiKey: TEST_CONFIG.apiKey,
+				customId: "test-id",
				mode: "profile",
			})
@@ -293,6 +322,7 @@ describe("Unit: withSupermemory", () => {
			const ctx = createSupermemoryContext({
				containerTag: TEST_CONFIG.containerTag,
				apiKey: TEST_CONFIG.apiKey,
+				customId: "test-id",
				mode: "profile",
			})
@@ -314,6 +344,7 @@ describe("Unit: withSupermemory", () => {
			const ctx = createSupermemoryContext({
				containerTag: TEST_CONFIG.containerTag,
				apiKey: TEST_CONFIG.apiKey,
+				customId: "test-id",
				mode: "query",
			})
@@ -331,6 +362,7 @@ describe("Unit: withSupermemory", () => {
			const ctx = createSupermemoryContext({
				containerTag: TEST_CONFIG.containerTag,
				apiKey: TEST_CONFIG.apiKey,
+				customId: "test-id",
				mode: "query",
			})
@@ -358,6 +390,7 @@ describe("Unit: withSupermemory", () => {
			const ctx = createSupermemoryContext({
				containerTag: TEST_CONFIG.containerTag,
				apiKey: TEST_CONFIG.apiKey,
+				customId: "test-id",
				mode: "profile",
			})
@@ -420,6 +453,7 @@ describe("Unit: withSupermemory", () => {
			const wrapped = withSupermemory(inner, {
				containerTag: TEST_CONFIG.containerTag,
+				customId: "test-id",
				apiKey: "k",
			})
@@ -443,6 +477,7 @@ describe("Unit: withSupermemory", () => {
			const inner = createMockLanguageModel()
			const wrapped = withSupermemory(inner, {
				containerTag: TEST_CONFIG.containerTag,
+				customId: "test-id",
				apiKey: "k",
				skipMemoryOnError: false,
			})
@@ -483,6 +518,7 @@ describe("Unit: withSupermemory", () => {
			const wrapped = withSupermemory(inner, {
				containerTag: TEST_CONFIG.containerTag,
+				customId: "test-id",
				apiKey: "k",
			})

From d7ee078ae92a4d24f22af4d4913e4c41b0a1d0ba Mon Sep 17 00:00:00 2001
From: sreedharsreeram <141047751+sreedharsreeram@users.noreply.github.com>
Date: Fri, 24 Apr 2026 23:47:46 +0000
Subject: [PATCH 3/6] mastra object structure updates (#881)

---
 apps/docs/integrations/mastra.mdx             | 138 ++++---
 packages/tools/README.md                      |  63 +--
 packages/tools/src/mastra/processor.ts        | 103 +++--
 packages/tools/src/mastra/types.ts            |  25 +-
 packages/tools/src/mastra/wrapper.ts          |  29 +-
 .../tools/test/mastra/integration.test.ts     | 245 +++++------
 packages/tools/test/mastra/unit.test.ts       | 387 ++++++++++--------
 7 files changed, 545 insertions(+), 445 deletions(-)

diff --git a/apps/docs/integrations/mastra.mdx b/apps/docs/integrations/mastra.mdx
index 0874c763e..b74626988 100644
--- a/apps/docs/integrations/mastra.mdx
+++ b/apps/docs/integrations/mastra.mdx
@@ -34,11 +34,10 @@ const agent = new Agent(withSupermemory(
     model: openai("gpt-4o"),
     instructions: "You are a helpful assistant.",
   },
-  "user-123", // containerTag - scopes memories to this user
   {
+    containerTag: "user-123", // Required: scopes memories to this user
+    customId: "conv-456", // Required: groups messages for contextual memory
     mode: "full",
-    addMemory: "always",
-    threadId: "conv-456",
   }
 ))
 
 const response = await agent.generate("What do you know about me?")
 ```
 
- **Memory saving is disabled by default.** The wrapper only retrieves existing memories. 
To automatically save conversations:
+  **Memory saving is enabled by default.** Conversations are automatically saved to Supermemory. To disable saving:
 
   ```typescript
   const agent = new Agent(withSupermemory(
     { id: "my-assistant", model: openai("gpt-4o"), ... },
-    "user-123",
     {
-      addMemory: "always",
-      threadId: "conv-456" // Required for conversation grouping
+      containerTag: "user-123",
+      customId: "conv-456",
+      addMemory: "never", // Disable automatic conversation saving
     }
   ))
   ```
@@ -96,11 +95,12 @@ sequenceDiagram
 
 | Option | Type | Default | Description |
 |--------|------|---------|-------------|
+| `containerTag` | `string` | **Required** | User/container tag for scoping memories |
+| `customId` | `string` | **Required** | Groups messages into a single document for contextual memory |
 | `apiKey` | `string` | `SUPERMEMORY_API_KEY` env | Your Supermemory API key |
 | `baseUrl` | `string` | `https://api.supermemory.ai` | Custom API endpoint |
 | `mode` | `"profile" \| "query" \| "full"` | `"profile"` | Memory search mode |
-| `addMemory` | `"always" \| "never"` | `"never"` | Auto-save conversations |
-| `threadId` | `string` | - | Conversation ID for grouping messages |
+| `addMemory` | `"always" \| "never"` | `"always"` | Auto-save conversations |
 | `verbose` | `boolean` | `false` | Enable debug logging |
 | `promptTemplate` | `function` | - | Custom memory formatting |
 
@@ -111,19 +111,31 @@
 
 **Profile Mode (Default)** - Retrieves the user's complete profile without query-based filtering:
 
 ```typescript
-const agent = new Agent(withSupermemory(config, "user-123", { mode: "profile" }))
+const agent = new Agent(withSupermemory(config, {
+  containerTag: "user-123",
+  customId: "conv-456",
+  mode: "profile",
+}))
 ```
 
 **Query Mode** - Searches memories based on the user's message:
 
 ```typescript
-const agent = new Agent(withSupermemory(config, "user-123", { mode: "query" }))
+const agent = new Agent(withSupermemory(config, {
+  containerTag: "user-123",
+  customId: "conv-456",
+  mode: "query",
+}))
 ```
 
 **Full Mode** - Combines profile AND query-based search for maximum context:
 
 ```typescript
-const agent = new Agent(withSupermemory(config, "user-123", { mode: "full" }))
+const agent = new Agent(withSupermemory(config, {
+  containerTag: "user-123",
+  customId: "conv-456",
+  mode: "full",
+}))
 ```
 
 ### Mode Comparison
 
@@ -137,26 +149,34 @@
 
 ## Saving Conversations
 
-Enable automatic conversation saving with `addMemory: "always"`. A `threadId` is required to group messages:
+Conversation saving is enabled by default (`addMemory: "always"`). Messages are grouped using the required `customId`:
 
 ```typescript
 const agent = new Agent(withSupermemory(
   { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." },
-  "user-123",
   {
-    addMemory: "always",
-    threadId: "conv-456",
+    containerTag: "user-123",
+    customId: "conv-456", // Required: groups messages for contextual memory
   }
 ))
 
-// All messages in this conversation are saved
+// All messages in this conversation are saved automatically
 await agent.generate("I prefer TypeScript over JavaScript")
 await agent.generate("My favorite framework is Next.js")
 ```
 
-
-  Without a `threadId`, the output processor will log a warning and skip saving. Always provide a `threadId` when using `addMemory: "always"`.
-
+To disable automatic saving:
+
+```typescript
+const agent = new Agent(withSupermemory(
+  { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." 
}, + { + containerTag: "user-123", + customId: "conv-456", + addMemory: "never", // Only retrieve memories, don't save + } +)) +``` --- @@ -182,8 +202,9 @@ const claudePrompt = (data: MemoryPromptData) => ` const agent = new Agent(withSupermemory( { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." }, - "user-123", { + containerTag: "user-123", + customId: "conv-456", mode: "full", promptTemplate: claudePrompt, } @@ -210,8 +231,11 @@ const agent = new Agent({ name: "My Assistant", model: openai("gpt-4o"), inputProcessors: [ - createSupermemoryProcessor("user-123", { + createSupermemoryProcessor({ + containerTag: "user-123", + customId: "conv-456", mode: "full", + addMemory: "never", verbose: true, }), ], @@ -232,9 +256,9 @@ const agent = new Agent({ name: "My Assistant", model: openai("gpt-4o"), outputProcessors: [ - createSupermemoryOutputProcessor("user-123", { - addMemory: "always", - threadId: "conv-456", + createSupermemoryOutputProcessor({ + containerTag: "user-123", + customId: "conv-456", }), ], }) @@ -249,10 +273,10 @@ import { Agent } from "@mastra/core/agent" import { createSupermemoryProcessors } from "@supermemory/tools/mastra" import { openai } from "@ai-sdk/openai" -const { input, output } = createSupermemoryProcessors("user-123", { +const { input, output } = createSupermemoryProcessors({ + containerTag: "user-123", + customId: "conv-456", mode: "full", - addMemory: "always", - threadId: "conv-456", verbose: true, }) @@ -267,9 +291,9 @@ const agent = new Agent({ --- -## Using RequestContext +## Using RequestContext for Dynamic Thread IDs -Mastra's `RequestContext` can provide `threadId` dynamically: +For server setups where one agent instance handles multiple concurrent conversations, use Mastra's `RequestContext` to provide per-request thread IDs. **RequestContext takes precedence** over the construction-time `customId`: ```typescript import { Agent } from "@mastra/core/agent" @@ -279,21 +303,25 @@ import { openai } from "@ai-sdk/openai" const agent = new Agent(withSupermemory( { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." }, - "user-123", { + containerTag: "user-123", + customId: "fallback-conv", // Used only when RequestContext doesn't provide a threadId mode: "full", - addMemory: "always", - // threadId not set - will use RequestContext } )) -// Set threadId dynamically via RequestContext +// Per-request threadId takes precedence over customId const ctx = new RequestContext() -ctx.set(MASTRA_THREAD_ID_KEY, "dynamic-thread-id") +ctx.set(MASTRA_THREAD_ID_KEY, "user-456-session-789") await agent.generate("Hello!", { requestContext: ctx }) +// This conversation is stored under "user-456-session-789", not "fallback-conv" ``` + + **Server-side usage**: Always use `RequestContext` to pass unique conversation IDs per request. Using a fixed `customId` for all requests will merge conversations from different users. + + --- ## Verbose Logging @@ -303,8 +331,11 @@ Enable detailed logging for debugging: ```typescript const agent = new Agent(withSupermemory( { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." 
}, - "user-123", - { verbose: true } + { + containerTag: "user-123", + customId: "conv-456", + verbose: true, + } )) // Console output: @@ -330,7 +361,10 @@ const agent = new Agent(withSupermemory( inputProcessors: [myLoggingProcessor], outputProcessors: [myAnalyticsProcessor], }, - "user-123" + { + containerTag: "user-123", + customId: "conv-456", + } )) ``` @@ -345,15 +379,13 @@ Enhances a Mastra agent config with memory capabilities. ```typescript function withSupermemory( config: T, - containerTag: string, - options?: SupermemoryMastraOptions + options: SupermemoryMastraOptions ): T ``` **Parameters:** - `config` - The Mastra agent configuration object -- `containerTag` - User/container ID for scoping memories -- `options` - Configuration options +- `options` - Configuration options (includes required `containerTag` and `customId`) **Returns:** Enhanced config with Supermemory processors injected @@ -363,8 +395,7 @@ Creates an input processor for memory injection. ```typescript function createSupermemoryProcessor( - containerTag: string, - options?: SupermemoryMastraOptions + options: SupermemoryMastraOptions ): SupermemoryInputProcessor ``` @@ -374,8 +405,7 @@ Creates an output processor for conversation saving. ```typescript function createSupermemoryOutputProcessor( - containerTag: string, - options?: SupermemoryMastraOptions + options: SupermemoryMastraOptions ): SupermemoryOutputProcessor ``` @@ -385,8 +415,7 @@ Creates both processors with shared configuration. ```typescript function createSupermemoryProcessors( - containerTag: string, - options?: SupermemoryMastraOptions + options: SupermemoryMastraOptions ): { input: SupermemoryInputProcessor output: SupermemoryOutputProcessor @@ -397,11 +426,12 @@ function createSupermemoryProcessors( ```typescript interface SupermemoryMastraOptions { + containerTag: string // Required: User/container tag for scoping memories + customId: string // Required: Groups messages for contextual memory generation apiKey?: string baseUrl?: string mode?: "profile" | "query" | "full" - addMemory?: "always" | "never" - threadId?: string + addMemory?: "always" | "never" // Default: "always" verbose?: boolean promptTemplate?: (data: MemoryPromptData) => string } @@ -423,14 +453,16 @@ Processors gracefully handle errors without breaking the agent: - **API errors** - Logged and skipped; agent continues without memories - **Missing API key** - Throws immediately with helpful error message -- **Missing threadId** - Warns in console; skips saving ```typescript // Missing API key throws immediately const agent = new Agent(withSupermemory( { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." }, - "user-123", - { apiKey: undefined } // Will check SUPERMEMORY_API_KEY env + { + containerTag: "user-123", + customId: "conv-456", + apiKey: undefined, // Will check SUPERMEMORY_API_KEY env + } )) // Error: SUPERMEMORY_API_KEY is not set ``` diff --git a/packages/tools/README.md b/packages/tools/README.md index cf09a4c4e..d9ba18f05 100644 --- a/packages/tools/README.md +++ b/packages/tools/README.md @@ -414,7 +414,7 @@ const addResult = await tools.addMemory({ Add persistent memory to [Mastra](https://mastra.ai) AI agents. 
The integration provides processors that: - **Input Processor**: Fetches relevant memories and injects them into the system prompt before LLM calls -- **Output Processor**: Optionally saves conversations to Supermemory after responses +- **Output Processor**: Saves conversations to Supermemory after responses (enabled by default) #### Quick Start with `withSupermemory` Wrapper @@ -433,11 +433,10 @@ const agent = new Agent(withSupermemory( model: openai("gpt-4o"), instructions: "You are a helpful assistant.", }, - "user-123", // containerTag - scopes memories to this user { + containerTag: "user-123", // Required: scopes memories to this user + customId: "conv-456", // Required: groups messages for contextual memory mode: "full", - addMemory: "always", - threadId: "conv-456", } )) @@ -454,10 +453,10 @@ import { Agent } from "@mastra/core/agent" import { createSupermemoryProcessors } from "@supermemory/tools/mastra" import { openai } from "@ai-sdk/openai" -const { input, output } = createSupermemoryProcessors("user-123", { +const { input, output } = createSupermemoryProcessors({ + containerTag: "user-123", + customId: "conv-456", mode: "full", - addMemory: "always", - threadId: "conv-456", verbose: true, // Enable logging }) @@ -484,12 +483,12 @@ import { openai } from "@ai-sdk/openai" async function main() { const userId = "user-alex-123" - const threadId = `thread-${Date.now()}` + const customId = `thread-${Date.now()}` - const { input, output } = createSupermemoryProcessors(userId, { + const { input, output } = createSupermemoryProcessors({ + containerTag: userId, + customId, mode: "profile", // Fetch user profile memories - addMemory: "always", // Save all conversations - threadId, verbose: true, }) @@ -525,13 +524,25 @@ main() ```typescript // Profile mode - good for general personalization -const { input } = createSupermemoryProcessors("user-123", { mode: "profile" }) +const { input } = createSupermemoryProcessors({ + containerTag: "user-123", + customId: "conv-456", + mode: "profile", +}) // Query mode - good for specific lookups -const { input } = createSupermemoryProcessors("user-123", { mode: "query" }) +const { input } = createSupermemoryProcessors({ + containerTag: "user-123", + customId: "conv-456", + mode: "query", +}) // Full mode - comprehensive context -const { input } = createSupermemoryProcessors("user-123", { mode: "full" }) +const { input } = createSupermemoryProcessors({ + containerTag: "user-123", + customId: "conv-456", + mode: "full", +}) ``` #### Custom Prompt Templates @@ -548,7 +559,9 @@ ${data.generalSearchMemories} `.trim() -const { input, output } = createSupermemoryProcessors("user-123", { +const { input, output } = createSupermemoryProcessors({ + containerTag: "user-123", + customId: "conv-456", mode: "full", promptTemplate: customTemplate, }) @@ -556,17 +569,17 @@ const { input, output } = createSupermemoryProcessors("user-123", { #### Using RequestContext for Dynamic Thread IDs -Instead of hardcoding `threadId`, use Mastra's RequestContext for dynamic values: +For server setups where one agent instance handles multiple concurrent conversations, use Mastra's `RequestContext` to provide per-request thread IDs. 
**RequestContext takes precedence** over the construction-time `customId`: ```typescript import { Agent } from "@mastra/core/agent" import { RequestContext, MASTRA_THREAD_ID_KEY } from "@mastra/core/request-context" import { createSupermemoryProcessors } from "@supermemory/tools/mastra" -const { input, output } = createSupermemoryProcessors("user-123", { +const { input, output } = createSupermemoryProcessors({ + containerTag: "user-123", + customId: "fallback-conv", // Used only when RequestContext doesn't provide a threadId mode: "profile", - addMemory: "always", - // threadId not set here - will be read from RequestContext }) const agent = new Agent({ @@ -577,22 +590,26 @@ const agent = new Agent({ outputProcessors: [output], }) -// Set threadId dynamically per request +// Per-request threadId takes precedence over customId const ctx = new RequestContext() -ctx.set(MASTRA_THREAD_ID_KEY, "dynamic-thread-123") +ctx.set(MASTRA_THREAD_ID_KEY, "user-456-session-789") const response = await agent.generate("Hello!", { requestContext: ctx }) +// This conversation is stored under "user-456-session-789", not "fallback-conv" ``` +> **Server-side usage**: Always use `RequestContext` to pass unique conversation IDs per request. Using a fixed `customId` for all requests will merge conversations from different users. + #### Mastra Configuration Options ```typescript interface SupermemoryMastraOptions { + containerTag: string // Required: User/container tag for scoping memories + customId: string // Required: Groups messages into a single document for contextual memory apiKey?: string // Supermemory API key (or use SUPERMEMORY_API_KEY env var) baseUrl?: string // Custom API endpoint mode?: "profile" | "query" | "full" // Memory search mode (default: "profile") - addMemory?: "always" | "never" // Auto-save conversations (default: "never") - threadId?: string // Conversation ID for grouping messages + addMemory?: "always" | "never" // Auto-save conversations (default: "always") verbose?: boolean // Enable debug logging (default: false) promptTemplate?: (data: MemoryPromptData) => string // Custom memory formatting } diff --git a/packages/tools/src/mastra/processor.ts b/packages/tools/src/mastra/processor.ts index d2975127d..e7a39e96b 100644 --- a/packages/tools/src/mastra/processor.ts +++ b/packages/tools/src/mastra/processor.ts @@ -43,11 +43,11 @@ import type { */ interface ProcessorContext { containerTag: string + customId: string apiKey: string baseUrl: string mode: MemoryMode addMemory: "always" | "never" - threadId?: string logger: Logger promptTemplate?: PromptTemplate memoryCache: MemoryCache @@ -57,20 +57,19 @@ interface ProcessorContext { * Creates the shared processor context from options. */ function createProcessorContext( - containerTag: string, - options: SupermemoryMastraOptions = {}, + options: SupermemoryMastraOptions, ): ProcessorContext { const apiKey = validateApiKey(options.apiKey) const baseUrl = normalizeBaseUrl(options.baseUrl) const logger = createLogger(options.verbose ?? false) return { - containerTag, + containerTag: options.containerTag, + customId: options.customId, apiKey, baseUrl, mode: options.mode ?? "profile", - addMemory: options.addMemory ?? "never", - threadId: options.threadId, + addMemory: options.addMemory ?? "always", logger, promptTemplate: options.promptTemplate, memoryCache: new MemoryCache(), @@ -78,19 +77,24 @@ function createProcessorContext( } /** - * Gets the effective threadId from options or RequestContext. 
+ * Gets the effective customId from RequestContext (if provided) or falls back to context. + * Per-request thread ID takes precedence to support dynamic per-conversation IDs in server setups. */ -function getEffectiveThreadId( +function getEffectiveCustomId( ctx: ProcessorContext, requestContext?: RequestContext, -): string | undefined { - if (ctx.threadId) { - return ctx.threadId - } +): string { + // Per-request thread ID takes precedence over construction-time customId if (requestContext) { - return requestContext.get(MASTRA_THREAD_ID_KEY) as string | undefined + const threadId = requestContext.get(MASTRA_THREAD_ID_KEY) as + | string + | undefined + if (threadId) { + return threadId + } } - return undefined + // Fall back to construction-time customId + return ctx.customId } /** @@ -111,7 +115,9 @@ function getEffectiveThreadId( * name: "My Agent", * model: openai("gpt-4o"), * inputProcessors: [ - * new SupermemoryInputProcessor("user-123", { + * new SupermemoryInputProcessor({ + * containerTag: "user-123", + * customId: "conv-456", * mode: "full", * verbose: true, * }), @@ -125,8 +131,8 @@ export class SupermemoryInputProcessor implements Processor { private ctx: ProcessorContext - constructor(containerTag: string, options: SupermemoryMastraOptions = {}) { - this.ctx = createProcessorContext(containerTag, options) + constructor(options: SupermemoryMastraOptions) { + this.ctx = createProcessorContext(options) } async processInput(args: ProcessInputArgs): Promise { @@ -146,7 +152,7 @@ export class SupermemoryInputProcessor implements Processor { return messageList } - const effectiveThreadId = getEffectiveThreadId(this.ctx, requestContext) + const effectiveThreadId = getEffectiveCustomId(this.ctx, requestContext) const turnKey = MemoryCache.makeTurnKey( this.ctx.containerTag, effectiveThreadId, @@ -213,9 +219,10 @@ export class SupermemoryInputProcessor implements Processor { * name: "My Agent", * model: openai("gpt-4o"), * outputProcessors: [ - * new SupermemoryOutputProcessor("user-123", { + * new SupermemoryOutputProcessor({ + * containerTag: "user-123", + * customId: "conv-456", * addMemory: "always", - * threadId: "conv-456", * }), * ], * }) @@ -227,26 +234,20 @@ export class SupermemoryOutputProcessor implements Processor { private ctx: ProcessorContext - constructor(containerTag: string, options: SupermemoryMastraOptions = {}) { - this.ctx = createProcessorContext(containerTag, options) + constructor(options: SupermemoryMastraOptions) { + this.ctx = createProcessorContext(options) } async processOutputResult( args: ProcessOutputResultArgs, ): Promise { - const { messages, messageList, requestContext } = args + const { messages, requestContext } = args if (this.ctx.addMemory !== "always") { return messages } - const effectiveThreadId = getEffectiveThreadId(this.ctx, requestContext) - if (!effectiveThreadId) { - this.ctx.logger.warn( - "No threadId provided for conversation save. 
Provide via options.threadId or RequestContext.", - ) - return messages - } + const effectiveCustomId = getEffectiveCustomId(this.ctx, requestContext) try { const conversationMessages = this.convertToConversationMessages(messages) @@ -257,7 +258,7 @@ export class SupermemoryOutputProcessor implements Processor { } const response = await addConversation({ - conversationId: effectiveThreadId, + conversationId: effectiveCustomId, messages: conversationMessages, containerTags: [this.ctx.containerTag], apiKey: this.ctx.apiKey, @@ -266,7 +267,7 @@ export class SupermemoryOutputProcessor implements Processor { this.ctx.logger.info("Conversation saved successfully", { containerTag: this.ctx.containerTag, - conversationId: effectiveThreadId, + customId: effectiveCustomId, messageCount: conversationMessages.length, responseId: response.id, }) @@ -323,8 +324,7 @@ export class SupermemoryOutputProcessor implements Processor { /** * Creates a Supermemory input processor for memory injection. * - * @param containerTag - The container tag/user ID for scoping memories - * @param options - Configuration options + * @param options - Configuration options including required containerTag and customId * @returns Configured SupermemoryInputProcessor instance * * @example @@ -333,7 +333,9 @@ export class SupermemoryOutputProcessor implements Processor { * import { createSupermemoryProcessor } from "@supermemory/tools/mastra" * import { openai } from "@ai-sdk/openai" * - * const processor = createSupermemoryProcessor("user-123", { + * const processor = createSupermemoryProcessor({ + * containerTag: "user-123", + * customId: "conv-456", * mode: "full", * verbose: true, * }) @@ -347,17 +349,15 @@ export class SupermemoryOutputProcessor implements Processor { * ``` */ export function createSupermemoryProcessor( - containerTag: string, - options: SupermemoryMastraOptions = {}, + options: SupermemoryMastraOptions, ): SupermemoryInputProcessor { - return new SupermemoryInputProcessor(containerTag, options) + return new SupermemoryInputProcessor(options) } /** * Creates a Supermemory output processor for saving conversations. * - * @param containerTag - The container tag/user ID for scoping memories - * @param options - Configuration options + * @param options - Configuration options including required containerTag and customId * @returns Configured SupermemoryOutputProcessor instance * * @example @@ -366,9 +366,10 @@ export function createSupermemoryProcessor( * import { createSupermemoryOutputProcessor } from "@supermemory/tools/mastra" * import { openai } from "@ai-sdk/openai" * - * const processor = createSupermemoryOutputProcessor("user-123", { + * const processor = createSupermemoryOutputProcessor({ + * containerTag: "user-123", + * customId: "conv-456", * addMemory: "always", - * threadId: "conv-456", * }) * * const agent = new Agent({ @@ -380,10 +381,9 @@ export function createSupermemoryProcessor( * ``` */ export function createSupermemoryOutputProcessor( - containerTag: string, - options: SupermemoryMastraOptions = {}, + options: SupermemoryMastraOptions, ): SupermemoryOutputProcessor { - return new SupermemoryOutputProcessor(containerTag, options) + return new SupermemoryOutputProcessor(options) } /** @@ -392,7 +392,6 @@ export function createSupermemoryOutputProcessor( * Use this when you want both memory injection and conversation saving * with consistent settings across both processors. 
* - * @param containerTag - The container tag/user ID for scoping memories * @param options - Configuration options shared by both processors * @returns Object containing both input and output processors * @@ -402,10 +401,11 @@ export function createSupermemoryOutputProcessor( * import { createSupermemoryProcessors } from "@supermemory/tools/mastra" * import { openai } from "@ai-sdk/openai" * - * const { input, output } = createSupermemoryProcessors("user-123", { + * const { input, output } = createSupermemoryProcessors({ + * containerTag: "user-123", + * customId: "conv-456", * mode: "full", * addMemory: "always", - * threadId: "conv-456", * }) * * const agent = new Agent({ @@ -418,14 +418,13 @@ export function createSupermemoryOutputProcessor( * ``` */ export function createSupermemoryProcessors( - containerTag: string, - options: SupermemoryMastraOptions = {}, + options: SupermemoryMastraOptions, ): { input: SupermemoryInputProcessor output: SupermemoryOutputProcessor } { return { - input: new SupermemoryInputProcessor(containerTag, options), - output: new SupermemoryOutputProcessor(containerTag, options), + input: new SupermemoryInputProcessor(options), + output: new SupermemoryOutputProcessor(options), } } diff --git a/packages/tools/src/mastra/types.ts b/packages/tools/src/mastra/types.ts index efe2e7e4e..f1781a516 100644 --- a/packages/tools/src/mastra/types.ts +++ b/packages/tools/src/mastra/types.ts @@ -10,7 +10,6 @@ import type { MemoryMode, AddMemoryMode, MemoryPromptData, - SupermemoryBaseOptions, } from "../shared" // Re-export Mastra core types for consumers @@ -34,14 +33,24 @@ export type { RequestContext } from "@mastra/core/request-context" /** * Configuration options for the Supermemory Mastra processor. - * Extends base options with Mastra-specific settings. */ -export interface SupermemoryMastraOptions extends SupermemoryBaseOptions { - /** - * When using the output processor, set this to enable automatic conversation saving. - * The threadId is used to group messages into a single conversation. - */ - threadId?: string +export interface SupermemoryMastraOptions { + /** Container tag/user ID for scoping memories. Required. */ + containerTag: string + /** Custom ID to group messages into a single document for contextual memory generation. Required. 
*/ + customId: string + /** Supermemory API key (falls back to SUPERMEMORY_API_KEY env var) */ + apiKey?: string + /** Custom Supermemory API base URL */ + baseUrl?: string + /** Memory retrieval mode */ + mode?: MemoryMode + /** Memory persistence mode (default: "always") */ + addMemory?: AddMemoryMode + /** Enable detailed logging of memory search and injection */ + verbose?: boolean + /** Custom function to format memory data into the system prompt */ + promptTemplate?: PromptTemplate } export type { PromptTemplate, MemoryMode, AddMemoryMode, MemoryPromptData } diff --git a/packages/tools/src/mastra/wrapper.ts b/packages/tools/src/mastra/wrapper.ts index 8137e7eed..7900da918 100644 --- a/packages/tools/src/mastra/wrapper.ts +++ b/packages/tools/src/mastra/wrapper.ts @@ -34,11 +34,10 @@ interface AgentConfig { * * The enhanced config includes: * - Input processor: Fetches relevant memories before LLM calls - * - Output processor: Optionally saves conversations after responses + * - Output processor: Saves conversations after responses (when addMemory is "always") * * @param config - The Mastra agent configuration to enhance - * @param containerTag - The container tag/user ID for scoping memories - * @param options - Configuration options for memory behavior + * @param options - Configuration options including required containerTag and customId * @returns Enhanced agent config with Supermemory processors injected * * @example @@ -54,11 +53,11 @@ interface AgentConfig { * model: openai("gpt-4o"), * instructions: "You are a helpful assistant.", * }, - * "user-123", * { + * containerTag: "user-123", + * customId: "conv-456", * mode: "full", * addMemory: "always", - * threadId: "conv-456", * } * ) * @@ -69,13 +68,25 @@ interface AgentConfig { */ export function withSupermemory( config: T, - containerTag: string, - options: SupermemoryMastraOptions = {}, + options: SupermemoryMastraOptions, ): T { + // Runtime guard for breaking API change - catch old 3-arg signature usage + if ( + typeof options !== "object" || + options === null || + !options.containerTag || + !options.customId + ) { + throw new Error( + "withSupermemory: options must be an object with required containerTag and customId fields. " + + "The API changed in v2.0.0 — see https://docs.supermemory.ai/integrations/mastra for the new signature.", + ) + } + validateApiKey(options.apiKey) - const inputProcessor = new SupermemoryInputProcessor(containerTag, options) - const outputProcessor = new SupermemoryOutputProcessor(containerTag, options) + const inputProcessor = new SupermemoryInputProcessor(options) + const outputProcessor = new SupermemoryOutputProcessor(options) const existingInputProcessors = config.inputProcessors ?? [] const existingOutputProcessors = config.outputProcessors ?? 
[] diff --git a/packages/tools/test/mastra/integration.test.ts b/packages/tools/test/mastra/integration.test.ts index f33b974ea..58b79a61d 100644 --- a/packages/tools/test/mastra/integration.test.ts +++ b/packages/tools/test/mastra/integration.test.ts @@ -37,6 +37,7 @@ const INTEGRATION_CONFIG = { apiKey: process.env.SUPERMEMORY_API_KEY || "", baseUrl: process.env.SUPERMEMORY_BASE_URL || "https://api.supermemory.ai", containerTag: "integration-test-mastra", + customId: "integration-test-conversation", } const shouldRunIntegration = !!process.env.SUPERMEMORY_API_KEY @@ -100,14 +101,13 @@ describe.skipIf(!shouldRunIntegration)( () => { describe("SupermemoryInputProcessor", () => { it("should fetch real memories and inject into messageList", async () => { - const processor = new SupermemoryInputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + customId: INTEGRATION_CONFIG.customId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + }) const messageList = createIntegrationMessageList() const messages: MastraDBMessage[] = [ @@ -132,14 +132,13 @@ describe.skipIf(!shouldRunIntegration)( it("should use query mode with user message as search query", async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") - const processor = new SupermemoryInputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "query", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + customId: INTEGRATION_CONFIG.customId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "query", + }) const messageList = createIntegrationMessageList() const args: ProcessInputArgs = { @@ -177,14 +176,13 @@ describe.skipIf(!shouldRunIntegration)( it("should use full mode with both profile and query", async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") - const processor = new SupermemoryInputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "full", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + customId: INTEGRATION_CONFIG.customId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "full", + }) const messageList = createIntegrationMessageList() const args: ProcessInputArgs = { @@ -217,14 +215,13 @@ describe.skipIf(!shouldRunIntegration)( it("should cache memories for repeated calls with same message", async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") - const processor = new SupermemoryInputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + customId: INTEGRATION_CONFIG.customId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + }) const messages: MastraDBMessage[] = [ createMessage("user", "Cache test message"), @@ -269,15 +266,14 @@ describe.skipIf(!shouldRunIntegration)( generalSearchMemories: string }) => `${data.userMemories}` - const processor = new 
SupermemoryInputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - promptTemplate: customTemplate, - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + customId: INTEGRATION_CONFIG.customId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + promptTemplate: customTemplate, + }) const messageList = createIntegrationMessageList() const args: ProcessInputArgs = { @@ -299,17 +295,15 @@ describe.skipIf(!shouldRunIntegration)( it("should save conversation when addMemory is always", async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") - const threadId = `test-mastra-${Date.now()}` + const customId = `test-mastra-${Date.now()}` - const processor = new SupermemoryOutputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - addMemory: "always", - threadId, - }, - ) + const processor = new SupermemoryOutputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + customId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + addMemory: "always", + }) const args: ProcessOutputResultArgs = { messages: [ @@ -336,15 +330,13 @@ describe.skipIf(!shouldRunIntegration)( it("should not save when addMemory is never", async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") - const processor = new SupermemoryOutputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - addMemory: "never", - threadId: "test-thread", - }, - ) + const processor = new SupermemoryOutputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + customId: "test-thread", + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + addMemory: "never", + }) const args: ProcessOutputResultArgs = { messages: [ @@ -368,17 +360,16 @@ describe.skipIf(!shouldRunIntegration)( fetchSpy.mockRestore() }) - it("should use threadId from RequestContext when not in options", async () => { + it("should use threadId from RequestContext when available", async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") - const processor = new SupermemoryOutputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - addMemory: "always", - }, - ) + const processor = new SupermemoryOutputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + customId: INTEGRATION_CONFIG.customId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + addMemory: "always", + }) const contextThreadId = `context-thread-${Date.now()}` const requestContext = new RequestContext() @@ -410,16 +401,14 @@ describe.skipIf(!shouldRunIntegration)( describe("createSupermemoryProcessors", () => { it("should create working input and output processors", async () => { - const { input, output } = createSupermemoryProcessors( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - addMemory: "always", - threadId: `processors-test-${Date.now()}`, - }, - ) + const { input, output } = createSupermemoryProcessors({ + containerTag: INTEGRATION_CONFIG.containerTag, + customId: `processors-test-${Date.now()}`, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + addMemory: "always", + 
}) const messageList = createIntegrationMessageList() const inputArgs: ProcessInputArgs = { @@ -455,17 +444,14 @@ describe.skipIf(!shouldRunIntegration)( model: "gpt-4o", } - const enhanced = withSupermemory( - config, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - addMemory: "always", - threadId: `wrapper-test-${Date.now()}`, - }, - ) + const enhanced = withSupermemory(config, { + containerTag: INTEGRATION_CONFIG.containerTag, + customId: `wrapper-test-${Date.now()}`, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + addMemory: "always", + }) expect(enhanced.id).toBe("test-mastra-agent") expect(enhanced.name).toBe("Test Mastra Agent") @@ -511,15 +497,13 @@ describe.skipIf(!shouldRunIntegration)( outputProcessors: [existingOutputProcessor], } - const enhanced = withSupermemory( - config, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - }, - ) + const enhanced = withSupermemory(config, { + containerTag: INTEGRATION_CONFIG.containerTag, + customId: INTEGRATION_CONFIG.customId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + }) expect(enhanced.inputProcessors).toHaveLength(2) expect(enhanced.outputProcessors).toHaveLength(2) @@ -534,15 +518,14 @@ describe.skipIf(!shouldRunIntegration)( describe("Options", () => { it("verbose mode should not break functionality", async () => { - const processor = new SupermemoryInputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - verbose: true, - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + customId: INTEGRATION_CONFIG.customId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + verbose: true, + }) const messageList = createIntegrationMessageList() const args: ProcessInputArgs = { @@ -561,14 +544,13 @@ describe.skipIf(!shouldRunIntegration)( it("custom baseUrl should be used for API calls", async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") - const processor = new SupermemoryInputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + customId: INTEGRATION_CONFIG.customId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + }) const args: ProcessInputArgs = { messages: [createMessage("user", "Base URL test")], @@ -595,14 +577,13 @@ describe.skipIf(!shouldRunIntegration)( describe("Error handling", () => { it("should handle invalid API key gracefully", async () => { - const processor = new SupermemoryInputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: "invalid-api-key-12345", - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + customId: INTEGRATION_CONFIG.customId, + apiKey: "invalid-api-key-12345", + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + }) const messageList = createIntegrationMessageList() const args: ProcessInputArgs = { @@ -619,15 +600,13 @@ describe.skipIf(!shouldRunIntegration)( }) 
it("output processor should handle save errors gracefully", async () => { - const processor = new SupermemoryOutputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: "invalid-api-key-12345", - baseUrl: INTEGRATION_CONFIG.baseUrl, - addMemory: "always", - threadId: "error-test", - }, - ) + const processor = new SupermemoryOutputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + customId: "error-test", + apiKey: "invalid-api-key-12345", + baseUrl: INTEGRATION_CONFIG.baseUrl, + addMemory: "always", + }) const args: ProcessOutputResultArgs = { messages: [ diff --git a/packages/tools/test/mastra/unit.test.ts b/packages/tools/test/mastra/unit.test.ts index 0161546dc..82b0cac6c 100644 --- a/packages/tools/test/mastra/unit.test.ts +++ b/packages/tools/test/mastra/unit.test.ts @@ -29,6 +29,7 @@ const TEST_CONFIG = { apiKey: "test-api-key", baseUrl: "https://api.supermemory.ai", containerTag: "test-mastra-user", + customId: "test-conversation", } interface MockAgentConfig { @@ -128,8 +129,11 @@ describe("SupermemoryInputProcessor", () => { }) describe("constructor", () => { - it("should create processor with default options", () => { - const processor = new SupermemoryInputProcessor(TEST_CONFIG.containerTag) + it("should create processor with required options", () => { + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + }) expect(processor.id).toBe("supermemory-input") expect(processor.name).toBe("Supermemory Memory Injection") }) @@ -138,19 +142,21 @@ describe("SupermemoryInputProcessor", () => { delete process.env.SUPERMEMORY_API_KEY expect(() => { - new SupermemoryInputProcessor(TEST_CONFIG.containerTag) + new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + }) }).toThrow("SUPERMEMORY_API_KEY is not set") }) it("should accept API key via options", () => { delete process.env.SUPERMEMORY_API_KEY - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: "custom-key", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + apiKey: "custom-key", + }) expect(processor.id).toBe("supermemory-input") }) }) @@ -168,13 +174,12 @@ describe("SupermemoryInputProcessor", () => { ), }) - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - mode: "profile", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + apiKey: TEST_CONFIG.apiKey, + mode: "profile", + }) const messageList = createMockMessageList() const messages: MastraDBMessage[] = [createMessage("user", "Hello")] @@ -203,13 +208,12 @@ describe("SupermemoryInputProcessor", () => { Promise.resolve(createMockProfileResponse(["Cached memory"])), }) - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - mode: "profile", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + apiKey: TEST_CONFIG.apiKey, + mode: "profile", + }) const messages: MastraDBMessage[] = [createMessage("user", "Hello")] @@ -249,13 +253,12 @@ describe("SupermemoryInputProcessor", () => { }) }) - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - mode: "query", - }, - ) + 
const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + apiKey: TEST_CONFIG.apiKey, + mode: "query", + }) const args1: ProcessInputArgs = { messages: [createMessage("user", "First message")], @@ -281,13 +284,12 @@ describe("SupermemoryInputProcessor", () => { }) it("should return messageList in query mode when no user message", async () => { - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - mode: "query", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + apiKey: TEST_CONFIG.apiKey, + mode: "query", + }) const messageList = createMockMessageList() const args: ProcessInputArgs = { @@ -313,13 +315,12 @@ describe("SupermemoryInputProcessor", () => { text: () => Promise.resolve("Server error"), }) - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - mode: "profile", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + apiKey: TEST_CONFIG.apiKey, + mode: "profile", + }) const messageList = createMockMessageList() const args: ProcessInputArgs = { @@ -342,14 +343,12 @@ describe("SupermemoryInputProcessor", () => { json: () => Promise.resolve(createMockProfileResponse(["Memory"])), }) - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - threadId: "thread-123", - mode: "profile", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: "thread-123", + apiKey: TEST_CONFIG.apiKey, + mode: "profile", + }) const args: ProcessInputArgs = { messages: [createMessage("user", "Hello")], @@ -370,13 +369,12 @@ describe("SupermemoryInputProcessor", () => { json: () => Promise.resolve(createMockProfileResponse(["Memory"])), }) - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - mode: "profile", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + apiKey: TEST_CONFIG.apiKey, + mode: "profile", + }) const requestContext = new RequestContext() requestContext.set(MASTRA_THREAD_ID_KEY, "ctx-thread-456") @@ -401,13 +399,12 @@ describe("SupermemoryInputProcessor", () => { json: () => Promise.resolve(createMockProfileResponse(["Memory"])), }) - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - mode: "query", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + apiKey: TEST_CONFIG.apiKey, + mode: "query", + }) const messages: MastraDBMessage[] = [ { @@ -464,8 +461,11 @@ describe("SupermemoryOutputProcessor", () => { }) describe("constructor", () => { - it("should create processor with default options", () => { - const processor = new SupermemoryOutputProcessor(TEST_CONFIG.containerTag) + it("should create processor with required options", () => { + const processor = new SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + }) expect(processor.id).toBe("supermemory-output") expect(processor.name).toBe("Supermemory Conversation Save") }) @@ -478,14 +478,12 @@ 
describe("SupermemoryOutputProcessor", () => { json: () => Promise.resolve(createMockConversationResponse()), }) - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - addMemory: "always", - threadId: "conv-456", - }, - ) + const processor = new SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: "conv-456", + apiKey: TEST_CONFIG.apiKey, + addMemory: "always", + }) const messages: MastraDBMessage[] = [ createMessage("user", "Hello"), @@ -522,14 +520,12 @@ describe("SupermemoryOutputProcessor", () => { }) it("should not save conversation when addMemory is never", async () => { - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - addMemory: "never", - threadId: "conv-456", - }, - ) + const processor = new SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: "conv-456", + apiKey: TEST_CONFIG.apiKey, + addMemory: "never", + }) const args: ProcessOutputResultArgs = { messages: [ @@ -546,14 +542,18 @@ describe("SupermemoryOutputProcessor", () => { expect(fetchMock).not.toHaveBeenCalled() }) - it("should not save when no threadId provided", async () => { - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - addMemory: "always", - }, - ) + it("should use customId from options for conversation save", async () => { + fetchMock.mockResolvedValue({ + ok: true, + json: () => Promise.resolve(createMockConversationResponse()), + }) + + const processor = new SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: "my-custom-id", + apiKey: TEST_CONFIG.apiKey, + addMemory: "always", + }) const args: ProcessOutputResultArgs = { messages: [ @@ -567,22 +567,25 @@ describe("SupermemoryOutputProcessor", () => { await processor.processOutputResult(args) - expect(fetchMock).not.toHaveBeenCalled() + expect(fetchMock).toHaveBeenCalledTimes(1) + const callBody = JSON.parse( + (fetchMock.mock.calls[0]?.[1] as { body: string }).body, + ) + expect(callBody.conversationId).toBe("my-custom-id") }) - it("should use threadId from requestContext", async () => { + it("should use threadId from requestContext (takes precedence over customId)", async () => { fetchMock.mockResolvedValue({ ok: true, json: () => Promise.resolve(createMockConversationResponse()), }) - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - addMemory: "always", - }, - ) + const processor = new SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: "fallback-custom-id", + apiKey: TEST_CONFIG.apiKey, + addMemory: "always", + }) const requestContext = new RequestContext() requestContext.set(MASTRA_THREAD_ID_KEY, "ctx-thread-789") @@ -604,23 +607,55 @@ describe("SupermemoryOutputProcessor", () => { const callBody = JSON.parse( (fetchMock.mock.calls[0]?.[1] as { body: string }).body, ) + // RequestContext threadId takes precedence for per-request dynamic IDs expect(callBody.conversationId).toBe("ctx-thread-789") }) - it("should skip system messages when saving", async () => { + it("should fall back to customId when requestContext has no threadId", async () => { fetchMock.mockResolvedValue({ ok: true, json: () => Promise.resolve(createMockConversationResponse()), }) - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - addMemory: "always", - 
threadId: "conv-456", - }, + const processor = new SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: "fallback-custom-id", + apiKey: TEST_CONFIG.apiKey, + addMemory: "always", + }) + + const args: ProcessOutputResultArgs = { + messages: [ + createMessage("user", "Hello"), + createMessage("assistant", "Hi!"), + ], + messageList: createMockMessageList(), + abort: vi.fn() as never, + retryCount: 0, + } + + await processor.processOutputResult(args) + + expect(fetchMock).toHaveBeenCalledTimes(1) + const callBody = JSON.parse( + (fetchMock.mock.calls[0]?.[1] as { body: string }).body, ) + // Falls back to customId when no RequestContext threadId + expect(callBody.conversationId).toBe("fallback-custom-id") + }) + + it("should skip system messages when saving", async () => { + fetchMock.mockResolvedValue({ + ok: true, + json: () => Promise.resolve(createMockConversationResponse()), + }) + + const processor = new SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: "conv-456", + apiKey: TEST_CONFIG.apiKey, + addMemory: "always", + }) const messages: MastraDBMessage[] = [ createMessage("system", "You are a helpful assistant"), @@ -652,14 +687,12 @@ describe("SupermemoryOutputProcessor", () => { json: () => Promise.resolve(createMockConversationResponse()), }) - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - addMemory: "always", - threadId: "conv-456", - }, - ) + const processor = new SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: "conv-456", + apiKey: TEST_CONFIG.apiKey, + addMemory: "always", + }) const messages: MastraDBMessage[] = [ { @@ -708,14 +741,12 @@ describe("SupermemoryOutputProcessor", () => { text: () => Promise.resolve("Server error"), }) - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - addMemory: "always", - threadId: "conv-456", - }, - ) + const processor = new SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: "conv-456", + apiKey: TEST_CONFIG.apiKey, + addMemory: "always", + }) const args: ProcessOutputResultArgs = { messages: [ @@ -732,14 +763,12 @@ describe("SupermemoryOutputProcessor", () => { }) it("should not save when no messages to save", async () => { - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - addMemory: "always", - threadId: "conv-456", - }, - ) + const processor = new SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: "conv-456", + apiKey: TEST_CONFIG.apiKey, + addMemory: "always", + }) const args: ProcessOutputResultArgs = { messages: [], @@ -773,13 +802,18 @@ describe("Factory functions", () => { describe("createSupermemoryProcessor", () => { it("should create input processor", () => { - const processor = createSupermemoryProcessor(TEST_CONFIG.containerTag) + const processor = createSupermemoryProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + }) expect(processor).toBeInstanceOf(SupermemoryInputProcessor) expect(processor.id).toBe("supermemory-input") }) it("should pass options to processor", () => { - const processor = createSupermemoryProcessor(TEST_CONFIG.containerTag, { + const processor = createSupermemoryProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, apiKey: "custom-key", mode: "full", }) @@ -789,45 +823,43 @@ 
describe("Factory functions", () => { describe("createSupermemoryOutputProcessor", () => { it("should create output processor", () => { - const processor = createSupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - ) + const processor = createSupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + }) expect(processor).toBeInstanceOf(SupermemoryOutputProcessor) expect(processor.id).toBe("supermemory-output") }) it("should pass options to processor", () => { - const processor = createSupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: "custom-key", - addMemory: "always", - threadId: "conv-123", - }, - ) + const processor = createSupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + customId: "conv-123", + apiKey: "custom-key", + addMemory: "always", + }) expect(processor).toBeInstanceOf(SupermemoryOutputProcessor) }) }) describe("createSupermemoryProcessors", () => { it("should create both input and output processors", () => { - const { input, output } = createSupermemoryProcessors( - TEST_CONFIG.containerTag, - ) + const { input, output } = createSupermemoryProcessors({ + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + }) expect(input).toBeInstanceOf(SupermemoryInputProcessor) expect(output).toBeInstanceOf(SupermemoryOutputProcessor) }) it("should share options between processors", () => { - const { input, output } = createSupermemoryProcessors( - TEST_CONFIG.containerTag, - { - apiKey: "custom-key", - mode: "full", - addMemory: "always", - threadId: "conv-123", - }, - ) + const { input, output } = createSupermemoryProcessors({ + containerTag: TEST_CONFIG.containerTag, + customId: "conv-123", + apiKey: "custom-key", + mode: "full", + addMemory: "always", + }) expect(input.id).toBe("supermemory-input") expect(output.id).toBe("supermemory-output") }) @@ -857,7 +889,10 @@ describe("withSupermemory", () => { const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" } expect(() => { - withSupermemory(config, TEST_CONFIG.containerTag) + withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + }) }).toThrow("SUPERMEMORY_API_KEY is not set") }) @@ -865,7 +900,9 @@ describe("withSupermemory", () => { delete process.env.SUPERMEMORY_API_KEY const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" } - const enhanced = withSupermemory(config, TEST_CONFIG.containerTag, { + const enhanced = withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, apiKey: "custom-key", }) @@ -877,7 +914,10 @@ describe("withSupermemory", () => { describe("processor injection", () => { it("should inject input and output processors", () => { const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" } - const enhanced = withSupermemory(config, TEST_CONFIG.containerTag) + const enhanced = withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + }) expect(enhanced.inputProcessors).toHaveLength(1) expect(enhanced.outputProcessors).toHaveLength(1) @@ -892,7 +932,10 @@ describe("withSupermemory", () => { model: "gpt-4", customProp: "value", } - const enhanced = withSupermemory(config, TEST_CONFIG.containerTag) + const enhanced = withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + }) expect(enhanced.id).toBe("test-agent") expect(enhanced.name).toBe("Test Agent") @@ -911,7 +954,10 @@ 
describe("withSupermemory", () => { inputProcessors: [existingInputProcessor], } - const enhanced = withSupermemory(config, TEST_CONFIG.containerTag) + const enhanced = withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + }) expect(enhanced.inputProcessors).toHaveLength(2) expect(enhanced.inputProcessors?.[0]?.id).toBe("supermemory-input") @@ -929,7 +975,10 @@ describe("withSupermemory", () => { outputProcessors: [existingOutputProcessor], } - const enhanced = withSupermemory(config, TEST_CONFIG.containerTag) + const enhanced = withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + }) expect(enhanced.outputProcessors).toHaveLength(2) expect(enhanced.outputProcessors?.[0]?.id).toBe("existing-output") @@ -946,7 +995,10 @@ describe("withSupermemory", () => { outputProcessors: [existingOutput], } - const enhanced = withSupermemory(config, TEST_CONFIG.containerTag) + const enhanced = withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + customId: TEST_CONFIG.customId, + }) expect(enhanced.inputProcessors).toHaveLength(2) expect(enhanced.outputProcessors).toHaveLength(2) @@ -960,10 +1012,11 @@ describe("withSupermemory", () => { describe("options passthrough", () => { it("should pass options to processors", () => { const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" } - const enhanced = withSupermemory(config, TEST_CONFIG.containerTag, { + const enhanced = withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + customId: "conv-123", mode: "full", addMemory: "always", - threadId: "conv-123", verbose: true, }) From 589edd343709f45ebc2f9e702b51abc0c7690c31 Mon Sep 17 00:00:00 2001 From: sreedharsreeram <141047751+sreedharsreeram@users.noreply.github.com> Date: Sat, 25 Apr 2026 01:43:13 +0000 Subject: [PATCH 4/6] updated typescript and python sdk (#878) --- apps/docs/integrations/openai.mdx | 23 +++++--- packages/tools/README.md | 57 ++++++------------- packages/tools/src/openai/index.ts | 40 ++++++++----- packages/tools/src/openai/middleware.ts | 51 +++++++++-------- .../test/chatapp/app/api/openai-chat/route.ts | 5 +- packages/tools/test/openai-responses-test.ts | 4 +- 6 files changed, 88 insertions(+), 92 deletions(-) diff --git a/apps/docs/integrations/openai.mdx b/apps/docs/integrations/openai.mdx index 66b797c19..13cbb58fb 100644 --- a/apps/docs/integrations/openai.mdx +++ b/apps/docs/integrations/openai.mdx @@ -44,9 +44,11 @@ import { withSupermemory } from "@supermemory/tools/openai" const openai = new OpenAI() // Wrap client with memory - memories auto-injected into system prompts -const client = withSupermemory(openai, "user-123", { - mode: "full", // "profile" | "query" | "full" - addMemory: "always", // "always" | "never" +const client = withSupermemory(openai, { + containerTag: "user-123", // Required: identifies the user/container + customId: "conversation-456", // Required: groups messages into the same document + mode: "full", // "profile" | "query" | "full" + addMemory: "always", // "always" (default) | "never" }) // Use normally - memories are automatically included @@ -62,16 +64,19 @@ const response = await client.chat.completions.create({ ### Configuration Options ```typescript -const client = withSupermemory(openai, "user-123", { +const client = withSupermemory(openai, { + // Required: identifies the user/container + containerTag: "user-123", + + // Required: Group messages into the same document + customId: "conv-456", + // Memory 
search mode mode: "full", // "profile" (user profile only), "query" (search only), "full" (both) - // Auto-save conversations as memories + // Auto-save conversations as memories (default: "always") addMemory: "always", // "always" | "never" - // Group messages into conversations - conversationId: "conv-456", - // Enable debug logging verbose: true, @@ -91,7 +96,7 @@ const client = withSupermemory(openai, "user-123", { ### Works with Responses API Too ```typescript -const client = withSupermemory(openai, "user-123", { mode: "full" }) +const client = withSupermemory(openai, { containerTag: "user-123", customId: "conv-456", mode: "full" }) // Memories injected into instructions const response = await client.responses.create({ diff --git a/packages/tools/README.md b/packages/tools/README.md index d9ba18f05..69856f69c 100644 --- a/packages/tools/README.md +++ b/packages/tools/README.md @@ -268,10 +268,11 @@ The `withSupermemory` function creates an OpenAI client with SuperMemory middlew import { withSupermemory } from "@supermemory/tools/openai" // Create OpenAI client with supermemory middleware -const openaiWithSupermemory = withSupermemory("user-123", { - conversationId: "conversation-456", +const openaiWithSupermemory = withSupermemory(openai, { + containerTag: "user-123", // Required: identifies the user/container + customId: "conversation-456", // Required: groups messages into the same document mode: "full", - addMemory: "always", + addMemory: "always", // Default: "always" verbose: true, }) @@ -291,37 +292,12 @@ console.log(completion.choices[0]?.message?.content) The middleware supports the same configuration options as the AI SDK version: ```typescript -const openaiWithSupermemory = withSupermemory("user-123", { - conversationId: "conversation-456", // Group messages for contextual memory - mode: "full", // "profile" | "query" | "full" - addMemory: "always", // "always" | "never" - verbose: true, // Enable detailed logging -}) -``` - -#### Advanced Usage with Custom OpenAI Options - -You can also pass custom OpenAI client options: - -```typescript -import { withSupermemory } from "@supermemory/tools/openai" - -const openaiWithSupermemory = withSupermemory( - "user-123", - { - mode: "profile", - addMemory: "always", - }, - { - baseURL: "https://api.openai.com/v1", - organization: "org-123", - }, - "custom-api-key" // Optional: custom API key -) - -const completion = await openaiWithSupermemory.chat.completions.create({ - model: "gpt-4o-mini", - messages: [{ role: "user", content: "Tell me about my preferences" }], +const openaiWithSupermemory = withSupermemory(openai, { + containerTag: "user-123", // Required: identifies the user/container + customId: "conversation-456", // Required: groups messages for contextual memory + mode: "full", // "profile" | "query" | "full" + addMemory: "always", // "always" (default) | "never" + verbose: true, // Enable detailed logging }) ``` @@ -340,8 +316,9 @@ export async function POST(req: Request) { conversationId: string } - const openaiWithSupermemory = withSupermemory("user-123", { - conversationId, + const openaiWithSupermemory = withSupermemory(openai, { + containerTag: "user-123", + customId: conversationId, mode: "full", addMemory: "always", verbose: true, @@ -670,11 +647,11 @@ The `withSupermemory` middleware accepts a configuration object as the second ar ```typescript interface WithSupermemoryOptions { - containerTag: string - customId: string + containerTag: string // Required: identifies the user/container + customId: string // Required: 
groups messages into the same document verbose?: boolean mode?: "profile" | "query" | "full" - addMemory?: "always" | "never" + addMemory?: "always" | "never" // Default: "always" /** Optional Supermemory API key. Use this in browser environments. */ apiKey?: string baseUrl?: string @@ -687,7 +664,7 @@ interface WithSupermemoryOptions { - **customId**: Required. Custom ID to group messages into a single document for contextual memory generation - **verbose**: Enable detailed logging of memory search and injection process (default: false) - **mode**: Memory search mode - "profile" (default), "query", or "full" -- **addMemory**: Automatic memory storage mode - "always" or "never" (default: "never") +- **addMemory**: Automatic memory storage mode - "always" (default) or "never" - **skipMemoryOnError**: If memory retrieval fails or hits the internal timeout, continue with the original prompt (default: true) ## Available Tools diff --git a/packages/tools/src/openai/index.ts b/packages/tools/src/openai/index.ts index 17a37a9cb..8923b652c 100644 --- a/packages/tools/src/openai/index.ts +++ b/packages/tools/src/openai/index.ts @@ -15,12 +15,12 @@ import { * the instructions parameter (appends to existing or creates new instructions). * * @param openaiClient - The OpenAI client to wrap with SuperMemory middleware - * @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID) - * @param options - Optional configuration options for the middleware - * @param options.conversationId - Optional conversation ID to group messages into a single document for contextual memory generation + * @param options - Configuration options for the middleware + * @param options.containerTag - Required. The container tag/identifier for memory search (e.g., user ID, project ID) + * @param options.customId - Required. Custom ID to group messages into a single document for contextual memory generation * @param options.verbose - Optional flag to enable detailed logging of memory search and injection process (default: false) * @param options.mode - Optional mode for memory search: "profile" (default), "query", or "full" - * @param options.addMemory - Optional mode for memory addition: "always", "never" (default) + * @param options.addMemory - Optional mode for memory addition: "always" (default), "never" * * @returns An OpenAI client with SuperMemory middleware injected for both Chat Completions and Responses APIs * @@ -33,8 +33,9 @@ import { * const openai = new OpenAI({ * apiKey: process.env.OPENAI_API_KEY, * }) - * const openaiWithSupermemory = withSupermemory(openai, "user-123", { - * conversationId: "conversation-456", + * const openaiWithSupermemory = withSupermemory(openai, { + * containerTag: "user-123", + * customId: "conversation-456", * mode: "full", * addMemory: "always" * }) @@ -60,28 +61,37 @@ import { */ export function withSupermemory( openaiClient: OpenAI, - containerTag: string, - options?: OpenAIMiddlewareOptions, + options: OpenAIMiddlewareOptions, ) { if (!process.env.SUPERMEMORY_API_KEY) { throw new Error("SUPERMEMORY_API_KEY is not set") } - const conversationId = options?.conversationId - const verbose = options?.verbose ?? false - const mode = options?.mode ?? "profile" - const addMemory = options?.addMemory ?? 
"never" - const baseUrl = options?.baseUrl + if (!options.containerTag) { + throw new Error( + "containerTag is required — provide a non-empty string to identify the user/container", + ) + } + + if (!options.customId) { + throw new Error( + "customId is required — provide a non-empty string to group messages into a single document", + ) + } + + const { containerTag } = options + const verbose = options.verbose ?? false + const mode = options.mode ?? "profile" + const addMemory = options.addMemory ?? "always" const openaiWithSupermemory = createOpenAIMiddleware( openaiClient, containerTag, { - conversationId, + ...options, verbose, mode, addMemory, - baseUrl, }, ) diff --git a/packages/tools/src/openai/middleware.ts b/packages/tools/src/openai/middleware.ts index bce986d0f..7fc4267e7 100644 --- a/packages/tools/src/openai/middleware.ts +++ b/packages/tools/src/openai/middleware.ts @@ -12,7 +12,10 @@ const normalizeBaseUrl = (url?: string): string => { } export interface OpenAIMiddlewareOptions { - conversationId?: string + /** Container tag/identifier for memory search (e.g., user ID, project ID). Required. */ + containerTag: string + /** Custom ID to group messages into a single document. Required. */ + customId: string verbose?: boolean mode?: "profile" | "query" | "full" addMemory?: "always" | "never" @@ -338,11 +341,13 @@ const addMemoryTool = async ( text: (c as { type: "text"; text: string }).text, })) : "", - ...((msg as any).name && { name: (msg as any).name }), - ...((msg as any).tool_calls && { tool_calls: (msg as any).tool_calls }), - ...((msg as any).tool_call_id && { - tool_call_id: (msg as any).tool_call_id, - }), + ...("name" in msg && msg.name && { name: msg.name }), + ...("tool_calls" in msg && + msg.tool_calls && { tool_calls: msg.tool_calls }), + ...("tool_call_id" in msg && + msg.tool_call_id && { + tool_call_id: msg.tool_call_id, + }), })) const response = await addConversation({ @@ -355,7 +360,7 @@ const addMemoryTool = async ( logger.info("Conversation saved successfully via /v4/conversations", { containerTag, - conversationId, + customId, messageCount: messages.length, responseId: response.id, }) @@ -391,7 +396,7 @@ const addMemoryTool = async ( * * @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID) * @param options - Optional configuration options for the middleware - * @param options.conversationId - Optional conversation ID to group messages for contextual memory generation + * @param options.customId - Optional conversation ID to group messages for contextual memory generation * @param options.verbose - Enable detailed logging of memory operations (default: false) * @param options.mode - Memory search mode: "profile" (all memories), "query" (search-based), or "full" (both) (default: "profile") * @param options.addMemory - Automatic memory storage mode: "always" or "never" (default: "never") @@ -401,7 +406,7 @@ const addMemoryTool = async ( * @example * ```typescript * const openaiWithSupermemory = createOpenAIMiddleware(openai, "user-123", { - * conversationId: "conversation-456", + * customId: "conversation-456", * mode: "full", * addMemory: "always", * verbose: true @@ -421,9 +426,9 @@ export function createOpenAIMiddleware( ...(baseUrl !== "https://api.supermemory.ai" ? { baseURL: baseUrl } : {}), }) - const conversationId = options?.conversationId + const customId = options?.customId const mode = options?.mode ?? "profile" - const addMemory = options?.addMemory ?? "never" + const addMemory = options?.addMemory ?? 
"always" const originalCreate = openaiClient.chat.completions.create const originalResponsesCreate = openaiClient.responses?.create @@ -534,20 +539,18 @@ export function createOpenAIMiddleware( logger.info("Starting memory search for Responses API", { containerTag, - conversationId, + customId, mode, }) - const operations: Promise[] = [] + const operations: Promise[] = [] if (addMemory === "always" && input?.trim()) { - const content = conversationId ? `Input: ${input}` : input - const customId = conversationId - ? `conversation:${conversationId}` - : undefined + const content = customId ? `Input: ${input}` : input + const memoryCustomId = customId ? `conversation:${customId}` : undefined operations.push( - addMemoryTool(client, containerTag, content, customId, logger), + addMemoryTool(client, containerTag, content, memoryCustomId, logger), ) } @@ -590,28 +593,26 @@ export function createOpenAIMiddleware( logger.info("Starting memory search", { containerTag, - conversationId, + customId, mode, }) - const operations: Promise[] = [] + const operations: Promise[] = [] if (addMemory === "always") { const userMessage = getLastUserMessage(messages) if (userMessage?.trim()) { - const content = conversationId + const content = customId ? getConversationContent(messages) : userMessage - const customId = conversationId - ? `conversation:${conversationId}` - : undefined + const memoryCustomId = customId ? `conversation:${customId}` : undefined operations.push( addMemoryTool( client, containerTag, content, - customId, + memoryCustomId, logger, messages, process.env.SUPERMEMORY_API_KEY, diff --git a/packages/tools/test/chatapp/app/api/openai-chat/route.ts b/packages/tools/test/chatapp/app/api/openai-chat/route.ts index 16b53c235..64165be59 100644 --- a/packages/tools/test/chatapp/app/api/openai-chat/route.ts +++ b/packages/tools/test/chatapp/app/api/openai-chat/route.ts @@ -13,8 +13,9 @@ export async function POST(req: Request) { apiKey: process.env.OPENAI_API_KEY, }) - const openaiWithSupermemory = withSupermemory(openai, "user-123", { - conversationId, + const openaiWithSupermemory = withSupermemory(openai, { + containerTag: "user-123", + customId: conversationId, mode: "full", addMemory: "always", verbose: true, diff --git a/packages/tools/test/openai-responses-test.ts b/packages/tools/test/openai-responses-test.ts index 776e75740..e55abefe1 100644 --- a/packages/tools/test/openai-responses-test.ts +++ b/packages/tools/test/openai-responses-test.ts @@ -5,7 +5,9 @@ const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY, }) -const openaiWithSupermemory = withSupermemory(openai, "user_id_life", { +const openaiWithSupermemory = withSupermemory(openai, { + containerTag: "user_id_life", + customId: "test-conversation", verbose: true, mode: "full", addMemory: "always", From 0104a2572c9b34b592be1a1e06539ec302d2370f Mon Sep 17 00:00:00 2001 From: sreedharsreeram <141047751+sreedharsreeram@users.noreply.github.com> Date: Sat, 25 Apr 2026 01:43:14 +0000 Subject: [PATCH 5/6] update open ai python sdk (#879) --- packages/openai-sdk-python/README.md | 75 +++++++++------- .../src/supermemory_openai/middleware.py | 51 ++++++----- .../openai-sdk-python/test_integration.py | 17 ++-- .../tests/test_middleware.py | 85 +++++++++++-------- 4 files changed, 129 insertions(+), 99 deletions(-) diff --git a/packages/openai-sdk-python/README.md b/packages/openai-sdk-python/README.md index cd771a977..be121f9a6 100644 --- a/packages/openai-sdk-python/README.md +++ b/packages/openai-sdk-python/README.md @@ -44,11 +44,12 
@@ async def main(): # Wrap with Supermemory middleware openai_with_memory = with_supermemory( openai, - container_tag="user-123", # Unique identifier for user's memories - options=OpenAIMiddlewareOptions( - mode="full", # "profile", "query", or "full" - verbose=True, # Enable logging - add_memory="always" # Automatically save conversations + OpenAIMiddlewareOptions( + container_tag="user-123", # Required: unique identifier for user's memories + custom_id="chat-123", # Required: groups messages into documents + mode="full", # "profile", "query", or "full" + verbose=True, # Enable logging + add_memory="always" # Automatically save conversations (default) ) ) @@ -122,7 +123,13 @@ from supermemory_openai import with_supermemory # Sync client openai = OpenAI(api_key="your-openai-api-key") -openai_with_memory = with_supermemory(openai, "user-123") +openai_with_memory = with_supermemory( + openai, + OpenAIMiddlewareOptions( + container_tag="user-123", + custom_id="session-456" + ) +) # Works the same way response = openai_with_memory.chat.completions.create( @@ -136,13 +143,21 @@ response = openai_with_memory.chat.completions.create( **Background Task Management**: When `add_memory="always"`, memory storage happens in background tasks. Use context managers or manual cleanup to ensure tasks complete: ```python +from supermemory_openai import with_supermemory, OpenAIMiddlewareOptions + # Async context manager (recommended) -async with with_supermemory(openai, "user-123") as client: +async with with_supermemory( + openai, + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="session-456") +) as client: response = await client.chat.completions.create(...) # Background tasks automatically waited for on exit # Manual cleanup -client = with_supermemory(openai, "user-123") +client = with_supermemory( + openai, + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="session-456") +) response = await client.chat.completions.create(...) await client.wait_for_background_tasks() # Ensure memory is saved ``` @@ -159,8 +174,7 @@ Injects all static and dynamic profile memories into every request. Best for mai ```python openai_with_memory = with_supermemory( openai, - "user-123", - OpenAIMiddlewareOptions(mode="profile") + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="session-456", mode="profile") ) ``` @@ -170,8 +184,7 @@ Only searches for memories relevant to the current user message. 
More efficient ```python openai_with_memory = with_supermemory( openai, - "user-123", - OpenAIMiddlewareOptions(mode="query") + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="session-456", mode="query") ) ``` @@ -181,8 +194,7 @@ Combines both profile and query modes - includes all profile memories plus relev ```python openai_with_memory = with_supermemory( openai, - "user-123", - OpenAIMiddlewareOptions(mode="full") + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="session-456", mode="full") ) ``` @@ -191,11 +203,11 @@ openai_with_memory = with_supermemory( Control when conversations are automatically saved as memories: ```python -# Always save conversations as memories -OpenAIMiddlewareOptions(add_memory="always") +# Always save conversations as memories (default in v2.0.0+) +OpenAIMiddlewareOptions(container_tag="user-123", custom_id="session-456", add_memory="always") -# Never save conversations (default) -OpenAIMiddlewareOptions(add_memory="never") +# Never save conversations +OpenAIMiddlewareOptions(container_tag="user-123", custom_id="session-456", add_memory="never") ``` ### Complete Configuration Example @@ -205,12 +217,12 @@ from supermemory_openai import with_supermemory, OpenAIMiddlewareOptions openai_with_memory = with_supermemory( openai_client, - container_tag="user-123", - options=OpenAIMiddlewareOptions( - conversation_id="chat-session-456", # Group messages into conversations - verbose=True, # Enable detailed logging - mode="full", # Use both profile and query - add_memory="always" # Auto-save conversations + OpenAIMiddlewareOptions( + container_tag="user-123", # Required: unique user/container identifier + custom_id="chat-session-456", # Required: groups messages into documents + verbose=True, # Enable detailed logging + mode="full", # Use both profile and query + add_memory="always" # Auto-save conversations (default) ) ) ``` @@ -291,14 +303,12 @@ Wraps an OpenAI client with automatic memory injection middleware. ```python def with_supermemory( openai_client: Union[OpenAI, AsyncOpenAI], - container_tag: str, - options: Optional[OpenAIMiddlewareOptions] = None + options: OpenAIMiddlewareOptions ) -> Union[OpenAI, AsyncOpenAI] ``` **Parameters:** - `openai_client`: OpenAI or AsyncOpenAI client instance -- `container_tag`: Unique identifier for memory storage (e.g., user ID) - `options`: Configuration options (see `OpenAIMiddlewareOptions`) #### `OpenAIMiddlewareOptions` @@ -308,10 +318,11 @@ Configuration dataclass for middleware behavior. 
```python @dataclass class OpenAIMiddlewareOptions: - conversation_id: Optional[str] = None # Group messages into conversations + container_tag: str # Required: unique identifier for memory storage + custom_id: str # Required: groups messages into documents verbose: bool = False # Enable detailed logging mode: Literal["profile", "query", "full"] = "profile" # Memory injection mode - add_memory: Literal["always", "never"] = "never" # Auto-save behavior + add_memory: Literal["always", "never"] = "always" # Auto-save behavior ``` ### SupermemoryTools @@ -341,6 +352,7 @@ The package provides specific exception types for better error handling: ```python from supermemory_openai import ( with_supermemory, + OpenAIMiddlewareOptions, SupermemoryConfigurationError, SupermemoryAPIError, SupermemoryNetworkError, @@ -349,7 +361,10 @@ from supermemory_openai import ( try: # This will raise SupermemoryConfigurationError if API key is missing - client = with_supermemory(openai_client, "user-123") + client = with_supermemory( + openai_client, + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="session-456") + ) response = await client.chat.completions.create( messages=[{"role": "user", "content": "Hello"}], diff --git a/packages/openai-sdk-python/src/supermemory_openai/middleware.py b/packages/openai-sdk-python/src/supermemory_openai/middleware.py index 12100065c..db289cc89 100644 --- a/packages/openai-sdk-python/src/supermemory_openai/middleware.py +++ b/packages/openai-sdk-python/src/supermemory_openai/middleware.py @@ -1,6 +1,7 @@ """Supermemory middleware for OpenAI clients.""" import asyncio +import inspect import os from dataclasses import dataclass from typing import Any, Literal, Optional, Union, cast @@ -32,10 +33,11 @@ class OpenAIMiddlewareOptions: """Configuration options for OpenAI middleware.""" - conversation_id: Optional[str] = None + container_tag: str # Required: identifies the user/container + custom_id: str # Required: groups messages into the same document verbose: bool = False mode: Literal["profile", "query", "full"] = "profile" - add_memory: Literal["always", "never"] = "never" + add_memory: Literal["always", "never"] = "always" class SupermemoryProfileSearch: @@ -225,11 +227,11 @@ async def add_memory_tool( add_params["custom_id"] = custom_id # Handle both sync and async supermemory clients - try: - response = await client.add(**add_params) - except TypeError: - # If it's not awaitable, call it synchronously - response = client.add(**add_params) + result = client.add(**add_params) + if inspect.isawaitable(result): + response = await result + else: + response = result logger.info( "Memory saved successfully", @@ -262,12 +264,11 @@ class SupermemoryOpenAIWrapper: def __init__( self, openai_client: Union[OpenAI, AsyncOpenAI], - container_tag: str, - options: Optional[OpenAIMiddlewareOptions] = None, + options: OpenAIMiddlewareOptions, ): self._client: Union[OpenAI, AsyncOpenAI] = openai_client - self._container_tag: str = container_tag - self._options: OpenAIMiddlewareOptions = options or OpenAIMiddlewareOptions() + self._container_tag: str = options.container_tag + self._options: OpenAIMiddlewareOptions = options self._logger: Logger = create_logger(self._options.verbose) # Track background tasks to ensure they complete @@ -336,12 +337,12 @@ async def _create_with_memory_async( if user_message and user_message.strip(): content = ( get_conversation_content(messages) - if self._options.conversation_id + if self._options.custom_id else user_message ) custom_id = ( - 
f"conversation:{self._options.conversation_id}" - if self._options.conversation_id + f"conversation:{self._options.custom_id}" + if self._options.custom_id else None ) @@ -399,7 +400,7 @@ def handle_task_exception(task_obj): "Starting memory search", { "container_tag": self._container_tag, - "conversation_id": self._options.conversation_id, + "conversation_id": self._options.custom_id, "mode": self._options.mode, }, ) @@ -430,12 +431,12 @@ def _create_with_memory_sync( if user_message and user_message.strip(): content = ( get_conversation_content(messages) - if self._options.conversation_id + if self._options.custom_id else user_message ) custom_id = ( - f"conversation:{self._options.conversation_id}" - if self._options.conversation_id + f"conversation:{self._options.custom_id}" + if self._options.custom_id else None ) @@ -483,7 +484,7 @@ def _create_with_memory_sync( "Starting memory search", { "container_tag": self._container_tag, - "conversation_id": self._options.conversation_id, + "conversation_id": self._options.custom_id, "mode": self._options.mode, }, ) @@ -617,8 +618,7 @@ def __getattr__(self, name: str) -> Any: def with_supermemory( openai_client: Union[OpenAI, AsyncOpenAI], - container_tag: str, - options: Optional[OpenAIMiddlewareOptions] = None, + options: OpenAIMiddlewareOptions, ) -> Union[OpenAI, AsyncOpenAI]: """ Wraps an OpenAI client with SuperMemory middleware to automatically inject relevant memories @@ -630,8 +630,7 @@ def with_supermemory( Args: openai_client: The OpenAI client to wrap with SuperMemory middleware - container_tag: The container tag/identifier for memory search (e.g., user ID, project ID) - options: Optional configuration options for the middleware + options: Configuration options for the middleware (container_tag and custom_id are required) Returns: An OpenAI client with SuperMemory middleware injected @@ -645,9 +644,9 @@ def with_supermemory( openai = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) openai_with_supermemory = with_supermemory( openai, - "user-123", OpenAIMiddlewareOptions( - conversation_id="conversation-456", + container_tag="user-123", + custom_id="conversation-456", mode="full", add_memory="always" ) @@ -666,6 +665,6 @@ def with_supermemory( ValueError: When SUPERMEMORY_API_KEY environment variable is not set Exception: When supermemory API request fails """ - wrapper = SupermemoryOpenAIWrapper(openai_client, container_tag, options) + wrapper = SupermemoryOpenAIWrapper(openai_client, options) # Return the wrapper, which delegates all attributes to the original client return cast(Union[OpenAI, AsyncOpenAI], wrapper) diff --git a/packages/openai-sdk-python/test_integration.py b/packages/openai-sdk-python/test_integration.py index 9bf5880cf..a3580e7ef 100644 --- a/packages/openai-sdk-python/test_integration.py +++ b/packages/openai-sdk-python/test_integration.py @@ -37,8 +37,9 @@ async def test_async_middleware(): # Wrap with Supermemory middleware openai_with_memory = with_supermemory( openai_client, - container_tag="test-user-123", - options=OpenAIMiddlewareOptions( + OpenAIMiddlewareOptions( + container_tag="test-user-123", + custom_id="test-integration", mode="profile", verbose=True, add_memory="never" # Don't save test messages @@ -87,8 +88,9 @@ def test_sync_middleware(): # Wrap with Supermemory middleware openai_with_memory = with_supermemory( openai_client, - container_tag="test-user-sync-123", - options=OpenAIMiddlewareOptions( + OpenAIMiddlewareOptions( + container_tag="test-user-sync-123", + custom_id="test-integration-sync", 
mode="profile", verbose=True ) @@ -126,7 +128,7 @@ def test_error_handling(): openai_client = OpenAI(api_key="fake-key") # This should raise SupermemoryConfigurationError - with_supermemory(openai_client, "test-user") + with_supermemory(openai_client, OpenAIMiddlewareOptions(container_tag="test-user", custom_id="test-conv")) print("❌ Should have raised SupermemoryConfigurationError") @@ -156,8 +158,9 @@ def test_background_tasks(): # Wrap with memory storage enabled wrapped_client = with_supermemory( openai_client, - container_tag="test-background-tasks", - options=OpenAIMiddlewareOptions( + OpenAIMiddlewareOptions( + container_tag="test-background-tasks", + custom_id="test-background", add_memory="always", verbose=True ) diff --git a/packages/openai-sdk-python/tests/test_middleware.py b/packages/openai-sdk-python/tests/test_middleware.py index a9f73af1b..b1dd80726 100644 --- a/packages/openai-sdk-python/tests/test_middleware.py +++ b/packages/openai-sdk-python/tests/test_middleware.py @@ -103,26 +103,32 @@ class TestMiddlewareInitialization: def test_with_supermemory_basic(self, mock_openai_client): """Test basic middleware initialization.""" with patch.dict(os.environ, {"SUPERMEMORY_API_KEY": "test-key"}): - wrapped_client = with_supermemory(mock_openai_client, "user-123") + options = OpenAIMiddlewareOptions( + container_tag="user-123", + custom_id="test-conv" + ) + wrapped_client = with_supermemory(mock_openai_client, options) assert isinstance(wrapped_client, SupermemoryOpenAIWrapper) assert wrapped_client._container_tag == "user-123" assert wrapped_client._options.mode == "profile" assert wrapped_client._options.verbose is False + assert wrapped_client._options.add_memory == "always" # New default def test_with_supermemory_with_options(self, mock_openai_client): """Test middleware initialization with options.""" options = OpenAIMiddlewareOptions( - conversation_id="conv-456", + container_tag="user-123", + custom_id="conv-456", verbose=True, mode="full", add_memory="always" ) with patch.dict(os.environ, {"SUPERMEMORY_API_KEY": "test-key"}): - wrapped_client = with_supermemory(mock_openai_client, "user-123", options) + wrapped_client = with_supermemory(mock_openai_client, options) - assert wrapped_client._options.conversation_id == "conv-456" + assert wrapped_client._options.custom_id == "conv-456" assert wrapped_client._options.verbose is True assert wrapped_client._options.mode == "full" assert wrapped_client._options.add_memory == "always" @@ -132,15 +138,23 @@ def test_missing_api_key_raises_error(self, mock_openai_client): from supermemory_openai.exceptions import SupermemoryConfigurationError with patch.dict(os.environ, {}, clear=True): + options = OpenAIMiddlewareOptions( + container_tag="user-123", + custom_id="test-conv" + ) with pytest.raises(SupermemoryConfigurationError, match="SUPERMEMORY_API_KEY"): - with_supermemory(mock_openai_client, "user-123") + with_supermemory(mock_openai_client, options) def test_wrapper_delegates_attributes(self, mock_openai_client): """Test that wrapper delegates attributes to wrapped client.""" mock_openai_client.models = Mock() with patch.dict(os.environ, {"SUPERMEMORY_API_KEY": "test-key"}): - wrapped_client = with_supermemory(mock_openai_client, "user-123") + options = OpenAIMiddlewareOptions( + container_tag="user-123", + custom_id="test-conv" + ) + wrapped_client = with_supermemory(mock_openai_client, options) # Should delegate to the original client assert wrapped_client.models is mock_openai_client.models @@ -165,8 +179,7 @@ async def 
test_memory_injection_profile_mode( wrapped_client = with_supermemory( mock_async_openai_client, - "user-123", - OpenAIMiddlewareOptions(mode="profile") + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv", mode="profile") ) messages = [ @@ -207,8 +220,7 @@ async def test_memory_injection_query_mode( wrapped_client = with_supermemory( mock_async_openai_client, - "user-123", - OpenAIMiddlewareOptions(mode="query") + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv", mode="query") ) messages = [ @@ -241,8 +253,7 @@ async def test_memory_injection_full_mode( wrapped_client = with_supermemory( mock_async_openai_client, - "user-123", - OpenAIMiddlewareOptions(mode="full") + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv", mode="full") ) messages = [ @@ -278,7 +289,10 @@ async def test_existing_system_prompt_enhancement( mock_search.return_value.profile = mock_supermemory_response["profile"] mock_search.return_value.search_results = mock_supermemory_response["searchResults"] - wrapped_client = with_supermemory(mock_async_openai_client, "user-123") + wrapped_client = with_supermemory( + mock_async_openai_client, + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv") + ) messages = [ {"role": "system", "content": "You are a helpful assistant."}, @@ -324,8 +338,7 @@ async def test_add_memory_always_mode( wrapped_client = with_supermemory( mock_async_openai_client, - "user-123", - OpenAIMiddlewareOptions(add_memory="always") + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv", add_memory="always") ) messages = [ @@ -359,8 +372,7 @@ async def test_add_memory_never_mode( wrapped_client = with_supermemory( mock_async_openai_client, - "user-123", - OpenAIMiddlewareOptions(add_memory="never") + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv", add_memory="never") ) await wrapped_client.chat.completions.create( @@ -386,7 +398,10 @@ def test_sync_client_compatibility(self, mock_openai_client, mock_openai_respons mock_search.return_value.profile = {"static": [], "dynamic": []} mock_search.return_value.search_results = {"results": []} - wrapped_client = with_supermemory(mock_openai_client, "user-123") + wrapped_client = with_supermemory( + mock_openai_client, + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv") + ) # This should work for sync clients too wrapped_client.chat.completions.create( @@ -410,7 +425,10 @@ async def test_in_async(): mock_search.return_value.profile = {"static": [], "dynamic": []} mock_search.return_value.search_results = {"results": []} - wrapped_client = with_supermemory(mock_openai_client, "user-123") + wrapped_client = with_supermemory( + mock_openai_client, + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv") + ) # This should work even when called from async context result = wrapped_client.chat.completions.create( @@ -441,8 +459,7 @@ def test_sync_client_memory_addition_error_handling(self, mock_openai_client, mo wrapped_client = with_supermemory( mock_openai_client, - "user-123", - OpenAIMiddlewareOptions(add_memory="always") + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv", add_memory="always") ) # Should not raise exception, should continue with main request @@ -470,7 +487,10 @@ async def test_supermemory_api_error_handling( with patch("supermemory_openai.middleware.supermemory_profile_search") as mock_search: mock_search.side_effect = Exception("API Error") - wrapped_client = 
with_supermemory(mock_async_openai_client, "user-123") + wrapped_client = with_supermemory( + mock_async_openai_client, + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv") + ) # Should not raise exception, should fall back gracefully with pytest.raises(Exception): @@ -490,8 +510,7 @@ async def test_no_user_message_handling( with patch.dict(os.environ, {"SUPERMEMORY_API_KEY": "test-key"}): wrapped_client = with_supermemory( mock_async_openai_client, - "user-123", - OpenAIMiddlewareOptions(mode="query") + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv", mode="query") ) messages = [ @@ -529,8 +548,7 @@ async def test_verbose_logging( wrapped_client = with_supermemory( mock_async_openai_client, - "user-123", - OpenAIMiddlewareOptions(verbose=True) + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv", verbose=True) ) await wrapped_client.chat.completions.create( @@ -558,8 +576,7 @@ async def test_silent_logging( wrapped_client = with_supermemory( mock_async_openai_client, - "user-123", - OpenAIMiddlewareOptions(verbose=False) + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv", verbose=False) ) await wrapped_client.chat.completions.create( @@ -597,8 +614,7 @@ async def slow_add_memory(*args, **kwargs): wrapped_client = with_supermemory( mock_async_openai_client, - "user-123", - OpenAIMiddlewareOptions(add_memory="always") + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv", add_memory="always") ) # Make a request that should create a background task @@ -644,8 +660,7 @@ async def slow_add_memory(*args, **kwargs): # Use async context manager async with with_supermemory( mock_async_openai_client, - "user-123", - OpenAIMiddlewareOptions(add_memory="always") + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv", add_memory="always") ) as wrapped_client: await wrapped_client.chat.completions.create( model="gpt-4", @@ -680,8 +695,7 @@ async def hanging_add_memory(*args, **kwargs): wrapped_client = with_supermemory( mock_async_openai_client, - "user-123", - OpenAIMiddlewareOptions(add_memory="always") + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv", add_memory="always") ) await wrapped_client.chat.completions.create( @@ -714,8 +728,7 @@ def test_sync_context_manager_cleanup( # Use sync context manager with with_supermemory( mock_openai_client, - "user-123", - OpenAIMiddlewareOptions(add_memory="always") + OpenAIMiddlewareOptions(container_tag="user-123", custom_id="test-conv", add_memory="always") ) as wrapped_client: wrapped_client.chat.completions.create( model="gpt-4", From 5ecc3349890d3848898b4f6e6c2f9b078e2d53da Mon Sep 17 00:00:00 2001 From: sreedharsreeram <141047751+sreedharsreeram@users.noreply.github.com> Date: Sat, 25 Apr 2026 01:45:54 +0000 Subject: [PATCH 6/6] Voltagent fixes (#882) --- packages/tools/package.json | 2 +- packages/tools/src/voltagent/middleware.ts | 12 +++++++++++- packages/tools/src/voltagent/types.ts | 17 +++++++++++++++-- 3 files changed, 27 insertions(+), 4 deletions(-) diff --git a/packages/tools/package.json b/packages/tools/package.json index cfaf5fd8b..b69ac16f7 100644 --- a/packages/tools/package.json +++ b/packages/tools/package.json @@ -1,7 +1,7 @@ { "name": "@supermemory/tools", "type": "module", - "version": "1.4.6", + "version": "1.4.7", "description": "Memory tools for AI SDK, OpenAI, Voltagent and Mastra with supermemory", "scripts": { "build": "tsdown", diff --git 
a/packages/tools/src/voltagent/middleware.ts b/packages/tools/src/voltagent/middleware.ts
index 3618b2412..bf7717265 100644
--- a/packages/tools/src/voltagent/middleware.ts
+++ b/packages/tools/src/voltagent/middleware.ts
@@ -90,6 +90,7 @@ export const createSupermemoryContext = (
 		metadata,
 		searchMode,
 		entityContext,
+		verbose = false,
 	} = options
 
 	// Runtime validation: customId is required
@@ -99,7 +100,7 @@ export const createSupermemoryContext = (
 		)
 	}
 
-	const logger = createLogger(false) // VoltAgent SDK doesn't use verbose
+	const logger = createLogger(verbose)
 	const normalizedBaseUrl = normalizeBaseUrl(baseUrl)
 
 	const client = new Supermemory({
@@ -243,6 +244,15 @@ export const enhanceMessagesWithMemories = async (
 		ctx.include !== undefined ||
 		ctx.searchMode !== undefined
 
+	// Warn if advanced search params are set but mode is "profile"
+	// Profile mode only fetches static/dynamic user data, not query-based search
+	if (useAdvancedSearch && ctx.mode === "profile") {
+		ctx.logger.warn(
+			"Advanced search parameters (threshold, limit, rerank, rewriteQuery, filters, include, searchMode) " +
+				'are ignored when mode is "profile". Use mode "query" or "full" to enable advanced search.',
+		)
+	}
+
 	let memories: string
 
 	if (useAdvancedSearch && ctx.mode !== "profile") {
diff --git a/packages/tools/src/voltagent/types.ts b/packages/tools/src/voltagent/types.ts
index 9ee9ebc74..cc5350eac 100644
--- a/packages/tools/src/voltagent/types.ts
+++ b/packages/tools/src/voltagent/types.ts
@@ -17,8 +17,7 @@ import type {
 * Configuration options for the Supermemory VoltAgent integration.
 * Extends base options with VoltAgent-specific settings.
 */
-export interface SupermemoryVoltAgent
-	extends Omit<SupermemoryBaseOptions, "customId"> {
+export interface SupermemoryVoltAgent extends SupermemoryBaseOptions {
 	/**
 	 * Custom ID to group messages into a single document.
 	 * Ensures related messages are added to the same document for that conversation.
@@ -29,34 +28,46 @@ export interface SupermemoryVoltAgent
 	 * Threshold / sensitivity for memory selection. 0 is least sensitive (returns
 	 * most memories, more results), 1 is most sensitive (returns fewer memories,
 	 * more accurate results). Default: 0.1
+	 *
+	 * Note: Only effective when mode is "query" or "full". Ignored in "profile" mode.
 	 */
 	threshold?: number
 
 	/**
 	 * Maximum number of memory results to return. Default: 10
+	 *
+	 * Note: Only effective when mode is "query" or "full". Ignored in "profile" mode.
 	 */
 	limit?: number
 
 	/**
 	 * If true, rerank the results based on the query. This helps ensure the most
 	 * relevant results are returned. Default: false
+	 *
+	 * Note: Only effective when mode is "query" or "full". Ignored in "profile" mode.
 	 */
 	rerank?: boolean
 
 	/**
 	 * If true, rewrites the query to make it easier to find memories. This increases
 	 * latency by about 400ms. Default: false
+	 *
+	 * Note: Only effective when mode is "query" or "full". Ignored in "profile" mode.
 	 */
 	rewriteQuery?: boolean
 
 	/**
 	 * Advanced filters to apply to the search using AND/OR logic.
 	 * Example: { OR: [{ metadata: { type: "note" } }, { metadata: { type: "conversation" } }] }
+	 *
+	 * Note: Only effective when mode is "query" or "full". Ignored in "profile" mode.
 	 */
 	filters?: SearchFilters
 
 	/**
 	 * Control what additional data to include in search results
+	 *
+	 * Note: Only effective when mode is "query" or "full". Ignored in "profile" mode. 
*/ include?: IncludeOptions @@ -71,6 +82,8 @@ export interface SupermemoryVoltAgent * - "memories": Search only memory entries (atomic facts) * - "documents": Search only document chunks * - "hybrid": Search both memories AND document chunks (recommended) + * + * Note: Only effective when mode is "query" or "full". Ignored in "profile" mode. */ searchMode?: "memories" | "documents" | "hybrid"
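
---

Taken together, this series converges every integration on a single `withSupermemory(target, options)` config-object signature with a required `containerTag` and `customId`. Below is a minimal sketch of how the VoltAgent options above might be exercised end to end. It assumes the VoltAgent entry point exports the same `withSupermemory(config, options)` shape used in the unit tests earlier in this series; the import path, agent shape, and option values are illustrative, not taken from this diff:

```typescript
import { withSupermemory } from "@supermemory/tools/voltagent" // assumed entry point

// Illustrative agent config; the real shape comes from VoltAgent.
const agentConfig = { id: "support-agent", name: "Support Agent" }

const enhanced = withSupermemory(agentConfig, {
	containerTag: "user-123", // required: identifies the user/container
	customId: "conversation-456", // required: groups messages into one document
	mode: "query", // advanced search params below only apply in "query"/"full"
	threshold: 0.3, // selection sensitivity: 0 = most results, 1 = fewest
	limit: 5, // cap on returned memories (default: 10)
	searchMode: "hybrid", // search memory entries and document chunks
	verbose: true, // now forwarded to createLogger per the middleware fix above
})
```

If `mode` were left at its `"profile"` default, the same call would hit the new `ctx.logger.warn` path and the advanced parameters would be ignored, which is exactly the behavior the `enhanceMessagesWithMemories` hunk enforces.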