Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions apps/docs/integrations/ai-sdk.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,18 @@ const model = withSupermemory(openai("gpt-4"), "user-123", {
// Console output shows memory retrieval details
```

### Handling Supermemory errors (optional: continue without memories)

If the Supermemory API returns an error (or is unreachable), memory retrieval fails before the LLM runs. By default that error **propagates** (fails the call).

To continue the LLM request **without** injected memories instead, opt in with `skipMemoryOnError: true`. Use `verbose: true` if you want console output when that happens.

```typescript
const model = withSupermemory(openai("gpt-4"), "user-123", {
skipMemoryOnError: true
})
```

---

## Memory Tools
Expand Down
2 changes: 1 addition & 1 deletion packages/tools/package.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"name": "@supermemory/tools",
"type": "module",
"version": "1.4.4",
"version": "1.4.5",
"description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
"scripts": {
"build": "tsdown",
Expand Down
73 changes: 62 additions & 11 deletions packages/tools/src/vercel/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,12 @@ interface WrapVercelLanguageModelOptions {
* ```
*/
promptTemplate?: PromptTemplate
/**
* When Supermemory memory retrieval / injection fails:
* - `false` (default): propagate the error.
* - `true`: log and call the base model with the original prompt (no memories).
*/
skipMemoryOnError?: boolean
}

/**
Expand All @@ -72,6 +78,7 @@ interface WrapVercelLanguageModelOptions {
 * @param options.addMemory - Optional mode for adding memories: "always", "never" (default: "never")
* @param options.apiKey - Optional Supermemory API key to use instead of the environment variable
* @param options.baseUrl - Optional base URL for the Supermemory API (default: "https://api.supermemory.ai")
* @param options.skipMemoryOnError - When memory retrieval fails: `false` (default) throws; `true` continues without injected memories
*
* @returns A wrapped language model that automatically includes relevant memories in prompts
*
Expand All @@ -93,7 +100,7 @@ interface WrapVercelLanguageModelOptions {
* ```
*
* @throws {Error} When neither `options.apiKey` nor `process.env.SUPERMEMORY_API_KEY` are set
* @throws {Error} When supermemory API request fails
* @throws {Error} When supermemory memory retrieval fails unless `skipMemoryOnError` is `true`
*/
const wrapVercelLanguageModel = <T extends LanguageModel>(
model: T,
Expand All @@ -119,19 +126,42 @@ const wrapVercelLanguageModel = <T extends LanguageModel>(
promptTemplate: options?.promptTemplate,
})

const skipMemoryOnError = options?.skipMemoryOnError ?? false

// Proxy keeps prototype/getter fields (e.g. provider, modelId) that `{ ...model }` drops.
return new Proxy(model, {
get(target, prop, receiver) {
if (prop === "doGenerate") {
return async (params: LanguageModelCallOptions) => {
let modelParams: LanguageModelCallOptions = params
try {
const transformedParams = await transformParamsWithMemory(
params,
ctx,
)
modelParams = await transformParamsWithMemory(params, ctx)
} catch (memoryError) {
if (skipMemoryOnError) {
ctx.logger.warn(
"Supermemory retrieval failed; continuing without injected memories",
{
error:
memoryError instanceof Error
? memoryError.message
: "Unknown error",
},
)
modelParams = params
} else {
ctx.logger.error("Error during memory retrieval for generation", {
error:
memoryError instanceof Error
? memoryError.message
: "Unknown error",
})
throw memoryError
}
}

try {
// biome-ignore lint/suspicious/noExplicitAny: Union type compatibility between V2 and V3
const result = await target.doGenerate(transformedParams as any)
const result = await target.doGenerate(modelParams as any)

const userMessage = getLastUserMessage(params)
if (
Expand Down Expand Up @@ -168,15 +198,36 @@ const wrapVercelLanguageModel = <T extends LanguageModel>(
return async (params: LanguageModelCallOptions) => {
let generatedText = ""

let modelParams: LanguageModelCallOptions = params
try {
const transformedParams = await transformParamsWithMemory(
params,
ctx,
)
modelParams = await transformParamsWithMemory(params, ctx)
} catch (memoryError) {
if (skipMemoryOnError) {
ctx.logger.warn(
"Supermemory retrieval failed; continuing without injected memories",
{
error:
memoryError instanceof Error
? memoryError.message
: "Unknown error",
},
)
modelParams = params
} else {
ctx.logger.error("Error during memory retrieval for stream", {
error:
memoryError instanceof Error
? memoryError.message
: "Unknown error",
})
throw memoryError
}
}

try {
const { stream, ...rest } = await target.doStream(
// biome-ignore lint/suspicious/noExplicitAny: Union type compatibility between V2 and V3
transformedParams as any,
modelParams as any,
)

const transformStream = new TransformStream<
Expand Down
Loading