diff --git a/apps/docs/docs.json b/apps/docs/docs.json index 0626e5639..0f7b54c03 100644 --- a/apps/docs/docs.json +++ b/apps/docs/docs.json @@ -144,6 +144,28 @@ "pages": ["supermemory-mcp/claude-desktop"] } ] + }, + { + "anchor": "SMFS", + "icon": "database", + "pages": [ + "smfs/overview", + "smfs/install", + "smfs/mount", + "smfs/bash-tool", + "smfs/bash-tool-python", + { + "group": "Providers", + "icon": "cloud", + "pages": [ + "smfs/providers/daytona", + "smfs/providers/e2b", + "smfs/providers/vercel", + "smfs/providers/cloudflare" + ] + }, + "smfs/examples" + ] } ], "tab": "Developer Platform" diff --git a/apps/docs/smfs/bash-tool-python.mdx b/apps/docs/smfs/bash-tool-python.mdx new file mode 100644 index 000000000..610928130 --- /dev/null +++ b/apps/docs/smfs/bash-tool-python.mdx @@ -0,0 +1,252 @@ +--- +title: "Bash Tool (Python)" +sidebarTitle: "Bash Tool (Python)" +description: "supermemory-bash. The SMFS idea wrapped as a single agent tool, for Python agents and serverless runtimes." +icon: "terminal" +--- + +`supermemory-bash` is the SMFS idea wrapped as a single agent tool: `run_bash(command)`. The "filesystem" is your Supermemory container. Runs anywhere Python runs. AWS Lambda, Modal, Fly Machines, Cloud Run, your laptop. No mount, no FUSE, no local disk. + +Reach for the Bash Tool when your agent runs somewhere it can't mount a real filesystem. + +## Install + +```bash +pip install supermemory-bash +``` + +Or with uv: + +```bash +uv add supermemory-bash +``` + +## Quickstart + +```python +import asyncio +import os +from supermemory_bash import create_bash + + +async def main() -> None: + result = await create_bash( + api_key=os.environ["SUPERMEMORY_API_KEY"], + container_tag="user_42", + ) + bash = result.bash + r = await bash.exec("ls /") + print(r.stdout) + + +asyncio.run(main()) +``` + +`create_bash` returns a `CreateBashResult` with: + +- `bash`: a `Shell` instance with `.exec(cmd)` +- `tool_description`: a pre-written tool description ready to hand to the model +- `configure_memory_paths(paths)`: scope which paths get extracted into Supermemory +- `refresh()`: re-prime the path index after external writes + +## Use it as a model tool + +### Anthropic SDK + +Pass `tool_description` straight into Claude's tool definition and run a normal agent loop. Each `tool_use` block calls `bash.exec` and the result goes back as a `tool_result`. 
+ +```python +import asyncio +import os + +import anthropic +from supermemory_bash import create_bash + + +async def run_agent(user_message: str) -> str: + result = await create_bash( + api_key=os.environ["SUPERMEMORY_API_KEY"], + container_tag="user_42", + ) + bash = result.bash + + client = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"]) + tools = [ + { + "name": "bash", + "description": result.tool_description, + "input_schema": { + "type": "object", + "properties": { + "cmd": {"type": "string", "description": "The bash command to run."} + }, + "required": ["cmd"], + }, + } + ] + + messages = [{"role": "user", "content": user_message}] + + for _ in range(10): + response = client.messages.create( + model="claude-sonnet-4-20250514", + max_tokens=4096, + tools=tools, + messages=messages, + ) + + if response.stop_reason == "end_turn": + for block in response.content: + if hasattr(block, "text"): + return block.text + return "" + + messages.append({"role": "assistant", "content": response.content}) + tool_results = [] + for block in response.content: + if block.type == "tool_use": + cmd = block.input.get("cmd", "") + r = await bash.exec(cmd) + output = r.stdout + if r.stderr: + output += f"\n[stderr]: {r.stderr}" + if r.exit_code != 0: + output += f"\n[exit_code]: {r.exit_code}" + tool_results.append( + { + "type": "tool_result", + "tool_use_id": block.id, + "content": output or "(no output)", + } + ) + messages.append({"role": "user", "content": tool_results}) + + return "(max steps reached)" + + +asyncio.run(run_agent("What's in my notes about the Q3 launch?")) +``` + +### OpenAI SDK + +Same idea with OpenAI's function-calling format. Define a single `bash` function, dispatch each `tool_calls` entry to `bash.exec`, and feed the output back as a `tool` message. + +```python +import asyncio +import json +import os + +from openai import OpenAI +from supermemory_bash import create_bash + + +async def run_agent(user_message: str) -> str: + result = await create_bash( + api_key=os.environ["SUPERMEMORY_API_KEY"], + container_tag="user_42", + ) + bash = result.bash + + client = OpenAI() + tools = [ + { + "type": "function", + "function": { + "name": "bash", + "description": result.tool_description, + "parameters": { + "type": "object", + "properties": {"cmd": {"type": "string"}}, + "required": ["cmd"], + }, + }, + } + ] + + messages = [{"role": "user", "content": user_message}] + + for _ in range(10): + response = client.chat.completions.create( + model="gpt-4o", + messages=messages, + tools=tools, + ) + message = response.choices[0].message + + if not message.tool_calls: + return message.content or "" + + messages.append(message.model_dump(exclude_none=True)) + for call in message.tool_calls: + args = json.loads(call.function.arguments or "{}") + r = await bash.exec(args.get("cmd", "")) + output = r.stdout + if r.stderr: + output += f"\n[stderr]: {r.stderr}" + if r.exit_code != 0: + output += f"\n[exit_code]: {r.exit_code}" + messages.append( + { + "role": "tool", + "tool_call_id": call.id, + "content": output or "(no output)", + } + ) + + return "(max steps reached)" + + +asyncio.run(run_agent("List my notes")) +``` + +### Claude Agent SDK + +The [Claude Agent SDK](https://docs.claude.com/en/api/agent-sdk/overview) ships with built-in `Bash`, `Read`, and `Write` tools. 
If your agent runs somewhere SMFS can be mounted (a long-lived process on macOS or Linux), point those built-ins at an SMFS mount and you don't need `supermemory-bash` at all — the agent just sees your container as a directory. + +See [Mount SMFS](/smfs/mount) for setup, or the [provider guides](/smfs/overview#use-smfs-with-your-sandbox-provider) for sandbox-specific instructions. + +## Memory + +The Bash Tool inherits SMFS memory semantics. By default, files named `user.md` or `memory.md` are extracted as memories. Configure additional memory paths after construction: + +```python +result = await create_bash(api_key=api_key, container_tag=container_tag) +bash = result.bash +await result.configure_memory_paths(["/notes/", "/journal.md"]) +``` + +Trailing `/` matches recursively. No slash matches an exact file. Pass `[]` to disable memory generation. + +The container also exposes a virtual `profile.md` at the root: a live digest of everything in the container. Read it once at the start of a session to give the model context without walking every file. + +```python +r = await bash.exec("cat /profile.md") +print(r.stdout) +``` + +## Commands the agent can run + +The Python tool exposes the same command surface as the TypeScript version: standard Unix builtins (`pwd`, `cd`, `ls`, `cat`, `stat`, `mkdir`, `rm`, `mv`, `cp`, `echo`), search and text utilities (`grep`, `find`, `head`, `tail`, `wc`, `sort`, `sed`, `awk`), plus the custom `sgrep [path]` for semantic search across the container. Pipes, redirects, conditionals, loops, and file tests all work. + +See the [TypeScript Bash Tool reference](/smfs/bash-tool#commands-the-agent-can-run) for the full list. + +## Configuration + +| Option | Default | Purpose | +| --- | --- | --- | +| `api_key` | required | Supermemory API key | +| `container_tag` | required | Container to expose as the filesystem | +| `base_url` | `None` | Override the API endpoint | +| `eager_load` | `True` | Warm the path index when the instance starts | +| `eager_content` | `True` | Also warm the content cache during eager load | +| `cwd` | `"/home/user"` | Initial working directory | +| `env` | `None` | Extra environment variables | +| `cache_ttl_ms` | `150_000` | Content cache TTL in ms. `None` = never expires (single-writer). `0` = no cache. | + +The container is what defines the filesystem; setting `cwd` or extra `env` from the host doesn't change the files the agent sees. + +## Limitations + +- `chmod`, `utimes`, and symlinks (`ln -s`, `readlink`) raise `ENOSYS`. +- `/dev/null` as a redirect target isn't supported. Write to `/tmp/discard.log` instead. +- Binary uploads aren't supported. Text is extracted server-side. diff --git a/apps/docs/smfs/bash-tool.mdx b/apps/docs/smfs/bash-tool.mdx new file mode 100644 index 000000000..19aadd600 --- /dev/null +++ b/apps/docs/smfs/bash-tool.mdx @@ -0,0 +1,203 @@ +--- +title: "Bash Tool" +sidebarTitle: "Bash Tool" +description: "@supermemory/bash. The SMFS idea wrapped as a single agent tool, for serverless and edge runtimes." +icon: "terminal" +--- + +`@supermemory/bash` is the SMFS idea wrapped as a single agent tool: `run_bash(command)`. The "filesystem" is your Supermemory container. Runs anywhere TypeScript runs. Cloudflare Workers, AWS Lambda, Vercel, Node, the browser. No mount, no FUSE, no local disk. + +Reach for the Bash Tool when your agent runs somewhere it can't mount a real filesystem. 
+ +## Install + +```bash +npm install @supermemory/bash +``` + +Or with bun: + +```bash +bun add @supermemory/bash +``` + +## Quickstart + +```typescript +import { createBash } from "@supermemory/bash"; + +const { bash, toolDescription } = await createBash({ + apiKey: process.env.SUPERMEMORY_API_KEY!, + containerTag: "user_42", +}); + +const result = await bash.exec("ls /"); +console.log(result.stdout); +``` + +`createBash` returns: + +- `bash`: the instance with `.exec(cmd)` +- `toolDescription`: a pre-written tool description ready to hand to the model +- `configureMemoryPaths(paths)`: scope which paths get extracted into Supermemory +- `refresh()`: re-prime the path index after external writes + +## Use it as a model tool + +### Vercel AI SDK + +```typescript +import { generateText, tool } from "ai"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const { bash, toolDescription } = await createBash({ + apiKey: process.env.SUPERMEMORY_API_KEY!, + containerTag: "user_42", +}); + +const result = await generateText({ + model: openai("gpt-4o"), + tools: { + bash: tool({ + description: toolDescription, + inputSchema: z.object({ cmd: z.string() }), + execute: async ({ cmd }) => bash.exec(cmd), + }), + }, + prompt: "What's in my notes about the Q3 launch?", +}); +``` + +### Anthropic SDK + +```typescript +import Anthropic from "@anthropic-ai/sdk"; + +const client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY! }); +const { bash, toolDescription } = await createBash({ + apiKey: process.env.SUPERMEMORY_API_KEY!, + containerTag: "user_42", +}); + +const response = await client.messages.create({ + model: "claude-opus-4-7", + max_tokens: 4096, + tools: [ + { + name: "bash", + description: toolDescription, + input_schema: { + type: "object", + properties: { cmd: { type: "string" } }, + required: ["cmd"], + }, + }, + ], + messages: [{ role: "user", content: "List my notes" }], +}); +``` + +### OpenAI SDK + +```typescript +import OpenAI from "openai"; + +const client = new OpenAI(); +const { bash, toolDescription } = await createBash({ + apiKey: process.env.SUPERMEMORY_API_KEY!, + containerTag: "user_42", +}); + +const response = await client.chat.completions.create({ + model: "gpt-4o", + messages: [{ role: "user", content: "List my notes" }], + tools: [ + { + type: "function", + function: { + name: "bash", + description: toolDescription, + parameters: { + type: "object", + properties: { cmd: { type: "string" } }, + required: ["cmd"], + }, + }, + }, + ], +}); +``` + +## Memory + +The Bash Tool inherits SMFS memory semantics. By default, files named `user.md` or `memory.md` are extracted as memories. Configure additional memory paths after construction: + +```typescript +const { configureMemoryPaths } = await createBash({ apiKey, containerTag }); + +await configureMemoryPaths(["/notes/", "/journal.md"]); +``` + +Trailing `/` matches recursively. No slash matches an exact file. Pass `[]` to disable memory generation. + +The container also exposes a virtual `profile.md` at the root: a live digest of everything in the container. Read it once at the start of a session to give the model context without walking every file. + +```typescript +const { stdout } = await bash.exec("cat /profile.md"); +``` + +## Commands the agent can run + +Standard Unix surface, plus one custom command. Each does what you'd expect. 
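+To give a feel for how these compose, here's a short hypothetical sequence driven through `.exec` (continuing from the Quickstart above; the container paths are illustrative):
+
+```typescript
+// Orient first: the virtual profile.md is a one-shot digest of the container
+const profile = await bash.exec("cat /profile.md");
+console.log(profile.stdout);
+
+// Semantic search scoped to a directory, then append a quick note
+await bash.exec('sgrep "Q3 launch" /notes/');
+await bash.exec('echo "- follow up on pricing" >> /notes/todo.md');
+```
+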
+ +### Filesystem + +- `pwd`: print working directory +- `cd`: change directory +- `ls`, `ls -la`: list +- `cat`: read a file +- `stat`: file metadata +- `mkdir`: create directory +- `rm`, `rm -rf`: delete +- `rmdir`: delete empty directory +- `mv`: move or rename +- `cp`: copy +- `echo`: write or append (`echo "x" > file`, `echo "x" >> file`) + +### Search and text + +- `grep`: literal substring match against a known path +- `sgrep [path]`: **semantic** search across the container. Trailing `/` on path scopes to a directory. No path searches everything. +- `find`: search by name or properties +- `head`, `tail`: first or last N lines +- `wc`: word, line, byte counts +- `sort`: sort lines +- `sed`, `awk`: text transformation + +### Shell features + +- Pipes (`|`) +- Redirects (`>`, `>>`) +- Conditionals (`&&`, `||`) +- Loops (`for`, `while`) +- File tests (`[ -f ]`, `[ -d ]`, `[ -e ]`) + +## Configuration + +| Option | Default | Purpose | +| --- | --- | --- | +| `apiKey` | required | Supermemory API key | +| `containerTag` | required | Container to expose as the filesystem | +| `baseURL` | SDK default | Override the API endpoint | +| `eagerLoad` | `true` | Warm the path index when the instance starts | +| `eagerContent` | `true` | Also warm the content cache during eager load | +| `cacheTtlMs` | `150_000` | Content cache TTL in ms. `null` = never expires (single-writer). `0` = no cache. | + +Other options (`customCommands`, `logger`, plus `just-bash` pass-throughs like `executionLimits`, `network`, `python`, `javascript`, `cwd`, `env`) exist but aren't part of the supported surface for the SMFS use case. The container is what defines the filesystem; setting `cwd` or extra `env` from the host doesn't change that. + +## Limitations + +- `chmod`, `utimes`, and symlinks (`ln -s`, `readlink`) throw `ENOSYS`. +- `/dev/null` as a redirect target isn't supported. Write to `/tmp/discard.log` instead. +- Binary uploads aren't supported. Text is extracted server-side. diff --git a/apps/docs/smfs/examples.mdx b/apps/docs/smfs/examples.mdx new file mode 100644 index 000000000..59cf60ff7 --- /dev/null +++ b/apps/docs/smfs/examples.mdx @@ -0,0 +1,48 @@ +--- +title: "Examples" +sidebarTitle: "Examples" +description: "Full web-based demo apps you can clone and run." +icon: "code" +--- + +Web-based example apps showing SMFS in realistic use cases. Each one is a +complete project with its own README, dependencies, and a working UI. + + + + Upload documents and chat with an AI that can search and cite them. + Next.js + TypeScript + `@supermemory/bash`. + + + Add notes and chat with an AI that can search your knowledge base. + FastAPI + Python + `supermemory-bash`. + + + Write and run code in an E2B sandbox with persistent AI memory. + Next.js + E2B SDK + SMFS mount. + + + +The Research Assistant and Knowledge Base examples use the +[Bash Tool](/smfs/bash-tool) — the serverless-friendly way to give an agent a +Supermemory-backed filesystem. The Code Sandbox example uses an +[E2B](/smfs/providers/e2b) sandbox with a real SMFS mount. + +## Running an example + +1. Clone the [examples repo](https://github.com/supermemoryai/examples) +2. `cd` into the example you want +3. 
Follow the README — typically: install deps, copy `.env.example` to `.env`, + fill in your API keys, and start the dev server diff --git a/apps/docs/smfs/install.mdx b/apps/docs/smfs/install.mdx new file mode 100644 index 000000000..64d20d0b1 --- /dev/null +++ b/apps/docs/smfs/install.mdx @@ -0,0 +1,68 @@ +--- +title: "Install SMFS" +sidebarTitle: "Install" +description: "Install, log in, mount." +icon: "download" +--- + +## 1. Install the binary + +```bash +curl -fsSL https://smfs.ai/install | bash +``` + +Drops `smfs` into `~/.local/bin`. Works on macOS (arm64, x64) and Linux (arm64, x64). + +If `smfs` isn't on your `PATH` after install, add `~/.local/bin` to your shell profile and reopen the terminal. + +## 2. Log in + +```bash +smfs login +``` + +One-time. Prompts you for your Supermemory API key and stores it in your global credentials. Get a key at [console.supermemory.ai](https://console.supermemory.ai). + +You can also pass the key directly: + +```bash +smfs login --key sm_... +``` + +## 3. Mount a container + +```bash +smfs mount agent_memory +``` + +`agent_memory` is your container tag. SMFS creates a folder named `agent_memory/` in the current directory and mounts the container there. + +That's it. Read it with `ls`, `cat`, `grep`. See [Mount](/smfs/mount) for memory paths, sync modes, flags, and every subcommand. + +To mount somewhere else, pass `--path`: + +```bash +smfs mount agent_memory --path ~/memory +``` + +## Optional: refresh the semantic grep wrapper + +`smfs mount` installs the shell wrapper automatically the first time you mount. If you ever need to force a clean reinstall (after upgrading the binary, for example): + +```bash +smfs init +``` + +It writes the wrapper into your `~/.zshrc` directly. Then reopen your terminal (or `source ~/.zshrc`) so the new shell picks it up. + +Inside any mount, plain `grep` becomes semantic. Outside a mount, your normal `grep` is untouched. Pass any flag (`grep -r`, `grep -i`, anything) and you get the real `grep` back. + +## Refresh the binary + +If anything ever feels broken: + +```bash +smfs install +``` + +Re-copies the binary into `~/.local/bin` and resets permissions. diff --git a/apps/docs/smfs/mount.mdx b/apps/docs/smfs/mount.mdx new file mode 100644 index 000000000..1c31bcb23 --- /dev/null +++ b/apps/docs/smfs/mount.mdx @@ -0,0 +1,289 @@ +--- +title: "Mount" +sidebarTitle: "Mount" +description: "Mount a Supermemory container, generate memories, and sync." +icon: "hard-drive" +--- + +A mount turns a Supermemory container into a directory on your machine. macOS uses NFSv3, Linux uses FUSE. Both are handled for you. + +```bash +smfs mount +``` + +Example: + +```bash +smfs mount agent_memory +``` + +`agent_memory` is the container tag. SMFS creates a folder named `agent_memory/` in the current directory and mounts the container there. The mount runs as a background daemon. A marker file `.smfs` is written at the mount root so other tools (and the semantic `grep` wrapper from `smfs init`) can find the mount. + +To mount at a different path: + +```bash +smfs mount agent_memory --path ~/memory +``` + +## Memory + +This is the part most people miss. SMFS isn't a normal filesystem. It generates **memories** from files at specific paths. Memories are extracted, summarized, and indexed by Supermemory. + +Files outside those paths are still semantically searchable; they're indexed through **SuperRAG** by default. Nothing in the mount is dead weight. 
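+To make the split concrete, here's a hypothetical session using the default memory paths (the defaults and the rules for custom paths are covered next; file names and contents are just examples):
+
+```bash
+cd agent_memory
+echo "Prefers concise answers; ships on Fridays" >> memory.md   # memory path: distilled into memories
+echo "Q3 launch notes: pricing still open" > q3-launch.md       # regular file: indexed through SuperRAG
+grep "launch timing"                                            # plain grep inside the mount is semantic
+```
+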
+ +### Defaults + +By default, files named `user.md` or `memory.md` are treated as memory paths. Drop those files anywhere in your mount and Supermemory generates memories from them automatically. + +### Configure your own memory paths + +Pass `--memory-paths` at mount time to control which files become memories: + +```bash +smfs mount agent_memory --memory-paths "/notes/,/journal.md" +``` + +Rules: + +- Paths are **absolute**, anchored at the mount root. Always start with `/`. +- Trailing `/` matches every file inside that folder, recursively (`/notes/` covers `/notes/foo.md`, `/notes/2026/march.md`, etc.). +- No trailing slash matches one exact file (`/journal.md`). +- Comma-separated. Multiple paths are fine. +- Empty string disables memory generation entirely (`--memory-paths ""`). +- Omit the flag and Supermemory keeps whatever the container tag already has, falling back to `user.md` and `memory.md`. + +### profile.md + +Every mount has a virtual file at the root called `profile.md`. It's auto-generated, read-only, and backed by Supermemory. The model can `cat profile.md` to get a live digest of everything in the container without walking every file. Useful as a first call at the start of a session. + +```bash +cat agent_memory/profile.md +``` + +You can't write to it. As the underlying memories change, Supermemory regenerates it. + +## Sync modes + +Three modes plus a force-sync command. Pick by what your agent actually needs. + +### Bidirectional (default) + +Local reads hit the cache. Local writes queue and push to Supermemory in the background. Remote changes are pulled on a poll. This is what you get if you pass no flags. + +```bash +smfs mount agent_memory +``` + +Use this when more than one writer (you, another agent, the dashboard) might touch the container. + +### No-sync + +Writes still push to Supermemory. Polling for remote changes is off. The agent sees a view that doesn't shift under it mid-task. + +```bash +smfs mount agent_memory --no-sync +``` + +Use this when your agent is the only writer, or when you want predictable reads. + +### Ephemeral + +Cache is in memory only. Nothing persists after unmount. Writes still push. + +```bash +smfs mount agent_memory --ephemeral +``` + +Use this for short-lived sandboxes. CI jobs, throwaway containers, one-shot agent runs. + +### Force a sync now + +```bash +smfs sync +``` + +Pushes pending writes and pulls remote changes immediately. Useful right before tearing down a sandbox. + +## All mount flags + +| Flag | What it does | +| --- | --- | +| `--path ` | Override the default mount path (`.//`). | +| `--memory-paths ` | Scope which files become memories. See [Memory](#memory). | +| `--no-sync` | Stop polling for remote changes. Writes still push. | +| `--clean` | Wipe local cache before mounting. Pulls fresh from the API. | +| `--ephemeral` | In-memory cache. Nothing persists after unmount. | +| `--sync-interval ` | Remote-change poll interval. Default `30`. | +| `--drain-timeout ` | Max time to flush pending writes during unmount. Default `30`. | +| `--foreground` | Run the daemon inline instead of detaching. | +| `--backend ` | Linux only. `fuse` (default) or `nfs`. | +| `--key ` | Pass an API key explicitly. Saved to project credentials. | + +## Multiple agents and multiple containers + +- **Different devices, same container tag**: fully supported. Many agents can mount the same container concurrently from different machines. +- **Same device, same container tag, mounted twice**: not supported. 
Use one mount per container per device. +- **Same device, different containers**: mount as many as you want in parallel. + +## Commands + +Every `smfs` subcommand. Click any one to expand. + + + + Mount a container. Defaults to `.//`; pass `--path` to mount elsewhere. + + ```bash + smfs mount agent_memory + smfs mount agent_memory --path ~/memory + ``` + + See the flags table above for everything you can pass. + + + + Unmount a running mount. Drains pending writes up to `--drain-timeout`, then exits the daemon. Anything not drained resumes on the next mount. + + ```bash + smfs unmount agent_memory + ``` + + Inside the mount, you can omit the tag and let SMFS resolve it from the nearest `.smfs` marker. + + ```bash + smfs unmount + smfs unmount --force + ``` + + + + List every SMFS mount running on this machine. + + ```bash + smfs list + ``` + + + + Show daemon status for a mount: connectivity, queue depth, last sync. Auto-detects the tag via the nearest `.smfs` marker. + + ```bash + smfs status + smfs status agent_memory + smfs status --json + ``` + + + + Tail the daemon log for a mount. Auto-detects the tag via the nearest `.smfs` marker. + + ```bash + smfs logs + smfs logs -f + smfs logs -n 500 + ``` + + + + Force an immediate sync cycle. Push pending writes, pull remote changes. + + ```bash + smfs sync agent_memory + ``` + + Inside the mount, the tag is optional (resolved from the nearest `.smfs` marker). + + ```bash + smfs sync + ``` + + + + Semantic search across a container without being inside the mount. The optional second argument scopes the search to a subpath inside the container. Inside a mount, plain `grep` already does this; `smfs grep` is the explicit form for scripts. + + ```bash + smfs grep "deadline" + smfs grep "deadline" /notes/ + ``` + + + + One-time auth. Prompts for your Supermemory API key and stores it in your global credentials. You can also pass it directly with `--key`. + + ```bash + smfs login + smfs login --key sm_... + ``` + + + + Print the currently-authenticated user, organization, and API endpoint. + + ```bash + smfs whoami + ``` + + + + Remove stored credentials. Active mounts keep running until you `smfs unmount` them. + + ```bash + smfs logout + ``` + + + + Force-installs the shell wrapper that makes plain `grep` semantic inside mounts. Writes directly to `~/.zshrc`. `smfs mount` also installs it automatically the first time, so you only need this to refresh after an upgrade. + + ```bash + smfs init + ``` + + Reopen your terminal (or `source ~/.zshrc`) after running it. + + + + Self-install. Copies the running binary to `~/.local/bin` and resets permissions. Run this if your `smfs` install ever feels broken. + + ```bash + smfs install + ``` + + + +## FAQ + + + + Run `smfs init` to force-install the shell wrapper. It writes directly to `~/.zshrc`. Then reopen your terminal so the new shell picks it up. + + The wrapper only triggers when you're inside a mount (it looks for the `.smfs` marker file at the mount root). Outside a mount, `grep` stays normal. Inside a mount, any flag you pass falls through to the real `grep`. + + + + Not yet. SMFS supports macOS (arm64, x64) and Linux (arm64, x64) for now. Windows isn't on the v0 roadmap. + + On Windows, use the [Bash Tool](/smfs/bash-tool) instead. It runs anywhere TypeScript runs and gives your agent the same `ls`, `cat`, `grep`, `sgrep` surface without needing a mount. 
+ + + + Re-mount with `--clean` to wipe the local cache and pull everything fresh from the API: + + ```bash + smfs unmount agent_memory + smfs mount agent_memory --clean + ``` + + Nothing on the server changes; only the local SQLite cache gets reset. + + + + Yes. Once a container is mounted, anything on that machine can read and write through the mount path. The constraint is one mount per container tag per device. Mount it once, point both agents at the same folder. + + + + Yes, absolutely. Mount the same container tag from each sandbox. Bidirectional sync keeps everything in step as either side writes, so Agent A in sandbox 1 sees Agent B's writes from sandbox 2 within a sync interval. + + To avoid stepping on each other, give each agent its own subdirectory (`/agent_a/`, `/agent_b/`, etc.). They can still read across the whole mount, cross-reference each other's findings, and build on each other's work. The shared container is the point. + + diff --git a/apps/docs/smfs/overview.mdx b/apps/docs/smfs/overview.mdx new file mode 100644 index 000000000..74884a128 --- /dev/null +++ b/apps/docs/smfs/overview.mdx @@ -0,0 +1,71 @@ +--- +title: "SMFS" +sidebarTitle: "Overview" +description: "Memory your agent can grep." +icon: "database" +--- + +**SMFS** mounts your Supermemory container as a real directory. Agents read it with `ls`, `cat`, and `grep`. No SDK to learn, no client to wire up, no embeddings to think about. + +SMFS is open source and free for everyone. + +## Why a filesystem + +Every model already knows how a filesystem works. It can `ls`, `cat`, `grep`, `find`, redirect with `>`, pipe with `|`. You don't have to teach it a new API surface, and the grammar carries across runtimes. + +The catch: a filesystem on its own isn't great for memory. Search means walking the tree. Long files burn through context. The model has to hold the directory structure in its head. None of that scales as memory grows. + +SMFS fixes the catch. The shell is real, but underneath: + +- **Semantic `grep` by default.** One call surfaces what matters across the whole container, ranked by meaning. Pass any flag and you fall through to the real `grep` for exact matches. +- **Memory paths get distilled.** Files marked as memory paths are extracted and indexed by Supermemory. They don't bloat the model's context. +- **Virtual `profile.md`.** A live digest of the container at the mount root. The model can `cat profile.md` for a one-shot summary instead of walking every file. +- **Bidirectional sync** runs in the background. Local reads hit cache; writes push to Supermemory. + +You get filesystem ergonomics without paying the filesystem tax in tokens. + +## Two ways to use SMFS + +Pick by where your agent runs. + + + + For agents and tools with a real filesystem. Claude Code, Cursor, devcontainers, Docker, Codespaces. NFSv3 on macOS, FUSE on Linux. + + + For agents running serverless or at the edge. Cloudflare Workers, AWS Lambda, Vercel, Modal. A virtual bash where the filesystem is your container. Available as [`@supermemory/bash`](/smfs/bash-tool) for TypeScript and [`supermemory-bash`](/smfs/bash-tool-python) for Python. + + + +## Use SMFS with your sandbox provider + +Already using a sandbox or agent platform? Jump straight to the guide for your provider. + + + + Isolated Linux sandboxes with millisecond boot times. Mount SMFS inside or use the bash tool from your orchestrating code. + + + Firecracker microVMs for AI code execution. Install SMFS directly or use a custom template with it pre-installed. 
+ + + The most popular TypeScript agent framework. Add memory as a tool with one function call. + + + Edge-first agents. Use the bash tool in Workers, or mount SMFS in Cloudflare Containers. + + + +## Next steps + + + + One curl, one mount, you're done. + + + Drop SMFS into a TypeScript or Python agent without mounting anything. + + + Full working apps you can clone and run — legal docs, support agents, and more. + + diff --git a/apps/docs/smfs/providers/cloudflare.mdx b/apps/docs/smfs/providers/cloudflare.mdx new file mode 100644 index 000000000..ec0e581ad --- /dev/null +++ b/apps/docs/smfs/providers/cloudflare.mdx @@ -0,0 +1,337 @@ +--- +title: "Cloudflare" +description: "Give your AI agent persistent memory inside a Cloudflare Container using SMFS" +--- + +Mount a Supermemory container inside a +[Cloudflare Container](https://developers.cloudflare.com/containers/) so your +agent can read and write memory using standard filesystem commands. + +## How it works + +There are two ways to wire SMFS into a Cloudflare Container — pick the one that +fits your architecture. + +### Agent inside the container + +The agent process runs inside the container with direct access to the SMFS +mount. The entrypoint sets up the mount and starts the agent. + +```mermaid +graph LR + subgraph Cloudflare Container + Agent["Claude Agent"] -->|"cat, ls, echo"| Mount["/memory
(SMFS mount)"] + end + Mount -->|sync| SM["Supermemory"] +``` + +### Agent outside the container + +The agent runs in a Cloudflare Worker and sends commands to the container over +HTTP. The container exposes a simple exec endpoint. + +```mermaid +graph LR + Agent["Worker
(agent logic)"] -->|"containerFetch('/exec')"| Container + subgraph Container ["Cloudflare Container"] + Mount["/memory
(SMFS mount)"] + end + Mount -->|sync| SM["Supermemory"] +``` + +## Prerequisites + +- A [Supermemory API key](https://supermemory.ai) +- An [Anthropic API key](https://console.anthropic.com) +- A [Cloudflare account](https://dash.cloudflare.com) with Containers enabled (Workers Paid plan) +- [Wrangler CLI](https://developers.cloudflare.com/workers/wrangler/install-and-update/) +- The [`@cloudflare/containers`](https://www.npmjs.com/package/@cloudflare/containers) package: `npm install @cloudflare/containers` + + + Cloudflare Containers are implemented as container-enabled Durable Objects. + You declare a `Container` subclass, bind it as a Durable Object, and + reference its image in the `containers` array. Worker secrets are **not** + automatically visible inside the container — you have to pass them through + `envVars` when starting the container (see below). + + +--- + +## Pattern A: Agent inside the container + +SMFS and the Claude Agent SDK are baked into the container image. On startup, +the entrypoint mounts memory and runs the agent. + +### Dockerfile + +```dockerfile Dockerfile +FROM python:3.12-slim + +RUN apt-get update && apt-get install -y fuse3 curl bash && rm -rf /var/lib/apt/lists/* +RUN echo 'user_allow_other' >> /etc/fuse.conf + +RUN curl -fsSL https://smfs.ai/install | bash -s -- 0.0.1-rc2 +ENV PATH="/root/.local/bin:$PATH" +RUN pip install claude-agent-sdk + +COPY agent.py /app/agent.py +COPY entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] +``` + +### Entrypoint + +```bash entrypoint.sh +#!/bin/bash +set -e + +smfs login --key "$SUPERMEMORY_API_KEY" +smfs mount my_agent --ephemeral --path /memory --foreground & +sleep 3 + +exec python3 /app/agent.py +``` + +### Agent code + +```python agent.py +import asyncio +from claude_agent_sdk import query, ClaudeAgentOptions + +MEMORY = "/memory" + +async def main(): + async for message in query( + prompt=f"You have a persistent memory filesystem at {MEMORY}. " + "Read profile.md to learn about the user, then create " + "session_notes.md summarizing what you found.", + options=ClaudeAgentOptions( + allowed_tools=["Bash", "Read", "Write"], + cwd=MEMORY, + ), + ): + print(message) + +asyncio.run(main()) +``` + +### Worker + +The Worker defines the `Container` subclass and forwards Worker secrets into +the container via `envVars`: + +```typescript worker.ts +import { Container, getContainer } from "@cloudflare/containers"; + +export class MyAgentContainer extends Container { + defaultPort = 8080; + // Forward Worker secrets into the container at start time. + // `this.env` is the Worker env object, populated from wrangler secrets. + envVars = { + SUPERMEMORY_API_KEY: this.env.SUPERMEMORY_API_KEY, + ANTHROPIC_API_KEY: this.env.ANTHROPIC_API_KEY, + }; +} + +export default { + async fetch(request: Request, env: Env) { + // The container runs the agent and exits; this Worker route just kicks + // it off (e.g. on a queue message or scheduled trigger). 
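+    // "agent-singleton" is the Durable Object instance name: the same name always
+    // routes to the same container instance. Use a per-user or per-session id here
+    // if each user should get its own container.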
+ const container = getContainer(env.MY_CONTAINER, "agent-singleton"); + return container.fetch(request); + }, +}; + +interface Env { + MY_CONTAINER: DurableObjectNamespace; + SUPERMEMORY_API_KEY: string; + ANTHROPIC_API_KEY: string; +} +``` + +### Config + +```jsonc wrangler.jsonc +{ + "name": "memory-agent", + "main": "worker.ts", + "compatibility_date": "2025-04-03", + "containers": [ + { + "class_name": "MyAgentContainer", + "image": "./Dockerfile", + "max_instances": 5 + } + ], + "durable_objects": { + "bindings": [ + { "name": "MY_CONTAINER", "class_name": "MyAgentContainer" } + ] + }, + "migrations": [ + { "tag": "v1", "new_sqlite_classes": ["MyAgentContainer"] } + ] +} +``` + +```bash +wrangler secret put SUPERMEMORY_API_KEY +wrangler secret put ANTHROPIC_API_KEY +wrangler deploy +``` + +--- + +## Pattern B: Agent outside the container + +The agent logic lives in a Worker. The container just runs SMFS and exposes an +HTTP endpoint for executing commands against the mount. + + + The `/exec` endpoint below runs arbitrary shell commands inside the + container. **Only call it from your Worker** — never expose it publicly, + and never pass user input straight into `command` without validation. + Cloudflare Containers are addressable only through their Worker by default, + so this is safe as long as you don't add a public route that proxies to + `/exec`. + + +### Container (exec server) + +The Dockerfile and entrypoint are nearly identical to Pattern A — the only +differences are the Python deps (`flask` instead of `claude-agent-sdk`) and +the file we exec at the end. + +```dockerfile Dockerfile +FROM python:3.12-slim + +RUN apt-get update && apt-get install -y fuse3 curl bash && rm -rf /var/lib/apt/lists/* +RUN echo 'user_allow_other' >> /etc/fuse.conf + +RUN curl -fsSL https://smfs.ai/install | bash -s -- 0.0.1-rc2 +ENV PATH="/root/.local/bin:$PATH" +RUN pip install flask gunicorn + +COPY server.py /app/server.py +COPY entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] +``` + +The entrypoint differs from Pattern A only in the final `exec` line — we run +gunicorn against the Flask app instead of `python3 agent.py`: + +```bash entrypoint.sh +#!/bin/bash +set -e + +smfs login --key "$SUPERMEMORY_API_KEY" +smfs mount my_agent --ephemeral --path /memory --foreground & +sleep 3 + +exec gunicorn -b 0.0.0.0:8080 --chdir /app server:app +``` + +```python server.py +import subprocess +from flask import Flask, request, jsonify + +app = Flask(__name__) + +@app.route("/exec", methods=["POST"]) +def exec_command(): + cmd = request.json["command"] + result = subprocess.run( + cmd, shell=True, capture_output=True, text=True, cwd="/memory", timeout=10 + ) + return jsonify(stdout=result.stdout, stderr=result.stderr, code=result.returncode) +``` + + + We use gunicorn instead of `app.run(...)` because Flask's built-in dev + server isn't meant for production traffic. If you'd rather just see it + work, you can replace the `exec` line with + `exec python3 /app/server.py` and add `app.run(host="0.0.0.0", port=8080)` + to `server.py` — but switch back to gunicorn before you ship. 
+ + +### Worker (agent logic) + +```typescript worker.ts +import { Container, getContainer } from "@cloudflare/containers"; + +export class ExecContainer extends Container { + defaultPort = 8080; + envVars = { + SUPERMEMORY_API_KEY: this.env.SUPERMEMORY_API_KEY, + }; +} + +export default { + async fetch(_request: Request, env: Env) { + const container = getContainer(env.MY_CONTAINER, "agent-singleton"); + + const profile = await container + .fetch(new Request("http://container/exec", { + method: "POST", + body: JSON.stringify({ command: "cat /memory/profile.md" }), + headers: { "Content-Type": "application/json" }, + })) + .then((r) => r.json<{ stdout: string }>()); + + return Response.json({ profile: profile.stdout }); + }, +}; + +interface Env { + MY_CONTAINER: DurableObjectNamespace; + SUPERMEMORY_API_KEY: string; +} +``` + +### Config + +```jsonc wrangler.jsonc +{ + "name": "memory-exec", + "main": "worker.ts", + "compatibility_date": "2025-04-03", + "containers": [ + { + "class_name": "ExecContainer", + "image": "./Dockerfile", + "max_instances": 5 + } + ], + "durable_objects": { + "bindings": [ + { "name": "MY_CONTAINER", "class_name": "ExecContainer" } + ] + }, + "migrations": [ + { "tag": "v1", "new_sqlite_classes": ["ExecContainer"] } + ] +} +``` + +```bash +wrangler secret put SUPERMEMORY_API_KEY +wrangler deploy +``` + +--- + +## Tips + +- Use `--ephemeral` for container mounts — keeps the cache in memory only, but + writes still push to Supermemory +- Use `smfs grep 'query'` for semantic search across all files +- Worker secrets aren't automatically visible inside the container. Pass each + one through the `envVars` field on your `Container` subclass (see the Worker + snippets above) +- Use `containerFetch` from within a Container class method (e.g., lifecycle + hooks) to call the container's own HTTP server. From the Worker, use the + stub's `.fetch()` method instead diff --git a/apps/docs/smfs/providers/daytona.mdx b/apps/docs/smfs/providers/daytona.mdx new file mode 100644 index 000000000..7026783bd --- /dev/null +++ b/apps/docs/smfs/providers/daytona.mdx @@ -0,0 +1,282 @@ +--- +title: "Daytona" +description: "Give your AI agent persistent memory inside a Daytona sandbox using SMFS" +--- + +Mount a Supermemory container inside a [Daytona](https://daytona.io) sandbox so +your agent can read and write memory using standard filesystem commands. + + + Daytona sandboxes currently cannot reach `api.supermemory.ai` from their + datacenter IPs. The SMFS binary still installs (we download it directly from + GitHub Releases), the FUSE mount still starts, and `pip install + claude-agent-sdk` still works — but the runtime sync to Supermemory fails. We're + working with Daytona to resolve this. In the meantime, use + [E2B](/smfs/providers/e2b) or a [self-hosted mount](/smfs/providers/vercel). + + +## How it works + +There are two ways to wire SMFS into a Daytona sandbox — pick the one that fits +your architecture. + +### Agent inside the sandbox + +The agent process runs inside the sandbox and accesses the SMFS mount directly. + +```mermaid +graph LR + subgraph Daytona Sandbox + Agent["Claude Agent"] -->|"cat, ls, echo"| Mount["/home/daytona/memory
(SMFS mount)"] + end + Mount -->|sync| SM["Supermemory"] +``` + +### Agent outside the sandbox + +The agent runs in your orchestrating code and executes commands inside the +sandbox remotely. + +```mermaid +graph LR + Agent["Claude Agent
(your server)"] -->|"sandbox.process.exec()"| Sandbox + subgraph Sandbox ["Daytona Sandbox"] + Mount["/home/daytona/memory
(SMFS mount)"] + end + Mount -->|sync| SM["Supermemory"] +``` + +## Prerequisites + +- A [Supermemory API key](https://supermemory.ai) +- A [Daytona API key](https://app.daytona.io) — go to **API Keys** in the sidebar +- An [Anthropic API key](https://console.anthropic.com) + +--- + +## Install SMFS in a Daytona sandbox + +Both patterns below run the same setup snippet inside the sandbox before +mounting. Daytona can't reach `smfs.ai`, so we download the binary directly +from GitHub Releases and add `~/.local/bin` to PATH. + + + + ```python + SMFS_INSTALL = ( + "mkdir -p $HOME/.local/bin && " + "curl -sL https://github.com/supermemoryai/smfs/releases/download/" + "v0.0.1-rc2/smfs-linux-x64 -o $HOME/.local/bin/smfs && " + "chmod +x $HOME/.local/bin/smfs && " + "echo 'user_allow_other' | sudo tee -a /etc/fuse.conf > /dev/null && " + "pip install claude-agent-sdk" + ) + ``` + + + ```typescript + const SMFS_INSTALL = + "mkdir -p $HOME/.local/bin && " + + "curl -sL https://github.com/supermemoryai/smfs/releases/download/" + + "v0.0.1-rc2/smfs-linux-x64 -o $HOME/.local/bin/smfs && " + + "chmod +x $HOME/.local/bin/smfs && " + + "echo 'user_allow_other' | sudo tee -a /etc/fuse.conf > /dev/null && " + + "pip install claude-agent-sdk"; + ``` + + + +--- + +## Pattern A: Agent inside the sandbox + +### Agent code + +```python agent.py +import asyncio +from claude_agent_sdk import query, ClaudeAgentOptions + +MEMORY = "/home/daytona/memory" + +async def main(): + async for message in query( + prompt=f"You have a persistent memory filesystem at {MEMORY}. " + "Read profile.md to learn about the user, then create " + "session_notes.md summarizing what you found.", + options=ClaudeAgentOptions( + allowed_tools=["Bash", "Read", "Write"], + cwd=MEMORY, + ), + ): + print(message) + +asyncio.run(main()) +``` + +### Orchestration + + + + ```python run.py + import os + from pathlib import Path + from daytona_sdk import Daytona, DaytonaConfig + + daytona = Daytona(DaytonaConfig( + api_key=os.environ["DAYTONA_API_KEY"], + )) + sandbox = daytona.create( + env_vars={ + "SUPERMEMORY_API_KEY": os.environ["SUPERMEMORY_API_KEY"], + "ANTHROPIC_API_KEY": os.environ["ANTHROPIC_API_KEY"], + }, + ) + + # See "Install SMFS in a Daytona sandbox" above + sandbox.process.exec(SMFS_INSTALL) + + # Mount memory + sandbox.process.exec("$HOME/.local/bin/smfs login --key $SUPERMEMORY_API_KEY") + sandbox.process.exec( + "bash -c '$HOME/.local/bin/smfs mount my_agent --ephemeral" + " --path /home/daytona/memory --foreground &' && sleep 3" + ) + + # Upload and run the agent + sandbox.fs.upload_file(Path("agent.py").read_bytes(), "agent.py") + result = sandbox.process.exec("python3 agent.py") + print(result.result) + + daytona.delete(sandbox) + ``` + + + ```typescript run.ts + import { Daytona } from "@daytonaio/sdk"; + import { readFileSync } from "fs"; + + const daytona = new Daytona({ + apiKey: process.env.DAYTONA_API_KEY!, + }); + const sandbox = await daytona.create({ + envVars: { + SUPERMEMORY_API_KEY: process.env.SUPERMEMORY_API_KEY!, + ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY!, + }, + }); + + // See "Install SMFS in a Daytona sandbox" above + await sandbox.process.exec(SMFS_INSTALL); + + // Mount memory + await sandbox.process.exec( + "$HOME/.local/bin/smfs login --key $SUPERMEMORY_API_KEY" + ); + await sandbox.process.exec( + "bash -c '$HOME/.local/bin/smfs mount my_agent --ephemeral " + + "--path /home/daytona/memory --foreground &' && sleep 3" + ); + + // Upload and run the agent + await 
sandbox.fs.uploadFile(readFileSync("agent.py"), "agent.py"); + const result = await sandbox.process.exec("python3 agent.py"); + console.log(result.result); + + await daytona.delete(sandbox); + ``` + + + +--- + +## Pattern B: Agent outside the sandbox + +The agent runs in your server process and executes commands inside the sandbox +remotely via `sandbox.process.exec()`. + + + + ```python run.py + import os + from daytona_sdk import Daytona, DaytonaConfig + + daytona = Daytona(DaytonaConfig( + api_key=os.environ["DAYTONA_API_KEY"], + )) + sandbox = daytona.create( + env_vars={ + "SUPERMEMORY_API_KEY": os.environ["SUPERMEMORY_API_KEY"], + }, + ) + + # See "Install SMFS in a Daytona sandbox" above + sandbox.process.exec(SMFS_INSTALL) + sandbox.process.exec("$HOME/.local/bin/smfs login --key $SUPERMEMORY_API_KEY") + sandbox.process.exec( + "bash -c '$HOME/.local/bin/smfs mount my_agent --ephemeral" + " --path /home/daytona/memory --foreground &' && sleep 3" + ) + + # Agent runs here — executes commands in the sandbox + profile = sandbox.process.exec("cat /home/daytona/memory/profile.md") + print("Profile:", profile.result) + + sandbox.process.exec( + "bash -c 'echo \"Session started at $(date)\" > /home/daytona/memory/session_notes.md'" + ) + + files = sandbox.process.exec("ls /home/daytona/memory") + print("Files:", files.result) + + daytona.delete(sandbox) + ``` + + + ```typescript run.ts + import { Daytona } from "@daytonaio/sdk"; + + const daytona = new Daytona({ + apiKey: process.env.DAYTONA_API_KEY!, + }); + const sandbox = await daytona.create({ + envVars: { + SUPERMEMORY_API_KEY: process.env.SUPERMEMORY_API_KEY!, + }, + }); + + // See "Install SMFS in a Daytona sandbox" above + await sandbox.process.exec(SMFS_INSTALL); + await sandbox.process.exec( + "$HOME/.local/bin/smfs login --key $SUPERMEMORY_API_KEY" + ); + await sandbox.process.exec( + "bash -c '$HOME/.local/bin/smfs mount my_agent --ephemeral " + + "--path /home/daytona/memory --foreground &' && sleep 3" + ); + + // Agent runs here — executes commands in the sandbox + const profile = await sandbox.process.exec("cat /home/daytona/memory/profile.md"); + console.log("Profile:", profile.result); + + await sandbox.process.exec( + `bash -c 'echo "Session started at $(date)" > /home/daytona/memory/session_notes.md'` + ); + + const files = await sandbox.process.exec("ls /home/daytona/memory"); + console.log("Files:", files.result); + + await daytona.delete(sandbox); + ``` + + + +--- + +## Tips + +- FUSE is available in Daytona sandboxes but `user_allow_other` needs to be + added to `/etc/fuse.conf` +- We invoke SMFS as `$HOME/.local/bin/smfs` in the examples because Daytona's + default zsh PATH doesn't include `~/.local/bin`. Alternatively, prepend it + once with `export PATH=$HOME/.local/bin:$PATH` +- Use `pip install claude-agent-sdk` to install the agent SDK (PyPI is reachable) diff --git a/apps/docs/smfs/providers/e2b.mdx b/apps/docs/smfs/providers/e2b.mdx new file mode 100644 index 000000000..e014b8e1e --- /dev/null +++ b/apps/docs/smfs/providers/e2b.mdx @@ -0,0 +1,265 @@ +--- +title: "E2B" +description: "Give your AI agent persistent memory inside an E2B sandbox using SMFS" +--- + +Mount a Supermemory container inside an [E2B](https://e2b.dev) sandbox so your +agent can read and write memory using standard filesystem commands. + +## How it works + +There are two ways to wire SMFS into an E2B sandbox — pick the one that fits +your architecture. 
+ +### Agent inside the sandbox + +The agent process runs inside the sandbox and accesses the SMFS mount directly. +Your orchestrating code just boots the sandbox and kicks off the agent. + +```mermaid +graph LR + subgraph E2B Sandbox + Agent["Claude Agent"] -->|"cat, ls, echo"| Mount["/home/user/memory
(SMFS mount)"] + end + Mount -->|sync| SM["Supermemory"] +``` + +### Agent outside the sandbox + +The agent runs in your orchestrating code and executes commands inside the +sandbox remotely. Useful when you want to keep the agent loop in your own +infra. + +```mermaid +graph LR + Agent["Claude Agent
(your server)"] -->|"sbx.commands.run()"| Sandbox + subgraph Sandbox ["E2B Sandbox"] + Mount["/home/user/memory
(SMFS mount)"] + end + Mount -->|sync| SM["Supermemory"] +``` + +## Prerequisites + +- A [Supermemory API key](https://supermemory.ai) +- An [E2B API key](https://e2b.dev) +- An [Anthropic API key](https://console.anthropic.com) + +## 1. Create a custom template + +Bake SMFS and the Claude Agent SDK into a template so sandboxes start ready: + +```dockerfile e2b.Dockerfile +FROM e2b/code-interpreter:latest + +RUN apt-get update && apt-get install -y fuse3 && rm -rf /var/lib/apt/lists/* +RUN echo 'user_allow_other' >> /etc/fuse.conf +RUN curl -fsSL https://smfs.ai/install | bash -s -- 0.0.1-rc2 +ENV PATH="/root/.local/bin:$PATH" +RUN pip install claude-agent-sdk +``` + +```bash +e2b template build -d e2b.Dockerfile +``` + +--- + +## Pattern A: Agent inside the sandbox + +The agent runs inside the sandbox as a Python script. Your orchestrating code +just sets up the mount and starts it. + +### Agent code + +```python agent.py +import asyncio +from claude_agent_sdk import query, ClaudeAgentOptions + +MEMORY = "/home/user/memory" + +async def main(): + async for message in query( + prompt=f"You have a persistent memory filesystem at {MEMORY}. " + "Read profile.md to learn about the user, then create " + "session_notes.md summarizing what you found.", + options=ClaudeAgentOptions( + allowed_tools=["Bash", "Read", "Write"], + cwd=MEMORY, + ), + ): + print(message) + +asyncio.run(main()) +``` + +### Orchestration + + + + ```python run.py + import os + from pathlib import Path + from e2b_code_interpreter import Sandbox + + sbx = Sandbox.create( + template="your-template-id", + timeout=300, + envs={ + "SUPERMEMORY_API_KEY": os.environ["SUPERMEMORY_API_KEY"], + "ANTHROPIC_API_KEY": os.environ["ANTHROPIC_API_KEY"], + }, + ) + + # /dev/fuse exists in E2B but is root-only by default. chmod once per sandbox. + sbx.commands.run("sudo chmod 666 /dev/fuse") + + # Mount memory. We background the foreground daemon so this command returns, + # then sleep briefly to let the FUSE mount come up before the agent reads it. + sbx.commands.run("smfs login --key $SUPERMEMORY_API_KEY") + sbx.commands.run( + "bash -c 'smfs mount my_agent --ephemeral" + " --path /home/user/memory --foreground &' && sleep 3" + ) + + # Upload and run the agent + sbx.files.write("/home/user/agent.py", Path("agent.py").read_text()) + result = sbx.commands.run("python3 /home/user/agent.py", timeout=120) + print(result.stdout) + + sbx.kill() + ``` + + + ```typescript run.ts + import { Sandbox } from "@e2b/code-interpreter"; + import { readFileSync } from "fs"; + + const sbx = await Sandbox.create({ + template: "your-template-id", + timeoutMs: 300_000, + envs: { + SUPERMEMORY_API_KEY: process.env.SUPERMEMORY_API_KEY!, + ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY!, + }, + }); + + // /dev/fuse exists in E2B but is root-only by default. chmod once per sandbox. + await sbx.commands.run("sudo chmod 666 /dev/fuse"); + + // Mount memory. We background the foreground daemon so this command returns, + // then sleep briefly to let the FUSE mount come up before the agent reads it. 
+ await sbx.commands.run("smfs login --key $SUPERMEMORY_API_KEY"); + await sbx.commands.run( + "bash -c 'smfs mount my_agent --ephemeral --path /home/user/memory --foreground &' && sleep 3" + ); + + // Upload and run the agent + await sbx.files.write("/home/user/agent.py", readFileSync("agent.py", "utf-8")); + const result = await sbx.commands.run("python3 /home/user/agent.py", { + timeoutMs: 120_000, + }); + console.log(result.stdout); + + await sbx.kill(); + ``` + + + +--- + +## Pattern B: Agent outside the sandbox + +The agent runs in your server process and executes commands inside the sandbox +remotely via `sbx.commands.run()`. The SMFS mount lives inside the sandbox — +the agent never touches the filesystem directly. + + + The FUSE mount is owned by root inside the sandbox. When writing to it from + outside the agent, wrap the command in `sudo bash -c '…'` so the redirect + runs with the right permissions. You'll see this in the write examples below. + + + + + ```python run.py + import os + from e2b_code_interpreter import Sandbox + + sbx = Sandbox.create( + template="your-template-id", + timeout=300, + envs={ + "SUPERMEMORY_API_KEY": os.environ["SUPERMEMORY_API_KEY"], + }, + ) + + # Set up SMFS inside the sandbox + sbx.commands.run("sudo chmod 666 /dev/fuse") + sbx.commands.run("smfs login --key $SUPERMEMORY_API_KEY") + sbx.commands.run( + "bash -c 'smfs mount my_agent --ephemeral" + " --path /home/user/memory --foreground &' && sleep 3" + ) + + # Agent runs here — executes commands in the sandbox + profile = sbx.commands.run("cat /home/user/memory/profile.md").stdout + print("Profile:", profile) + + sbx.commands.run( + "sudo bash -c 'echo \"Session started at $(date)\" > /home/user/memory/session_notes.md'" + ) + + files = sbx.commands.run("ls /home/user/memory").stdout + print("Files:", files) + + sbx.kill() + ``` + + + ```typescript run.ts + import { Sandbox } from "@e2b/code-interpreter"; + + const sbx = await Sandbox.create({ + template: "your-template-id", + timeoutMs: 300_000, + envs: { + SUPERMEMORY_API_KEY: process.env.SUPERMEMORY_API_KEY!, + }, + }); + + // Set up SMFS inside the sandbox + await sbx.commands.run("sudo chmod 666 /dev/fuse"); + await sbx.commands.run("smfs login --key $SUPERMEMORY_API_KEY"); + await sbx.commands.run( + "bash -c 'smfs mount my_agent --ephemeral --path /home/user/memory --foreground &' && sleep 3" + ); + + // Agent runs here — executes commands in the sandbox + const profile = await sbx.commands.run("cat /home/user/memory/profile.md"); + console.log("Profile:", profile.stdout); + + await sbx.commands.run( + `sudo bash -c 'echo "Session started at $(date)" > /home/user/memory/session_notes.md'` + ); + + const files = await sbx.commands.run("ls /home/user/memory"); + console.log("Files:", files.stdout); + + await sbx.kill(); + ``` + + + +--- + +## Tips + +- Use `--ephemeral` for sandbox mounts — keeps the cache in memory only, but + writes still push to Supermemory +- Use `smfs grep 'query'` for semantic search across all files in the container +- Without a custom template, add the install steps to your run script: + ```python + sbx.commands.run("curl -fsSL https://smfs.ai/install | bash -s -- 0.0.1-rc2", timeout=60) + sbx.commands.run("pip install claude-agent-sdk", timeout=60) + ``` diff --git a/apps/docs/smfs/providers/vercel.mdx b/apps/docs/smfs/providers/vercel.mdx new file mode 100644 index 000000000..d3e13daa4 --- /dev/null +++ b/apps/docs/smfs/providers/vercel.mdx @@ -0,0 +1,169 @@ +--- +title: "Vercel AI SDK" +description: "Give your AI 
agent persistent memory using SMFS with the Vercel AI SDK" +--- + +This guide is about the [Vercel AI SDK](https://ai-sdk.dev) — the TypeScript +agent framework — not Vercel hosting. The choice of pattern depends on where +your code actually runs: + +- **Self-hosted Node** (your own VM, ECS, Fly.io, Railway, a Vercel Sandbox, + etc.): you can mount SMFS as a real filesystem on the server. +- **Vercel Functions / serverless / edge**: there's no long-lived process to + hold a FUSE mount, so use the [Bash Tool](/smfs/bash-tool) + (`@supermemory/bash`) instead. The container becomes the filesystem; no mount + needed. + +## How it works + +### Self-hosted Node (real mount) + +The agent runs as a separate process with direct access to the SMFS mount. +Best when you want full bash, read, and write capabilities and your server is +long-lived. + +```mermaid +graph LR + subgraph Your Server + Agent["Claude Agent"] -->|"cat, ls, echo"| Mount["./memory
(SMFS mount)"] + end + Mount -->|sync| SM["Supermemory"] +``` + +### Vercel Functions / serverless (Bash Tool) + +The agent runs inside `generateText` and accesses memory through `@supermemory/bash`, +which proxies bash commands to your Supermemory container over HTTP. No mount, +no FUSE, no long-lived process required. + +```mermaid +graph LR + subgraph Vercel Function + AI["generateText()"] -->|"bash tool"| Bash["@supermemory/bash"] + end + Bash -->|HTTPS| SM["Supermemory"] +``` + +## Prerequisites + +- A [Supermemory API key](https://supermemory.ai) +- An [Anthropic API key](https://console.anthropic.com) +- For Pattern A only: SMFS installed on your server (`curl -fsSL https://smfs.ai/install | bash`) + +--- + +## Pattern A: Claude Agent SDK on self-hosted Node + +Use this when the Vercel AI SDK is just the orchestrator and your real workload +is a Claude agent running on a long-lived server you control. + +Start the mount once when your server boots — not per-request: + +```bash +smfs login --key $SUPERMEMORY_API_KEY +smfs mount my_agent --path ./memory +``` + + + This won't work on Vercel Functions or any serverless runtime: there's no + process between requests to hold the mount, and FUSE isn't available. For + those targets, jump to Pattern B. + + +Write a standalone agent script. Nothing server-specific — just Python that +reads and writes files: + +```python agent.py +import asyncio +from claude_agent_sdk import query, ClaudeAgentOptions + +MEMORY = "./memory" + +async def main(): + async for message in query( + prompt=f"You have a persistent memory filesystem at {MEMORY}. " + "Read profile.md to learn about the user, then create " + "session_notes.md summarizing what you found.", + options=ClaudeAgentOptions( + allowed_tools=["Bash", "Read", "Write"], + cwd=MEMORY, + ), + ): + print(message) + +asyncio.run(main()) +``` + +```bash +python3 agent.py +``` + +--- + +## Pattern B: Vercel AI SDK + Bash Tool (serverless-friendly) + +`@supermemory/bash` exposes your Supermemory container as a single agent tool +— `run_bash(command)` — without mounting anything. It runs anywhere TypeScript +runs, including Vercel Functions, edge runtimes, and Lambda. + +```bash +npm install @supermemory/bash ai @ai-sdk/anthropic zod +``` + +```typescript api/agent.ts +import { generateText, tool } from "ai"; +import { anthropic } from "@ai-sdk/anthropic"; +import { createBash } from "@supermemory/bash"; +import { z } from "zod"; + +export async function POST(req: Request) { + const { prompt } = await req.json(); + + const { bash, toolDescription } = await createBash({ + apiKey: process.env.SUPERMEMORY_API_KEY!, + containerTag: "my_agent", + }); + + const result = await generateText({ + model: anthropic("claude-sonnet-4-5"), + tools: { + bash: tool({ + description: toolDescription, + inputSchema: z.object({ cmd: z.string() }), + execute: async ({ cmd }) => bash.exec(cmd), + }), + }, + maxSteps: 10, + prompt, + }); + + return Response.json({ text: result.text }); +} +``` + +A few things worth calling out: + +- **`maxSteps: 10`** lets the agent chain multiple bash calls per request + (read `profile.md`, then `cat` a few notes, then write a summary). Bump it + if your agent needs deeper chains; lower it to cap cost per request. +- **`toolDescription`** is a pre-written description of the available bash + surface (semantic `sgrep`, `cat`, `ls`, redirects, etc.). Hand it straight + to the model — don't roll your own. 
+- **No timeout/abort plumbing.** `bash.exec` already runs against the + container over HTTPS, so it returns when the command returns. No event-loop + blocking and no FUSE. + +See the [Bash Tool reference](/smfs/bash-tool) for the full command surface, +memory path configuration, and other framework integrations. + +--- + +## Tips + +- **Pattern A**: mount SMFS once when your server starts, not per-request. + Use `--ephemeral` if you don't need a local cache on the server. +- **Pattern B**: configure memory paths once at startup with + `configureMemoryPaths(["/notes/", "/journal.md"])` to control which files + get distilled into Supermemory memories. +- Both: use `smfs grep 'query'` (Pattern A) or `sgrep 'query'` inside the + bash tool (Pattern B) for semantic search across all files. diff --git a/apps/web/app/(auth)/login/new/page.tsx b/apps/web/app/(auth)/login/new/page.tsx index e3d2a1c5c..421d0f06c 100644 --- a/apps/web/app/(auth)/login/new/page.tsx +++ b/apps/web/app/(auth)/login/new/page.tsx @@ -204,18 +204,12 @@ export default function LoginPage() { email_domain: email.split("@")[1] || "unknown", }) - try { - await signIn.magicLink({ - callbackURL: getCallbackURL(), - email, - }) - setSubmittedEmail(email) - setPendingLoginMethod("magic_link") - // Track successful magic link send - posthog.capture("login_magic_link_sent", { - email_domain: email.split("@")[1] || "unknown", - }) - } catch (error) { + const { error } = await signIn.magicLink({ + callbackURL: getCallbackURL(), + email, + }) + + if (error) { console.error(error) // Track login failure @@ -232,6 +226,12 @@ export default function LoginPage() { return } + setSubmittedEmail(email) + setPendingLoginMethod("magic_link") + posthog.capture("login_magic_link_sent", { + email_domain: email.split("@")[1] || "unknown", + }) + setIsLoading(false) setIsLoadingEmail(false) } @@ -355,6 +355,9 @@ export default function LoginPage() { callbackURL: getCallbackURL(), provider: "google", }) + .catch((err: unknown) => { + setError(getErrorMessage(err)) + }) .finally(() => { setIsLoading(false) }) @@ -419,6 +422,9 @@ export default function LoginPage() { callbackURL: getCallbackURL(), provider: "github", }) + .catch((err: unknown) => { + setError(getErrorMessage(err)) + }) .finally(() => { setIsLoading(false) }) diff --git a/packages/lib/auth.ts b/packages/lib/auth.ts index 4369bef1b..081c10535 100644 --- a/packages/lib/auth.ts +++ b/packages/lib/auth.ts @@ -13,7 +13,6 @@ export const authClient = createAuthClient({ baseURL: process.env.NEXT_PUBLIC_BACKEND_URL ?? "https://api.supermemory.ai", fetchOptions: { credentials: "include", - throw: true, }, plugins: [ usernameClient(),