diff --git a/.github/README/banner.png b/.github/README/banner.png
index 087ab23..9b46de6 100644
Binary files a/.github/README/banner.png and b/.github/README/banner.png differ
diff --git a/.gitignore b/.gitignore
index 28d3210..8582c41 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,6 +7,7 @@ pip
.local
container.claude.json
+extra-tools.txt
!.claude/.gitkeep
!.codex/.gitkeep
diff --git a/AGENTS.md b/AGENTS.md
index d364315..921151f 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -1,6 +1,6 @@
# Code Container (`container`)
-Repo: https://github.com/kevinMEH/code-container
+Repo: https://github.com/drmikecrowe/code-container
This file contains instructions for AI coding assistants on how to setup and customize `container` for users.
@@ -15,17 +15,13 @@ Pre-setup:
- Read `./container.sh`
Setup: Do one step at a time. Do not overload the user with instructions.
-1. Ask user to do this step: Install `container` as global command:
+1. Ask user to run the installer:
```bash
- ln -s "$(pwd)/container.sh" /usr/local/bin/container
+ curl -fsSL https://raw.githubusercontent.com/drmikecrowe/code-container/main/install.sh | bash
```
- Do not ask user to run with `sudo`. If insufficient permissions, find an alternative location on user's `$PATH` to link to.
-2. Do for user: Copy harness configs:
- ```bash
- ./copy-configs.sh
- ```
-3. Do for user: Provide a list of included packages in `Dockerfile`. Then, ask user if they would like to add more packages into container environment. If yes, see `Add Packages/Dependencies` section below.
-4. Do for user: Build Docker image:
+ This clones to `~/.local/share/code-container` and symlinks `container` onto their PATH. The installer is fully verbose and handles everything.
+2. Do for user: Provide a list of included packages in `Dockerfile`. Then, ask user if they would like to add more packages into container environment. If yes, see `Add Packages/Dependencies` section below.
+3. Do for user: Build Docker image:
```bash
container --build
```
diff --git a/Dockerfile b/Dockerfile
index d2916ce..416615e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -22,51 +22,65 @@ RUN apt-get update && apt-get install -y \
zlib1g-dev \
libffi-dev \
vim \
- tree
+ tree \
+ gnupg \
+ iptables
-# Install NVM (Node Version Manager) and Node.js
-ENV NVM_DIR=/root/.nvm
-ENV NODE_VERSION=22
-RUN curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash \
- && . "$NVM_DIR/nvm.sh" \
- && nvm install ${NODE_VERSION} \
- && nvm use ${NODE_VERSION} \
- && nvm alias default ${NODE_VERSION} \
- && ln -sf "$NVM_DIR/versions/node/$(nvm current)/bin/"* /usr/local/bin/
+# Install 1Password CLI and desktop app (for SSH signing with op-ssh-sign)
+RUN curl -sS https://downloads.1password.com/linux/keys/1password.asc | \
+ gpg --dearmor --output /usr/share/keyrings/1password-archive-keyring.gpg && \
+ echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/1password-archive-keyring.gpg] https://downloads.1password.com/linux/debian/$(dpkg --print-architecture) stable main" | \
+ tee /etc/apt/sources.list.d/1password.list && \
+ apt-get update && apt-get install -y 1password 1password-cli && \
+ rm -rf /var/lib/apt/lists/*
-RUN apt-get update \
- && apt-get install -y \
- python3 \
- python3-dev \
- python3-venv \
- python3-pip
+# Accept build-time username (defaults to ubuntu)
+ARG USERNAME=ubuntu
-# Create python symlink pointing to python3
-RUN ln -sf /usr/bin/python3 /usr/bin/python
+# Rename ubuntu user and move home to /container/$USERNAME
+RUN mkdir -p /container && \
+ usermod -l ${USERNAME} ubuntu && \
+ groupmod -n ${USERNAME} ubuntu && \
+ usermod -d /container/${USERNAME} -m ${USERNAME}
-# Install Claude Code globally via official installer
-RUN curl -fsSL https://claude.ai/install.sh | bash
-RUN echo 'export PATH="$HOME/.local/bin:$PATH"' >> ~/.bashrc
+USER ${USERNAME}
+WORKDIR /container/${USERNAME}
-# Install Opencode
-RUN npm install -g opencode-ai
+# Install mise (modern runtime manager)
+RUN curl -fsSL https://mise.run | bash
+ENV PATH="/container/${USERNAME}/.local/share/mise/shims:/container/${USERNAME}/.local/bin:${PATH}"
-# Install OpenAI Codex CLI
-RUN npm install -g @openai/codex
+# Configure mise tools
+RUN mise settings set experimental true && \
+ mise use -g \
+ node@22 \
+ pnpm@latest \
+ python@latest \
+ fd \
+ ripgrep \
+ "github:steveyegge/beads@latest" \
+ "github:steveyegge/gastown@latest" \
+ npm:opencode-ai \
+ npm:@openai/codex \
+ npm:@google/gemini-cli && \
+ mise install && \
+ mise trust ~/.config/mise/config.toml
-# Install Gemini CLI
-RUN npm install -g @google/gemini-cli
+# Install extra user-specified tools (edit extra-tools.txt to add more)
+COPY extra-tools.txt ./extra-tools.txt
+RUN grep -v '^\s*#' extra-tools.txt | grep -v '^\s*$' | awk '{print $1}' | \
+ xargs -r mise use -g && mise install
-# Set working directory to root home
-WORKDIR /root
+# Install Claude Code globally via official installer
+RUN curl -fsSL https://claude.ai/install.sh | bash
# Configure bash prompt to show container name
-RUN echo 'PS1="\[\033[01;32m\][code-container]\[\033[00m\] \[\033[01;34m\]\w\[\033[00m\]\$ "' >> /root/.bashrc
+RUN echo 'PS1="\[\033[01;32m\][code-container]\[\033[00m\] \[\033[01;34m\]\w\[\033[00m\]\$ "' >> /container/${USERNAME}/.bashrc
-# Source NVM in bashrc for interactive shells
-RUN echo 'export NVM_DIR="$HOME/.nvm"' >> /root/.bashrc \
- && echo '[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"' >> /root/.bashrc \
- && echo '[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"' >> /root/.bashrc
+# Source mise in bashrc for interactive shells
+RUN echo 'eval "$(mise activate bash)"' >> /container/${USERNAME}/.bashrc && \
+ echo 'mise trust -a 2>/dev/null' >> /container/${USERNAME}/.bashrc && \
+ echo 'mise up 2>/dev/null' >> /container/${USERNAME}/.bashrc
# Default command: bash shell
CMD ["/bin/bash"]
diff --git a/README.md b/README.md
index 6f5efad..aeae6bf 100644
--- a/README.md
+++ b/README.md
@@ -2,118 +2,246 @@
-#### Code Container: Isolated Docker environment for your autonomous coding harness.
+#### Code Container: Isolated container environment for autonomous coding harnesses (Claude Code, OpenCode, Codex, Gemini)
+
+You can read my [announcement here](https://mikesshinyobjects.tech/posts/2026/2026-03-20-code-container-isolating-ai-harnesses/)
+
+> Forked from [kevinMEH/code-container](https://github.com/kevinMEH/code-container) and extended significantly for rootless Podman, hardware authentication (YubiKey, 1Password), seamless Claude Code auth, and alternative AI providers.
+
+> [!WARNING]
+> **Work in progress** — this project is still evolving rapidly and the field of agentic AI security is very young. Use at your own risk.
+>
+> **Docker users:** the egress firewall and related network changes have only been tested with Podman. Behaviour on Docker may differ.
+
+## Which Container Solution Is Right For You?
+
+Four projects solve adjacent problems — pick the one that matches your threat model and workflow:
+
+| | This project | [NVIDIA OpenShell](https://github.com/NVIDIA/OpenShell) | [Anthropic devcontainer](https://github.com/anthropics/anthropic-quickstarts/tree/main/computer-use-demo) | [Trail of Bits](https://github.com/trailofbits/claude-code-devcontainer) |
+| -------------------- | ---------------------------------------------------- | ------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------ |
+| **Primary use case** | Power-user daily driver across multiple AI harnesses | Enterprise sandboxing with policy enforcement | VS Code team dev environments | Security auditing of untrusted code |
+| **Auth model** | Seamless — host credentials shared into container | Credential providers inject keys; never exposed in sandbox | Per-container setup | Fully isolated |
+| **Threat model** | Contain the AI, not the repo | Full defense-in-depth (filesystem, network, process, inference) | Consistent team environments | Malicious repos / adversarial input |
+| **Runtime** | Podman (rootless) or Docker | K3s (Kubernetes) inside Docker | Docker / Dev Containers spec | Docker |
+| **AI harnesses** | Claude, OpenCode, Codex, Gemini | Claude, OpenCode, Codex, Copilot | Claude | Claude |
+
+**Use this project** if you want YOLO-mode AI assistance on your own trusted code without the friction of re-authentication or tool switching every session.
+
+**Use [NVIDIA OpenShell](https://github.com/NVIDIA/OpenShell)** if you need enterprise-grade sandboxing with declarative security policies, a privacy-aware LLM proxy, and Kubernetes orchestration for multi-agent environments.
+
+**Use [Trail of Bits' devcontainer](https://github.com/trailofbits/claude-code-devcontainer)** if you're doing security audits or reviewing untrusted repos — their threat model explicitly accounts for malicious code trying to escape the container.
+
+**Use Anthropic's official devcontainer** if you're on a team that wants a standardised, VS Code-integrated development environment with Claude Code.
+
+## What's Different From Upstream
+
+The original project runs containers as root via Docker and uses NVM for Node.js. This fork adds:
+
+- **Podman (rootless) support** — prefers Podman, falls back to Docker; uses `--userns=keep-id` so file ownership works correctly without running as root
+- **Host username in container** — the container user matches your host username (build-time `ARG`), with home at `/container/$USER` to distinguish container sessions from host sessions
+- **Seamless Claude Code auth** — mounts `/etc/machine-id`, `~/.claude/`, and `~/.claude.json` so Claude Code sees the same machine identity and credentials as the host; no re-authentication needed
+- **Hardware auth passthrough** — 1Password SSH agent socket, GPG agent socket (for YubiKey SSH), GPG config, and YubiKey USB device passthrough
+- **mise instead of NVM** — manages Node, Python, pnpm, and all CLI tools from a single config; core tools include opencode, codex, gemini-cli, beads, gastown, fd, ripgrep; additional tools selected via `extra-tools.txt`
+- **`--claude` / `--zai` flags** — launch directly into Claude Code (YOLO mode) or Claude with a Z.AI/GLM endpoint
+- **Non-blocking exit** — container stop runs in the background so your terminal returns immediately
+- **Egress firewall** — iptables whitelist blocks all outbound traffic except approved endpoints (Anthropic, GitHub, npm, pip, mise, Z.AI); applied at every session start via `--cap-add NET_ADMIN`; `--no-firewall` to opt out
+- **XDG-aware git config** — checks `~/.config/git` before `~/.gitconfig`
## Quickstart
### Prerequisites
-- **Docker** — [Docker Desktop](https://www.docker.com/products/docker-desktop/) or Docker Engine
-- **A POSIX-Compatible System** — Linux, macOS, WSL
+- **Podman** (preferred) or **Docker**
+- **Linux** — tested on Manjaro; should work on any systemd distro. macOS/WSL untested.
+
+### Install
+
+One command — clones the repo and puts `container` on your PATH:
+
+```bash
+curl -fsSL https://raw.githubusercontent.com/drmikecrowe/code-container/main/install.sh | bash
+```
+
+The installer is fully verbose and shows every step. It:
-### Instructions
+1. Clones to `~/.local/share/code-container` (or pulls latest if already installed)
+2. Symlinks `container.sh` as `container` into `~/.local/bin` (if on PATH) or `/usr/local/bin` (via sudo)
> [!Tip]
-> Don't want to setup manually? Ask your harness (OpenCode, Codex, CC) to setup for you.
+> Don't want to install manually? Ask your AI harness to set up for you:
+>
> ```
> Help me setup `container`
> ```
-1. **Install as Global Command**: Install the `container` command in a PATH-tracked folder:
- ```bash
- ln -s "$(pwd)/container.sh" /usr/local/bin/container
- ```
-
-2. **Copy Configurations**: Copy harness configs into this repo:
- ```bash
- ./copy-configs.sh
- ```
- Or, if copying manually:
- ```bash
- cp -R ~/.config/opencode/ ./.opencode/ # OpenCode
- cp -R ~/.codex/ ./.codex/ # Codex
- cp -R ~/.claude/ ./.claude/ && cp ~/.claude.json container.claude.json # Claude Code
- ```
-
-3. Build Docker Image
- ```bash
- container --build # Run once, or when rebuilding
- ```
+### Uninstall
+
+```bash
+curl -fsSL https://raw.githubusercontent.com/drmikecrowe/code-container/main/uninstall.sh | bash
+```
+
+Removes the symlink and cloned repo. Optionally removes all `code-*` containers and the `code:latest` image (prompts first).
+
+### Build
+
+```bash
+container --build
+```
+
+The image is built with your host username baked in (`--build-arg USERNAME=$USER`). Rebuild if your username changes or you update the Dockerfile.
+
+**Includes**: Ubuntu 24.04, Node 22, Python 3, pnpm, Claude Code, OpenCode, Codex CLI, Gemini CLI, ripgrep, fd, beads, gastown.
## Usage
Navigate to any project and run `container` to mount project and enter container.
+
```bash
-cd /path/to/your/project
-container # Enter container
+cd /path/to/project
+container # Enter container shell
+container --claude # Enter directly into Claude Code (YOLO mode)
+container --zai # Enter Claude with Z.AI/GLM models
```
-Inside the container: Start your harness and develop like normal.
+Inside the container:
+
```bash
+claude # Claude Code (already authenticated)
opencode # Start OpenCode
+codex # Start OpenAI Codex
npm install # Persists per container
-# ...
+pip install # Persists per container
+exit # Stops container if last session
```
-Container state is saved. Next invocation resumes where you left off. AI conversations and settings persist across all projects.
+Session state is saved. Resuming a container picks up exactly where you left off.
-### Container Isolation
+### Common Commands
-Destructive actions are localized inside containers. You can let your harness run with full permissions.
+```bash
+container # Enter container (current directory)
+container /path/to/project # Enter container for a specific project
+container --build # Rebuild image (e.g. after Dockerfile changes)
+container --list # List all containers
+container --stop # Stop current project's container
+container --remove # Remove current project's container
+container --clean # Remove all stopped containers
+```
-To configure your harness to run without permissions, see [Permissions.md](Permissions.md) for instructions.
+## Z.AI / GLM Models
-### Common Commands
+Create `~/.zai.json` on your host:
-```bash
-container # Enter the container
-container --list # List all containers
-container --stop # Stop current project's container
-container --remove # Remove current project's container
-container --build # Rebuild Docker image
+```json
+{
+ "apiUrl": "https://your-endpoint",
+ "apiKey": "your-key",
+ "haikuModel": "glm-4.5-air",
+ "sonnetModel": "glm-5.0",
+ "opusModel": "glm-5.0"
+}
```
+Then: `container --zai`
+
### Customization
-> [!Tip]
-> Don't want to customize manually? Ask your harness to customize for you.
-> ```
-> Add the following packages to the container environment: ...
-> Add the following mount points to the container environment: ...
-> ```
+**Add mise-managed tools** — on first build you'll be prompted to copy `extra-tools.default.txt` as your personal `extra-tools.txt`. Edit it to select which tools to install:
+
+```
+# Modern CLI replacements
+bat # cat replacement
+eza # ls replacement
+sd # sed replacement
+
+# Git tools
+lazygit
+delta
+
+# etc — one tool per line, inline comments supported
+```
+
+`extra-tools.txt` is gitignored so your selections stay local. `extra-tools.default.txt` is the committed template listing all tools known to work with mise — treat it as a menu. Browse additional options with `mise registry`. Rebuild required after changes.
+
+**Add system packages** — edit `Dockerfile` and rebuild:
-**Add tools/packages** — Edit `Dockerfile` and rebuild:
```dockerfile
-RUN apt-get update && apt-get install -y postgresql-client redis-tools
+RUN apt-get update && apt-get install -y postgresql-client
```
-**Add volumes**: Edit the `docker run` command in `container.sh`:
+**Add mount points** — edit `start_new_container()` in `container.sh`:
+
```bash
--v "$SCRIPT_DIR/local/path:/root/target"
+-v "$HOME/.config/something:/container/$USER/.config/something:ro"
```
+No rebuild needed for mount changes; just remove and relaunch the container.
+
### Persistence
- **Per-Container**: Packages, file changes, databases, shell history
-- **Shared Across Projects**: Harness config, conversation history, npm/pip caches
-- **Read-only from Host**: Git config, SSH keys
+- **Shared Across Projects**: Claude Code config/credentials/history, npm/pip caches
+- **Read-only from Host**: Git config, SSH keys, GPG keys
### Simultaneous Work
You and your harness can work on the same project simultaneously.
- **Safe**: Reading files, editing files, most development operations
-
- **Avoid**: Simultaneous Git operations from both sides, installing conflicting `node_modules`
-
- **Recommended Workflow**: Let your harness run autonomously in the container while you work; review changes and commit.
## Security
-- SSH keys and Git config mounted read-only
-- Project isolation prevents cross-contamination across containers
-- Host filesystem protected (access limited to mounted directories)
+- Containers run rootless (`--userns=keep-id`) — no host root access
+- SSH keys and git config mounted read-only
+- Project isolation prevents cross-contamination
+- Host filesystem access limited to explicitly mounted directories
+
+### Egress Firewall
+
+Every container session starts with an iptables egress firewall that blocks all outbound traffic except an explicit whitelist. This closes the primary *direct* exfiltration vector identified in agentic AI security research (server-side MCP tools are unaffected — see "Firewall in Action" below).
+
+**Whitelisted by default:**
+
+- `api.anthropic.com`, `statsig.anthropic.com` — Claude API
+- `github.com` and related domains — git, gh CLI, releases
+- `registry.npmjs.org` — npm
+- `pypi.org`, `files.pythonhosted.org` — pip
+- `mise.jdx.dev` — mise tool manager
+- Host gateway — local services on the host machine
+- Z.AI endpoint from `~/.zai.json` — automatically added when present
+
+To add more domains, edit `egress-firewall.sh`. To disable for a session:
+
+```bash
+container --no-firewall
+```
**Limitations:**
-- Network access still available; information may still be exfiltrated over network
-- Project files can still be deleted by harness; always use upstream version control
+
+- IP-based rules are resolved at session start; long-running sessions may see CDN IPs rotate
+- Project files can still be deleted by the harness; use version control
+
+### Firewall in Action
+
+The following exchange was conducted inside a live container session to verify the firewall behaves as expected:
+
+> **User:** Can you get the reddit.com homepage content?
+
+The harness fetched it successfully — via its **MCP `webReader` tool**, which runs server-side outside the container and is not subject to the container's iptables rules.
+
+> **User:** Can you POST data to Reddit's search form?
+
+```
+curl -X POST "https://www.reddit.com/search/" ...
+```
+
+Result: **connection timed out on all 4 Reddit IPs**. DNS resolved fine (allowed), but the TCP connection to port 443 was dropped by the firewall.
+
+| Method | Network Access |
+| ----------------------------------- | ----------------------------- |
+| Direct (curl, bash, any shell tool) | ❌ Blocked by iptables |
+| MCP server tools (webReader, etc.) | ✅ Runs outside the container |
+
+**Key insight:** The firewall blocks the harness from making direct outbound connections — exfiltrating data, phoning home, or hitting unauthorized APIs. MCP tools that run server-side are outside the container's network namespace and unaffected, which is the expected and correct behaviour.
diff --git a/container.sh b/container.sh
index 77eee02..7418ff4 100755
--- a/container.sh
+++ b/container.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# Code Container Manager
-# Manages isolated Docker containers for running coding tools on different projects
+# Manages isolated containers (Podman/Docker) for running coding tools on different projects
set -e
@@ -12,7 +12,7 @@ YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
-# Script directory (where Dockerfile and shared volumes are)
+# Script directory (where Containerfile and shared volumes are)
SCRIPT_PATH="$0"
while [ -L "$SCRIPT_PATH" ]; do
SCRIPT_PATH="$(readlink "$SCRIPT_PATH")"
@@ -20,28 +20,127 @@ done
SCRIPT_DIR="$(cd "$(dirname "$SCRIPT_PATH")" && pwd)"
IMAGE_NAME="code"
IMAGE_TAG="latest"
+CONTAINER_HOME="/container/$USER"
+NO_FIREWALL=false
+
+# Detect container runtime (prefer podman)
+if command -v podman >/dev/null 2>&1; then
+ CONTAINER_RUNTIME="podman"
+elif command -v docker >/dev/null 2>&1; then
+ CONTAINER_RUNTIME="docker"
+else
+ echo -e "${RED}[ERROR]${NC} Neither podman nor docker is installed"
+ exit 1
+fi
# Container launch command; modify to add additional mounts
start_new_container() {
local container_name="$1"
- local project_name="$2"
+ local project_relpath="$2"
local project_path="$3"
- docker run -d \
+ # Build optional mounts conditionally
+ local optional_args=""
+
+ # 1Password SSH agent socket
+ local op_agent="$HOME/.1password/agent.sock"
+ if [ -S "$op_agent" ]; then
+ optional_args="$optional_args -v $op_agent:$CONTAINER_HOME/.1password/agent.sock"
+ optional_args="$optional_args -e SSH_AUTH_SOCK=$CONTAINER_HOME/.1password/agent.sock"
+ fi
+
+ # GPG agent SSH socket (for YubiKey SSH auth)
+ local gpg_ssh_socket="/run/user/$(id -u)/gnupg/S.gpg-agent.ssh"
+ if [ -S "$gpg_ssh_socket" ]; then
+ optional_args="$optional_args -v $gpg_ssh_socket:$CONTAINER_HOME/.gnupg-sockets/S.gpg-agent.ssh"
+ # Only set SSH_AUTH_SOCK if 1Password agent isn't already set
+ if [ ! -S "$op_agent" ]; then
+ optional_args="$optional_args -e SSH_AUTH_SOCK=$CONTAINER_HOME/.gnupg-sockets/S.gpg-agent.ssh"
+ fi
+ fi
+
+ # GPG configuration (for YubiKey)
+ if [ -d "$HOME/.gnupg" ]; then
+ optional_args="$optional_args -v $HOME/.gnupg:$CONTAINER_HOME/.gnupg:ro"
+ fi
+
+ # YubiKey USB device passthrough (Yubico vendor ID: 1050)
+ local yubikey_bus=$(lsusb 2>/dev/null | grep -i "yubico\|1050" | head -1 | awk '{print $2}')
+ local yubikey_dev=$(lsusb 2>/dev/null | grep -i "yubico\|1050" | head -1 | awk '{print $4}' | tr -d ':')
+ if [ -n "$yubikey_bus" ] && [ -n "$yubikey_dev" ]; then
+ local yubikey_device="/dev/bus/usb/$yubikey_bus/$yubikey_dev"
+ if [ -e "$yubikey_device" ]; then
+ optional_args="$optional_args --device $yubikey_device"
+ fi
+ fi
+
+ # Z.AI config for GLM models
+ local zai_config="$HOME/.zai.json"
+ if [ -f "$zai_config" ]; then
+ optional_args="$optional_args -v $zai_config:$CONTAINER_HOME/.zai.json:ro"
+ fi
+
+ # Tool config directories — mount ~/.config/ for each tool in extra-tools.txt
+ # Some tools use a config dir name that differs from their mise/package name.
+ local -A tool_config_name_map=(
+ [neovim]=nvim
+ [markdownlint-cli2]="" # no ~/.config dir
+ [ast-grep]="" # no ~/.config dir
+ )
+ local tools_file="$SCRIPT_DIR/extra-tools.txt"
+ if [ -f "$tools_file" ]; then
+ while IFS= read -r tool; do
+ # Strip npm:/github: prefixes, comments, blank lines
+ tool=$(echo "$tool" | sed 's|^npm:||; s|^github:[^@]*/||' | awk '{print $1}')
+ [ -z "$tool" ] && continue
+ # Apply name mapping if present; empty string means skip
+ local cfg_name="$tool"
+ if [[ -v tool_config_name_map[$tool] ]]; then
+ cfg_name="${tool_config_name_map[$tool]}"
+ [ -z "$cfg_name" ] && continue
+ fi
+ local tool_cfg="$HOME/.config/$cfg_name"
+ if [ -d "$tool_cfg" ]; then
+ optional_args="$optional_args -v $tool_cfg:$CONTAINER_HOME/.config/$cfg_name"
+ fi
+ done < <(grep -v '^\s*#' "$tools_file" | grep -v '^\s*$')
+ fi
+
+ # Git config (XDG or legacy location)
+ if [ -d "$HOME/.config/git" ]; then
+ optional_args="$optional_args -v $HOME/.config/git:$CONTAINER_HOME/.config/git:ro"
+ elif [ -f "$HOME/.gitconfig" ]; then
+ optional_args="$optional_args -v $HOME/.gitconfig:$CONTAINER_HOME/.gitconfig:ro"
+ fi
+
+ # Host machine ID - makes Claude Code think it's running on the same machine (avoids re-auth)
+ if [ -f /etc/machine-id ]; then
+ optional_args="$optional_args -v /etc/machine-id:/etc/machine-id:ro"
+ fi
+
+ # Claude Code config - mount entire directory for full auth + config sharing
+ mkdir -p "$HOME/.claude"
+ local claude_configs="-v $HOME/.claude:$CONTAINER_HOME/.claude:rw"
+ if [ -f "$HOME/.claude.json" ]; then
+ claude_configs="$claude_configs -v $HOME/.claude.json:$CONTAINER_HOME/.claude.json:rw"
+ fi
+
+ $CONTAINER_RUNTIME run -d \
--name "$container_name" \
+ --userns=keep-id \
+ --cap-add NET_ADMIN \
-e TERM=xterm-256color \
- -w "/root/$project_name" \
- -v "$project_path:/root/$project_name" \
- -v "$SCRIPT_DIR/.claude:/root/.claude" \
- -v "$SCRIPT_DIR/container.claude.json:/root/.claude.json" \
- -v "$SCRIPT_DIR/.codex:/root/.codex" \
- -v "$SCRIPT_DIR/.opencode:/root/.config/opencode" \
- -v "$SCRIPT_DIR/.gemini:/root/.gemini" \
- -v "$SCRIPT_DIR/.npm:/root/.npm" \
- -v "$SCRIPT_DIR/pip:/root/.cache/pip" \
- -v "$SCRIPT_DIR/.local:/root/.local" \
- -v "$HOME/.gitconfig:/root/.gitconfig:ro" \
- -v "$HOME/.ssh:/root/.ssh:ro" \
+ -w "$CONTAINER_HOME/$project_relpath" \
+ -v "$project_path:$CONTAINER_HOME/$project_relpath" \
+ $claude_configs \
+ -v "$SCRIPT_DIR/.codex:$CONTAINER_HOME/.codex" \
+ -v "$SCRIPT_DIR/.opencode:$CONTAINER_HOME/.config/opencode" \
+ -v "$SCRIPT_DIR/.gemini:$CONTAINER_HOME/.gemini" \
+ -v "$SCRIPT_DIR/.npm:$CONTAINER_HOME/.npm" \
+ -v "$SCRIPT_DIR/pip:$CONTAINER_HOME/.cache/pip" \
+ -v "$HOME/.ssh:$CONTAINER_HOME/.ssh:ro" \
+ -v "$SCRIPT_DIR/egress-firewall.sh:/usr/local/sbin/egress-firewall:ro" \
+ $optional_args \
"${IMAGE_NAME}:${IMAGE_TAG}" \
sleep infinity
}
@@ -75,11 +174,14 @@ Arguments:
Options:
-h, --help Show this help message
- -b, --build Force rebuild the Docker image
+ -b, --build Force rebuild the container image
-s, --stop Stop the container for this project
-r, --remove Remove the container for this project
-l, --list List all Code containers
--clean Remove all stopped Code containers
+ --claude Start Claude (in YOLO mode)
+ --zai Start Claude with Z.AI/GLM models (requires ~/.zai.json)
+ --no-firewall Disable egress firewall (unrestricted network access)
Examples:
$0 # Uses current directory
@@ -87,6 +189,8 @@ Examples:
$0 --build
$0 --stop
$0 --list
+ $0 --claude # Start Claude in YOLO mode
+ $0 --zai # Start with Z.AI models
EOF
exit 0
@@ -113,31 +217,44 @@ generate_container_name() {
echo "code-${project_name}-${path_hash}"
}
-# Function to check if Docker image exists
+# Function to check if container image exists
image_exists() {
- docker image inspect "${IMAGE_NAME}:${IMAGE_TAG}" >/dev/null 2>&1
+ $CONTAINER_RUNTIME image inspect "${IMAGE_NAME}:${IMAGE_TAG}" >/dev/null 2>&1
}
-# Function to build Docker image
+# Function to build container image
build_image() {
- print_info "Building Docker image: ${IMAGE_NAME}:${IMAGE_TAG}"
-
+ print_info "Building container image: ${IMAGE_NAME}:${IMAGE_TAG}"
+
+ # Prompt to create extra-tools.txt from default if missing
+ if [ ! -f "$SCRIPT_DIR/extra-tools.txt" ]; then
+ print_warning "No extra-tools.txt found."
+ read -r -p "Copy from extra-tools.default.txt? [Y/n] " reply
+ if [[ "${reply:-Y}" =~ ^[Yy]$ ]]; then
+ cp "$SCRIPT_DIR/extra-tools.default.txt" "$SCRIPT_DIR/extra-tools.txt"
+ print_info "Copied. Edit $SCRIPT_DIR/extra-tools.txt to customize your tools."
+ else
+ touch "$SCRIPT_DIR/extra-tools.txt"
+ print_info "Created empty extra-tools.txt. No extra tools will be installed."
+ fi
+ fi
+
# Build the image
- docker build -t "${IMAGE_NAME}:${IMAGE_TAG}" "$SCRIPT_DIR"
-
- print_success "Docker image built successfully"
+ $CONTAINER_RUNTIME build -t "${IMAGE_NAME}:${IMAGE_TAG}" --build-arg USERNAME="$USER" "$SCRIPT_DIR"
+
+ print_success "Container image built successfully"
}
# Function to check if container exists
container_exists() {
local container_name="$1"
- docker container inspect "$container_name" >/dev/null 2>&1
+ $CONTAINER_RUNTIME container inspect "$container_name" >/dev/null 2>&1
}
# Function to check if container is running
container_running() {
local container_name="$1"
- [ "$(docker container inspect -f '{{.State.Running}}' "$container_name" 2>/dev/null)" == "true" ]
+ [ "$($CONTAINER_RUNTIME container inspect -f '{{.State.Running}}' "$container_name" 2>/dev/null)" == "true" ]
}
# Stop the container only if no other terminal sessions for the project are active.
@@ -146,35 +263,66 @@ stop_container_if_last_session() {
local project_name="$2"
local other_sessions
- other_sessions=$(ps ax -o command= | awk -v name="$container_name" -v proj="$project_name" '
+ other_sessions=$(ps ax -o command= | awk -v name="$container_name" -v proj="$project_name" -v runtime="$CONTAINER_RUNTIME" -v chome="$CONTAINER_HOME" '
BEGIN { count=0 }
{
- is_exec = (index($0, "docker exec") && index($0, "-it") && index($0, name) && index($0, "/bin/bash"))
- if (is_exec && index($0, "-w /root/" proj)) { count++ }
+ is_exec = (index($0, runtime " exec") && index($0, "-it") && index($0, name) && index($0, "/bin/bash"))
+ if (is_exec && index($0, "-w " chome "/" proj)) { count++ }
}
END { print count }
')
if [ "$other_sessions" -eq 0 ]; then
- docker stop "$container_name"
+ $CONTAINER_RUNTIME stop -t 0 "$container_name" &>/dev/null &
+ disown
else
print_info "Skipping stop; $other_sessions other terminal(s) still attached"
fi
}
+# Apply egress firewall inside container (idempotent via /run flag file)
+apply_firewall() {
+ local container_name="$1"
+ shift
+ local extra_hosts=("$@")
+
+ if [ "$NO_FIREWALL" = "true" ]; then
+ return
+ fi
+
+ # Skip if already applied this session (/run is tmpfs, cleared on container stop)
+ if $CONTAINER_RUNTIME exec "$container_name" test -f /run/egress-firewall-active 2>/dev/null; then
+ return
+ fi
+
+ print_info "Applying egress firewall..."
+ $CONTAINER_RUNTIME exec --user root "$container_name" \
+ /bin/bash /usr/local/sbin/egress-firewall "${extra_hosts[@]}" \
+ || print_warning "Egress firewall failed to apply (missing NET_ADMIN?)"
+}
+
# Function to start/create container
start_container() {
local project_path="$1"
+ local use_claude="${2:-false}"
+ local use_zai="${3:-false}"
local container_name=$(generate_container_name "$project_path")
- local project_name=$(basename "$project_path")
-
+ # Use relative path for consistent session storage across /home and /data
+ local project_relpath
+ if [[ "$project_path" == "$HOME/"* ]]; then
+ project_relpath="${project_path#$HOME/}"
+ elif [[ "$project_path" == "/data/$USER/"* ]]; then
+ project_relpath="${project_path#/data/$USER/}"
+ else
+ project_relpath=$(basename "$project_path")
+ fi
+
# Validate project path
if [ ! -d "$project_path" ]; then
print_error "Project directory does not exist: $project_path"
exit 1
fi
-
+
# Create shared directories if they don't exist
- mkdir -p "$SCRIPT_DIR/.claude"
mkdir -p "$SCRIPT_DIR/.codex"
mkdir -p "$SCRIPT_DIR/.npm"
mkdir -p "$SCRIPT_DIR/pip"
@@ -186,41 +334,102 @@ start_container() {
print_warning "Missing $SCRIPT_DIR/container.claude.json; creating default file"
echo '{}' > "$SCRIPT_DIR/container.claude.json"
fi
-
+
# Check if image exists, build if not
if ! image_exists; then
- print_warning "Docker image not found. Building..."
+ print_warning "Container image not found. Building..."
build_image
fi
-
+
+    # Collect extra egress hosts: whitelist the Z.AI endpoint host whenever ~/.zai.json provides one
+ local extra_egress_hosts=()
+ if [ -f "$HOME/.zai.json" ] && command -v jq >/dev/null 2>&1; then
+ local zai_url zai_host
+ zai_url=$(jq -r '.apiUrl // ""' "$HOME/.zai.json" 2>/dev/null)
+ zai_host=$(echo "$zai_url" | sed 's|https\?://||' | cut -d'/' -f1)
+ [ -n "$zai_host" ] && extra_egress_hosts+=("$zai_host")
+ fi
+
+ # Determine the command to run
+ local exec_cmd="/bin/bash"
+ local exec_env="-e TERM=xterm-256color"
+ local mise_init="source ~/.bashrc && mise trust -a 2>/dev/null"
+
+ # --claude flag: start regular claude in YOLO mode
+ if [ "$use_claude" = "true" ]; then
+ exec_cmd="claude --dangerously-skip-permissions"
+ fi
+
+ # --zai flag: start claude with Z.AI/GLM models in YOLO mode
+ if [ "$use_zai" = "true" ]; then
+ local zai_config="$HOME/.zai.json"
+ if [ ! -f "$zai_config" ]; then
+ print_error "Z.AI config not found: $zai_config"
+ exit 1
+ fi
+
+ # Read Z.AI config and build environment variables
+ if ! command -v jq >/dev/null 2>&1; then
+ print_error "jq is required for --zai option"
+ exit 1
+ fi
+
+ local api_url api_key haiku_model sonnet_model opus_model
+ api_url=$(jq -r '.apiUrl // ""' "$zai_config")
+ api_key=$(jq -r '.apiKey // ""' "$zai_config")
+ haiku_model=$(jq -r '.haikuModel // "glm-4.5-air"' "$zai_config")
+ sonnet_model=$(jq -r '.sonnetModel // "glm-5.0"' "$zai_config")
+ opus_model=$(jq -r '.opusModel // "glm-5.0"' "$zai_config")
+
+ if [ -z "$api_url" ] || [ -z "$api_key" ]; then
+ print_error "apiUrl/apiKey missing in $zai_config"
+ exit 1
+ fi
+
+ local key_hint="${api_key:0:4}...${api_key: -4}"
+ print_info "Z.AI: endpoint=$api_url | haiku=$haiku_model | sonnet=$sonnet_model | opus=$opus_model | key=$key_hint"
+
+ exec_env="$exec_env"
+ exec_env="$exec_env -e ANTHROPIC_BASE_URL=$api_url"
+ exec_env="$exec_env -e ANTHROPIC_AUTH_TOKEN=$api_key"
+ exec_env="$exec_env -e ANTHROPIC_DEFAULT_HAIKU_MODEL=$haiku_model"
+ exec_env="$exec_env -e ANTHROPIC_DEFAULT_SONNET_MODEL=$sonnet_model"
+ exec_env="$exec_env -e ANTHROPIC_DEFAULT_OPUS_MODEL=$opus_model"
+
+ exec_cmd="claude --dangerously-skip-permissions"
+ fi
+
# If container exists and is running, attach to it
if container_running "$container_name"; then
print_info "Container '$container_name' is already running"
print_info "Attaching to container..."
- docker exec -it -e TERM=xterm-256color -w "/root/$project_name" "$container_name" /bin/bash
- stop_container_if_last_session "$container_name" "$project_name"
+ apply_firewall "$container_name" "${extra_egress_hosts[@]+"${extra_egress_hosts[@]}"}"
+ $CONTAINER_RUNTIME exec -it $exec_env -w "$CONTAINER_HOME/$project_relpath" "$container_name" bash -l -c "$mise_init && $exec_cmd"
+ stop_container_if_last_session "$container_name" "$project_relpath"
return
fi
-
+
# If container exists but is stopped, start it
if container_exists "$container_name"; then
print_info "Starting existing container: $container_name"
- docker start "$container_name"
- docker exec -it -e TERM=xterm-256color -w "/root/$project_name" "$container_name" /bin/bash
- stop_container_if_last_session "$container_name" "$project_name"
+ $CONTAINER_RUNTIME start "$container_name"
+ apply_firewall "$container_name" "${extra_egress_hosts[@]+"${extra_egress_hosts[@]}"}"
+ $CONTAINER_RUNTIME exec -it $exec_env -w "$CONTAINER_HOME/$project_relpath" "$container_name" bash -l -c "$mise_init && $exec_cmd"
+ stop_container_if_last_session "$container_name" "$project_relpath"
return
fi
-
+
# Create and start new container
print_info "Creating new container: $container_name"
- print_info "Project: $project_path -> ~/$(basename "$project_path")"
+ print_info "Project: $project_path -> ~/$project_relpath"
- start_new_container "$container_name" "$project_name" "$project_path"
+ start_new_container "$container_name" "$project_relpath" "$project_path"
+ apply_firewall "$container_name" "${extra_egress_hosts[@]+"${extra_egress_hosts[@]}"}"
+
+ $CONTAINER_RUNTIME exec -it $exec_env -w "$CONTAINER_HOME/$project_relpath" "$container_name" bash -l -c "$mise_init && $exec_cmd"
+
+ stop_container_if_last_session "$container_name" "$project_relpath"
- docker exec -it -e TERM=xterm-256color -w "/root/$project_name" "$container_name" /bin/bash
-
- stop_container_if_last_session "$container_name" "$project_name"
-
print_success "Container session ended"
}
@@ -236,7 +445,7 @@ stop_container() {
if container_running "$container_name"; then
print_info "Stopping container: $container_name"
- docker stop "$container_name"
+ $CONTAINER_RUNTIME stop -t 0 "$container_name"
print_success "Container stopped"
else
print_warning "Container is not running: $container_name"
@@ -255,32 +464,32 @@ remove_container() {
if container_running "$container_name"; then
print_info "Stopping container: $container_name"
- docker stop "$container_name"
+ $CONTAINER_RUNTIME stop -t 0 "$container_name"
fi
print_info "Removing container: $container_name"
- docker rm "$container_name"
+ $CONTAINER_RUNTIME rm "$container_name"
print_success "Container removed"
}
# Function to list containers
list_containers() {
print_info "Code Containers:"
- docker ps -a --filter "name=code-" --format "table {{.Names}}\t{{.Status}}\t{{.CreatedAt}}"
+ $CONTAINER_RUNTIME ps -a --filter "name=code-" --format "table {{.Names}}\t{{.Status}}\t{{.CreatedAt}}"
}
# Function to clean up stopped containers
clean_containers() {
print_info "Removing all stopped Code containers..."
local container_ids
- container_ids=$(docker ps -a --filter "name=code-" --filter "status=exited" --quiet)
+ container_ids=$($CONTAINER_RUNTIME ps -a --filter "name=code-" --filter "status=exited" --quiet)
if [ -z "$container_ids" ]; then
print_info "No stopped Code containers to remove"
return
fi
- docker rm $container_ids
+ $CONTAINER_RUNTIME rm $container_ids
print_success "Cleanup complete"
}
@@ -291,6 +500,8 @@ STOP_FLAG=false
REMOVE_FLAG=false
LIST_FLAG=false
CLEAN_FLAG=false
+CLAUDE_FLAG=false
+ZAI_FLAG=false
PROJECT_PATH=""
while [[ $# -gt 0 ]]; do
@@ -318,6 +529,18 @@ while [[ $# -gt 0 ]]; do
CLEAN_FLAG=true
shift
;;
+ --claude)
+ CLAUDE_FLAG=true
+ shift
+ ;;
+ --zai)
+ ZAI_FLAG=true
+ shift
+ ;;
+ --no-firewall)
+ NO_FIREWALL=true
+ shift
+ ;;
*)
if [ -z "$PROJECT_PATH" ]; then
PROJECT_PATH="$1"
@@ -366,4 +589,4 @@ if [ "$REMOVE_FLAG" = true ]; then
fi
# Default operation: start container
-start_container "$PROJECT_PATH"
+start_container "$PROJECT_PATH" "$CLAUDE_FLAG" "$ZAI_FLAG"
diff --git a/egress-firewall.sh b/egress-firewall.sh
new file mode 100755
index 0000000..6618d19
--- /dev/null
+++ b/egress-firewall.sh
@@ -0,0 +1,121 @@
+#!/bin/bash
+# Egress firewall: whitelist permitted outbound destinations, block everything else.
+# Narrows the outbound-network exfiltration channel available to agentic tools
+# in the container. NOTE(review): data can still leave via whitelisted hosts
+# (e.g. pushes to GitHub); treat this as mitigation, not a guarantee.
+#
+# Usage: egress-firewall [extra-domain ...]
+#   Extra domains (e.g. Z.AI endpoint host) are appended to the whitelist.
+#   Re-applied at each container session start (iptables rules are in-memory).
+#   Rules permit the IPs each domain resolves to right now; long-running
+#   containers may need re-application when CDN DNS records rotate.
+
+# No 'set -e': rule application is best-effort; failures are reported at the end.
+set -uo pipefail
+
+WHITELIST=(
+    # Anthropic / Claude API
+    api.anthropic.com
+    statsig.anthropic.com
+
+    # GitHub (git, gh CLI, release downloads, raw files)
+    github.com
+    api.github.com
+    codeload.github.com
+    objects.githubusercontent.com
+    raw.githubusercontent.com
+    uploads.github.com
+    alive.github.com
+
+    # npm registry
+    registry.npmjs.org
+
+    # Python packages
+    pypi.org
+    files.pythonhosted.org
+
+    # mise tool manager
+    mise.jdx.dev
+)
+
+# Append any extra domains passed as arguments (e.g. Z.AI API host)
+for arg in "$@"; do
+    [ -n "$arg" ] && WHITELIST+=("$arg")
+done
+
+# Flush existing OUTPUT rules and set default DROP policy
+iptables -F OUTPUT
+iptables -P OUTPUT DROP
+
+# Always allow loopback
+iptables -A OUTPUT -o lo -j ACCEPT
+
+# Allow established/related connections (responses to our requests)
+iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
+
+# Allow DNS. Restrict IPv4 DNS to the resolvers in /etc/resolv.conf when they
+# can be determined: port 53 open to arbitrary hosts would otherwise leave a
+# trivial DNS-tunnel exfiltration path. Fall back to unrestricted port 53 so
+# name resolution never breaks outright.
+mapfile -t dns_servers < <(awk '/^nameserver/ {print $2}' /etc/resolv.conf 2>/dev/null)
+restricted_dns=false
+for ns in ${dns_servers[@]+"${dns_servers[@]}"}; do
+    [[ "$ns" == *:* ]] && continue   # IPv6 resolvers: covered by open v6 port 53 below
+    iptables -A OUTPUT -p udp -d "$ns" --dport 53 -j ACCEPT
+    iptables -A OUTPUT -p tcp -d "$ns" --dport 53 -j ACCEPT
+    restricted_dns=true
+done
+if [ "$restricted_dns" = "false" ]; then
+    iptables -A OUTPUT -p udp --dport 53 -j ACCEPT
+    iptables -A OUTPUT -p tcp --dport 53 -j ACCEPT
+fi
+
+# Allow access to the host gateway (for connecting to local services on the host)
+HOST_GW=$(ip route 2>/dev/null | awk '/default/ {print $3; exit}')
+if [ -n "$HOST_GW" ]; then
+    iptables -A OUTPUT -d "$HOST_GW" -j ACCEPT
+fi
+
+# Detect ip6tables availability
+HAS_IP6TABLES=false
+command -v ip6tables >/dev/null 2>&1 && ip6tables -L OUTPUT >/dev/null 2>&1 && HAS_IP6TABLES=true
+
+if [ "$HAS_IP6TABLES" = "true" ]; then
+    ip6tables -F OUTPUT
+    ip6tables -P OUTPUT DROP
+    ip6tables -A OUTPUT -o lo -j ACCEPT
+    ip6tables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
+    ip6tables -A OUTPUT -p udp --dport 53 -j ACCEPT
+    ip6tables -A OUTPUT -p tcp --dport 53 -j ACCEPT
+fi
+
+# Resolve each whitelisted domain and allow its current IPs (IPv4 via iptables, IPv6 via ip6tables)
+allowed_ips=0
+failed=()
+for domain in "${WHITELIST[@]}"; do
+    ips=$(getent ahosts "$domain" 2>/dev/null | awk '{print $1}' | sort -u)
+    if [ -z "$ips" ]; then
+        failed+=("$domain")
+        continue
+    fi
+    for ip in $ips; do
+        if [[ "$ip" == *:* ]]; then
+            # IPv6 address
+            if [ "$HAS_IP6TABLES" = "true" ]; then
+                ip6tables -A OUTPUT -d "$ip" -j ACCEPT
+                allowed_ips=$((allowed_ips + 1))
+            fi
+        else
+            # IPv4 address
+            iptables -A OUTPUT -d "$ip" -j ACCEPT
+            allowed_ips=$((allowed_ips + 1))
+        fi
+    done
+done
+
+# Mark this session so apply_firewall skips re-application while container is running
+touch /run/egress-firewall-active
+
+echo "[firewall] Egress active: $allowed_ips IPs across ${#WHITELIST[@]} domains"
+[ ${#failed[@]} -gt 0 ] && echo "[firewall] Warning: could not resolve: ${failed[*]}"
+exit 0
diff --git a/extra-tools.default.txt b/extra-tools.default.txt
new file mode 100644
index 0000000..7cde328
--- /dev/null
+++ b/extra-tools.default.txt
@@ -0,0 +1,26 @@
+# Extra mise-managed tools installed at image build time.
+# One tool per line. Rebuild with: container --build
+# Note: fd and ripgrep are already installed in the base image.
+
+# Modern CLI replacements
+bat # cat replacement
+eza # ls replacement
+sd                  # intuitive find-and-replace (sed alternative)
+dua # du replacement
+gping # ping with graph
+
+# Data viewing/processing
+jq # JSON processor
+jless # JSON viewer
+glow # markdown viewer
+hexyl # hex viewer
+yq # YAML/JSON processor
+
+# Git tools
+lazygit # git TUI
+
+# Dev/code tools
+ast-grep # AST-based code search
+ruff # Python linter/formatter
+stylua # Lua formatter
+markdownlint-cli2 # markdown linter
diff --git a/install.sh b/install.sh
new file mode 100755
index 0000000..6340660
--- /dev/null
+++ b/install.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+# Code Container installer
+# Usage: curl -fsSL https://raw.githubusercontent.com/drmikecrowe/code-container/main/install.sh | bash
+#
+# What this script does (nothing hidden):
+#   1. Clones the repo to ~/.local/share/code-container (or pulls if already present)
+#   2. Symlinks container.sh as "container" into a directory on your PATH
+#      - Prefers ~/.local/bin if it's on your PATH (no sudo needed)
+#      - Falls back to /usr/local/bin via sudo
+
+set -euo pipefail
+
+REPO_URL="https://github.com/drmikecrowe/code-container.git"
+INSTALL_DIR="$HOME/.local/share/code-container"
+BINARY_NAME="container"
+
+# --- Helpers ---
+
+info() { echo -e "\033[0;34m==>\033[0m $1"; }
+ok() { echo -e "\033[0;32m==>\033[0m $1"; }
+warn() { echo -e "\033[1;33m==>\033[0m $1"; }
+err() { echo -e "\033[0;31m==>\033[0m $1" >&2; }
+
+# --- Pre-flight checks ---
+
+if ! command -v git >/dev/null 2>&1; then
+    err "git is required but not found. Please install git first."
+    exit 1
+fi
+
+if ! command -v podman >/dev/null 2>&1 && ! command -v docker >/dev/null 2>&1; then
+    warn "Neither podman nor docker found. You'll need one before running container."
+fi
+
+# --- Step 1: Clone or update the repo ---
+
+if [ -d "$INSTALL_DIR/.git" ]; then
+    info "Updating existing installation at $INSTALL_DIR"
+    # Fail with an actionable message instead of a bare git error (e.g. local edits block --ff-only)
+    if ! git -C "$INSTALL_DIR" pull --ff-only origin main; then
+        err "Could not fast-forward $INSTALL_DIR. Resolve local changes (or delete the directory) and re-run."
+        exit 1
+    fi
+    ok "Updated to latest"
+else
+    info "Cloning $REPO_URL -> $INSTALL_DIR"
+    mkdir -p "$(dirname "$INSTALL_DIR")"
+    if ! git clone "$REPO_URL" "$INSTALL_DIR"; then
+        err "git clone failed. Check network access to github.com and re-run."
+        exit 1
+    fi
+    ok "Cloned successfully"
+fi
+
+chmod +x "$INSTALL_DIR/container.sh"
+
+# --- Step 2: Symlink into PATH ---
+
+SOURCE="$INSTALL_DIR/container.sh"
+LINK_TARGET=""
+
+# Prefer ~/.local/bin if it's on PATH
+if echo "$PATH" | tr ':' '\n' | grep -qx "$HOME/.local/bin"; then
+    LINK_DIR="$HOME/.local/bin"
+    mkdir -p "$LINK_DIR"
+    LINK_TARGET="$LINK_DIR/$BINARY_NAME"
+
+    if [ -L "$LINK_TARGET" ] || [ -e "$LINK_TARGET" ]; then
+        info "Removing existing $LINK_TARGET"
+        rm "$LINK_TARGET"
+    fi
+
+    info "Symlinking $SOURCE -> $LINK_TARGET"
+    ln -s "$SOURCE" "$LINK_TARGET"
+    ok "Installed to $LINK_TARGET (no sudo needed)"
+else
+    LINK_DIR="/usr/local/bin"
+    LINK_TARGET="$LINK_DIR/$BINARY_NAME"
+
+    warn "~/.local/bin is not on your PATH; falling back to $LINK_DIR (requires sudo)"
+
+    if [ -L "$LINK_TARGET" ] || [ -e "$LINK_TARGET" ]; then
+        info "Removing existing $LINK_TARGET"
+        sudo rm "$LINK_TARGET"
+    fi
+
+    info "Symlinking $SOURCE -> $LINK_TARGET (via sudo)"
+    sudo ln -s "$SOURCE" "$LINK_TARGET"
+    ok "Installed to $LINK_TARGET"
+fi
+
+# --- Step 3: Verify ---
+
+if command -v "$BINARY_NAME" >/dev/null 2>&1; then
+    ok "Done! Run 'container --build' to build the image, then 'container' in any project directory."
+else
+    warn "Installed, but '$BINARY_NAME' isn't found on PATH. You may need to restart your shell."
+fi
diff --git a/uninstall.sh b/uninstall.sh
new file mode 100755
index 0000000..a1002f5
--- /dev/null
+++ b/uninstall.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+# Code Container uninstaller
+# Usage: curl -fsSL https://raw.githubusercontent.com/drmikecrowe/code-container/main/uninstall.sh | bash
+#
+# What this script does (nothing hidden):
+#   1. Removes the "container" symlink from PATH
+#   2. Removes the cloned repo from ~/.local/share/code-container
+#   3. Optionally removes all stopped code-* containers and the container image
+
+set -euo pipefail
+
+INSTALL_DIR="$HOME/.local/share/code-container"
+BINARY_NAME="container"
+
+# --- Helpers ---
+
+info() { echo -e "\033[0;34m==>\033[0m $1"; }
+ok() { echo -e "\033[0;32m==>\033[0m $1"; }
+warn() { echo -e "\033[1;33m==>\033[0m $1"; }
+err() { echo -e "\033[0;31m==>\033[0m $1" >&2; }
+
+# --- Step 1: Remove symlink ---
+
+LINK_REMOVED=false
+
+for dir in "$HOME/.local/bin" "/usr/local/bin"; do
+    link="$dir/$BINARY_NAME"
+    if [ -L "$link" ]; then
+        target=$(readlink "$link")
+        # Only remove symlinks that actually point at this tool
+        if [[ "$target" == *code-container/container.sh ]]; then
+            info "Removing symlink $link -> $target"
+            if [ "$dir" = "/usr/local/bin" ]; then
+                sudo rm "$link"
+            else
+                rm "$link"
+            fi
+            ok "Symlink removed"
+            LINK_REMOVED=true
+        fi
+    fi
+done
+
+if [ "$LINK_REMOVED" = "false" ]; then
+    warn "No container symlink found on PATH"
+fi
+
+# --- Step 2: Remove cloned repo ---
+
+if [ -d "$INSTALL_DIR" ]; then
+    info "Removing $INSTALL_DIR"
+    rm -rf "$INSTALL_DIR"
+    ok "Installation directory removed"
+else
+    warn "$INSTALL_DIR not found — already removed?"
+fi
+
+# --- Step 3: Optionally clean up containers and image ---
+
+RUNTIME=""
+if command -v podman >/dev/null 2>&1; then
+    RUNTIME="podman"
+elif command -v docker >/dev/null 2>&1; then
+    RUNTIME="docker"
+fi
+
+if [ -n "$RUNTIME" ]; then
+    containers=$($RUNTIME ps -a --filter "name=code-" --quiet 2>/dev/null || true)
+    image_exists=$($RUNTIME image inspect "code:latest" >/dev/null 2>&1 && echo yes || echo no)
+
+    if [ -n "$containers" ] || [ "$image_exists" = "yes" ]; then
+        echo ""
+        # When run via `curl | bash`, stdin is the script stream, so a plain
+        # `read` would consume script text as the answer. Prompt on the
+        # controlling terminal instead; default to "No" if there isn't one.
+        if [ -r /dev/tty ]; then
+            read -r -p "Also remove all code-* containers and the code:latest image? [y/N] " reply < /dev/tty || reply="N"
+        else
+            reply="N"
+            warn "No terminal available for confirmation; leaving containers and image in place"
+        fi
+        if [[ "${reply:-N}" =~ ^[Yy]$ ]]; then
+            if [ -n "$containers" ]; then
+                info "Stopping and removing code-* containers"
+                # $containers is intentionally unquoted: one container ID per word
+                $RUNTIME rm -f $containers
+                ok "Containers removed"
+            fi
+            if [ "$image_exists" = "yes" ]; then
+                info "Removing code:latest image"
+                $RUNTIME rmi "code:latest"
+                ok "Image removed"
+            fi
+        else
+            info "Leaving containers and image in place"
+        fi
+    fi
+fi
+
+echo ""
+ok "Code Container uninstalled"