diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 71da7e5efe..08a6bfe68d 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -159,8 +159,8 @@ jobs:
           echo "integration=false" >> $GITHUB_OUTPUT
         fi
-        # Detect docs changes
-        if echo "$CHANGED" | grep -qE '^(docs/|README\.md)'; then
+        # Detect docs changes (includes CLI source, from which docs/cli.md is generated)
+        if echo "$CHANGED" | grep -qE '^(docs/|README\.md|cmd/|pkg/cli/)'; then
           echo "docs=true" >> $GITHUB_OUTPUT
         else
           echo "docs=false" >> $GITHUB_OUTPUT
         fi
@@ -390,6 +390,8 @@ jobs:
         cache: false
     - name: Check llms.txt is up to date
       run: mise run docs:llm:check
+    - name: Check CLI docs are up to date
+      run: mise run docs:cli:check
 
 # =============================================================================
 # Lint Checks - Parallel
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index ca4b20b59f..6b17c93cab 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -10,10 +10,16 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v6
+    - uses: actions/setup-go@v5
+      with:
+        go-version: '1.23'
    - uses: actions/setup-python@v6
       with:
         python-version: '3.13'
+    - name: Generate CLI docs
+      run: go run ./tools/gendocs/main.go -o docs/cli.md
+
     - name: Copy top-level docs like README and CONTRIBUTING
       run: |
         sed 's/docs\///g' README.md > ./docs/README.md
diff --git a/.gitignore b/.gitignore
index 34e893c7d7..10de1868f3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -60,3 +60,6 @@ target
 /.rustup
 /.rustup/**
 .coverage
+
+# Generated docs
+/site
diff --git a/AGENTS.md b/AGENTS.md
index 279fce4127..08c137eac1 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -34,6 +34,7 @@ Development tasks are managed with [mise](https://mise.jdx.dev/). Run `mise task
 | `mise run build:sdk` | Build SDK wheel |
 | `mise run install` | Build and symlink cog to /usr/local/bin |
 | `mise run docs:llm` | **IMPORTANT:** Regenerate `docs/llms.txt` after editing docs |
+| `mise run docs:cli` | Generate CLI reference docs from Go source code |
 
 ### Task Naming Convention
 
@@ -240,6 +241,7 @@ For comprehensive architecture documentation, see [`architecture/`](./architectu
 ### Updating the docs
 - Documentation is in the `docs/` directory, written in Markdown and generated into HTML using `mkdocs`.
 - **IMPORTANT:** After editing any file in `docs/` or `README.md`, you MUST run `mise run docs:llm` to regenerate `docs/llms.txt`. This file is used by coding agents and should be kept in sync with the documentation.
+- **IMPORTANT:** CLI reference docs (`docs/cli.md`) are auto-generated from Go source code. After modifying CLI commands in `cmd/` or `pkg/cli/`, run `mise run docs:cli` to regenerate, and ensure `mise run docs:cli:check` passes before committing.
 
 ## CI Tool Dependencies
 
diff --git a/docs/cli.md b/docs/cli.md
index 713960bbf0..f282d72530 100644
--- a/docs/cli.md
+++ b/docs/cli.md
@@ -1,402 +1,178 @@
-# CLI
+# CLI reference
 
-Cog provides a command-line interface for building, running, and deploying machine learning models.
+
 
-## Overview
+## `cog`
 
-The Cog CLI follows this general pattern:
+Containers for machine learning.
-```
-cog [global-options] [command-options] [arguments]
-```
-
-For help with any command, use the `--help` flag:
-
-```bash
-cog --help
-cog build --help
-```
-
-## Global Options
+To get started, take a look at the documentation:
+https://github.com/replicate/cog
 
-These options are available for all commands:
+**Examples**
 
-| Flag | Type | Default | Description |
-|------|------|---------|-------------|
-| `--debug` | bool | false | Show debugging output |
-| `--registry` | string | r8.im | Container registry host (can also be set via `COG_REGISTRY_HOST` env var) |
-| `--version` | bool | false | Show version of Cog |
-
-## Commands
-
-### cog init
-
-Initialize a new Cog project in the current directory.
-
-```
-cog init
-```
-
-This command creates:
-- `cog.yaml` - Configuration file defining the environment
-- `predict.py` - Python file with a basic prediction model template
-- `requirements.txt` - Python dependencies file
-
-**Examples:**
-
-```bash
-# Initialize a new project
-cog init
-
-# The created files provide a starting template
-ls
-# cog.yaml predict.py requirements.txt
+```
+  To run a command inside a Docker environment defined with Cog:
+  $ cog run echo hello world
 ```
 
-### cog build
-
-Build a Docker image from a `cog.yaml` configuration file.
+**Options**
 
 ```
-cog build [options]
+      --debug     Show debugging output
+  -h, --help      help for cog
+      --version   Show version of Cog
 ```
+## `cog build`
 
-**Flags:**
-
-| Flag | Type | Default | Description |
-|------|------|---------|-------------|
-| `-t, --tag` | string | | A name for the built image in the form 'repository:tag' |
-| `--progress` | string | auto | Set type of build progress output: 'auto', 'tty', or 'plain' |
-| `--secret` | string[] | | Secrets to pass to the build environment in the form 'id=foo,src=/path/to/file' |
-| `--no-cache` | bool | false | Do not use cache when building the image |
-| `--separate-weights` | bool | false | Separate model weights from code in image layers |
-| `--openapi-schema` | string | | Load OpenAPI schema from a file |
-| `--use-cuda-base-image` | string | auto | Use Nvidia CUDA base image: 'true', 'false', or 'auto' |
-| `--use-cog-base-image` | bool | true | Use pre-built Cog base image for faster cold boots |
-| `-f` | string | cog.yaml | The name of the config file |
-
-**Examples:**
-
-```bash
-# Build with default settings
-cog build
-
-# Build with a custom tag
-cog build -t my-model:latest
-
-# Build without cache
-cog build --no-cache
-
-# Build with separated weights for faster deploys
-cog build --separate-weights -t my-model:v1
+Build an image from cog.yaml
 
-# Build without CUDA for smaller images (non-GPU models)
-cog build --use-cuda-base-image=false
+```
+cog build [flags]
 ```
 
-### cog predict
-
-Run a prediction on a model.
+**Options**
 
 ```
-cog predict [image] [options]
+  -f, --file string                The name of the config file. (default "cog.yaml")
+  -h, --help                       help for build
+      --no-cache                   Do not use cache when building the image
+      --openapi-schema string      Load OpenAPI schema from a file
+      --progress string            Set type of build progress output, 'auto' (default), 'tty', 'plain', or 'quiet' (default "auto")
+      --secret stringArray         Secrets to pass to the build environment in the form 'id=foo,src=/path/to/file'
+      --separate-weights           Separate model weights from code in image layers
+  -t, --tag string                 A name for the built image in the form 'repository:tag'
+      --use-cog-base-image         Use pre-built Cog base image for faster cold boots (default true)
+      --use-cuda-base-image string Use Nvidia CUDA base image, 'true' (default) or 'false' (use python base image). False results in a smaller image but may cause problems for non-torch projects (default "auto")
```
+## `cog init`
 
-If an image is specified, it runs predictions on that Docker image. Otherwise, it builds the model in the current directory and runs predictions on it.
-
-**Flags:**
-
-| Flag | Type | Default | Description |
-|------|------|---------|-------------|
-| `-i, --input` | string[] | | Inputs in the form name=value. Use @filename to read from a file |
-| `-o, --output` | string | | Output path |
-| `-e, --env` | string[] | | Environment variables in the form name=value |
-| `--json` | string | | Pass inputs as JSON object from file (@inputs.json) or stdin (@-) |
-| `--use-replicate-token` | bool | false | Pass REPLICATE_API_TOKEN from local environment |
-| `--setup-timeout` | uint32 | 300 | Timeout for container setup in seconds |
-| `--gpus` | string | | GPU devices to add to the container |
-| `--use-cuda-base-image` | string | auto | Use Nvidia CUDA base image |
-| `--use-cog-base-image` | bool | true | Use pre-built Cog base image |
-| `--progress` | string | auto | Set type of build progress output |
-| `-f` | string | cog.yaml | The name of the config file |
-
-**Examples:**
-
-```bash
-# Run prediction with inputs
-cog predict -i image=@input.jpg -i scale=2
-
-# Run prediction with output path
-cog predict -i image=@photo.png -o output.png
-
-# Run prediction with JSON input from file
-echo '{"image": "@input.jpg", "scale": 2}' > inputs.json
-cog predict --json @inputs.json
-
-# Run prediction with JSON input from stdin
-echo '{"image": "@input.jpg", "scale": 2}' | cog predict --json @-
-
-# Run prediction on specific image
-cog predict my-model:latest -i text="Hello world"
-
-# Run with environment variables
-cog predict -e API_KEY=secret -i prompt="Generate text"
+Configure your project for use with Cog
 
-# Run with specific GPU
-cog predict --gpus 0 -i image=@input.jpg
+```
+cog init [flags]
 ```
 
-### cog run
-
-Run a command inside a Docker environment defined by Cog.
+**Options**
 
 ```
-cog run [options] [args...]
+  -h, --help   help for init
 ```
+## `cog login`
 
-**Flags:**
-
-| Flag | Type | Default | Description |
-|------|------|---------|-------------|
-| `-p, --publish` | string[] | | Publish a container's port to the host (e.g., -p 8000) |
-| `-e, --env` | string[] | | Environment variables in the form name=value |
-| `--gpus` | string | | GPU devices to add to the container |
-| `--progress` | string | auto | Set type of build progress output |
-| `--use-cuda-base-image` | string | auto | Use Nvidia CUDA base image |
-| `--use-cog-base-image` | bool | true | Use pre-built Cog base image |
-| `-f` | string | cog.yaml | The name of the config file |
-
-**Examples:**
-
-```bash
-# Run Python interpreter
-cog run python
-
-# Run a Python script
-cog run python train.py
-
-# Run with environment variables
-cog run -e API_KEY=secret python script.py
+Log in to a container registry.
 
-# Run with published ports
-cog run -p 8888 jupyter notebook
+For Replicate's registry (r8.im), this command handles authentication
+through Replicate's token-based flow.
 
-# Run with GPU access
-cog run --gpus all python gpu_test.py
+For other registries, this command prompts for username and password,
+then stores credentials using Docker's credential system.
 
-# Run bash commands
-cog run ls -la
-cog run bash -c "echo Hello && python --version"
+```
+cog login [flags]
 ```
 
-### cog serve
-
-Run the cog HTTP server locally.
+**Options**
 
 ```
-cog serve [options]
+  -h, --help          help for login
+      --token-stdin   Pass login token on stdin instead of opening a browser. You can find your Replicate login token at https://replicate.com/auth/token
```
+## `cog predict`
 
-Generates and runs an HTTP server based on the model's declared inputs and outputs.
+Run a prediction.
 
-**Flags:**
+If 'image' is passed, it will run the prediction on that Docker image.
+It must be an image that has been built by Cog.
 
-| Flag | Type | Default | Description |
-|------|------|---------|-------------|
-| `-p, --port` | int | 8393 | Port on which to listen |
-| `--gpus` | string | | GPU devices to add to the container |
-| `--progress` | string | auto | Set type of build progress output |
-| `--use-cuda-base-image` | string | auto | Use Nvidia CUDA base image |
-| `--use-cog-base-image` | bool | true | Use pre-built Cog base image |
-| `-f` | string | cog.yaml | The name of the config file |
+Otherwise, it will build the model in the current directory and run
+the prediction on that.
 
-**Examples:**
-
-```bash
-# Start server on default port
-cog serve
-
-# Start server on custom port
-cog serve -p 5000
-
-# Start server with GPU
-cog serve --gpus all
-
-# Test the server
-curl http://localhost:8393/predictions -X POST \
-  -H 'Content-Type: application/json' \
-  -d '{"input": {"text": "Hello"}}'
+```
+cog predict [image] [flags]
 ```
 
-### cog push
-
-Build and push a model to a Docker registry.
+**Options**
 
 ```
-cog push [IMAGE]
+  -e, --env stringArray            Environment variables, in the form name=value
+  -f, --file string                The name of the config file. (default "cog.yaml")
+      --gpus docker run --gpus     GPU devices to add to the container, in the same format as docker run --gpus.
+  -h, --help                       help for predict
+  -i, --input stringArray          Inputs, in the form name=value. if value is prefixed with @, then it is read from a file on disk. E.g. -i path=@image.jpg
+      --json string                Pass inputs as JSON object, read from file (@inputs.json) or via stdin (@-)
+  -o, --output string              Output path
+      --progress string            Set type of build progress output, 'auto' (default), 'tty', 'plain', or 'quiet' (default "auto")
+      --setup-timeout uint32       The timeout for a container to setup (in seconds). (default 300)
+      --use-cog-base-image         Use pre-built Cog base image for faster cold boots (default true)
+      --use-cuda-base-image string Use Nvidia CUDA base image, 'true' (default) or 'false' (use python base image). False results in a smaller image but may cause problems for non-torch projects (default "auto")
+      --use-replicate-token        Pass REPLICATE_API_TOKEN from local environment into the model context
```
+## `cog push`
 
-Cog can push to any OCI-compliant container registry. When pushing to Replicate's registry (r8.im), additional features like version tracking are available. For other registries, standard Docker push is used.
-
-**Flags:**
-
-| Flag | Type | Default | Description |
-|------|------|---------|-------------|
-| `--secret` | string[] | | Secrets to pass to the build environment |
-| `--no-cache` | bool | false | Do not use cache when building |
-| `--separate-weights` | bool | false | Separate model weights from code |
-| `--openapi-schema` | string | | Load OpenAPI schema from a file |
-| `--use-cuda-base-image` | string | auto | Use Nvidia CUDA base image |
-| `--use-cog-base-image` | bool | true | Use pre-built Cog base image |
-| `--progress` | string | auto | Set type of build progress output |
-| `-f` | string | cog.yaml | The name of the config file |
-
-**Examples:**
-
-```bash
-# Push to Replicate
-cog push r8.im/username/model-name
-
-# Push to GitHub Container Registry
-docker login ghcr.io
-cog push ghcr.io/your-org/model-name
+Build and push model in current directory to a Docker registry
 
-# Push to Google Container Registry
-docker login gcr.io
-cog push gcr.io/your-project/model-name
-
-# Push with separated weights (Replicate only)
-cog push r8.im/username/model-name --separate-weights
+```
+cog push [IMAGE] [flags]
 ```
 
-# Push without cache
-cog push r8.im/username/model-name --no-cache
+**Examples**
 
-### cog login
-
-Log in to a container registry.
+```
+cog push registry.example.com/your-username/model-name
 ```
 
-cog login [options]
+**Options**
 
-For Replicate's registry (r8.im), this command handles authentication through Replicate's token-based flow. For other registries, this command prompts for username and password, then stores credentials using Docker's credential system.
-
-**Flags:**
-
-| Flag | Type | Default | Description |
-|------|------|---------|-------------|
-| `--token-stdin` | bool | false | Pass login token on stdin instead of opening browser (Replicate only) |
-
-**Examples:**
-
-```bash
-# Interactive login to Replicate (opens browser)
-cog login
-
-# Login to Replicate with token
-echo $REPLICATE_API_TOKEN | cog login --token-stdin
-
-# Login to GitHub Container Registry
-cog login --registry ghcr.io
 
-# Login to Google Container Registry
-cog login --registry gcr.io
-
-# Login to a private registry
-cog login --registry your-registry.example.com
 ```
+  -f, --file string                The name of the config file. (default "cog.yaml")
+  -h, --help                       help for push
+      --no-cache                   Do not use cache when building the image
+      --openapi-schema string      Load OpenAPI schema from a file
+      --progress string            Set type of build progress output, 'auto' (default), 'tty', 'plain', or 'quiet' (default "auto")
+      --secret stringArray         Secrets to pass to the build environment in the form 'id=foo,src=/path/to/file'
+      --separate-weights           Separate model weights from code in image layers
+      --use-cog-base-image         Use pre-built Cog base image for faster cold boots (default true)
+      --use-cuda-base-image string Use Nvidia CUDA base image, 'true' (default) or 'false' (use python base image). False results in a smaller image but may cause problems for non-torch projects (default "auto")
```
+## `cog run`
 
-### cog debug
-
-Generate a Dockerfile from cog configuration.
+Run a command inside a Docker environment
 
 ```
-cog debug [options]
+cog run [arg...] [flags]
 ```
 
-**Flags:**
-
-| Flag | Type | Default | Description |
-|------|------|---------|-------------|
-| `--image-name` | string | | The image name for the generated Dockerfile |
-| `--separate-weights` | bool | false | Separate model weights from code |
-| `--use-cuda-base-image` | string | auto | Use Nvidia CUDA base image |
-| `--use-cog-base-image` | bool | true | Use pre-built Cog base image |
-| `-f` | string | cog.yaml | The name of the config file |
-
-**Examples:**
-
-```bash
-# Generate Dockerfile to stdout
-cog debug
+**Options**
 
-# Generate Dockerfile with custom image name
-cog debug --image-name my-model:debug
 ```
-
-## Common Workflows
-
-### Basic Model Development
-
-```bash
-# 1. Initialize a new project
-cog init
-
-# 2. Edit cog.yaml and predict.py to define your model
-
-# 3. Test predictions locally
-cog predict -i input_image=@photo.jpg
-
-# 4. Build and push to registry
-cog push r8.im/username/my-model
+  -e, --env stringArray            Environment variables, in the form name=value
+  -f, --file string                The name of the config file. (default "cog.yaml")
+      --gpus docker run --gpus     GPU devices to add to the container, in the same format as docker run --gpus.
+  -h, --help                       help for run
+      --progress string            Set type of build progress output, 'auto' (default), 'tty', 'plain', or 'quiet' (default "auto")
+  -p, --publish stringArray        Publish a container's port to the host, e.g. -p 8000
+      --use-cog-base-image         Use pre-built Cog base image for faster cold boots (default true)
+      --use-cuda-base-image string Use Nvidia CUDA base image, 'true' (default) or 'false' (use python base image). False results in a smaller image but may cause problems for non-torch projects (default "auto")
```
+## `cog serve`
 
-### Using JSON Inputs
-
-The `--json` flag for `cog predict` allows passing complex inputs as JSON:
+Run a prediction HTTP server.
 
-```bash
-# From file
-cat > inputs.json << EOF
-{
-  "prompt": "A beautiful sunset",
-  "num_outputs": 4,
-  "guidance_scale": 7.5
-}
-EOF
-cog predict --json @inputs.json
-
-# From stdin
-echo '{"prompt": "A cat", "seed": 42}' | cog predict --json @-
-
-# With local file paths (automatically converted to base64)
-echo '{"image": "@input.jpg", "scale": 2}' | cog predict --json @-
-```
-
-### Working with GPUs
-
-```bash
-# Use all available GPUs
-cog run --gpus all python train.py
-
-# Use specific GPU
-cog predict --gpus 0 -i image=@input.jpg
-
-# Use multiple specific GPUs
-cog run --gpus '"device=0,1"' python multi_gpu_train.py
-```
-
-### Environment Variables
-
-```bash
-# Pass environment variables to predict
-cog predict -e API_KEY=$MY_API_KEY -i prompt="Hello"
-
-# Pass Replicate API token
-export REPLICATE_API_TOKEN=your_token
-cog predict --use-replicate-token -i prompt="Hello"
-
-# Multiple environment variables
-cog run -e CUDA_VISIBLE_DEVICES=0 -e BATCH_SIZE=32 python train.py
-```
+Generate and run an HTTP server based on the declared model inputs and outputs.
-# With local file paths (automatically converted to base64) -echo '{"image": "@input.jpg", "scale": 2}' | cog predict --json @- ``` - -### Working with GPUs - -```bash -# Use all available GPUs -cog run --gpus all python train.py - -# Use specific GPU -cog predict --gpus 0 -i image=@input.jpg - -# Use multiple specific GPUs -cog run --gpus '"device=0,1"' python multi_gpu_train.py +cog serve [flags] ``` -### Environment Variables - -```bash -# Pass environment variables to predict -cog predict -e API_KEY=$MY_API_KEY -i prompt="Hello" +**Options** -# Pass Replicate API token -export REPLICATE_API_TOKEN=your_token -cog predict --use-replicate-token -i prompt="Hello" - -# Multiple environment variables -cog run -e CUDA_VISIBLE_DEVICES=0 -e BATCH_SIZE=32 python train.py +``` + -f, --file string The name of the config file. (default "cog.yaml") + --gpus docker run --gpus GPU devices to add to the container, in the same format as docker run --gpus. + -h, --help help for serve + -p, --port int Port on which to listen (default 8393) + --progress string Set type of build progress output, 'auto' (default), 'tty', 'plain', or 'quiet' (default "auto") + --use-cog-base-image Use pre-built Cog base image for faster cold boots (default true) + --use-cuda-base-image string Use Nvidia CUDA base image, 'true' (default) or 'false' (use python base image). False results in a smaller image but may cause problems for non-torch projects (default "auto") ``` diff --git a/docs/llms.txt b/docs/llms.txt index 43a8b376bf..476f56e175 100644 --- a/docs/llms.txt +++ b/docs/llms.txt @@ -197,407 +197,183 @@ See [CONTRIBUTING.md](CONTRIBUTING.md) for how to set up a development environme --- -# CLI +# CLI reference -Cog provides a command-line interface for building, running, and deploying machine learning models. + -## Overview +## `cog` -The Cog CLI follows this general pattern: +Containers for machine learning. -``` -cog [global-options] [command-options] [arguments] -``` - -For help with any command, use the `--help` flag: - -```bash -cog --help -cog build --help -``` - -## Global Options +To get started, take a look at the documentation: +https://github.com/replicate/cog -These options are available for all commands: - -| Flag | Type | Default | Description | -|------|------|---------|-------------| -| `--debug` | bool | false | Show debugging output | -| `--registry` | string | r8.im | Container registry host (can also be set via `COG_REGISTRY_HOST` env var) | -| `--version` | bool | false | Show version of Cog | - -## Commands - -### cog init - -Initialize a new Cog project in the current directory. +**Examples** ``` -cog init -``` - -This command creates: -- `cog.yaml` - Configuration file defining the environment -- `predict.py` - Python file with a basic prediction model template -- `requirements.txt` - Python dependencies file - -**Examples:** - -```bash -# Initialize a new project -cog init - -# The created files provide a starting template -ls -# cog.yaml predict.py requirements.txt + To run a command inside a Docker environment defined with Cog: + $ cog run echo hello world ``` -### cog build - -Build a Docker image from a `cog.yaml` configuration file. 
+**Options**
 
 ```
-cog build [options]
+  -f, --file string                The name of the config file. (default "cog.yaml")
+  -h, --help                       help for build
+      --no-cache                   Do not use cache when building the image
+      --openapi-schema string      Load OpenAPI schema from a file
+      --progress string            Set type of build progress output, 'auto' (default), 'tty', 'plain', or 'quiet' (default "auto")
+      --secret stringArray         Secrets to pass to the build environment in the form 'id=foo,src=/path/to/file'
+      --separate-weights           Separate model weights from code in image layers
+  -t, --tag string                 A name for the built image in the form 'repository:tag'
+      --use-cog-base-image         Use pre-built Cog base image for faster cold boots (default true)
+      --use-cuda-base-image string Use Nvidia CUDA base image, 'true' (default) or 'false' (use python base image). False results in a smaller image but may cause problems for non-torch projects (default "auto")
```
+## `cog init`
 
-If an image is specified, it runs predictions on that Docker image. Otherwise, it builds the model in the current directory and runs predictions on it.
-
-**Flags:**
-
-| Flag | Type | Default | Description |
-|------|------|---------|-------------|
-| `-i, --input` | string[] | | Inputs in the form name=value. Use @filename to read from a file |
-| `-o, --output` | string | | Output path |
-| `-e, --env` | string[] | | Environment variables in the form name=value |
-| `--json` | string | | Pass inputs as JSON object from file (@inputs.json) or stdin (@-) |
-| `--use-replicate-token` | bool | false | Pass REPLICATE_API_TOKEN from local environment |
-| `--setup-timeout` | uint32 | 300 | Timeout for container setup in seconds |
-| `--gpus` | string | | GPU devices to add to the container |
-| `--use-cuda-base-image` | string | auto | Use Nvidia CUDA base image |
-| `--use-cog-base-image` | bool | true | Use pre-built Cog base image |
-| `--progress` | string | auto | Set type of build progress output |
-| `-f` | string | cog.yaml | The name of the config file |
-
-**Examples:**
-
-```bash
-# Run prediction with inputs
-cog predict -i image=@input.jpg -i scale=2
-
-# Run prediction with output path
-cog predict -i image=@photo.png -o output.png
-
-# Run prediction with JSON input from file
-echo '{"image": "@input.jpg", "scale": 2}' > inputs.json
-cog predict --json @inputs.json
-
-# Run prediction with JSON input from stdin
-echo '{"image": "@input.jpg", "scale": 2}' | cog predict --json @-
-
-# Run prediction on specific image
-cog predict my-model:latest -i text="Hello world"
+Configure your project for use with Cog
 
-# Run with environment variables
-cog predict -e API_KEY=secret -i prompt="Generate text"
-
-# Run with specific GPU
-cog predict --gpus 0 -i image=@input.jpg
+```
+cog init [flags]
 ```
 
-### cog run
-
-Run a command inside a Docker environment defined by Cog.
+**Options**
 
 ```
-cog run [options] [args...]
+  -h, --help   help for init
 ```
+## `cog login`
 
-**Flags:**
-
-| Flag | Type | Default | Description |
-|------|------|---------|-------------|
-| `-p, --publish` | string[] | | Publish a container's port to the host (e.g., -p 8000) |
-| `-e, --env` | string[] | | Environment variables in the form name=value |
-| `--gpus` | string | | GPU devices to add to the container |
-| `--progress` | string | auto | Set type of build progress output |
-| `--use-cuda-base-image` | string | auto | Use Nvidia CUDA base image |
-| `--use-cog-base-image` | bool | true | Use pre-built Cog base image |
-| `-f` | string | cog.yaml | The name of the config file |
-
-**Examples:**
-
-```bash
-# Run Python interpreter
-cog run python
-
-# Run a Python script
-cog run python train.py
-
-# Run with environment variables
-cog run -e API_KEY=secret python script.py
+Log in to a container registry.
 
-# Run with published ports
-cog run -p 8888 jupyter notebook
+For Replicate's registry (r8.im), this command handles authentication
+through Replicate's token-based flow.
 
-# Run with GPU access
-cog run --gpus all python gpu_test.py
+For other registries, this command prompts for username and password,
+then stores credentials using Docker's credential system.
 
-# Run bash commands
-cog run ls -la
-cog run bash -c "echo Hello && python --version"
+```
+cog login [flags]
 ```
 
-### cog serve
-
-Run the cog HTTP server locally.
+**Options**
 
 ```
-cog serve [options]
+  -h, --help          help for login
+      --token-stdin   Pass login token on stdin instead of opening a browser. You can find your Replicate login token at https://replicate.com/auth/token
```
+## `cog predict`
 
-Generates and runs an HTTP server based on the model's declared inputs and outputs.
+Run a prediction.
 
-**Flags:**
+If 'image' is passed, it will run the prediction on that Docker image.
+It must be an image that has been built by Cog.
-| Flag | Type | Default | Description | -|------|------|---------|-------------| -| `-p, --port` | int | 8393 | Port on which to listen | -| `--gpus` | string | | GPU devices to add to the container | -| `--progress` | string | auto | Set type of build progress output | -| `--use-cuda-base-image` | string | auto | Use Nvidia CUDA base image | -| `--use-cog-base-image` | bool | true | Use pre-built Cog base image | -| `-f` | string | cog.yaml | The name of the config file | +Otherwise, it will build the model in the current directory and run +the prediction on that. -**Examples:** - -```bash -# Start server on default port -cog serve - -# Start server on custom port -cog serve -p 5000 - -# Start server with GPU -cog serve --gpus all - -# Test the server -curl http://localhost:8393/predictions -X POST \ - -H 'Content-Type: application/json' \ - -d '{"input": {"text": "Hello"}}' +``` +cog predict [image] [flags] ``` -### cog push - -Build and push a model to a Docker registry. +**Options** ``` -cog push [IMAGE] + -e, --env stringArray Environment variables, in the form name=value + -f, --file string The name of the config file. (default "cog.yaml") + --gpus docker run --gpus GPU devices to add to the container, in the same format as docker run --gpus. + -h, --help help for predict + -i, --input stringArray Inputs, in the form name=value. if value is prefixed with @, then it is read from a file on disk. E.g. -i path=@image.jpg + --json string Pass inputs as JSON object, read from file (@inputs.json) or via stdin (@-) + -o, --output string Output path + --progress string Set type of build progress output, 'auto' (default), 'tty', 'plain', or 'quiet' (default "auto") + --setup-timeout uint32 The timeout for a container to setup (in seconds). (default 300) + --use-cog-base-image Use pre-built Cog base image for faster cold boots (default true) + --use-cuda-base-image string Use Nvidia CUDA base image, 'true' (default) or 'false' (use python base image). False results in a smaller image but may cause problems for non-torch projects (default "auto") + --use-replicate-token Pass REPLICATE_API_TOKEN from local environment into the model context ``` +## `cog push` -Cog can push to any OCI-compliant container registry. When pushing to Replicate's registry (r8.im), additional features like version tracking are available. For other registries, standard Docker push is used. 
-
-**Flags:**
-
-| Flag | Type | Default | Description |
-|------|------|---------|-------------|
-| `--secret` | string[] | | Secrets to pass to the build environment |
-| `--no-cache` | bool | false | Do not use cache when building |
-| `--separate-weights` | bool | false | Separate model weights from code |
-| `--openapi-schema` | string | | Load OpenAPI schema from a file |
-| `--use-cuda-base-image` | string | auto | Use Nvidia CUDA base image |
-| `--use-cog-base-image` | bool | true | Use pre-built Cog base image |
-| `--progress` | string | auto | Set type of build progress output |
-| `-f` | string | cog.yaml | The name of the config file |
-
-**Examples:**
-
-```bash
-# Push to Replicate
-cog push r8.im/username/model-name
-
-# Push to GitHub Container Registry
-docker login ghcr.io
-cog push ghcr.io/your-org/model-name
+Build and push model in current directory to a Docker registry
 
-# Push to Google Container Registry
-docker login gcr.io
-cog push gcr.io/your-project/model-name
-
-# Push with separated weights (Replicate only)
-cog push r8.im/username/model-name --separate-weights
+```
+cog push [IMAGE] [flags]
 ```
 
-# Push without cache
-cog push r8.im/username/model-name --no-cache
+**Examples**
 
-### cog login
-
-Log in to a container registry.
+```
+cog push registry.example.com/your-username/model-name
 ```
 
-cog login [options]
+**Options**
 
-For Replicate's registry (r8.im), this command handles authentication through Replicate's token-based flow. For other registries, this command prompts for username and password, then stores credentials using Docker's credential system.
-
-**Flags:**
-
-| Flag | Type | Default | Description |
-|------|------|---------|-------------|
-| `--token-stdin` | bool | false | Pass login token on stdin instead of opening browser (Replicate only) |
-
-**Examples:**
-
-```bash
-# Interactive login to Replicate (opens browser)
-cog login
-
-# Login to Replicate with token
-echo $REPLICATE_API_TOKEN | cog login --token-stdin
-
-# Login to GitHub Container Registry
-cog login --registry ghcr.io
 
-# Login to Google Container Registry
-cog login --registry gcr.io
-
-# Login to a private registry
-cog login --registry your-registry.example.com
 ```
+  -f, --file string                The name of the config file. (default "cog.yaml")
+  -h, --help                       help for push
+      --no-cache                   Do not use cache when building the image
+      --openapi-schema string      Load OpenAPI schema from a file
+      --progress string            Set type of build progress output, 'auto' (default), 'tty', 'plain', or 'quiet' (default "auto")
+      --secret stringArray         Secrets to pass to the build environment in the form 'id=foo,src=/path/to/file'
+      --separate-weights           Separate model weights from code in image layers
+      --use-cog-base-image         Use pre-built Cog base image for faster cold boots (default true)
+      --use-cuda-base-image string Use Nvidia CUDA base image, 'true' (default) or 'false' (use python base image). False results in a smaller image but may cause problems for non-torch projects (default "auto")
```
+## `cog run`
 
-### cog debug
-
-Generate a Dockerfile from cog configuration.
+Run a command inside a Docker environment
 
 ```
-cog debug [options]
+cog run [arg...] [flags]
 ```
 
-**Flags:**
+**Options**
 
-| Flag | Type | Default | Description |
-|------|------|---------|-------------|
-| `--image-name` | string | | The image name for the generated Dockerfile |
-| `--separate-weights` | bool | false | Separate model weights from code |
-| `--use-cuda-base-image` | string | auto | Use Nvidia CUDA base image |
-| `--use-cog-base-image` | bool | true | Use pre-built Cog base image |
-| `-f` | string | cog.yaml | The name of the config file |
-
-**Examples:**
-
-```bash
-# Generate Dockerfile to stdout
-cog debug
-
-# Generate Dockerfile with custom image name
-cog debug --image-name my-model:debug
 ```
-
-## Common Workflows
-
-### Basic Model Development
-
-```bash
-# 1. Initialize a new project
-cog init
-
-# 2. Edit cog.yaml and predict.py to define your model
-
-# 3. Test predictions locally
-cog predict -i input_image=@photo.jpg
-
-# 4. Build and push to registry
-cog push r8.im/username/my-model
+  -e, --env stringArray            Environment variables, in the form name=value
+  -f, --file string                The name of the config file. (default "cog.yaml")
+      --gpus docker run --gpus     GPU devices to add to the container, in the same format as docker run --gpus.
+  -h, --help                       help for run
+      --progress string            Set type of build progress output, 'auto' (default), 'tty', 'plain', or 'quiet' (default "auto")
+  -p, --publish stringArray        Publish a container's port to the host, e.g. -p 8000
+      --use-cog-base-image         Use pre-built Cog base image for faster cold boots (default true)
+      --use-cuda-base-image string Use Nvidia CUDA base image, 'true' (default) or 'false' (use python base image). False results in a smaller image but may cause problems for non-torch projects (default "auto")
```
+## `cog serve`
 
-### Using JSON Inputs
-
-The `--json` flag for `cog predict` allows passing complex inputs as JSON:
+Run a prediction HTTP server.
 
-```bash
-# From file
-cat > inputs.json << EOF
-{
-  "prompt": "A beautiful sunset",
-  "num_outputs": 4,
-  "guidance_scale": 7.5
-}
-EOF
-cog predict --json @inputs.json
-
-# From stdin
-echo '{"prompt": "A cat", "seed": 42}' | cog predict --json @-
-
-# With local file paths (automatically converted to base64)
-echo '{"image": "@input.jpg", "scale": 2}' | cog predict --json @-
-```
-
-### Working with GPUs
-
-```bash
-# Use all available GPUs
-cog run --gpus all python train.py
-
-# Use specific GPU
-cog predict --gpus 0 -i image=@input.jpg
-
-# Use multiple specific GPUs
-cog run --gpus '"device=0,1"' python multi_gpu_train.py
+Generate and run an HTTP server based on the declared model inputs and outputs.
 
-### Environment Variables
-
-```bash
-# Pass environment variables to predict
-cog predict -e API_KEY=$MY_API_KEY -i prompt="Hello"
-
-# Pass Replicate API token
-export REPLICATE_API_TOKEN=your_token
-cog predict --use-replicate-token -i prompt="Hello"
+```
+cog serve [flags]
 ```
 
-# Multiple environment variables
-cog run -e CUDA_VISIBLE_DEVICES=0 -e BATCH_SIZE=32 python train.py
-```
+**Options**
 
+```
+  -f, --file string                The name of the config file. (default "cog.yaml")
+      --gpus docker run --gpus     GPU devices to add to the container, in the same format as docker run --gpus.
+  -h, --help                       help for serve
+  -p, --port int                   Port on which to listen (default 8393)
+      --progress string            Set type of build progress output, 'auto' (default), 'tty', 'plain', or 'quiet' (default "auto")
+      --use-cog-base-image         Use pre-built Cog base image for faster cold boots (default true)
+      --use-cuda-base-image string Use Nvidia CUDA base image, 'true' (default) or 'false' (use python base image). False results in a smaller image but may cause problems for non-torch projects (default "auto")
+```
diff --git a/go.mod b/go.mod
index aa16f649e1..e1dcc03456 100644
--- a/go.mod
+++ b/go.mod
@@ -25,8 +25,8 @@ require (
 	github.com/replicate/go v0.0.0-20250205165008-b772d7cd506b
 	github.com/rogpeppe/go-internal v1.14.1
 	github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
-	github.com/spf13/cobra v1.9.1
-	github.com/spf13/pflag v1.0.6
+	github.com/spf13/cobra v1.10.2
+	github.com/spf13/pflag v1.0.9
 	github.com/stretchr/testify v1.11.1
 	github.com/testcontainers/testcontainers-go v0.40.0
 	github.com/testcontainers/testcontainers-go/modules/registry v0.40.0
@@ -74,6 +74,7 @@ require (
 	github.com/containerd/ttrpc v1.2.7 // indirect
 	github.com/containerd/typeurl/v2 v2.2.3 // indirect
 	github.com/cpuguy83/dockercfg v0.3.2 // indirect
+	github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/distribution/reference v0.6.0 // indirect
 	github.com/dnephin/pflag v1.0.7 // indirect
@@ -129,6 +130,7 @@ require (
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
+	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
 	github.com/shibumi/go-pathspec v1.3.0 // indirect
 	github.com/shirou/gopsutil/v4 v4.25.6 // indirect
@@ -154,6 +156,7 @@ require (
 	go.opentelemetry.io/otel/sdk v1.39.0 // indirect
 	go.opentelemetry.io/otel/trace v1.39.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.5.0 // indirect
+	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	golang.org/x/mod v0.30.0 // indirect
 	golang.org/x/net v0.48.0 // indirect
 	golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect
diff --git a/go.sum b/go.sum
index 20c91013be..b8bd970eef 100644
--- a/go.sum
+++ b/go.sum
@@ -81,6 +81,8 @@ github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsx
 github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
 github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
 github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
+github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
 github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -250,6 +252,7 @@ github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
 github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
 github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
 github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI=
 github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs=
@@ -263,10 +266,10 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/spdx/tools-golang v0.5.3 h1:ialnHeEYUC4+hkm5vJm4qz2x+oEJbS0mAMFrNXdQraY=
 github.com/spdx/tools-golang v0.5.3/go.mod h1:/ETOahiAo96Ob0/RAIBmFZw6XN0yTnyr/uFZm2NTMhI=
-github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
-github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
-github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
-github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
+github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
+github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
 github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
@@ -342,6 +345,8 @@ go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU
 go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
 go.yaml.in/yaml/v4 v4.0.0-rc.4 h1:UP4+v6fFrBIb1l934bDl//mmnoIZEDK0idg1+AIvX5U=
 go.yaml.in/yaml/v4 v4.0.0-rc.4/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
diff --git a/mise.toml b/mise.toml
index 8be8715268..e6da17cb8d 100644
--- a/mise.toml
+++ b/mise.toml
@@ -536,6 +536,7 @@ mkdocs serve
 
 [tasks."docs:llm"]
 description = "Update LLM documentation (llms.txt)"
+depends = ["docs:cli"]
 sources = ["README.md", "docs/*.md"]
 outputs = ["docs/llms.txt"]
 run = """
@@ -565,6 +566,28 @@ fi
 echo "docs/llms.txt is up to date"
 """
 
+[tasks."docs:cli"]
+description = "Generate CLI reference documentation"
+sources = ["pkg/cli/*.go", "cmd/cog/*.go"]
+outputs = ["docs/cli.md"]
+run = "go run ./tools/gendocs/main.go -o docs/cli.md"
+
+[tasks."docs:cli:check"]
+description = "Check that CLI docs are up to date"
+run = """
+#!/usr/bin/env bash
+set -e
+tmpfile=$(mktemp)
+trap 'rm -f "$tmpfile"' EXIT
+# Generate to temp file and compare
+go run ./tools/gendocs/main.go -o "$tmpfile"
+if ! diff -q "$tmpfile" docs/cli.md > /dev/null 2>&1; then
+  echo "ERROR: docs/cli.md is out of date. Run 'mise run docs:cli' to update."
+  exit 1
+fi
+echo "docs/cli.md is up to date"
+"""
+
 # =============================================================================
 # CI tasks - granular for parallel execution and caching
 # =============================================================================
diff --git a/tools/gendocs/main.go b/tools/gendocs/main.go
new file mode 100644
index 0000000000..1e8f25397d
--- /dev/null
+++ b/tools/gendocs/main.go
@@ -0,0 +1,214 @@
+package main
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"slices"
+	"sort"
+	"strings"
+
+	"github.com/spf13/cobra"
+	"github.com/spf13/cobra/doc"
+
+	"github.com/replicate/cog/pkg/cli"
+	"github.com/replicate/cog/pkg/util/console"
+)
+
+func main() {
+	var output string
+
+	rootCmd := &cobra.Command{
+		Use:   "gendocs",
+		Short: "Generate CLI reference documentation for Cog",
+		Run: func(cmd *cobra.Command, args []string) {
+			if err := generateDocs(output); err != nil {
+				console.Fatalf("Failed to generate docs: %s", err)
+			}
+			console.Infof("Generated CLI docs at %s", output)
+		},
+	}
+
+	rootCmd.Flags().StringVarP(&output, "output", "o", "docs/cli.md", "Output file path")
+	if err := rootCmd.Execute(); err != nil {
+		console.Fatal(err.Error())
+	}
+}
+
+func generateDocs(outputPath string) error {
+	// Create temporary directory for cobra doc generation
+	tmpDir, err := os.MkdirTemp("", "cog-cli-docs-*")
+	if err != nil {
+		return fmt.Errorf("failed to create temp dir: %w", err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	// Get the cog command
+	cmd, err := cli.NewRootCommand()
+	if err != nil {
+		return fmt.Errorf("failed to create root command: %w", err)
+	}
+
+	// Generate markdown files using cobra/doc
+	if err := doc.GenMarkdownTree(cmd, tmpDir); err != nil {
+		return fmt.Errorf("failed to generate markdown: %w", err)
+	}
+
+	// Read all generated files
+	files, err := os.ReadDir(tmpDir)
+	if err != nil {
+		return fmt.Errorf("failed to read temp dir: %w", err)
+	}
+
+	// Sort files to ensure consistent ordering. Lexicographic order puts the
+	// root "cog.md" before the "cog_*.md" subcommand files, so the root
+	// command comes first, followed by subcommands alphabetically.
+	var fileNames []string
+	for _, file := range files {
+		if !file.IsDir() && strings.HasSuffix(file.Name(), ".md") {
+			fileNames = append(fileNames, file.Name())
+		}
+	}
+	sort.Strings(fileNames)
+
+	// Build the combined markdown content
+	var content strings.Builder
+
+	// Write header
+	content.WriteString("# CLI reference\n\n")
+	content.WriteString("\n\n")
+
+	// Process each command file
+	for _, fileName := range fileNames {
+		filePath := filepath.Join(tmpDir, fileName)
+		data, err := os.ReadFile(filePath)
+		if err != nil {
+			return fmt.Errorf("failed to read %s: %w", fileName, err)
+		}
+
+		// Process the content
+		processed := processCommandDoc(string(data), fileName)
+		content.WriteString(processed)
+		content.WriteString("\n")
+	}
+
+	// Ensure output directory exists
+	outputDir := filepath.Dir(outputPath)
+	if err := os.MkdirAll(outputDir, 0o755); err != nil {
+		return fmt.Errorf("failed to create output directory: %w", err)
+	}
+
+	// Write the combined file
+	if err := os.WriteFile(outputPath, []byte(content.String()), 0o644); err != nil {
+		return fmt.Errorf("failed to write output file: %w", err)
+	}
+
+	return nil
+}
+
+func processCommandDoc(content string, fileName string) string {
+	// Remove the "SEE ALSO" section and everything after it
+	if idx := strings.Index(content, "### SEE ALSO"); idx != -1 {
+		content = content[:idx]
+	}
+
+	// Remove the "Options inherited from parent commands" section
+	if idx := strings.Index(content, "### Options inherited from parent commands"); idx != -1 {
+		content = content[:idx]
+	}
+
+	// Remove trailing whitespace
+	content = strings.TrimRight(content, "\n")
+
+	// Fix command headers to use backticks
+	// Change "## cog init" to "## `cog init`"
+	// Change "### Options" to "**Options**" (not a heading, won't appear in TOC)
+	// Change "### Examples" to "**Examples**" (not a heading, won't appear in TOC)
+	// Remove "### Synopsis" heading but keep its content
+	// Skip the short description if there's a Synopsis section (to avoid duplication)
+	lines := strings.Split(content, "\n")
+	var result []string
+	skipSynopsis := false
+	skipShortDesc := false
+	for _, line := range lines {
+		switch {
+		case strings.HasPrefix(line, "## cog"):
+			// Extract the command name
+			command := strings.TrimPrefix(line, "## ")
+			result = append(result, "## `"+command+"`")
+			// Check if next non-empty line is "### Synopsis" - if so, skip the short desc
+			skipShortDesc = hasSynopsisSection(lines)
+		case skipShortDesc:
+			// Skip the short description line (first non-empty line after header)
+			// Also skip any blank lines that follow the header
+			if strings.TrimSpace(line) != "" && !strings.HasPrefix(line, "###") {
+				// This is the short description line, skip it
+				skipShortDesc = false
+			}
+			// If line is blank, we continue skipping until we hit the short desc
+		case line == "### Synopsis":
+			// Skip the "### Synopsis" heading line, but keep content after it
+			skipSynopsis = true
+		case skipSynopsis:
+			// Keep synopsis content until we hit the usage block (```) or another heading
+			switch {
+			case line == "### Examples":
+				skipSynopsis = false
+				// Add blank line before if needed
+				if len(result) > 0 && strings.TrimSpace(result[len(result)-1]) != "" {
+					result = append(result, "")
+				}
+				result = append(result, "**Examples**")
+			case strings.HasPrefix(line, "###"), strings.HasPrefix(line, "```"):
+				skipSynopsis = false
+				// Add blank line before if needed
+				if len(result) > 0 && strings.TrimSpace(result[len(result)-1]) != "" {
+					result = append(result, "")
+				}
+				result = append(result, line)
+			default:
+				// Keep all lines from synopsis (including blank lines for paragraph breaks)
+				result = append(result, line)
+			}
+		case line == "### Options":
+			// Add blank line before if needed
+			if len(result) > 0 && strings.TrimSpace(result[len(result)-1]) != "" {
+				result = append(result, "")
+			}
+			result = append(result, "**Options**")
+		case line == "### Examples":
+			// Add blank line before if needed
+			if len(result) > 0 && strings.TrimSpace(result[len(result)-1]) != "" {
+				result = append(result, "")
+			}
+			result = append(result, "**Examples**")
+		default:
+			result = append(result, line)
+		}
+	}
+
+	// Remove consecutive blank lines
+	result = removeConsecutiveBlankLines(result)
+
+	return strings.Join(result, "\n")
+}
+
+// removeConsecutiveBlankLines removes consecutive blank lines, keeping only one
+func removeConsecutiveBlankLines(lines []string) []string {
+	var result []string
+	prevBlank := false
+	for _, line := range lines {
+		isBlank := strings.TrimSpace(line) == ""
+		if isBlank && prevBlank {
+			// Skip consecutive blank lines
+			continue
+		}
+		result = append(result, line)
+		prevBlank = isBlank
+	}
+	return result
+}
+
+// hasSynopsisSection checks if the content has a "### Synopsis" section
+func hasSynopsisSection(lines []string) bool {
+	return slices.Contains(lines, "### Synopsis")
+}
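
The markdown post-processing in `processCommandDoc` is the only non-obvious part of the generator, so it is worth pinning down with a test. A minimal sketch of one, assuming it lives beside the tool as a hypothetical `tools/gendocs/main_test.go` (same `main` package) and that the input mirrors the shape of cobra's `GenMarkdownTree` output:

````go
package main

import (
	"strings"
	"testing"
)

// Checks the rewrite rules end to end: backticked command header, short
// description deduplicated against the Synopsis, "### Options" demoted to
// bold text, and the "SEE ALSO" section stripped.
func TestProcessCommandDoc(t *testing.T) {
	input := strings.Join([]string{
		"## cog build",
		"",
		"Build an image from cog.yaml",
		"",
		"### Synopsis",
		"",
		"Build an image from cog.yaml",
		"",
		"```",
		"cog build [flags]",
		"```",
		"",
		"### Options",
		"",
		"```",
		"  -t, --tag string   A name for the built image",
		"```",
		"",
		"### SEE ALSO",
		"",
		"* [cog](cog.md)",
	}, "\n")

	want := strings.Join([]string{
		"## `cog build`",
		"",
		"Build an image from cog.yaml",
		"",
		"```",
		"cog build [flags]",
		"```",
		"",
		"**Options**",
		"",
		"```",
		"  -t, --tag string   A name for the built image",
		"```",
	}, "\n")

	if got := processCommandDoc(input, "cog_build.md"); got != want {
		t.Errorf("processCommandDoc:\ngot:\n%s\nwant:\n%s", got, want)
	}
}
````

`go test ./tools/gendocs/` would then exercise the header, Synopsis, and SEE ALSO handling without regenerating the full CLI docs.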