diff --git a/Cargo.lock b/Cargo.lock index 1549925..416c1fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -91,13 +91,29 @@ version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" +[[package]] +name = "asn1-rs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5493c3bedbacf7fd7382c6346bbd66687d12bbaad3a89a2d2c303ee6cf20b048" +dependencies = [ + "asn1-rs-derive 0.5.1", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror 1.0.69", + "time", +] + [[package]] name = "asn1-rs" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56624a96882bb8c26d61312ae18cb45868e5a9992ea73c58e45c3101e56a1e60" dependencies = [ - "asn1-rs-derive", + "asn1-rs-derive 0.6.0", "asn1-rs-impl", "displaydoc", "nom", @@ -107,6 +123,18 @@ dependencies = [ "time", ] +[[package]] +name = "asn1-rs-derive" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + [[package]] name = "asn1-rs-derive" version = "0.6.0" @@ -175,6 +203,28 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "aws-lc-rs" +version = "1.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a054912289d18629dc78375ba2c3726a3afe3ff71b4edba9dedfca0e3446d1fc" +dependencies = [ + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa7e52a4c5c547c741610a2c6f123f3881e409b714cd27e6798ef020c514f0a" +dependencies = [ + "cc", + "cmake", + "dunce", + 
"fs_extra", +] + [[package]] name = "axum" version = "0.8.8" @@ -227,6 +277,23 @@ dependencies = [ "tracing", ] +[[package]] +name = "axum-server" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1ab4a3ec9ea8a657c72d99a03a824af695bd0fb5ec639ccbd9cd3543b41a5f9" +dependencies = [ + "bytes", + "fs-err", + "http", + "http-body", + "hyper", + "hyper-util", + "tokio", + "tokio-rustls", + "tower-service", +] + [[package]] name = "backtrace" version = "0.3.76" @@ -313,6 +380,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" dependencies = [ "find-msvc-tools", + "jobserver", + "libc", "shlex", ] @@ -335,8 +404,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" dependencies = [ "iana-time-zone", + "js-sys", "num-traits", "serde", + "wasm-bindgen", "windows-link", ] @@ -380,6 +451,15 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" +[[package]] +name = "cmake" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" +dependencies = [ + "cc", +] + [[package]] name = "colorchoice" version = "1.0.4" @@ -525,13 +605,27 @@ version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea" +[[package]] +name = "der-parser" +version = "9.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" +dependencies = [ + "asn1-rs 0.6.2", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + [[package]] name = 
"der-parser" version = "10.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07da5016415d5a3c4dd39b11ed26f915f52fc4e0dc197d87908bc916e51bc1a6" dependencies = [ - "asn1-rs", + "asn1-rs 0.7.1", "displaydoc", "nom", "num-bigint", @@ -643,6 +737,12 @@ version = "0.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f678cf4a922c215c63e0de95eb1ff08a958a81d47e485cf9da1e27bf6305cfa5" +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "enum-as-inner" version = "0.6.1" @@ -698,6 +798,12 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + [[package]] name = "foldhash" version = "0.1.5" @@ -713,6 +819,22 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fs-err" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73fde052dbfc920003cfd2c8e2c6e6d4cc7c1091538c3a24226cec0665ab08c0" +dependencies = [ + "autocfg", + "tokio", +] + +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "futures" version = "0.3.32" @@ -891,6 +1013,25 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" +[[package]] +name = "h2" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + 
"http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "hashbrown" version = "0.15.5" @@ -1013,6 +1154,7 @@ dependencies = [ "bytes", "futures-channel", "futures-core", + "h2", "http", "http-body", "httparse", @@ -1039,7 +1181,7 @@ dependencies = [ "tokio", "tokio-rustls", "tower-service", - "webpki-roots", + "webpki-roots 1.0.6", ] [[package]] @@ -1271,6 +1413,16 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.4", + "libc", +] + [[package]] name = "js-sys" version = "0.3.91" @@ -1586,13 +1738,22 @@ dependencies = [ "memchr", ] +[[package]] +name = "oid-registry" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d8034d9489cdaf79228eb9f6a3b8d7bb32ba00d6645ebd48eef4077ceb5bd9" +dependencies = [ + "asn1-rs 0.6.2", +] + [[package]] name = "oid-registry" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12f40cff3dde1b6087cc5d5f5d4d65712f34016a03ed60e9c08dcc392736b5b7" dependencies = [ - "asn1-rs", + "asn1-rs 0.7.1", ] [[package]] @@ -1700,7 +1861,7 @@ dependencies = [ "patchbay", "patchbay-server", "patchbay-utils", - "rcgen", + "rcgen 0.14.7", "regex", "serde", "serde_json", @@ -1719,11 +1880,21 @@ dependencies = [ "anyhow", "async-stream", "axum", + "axum-server", + "chrono", + "clap", + "dirs", + "flate2", + "rustls", "serde", "serde_json", + "tar", "tokio", + "tokio-rustls-acme", "tokio-stream", "tracing", + "tracing-subscriber", + "uuid", ] [[package]] @@ -1954,6 +2125,19 @@ dependencies = [ "getrandom 0.3.4", ] +[[package]] +name = "rcgen" +version = "0.13.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2" +dependencies = [ + "pem", + "ring", + "rustls-pki-types", + "time", + "yasna", +] + [[package]] name = "rcgen" version = "0.14.7" @@ -1964,7 +2148,7 @@ dependencies = [ "ring", "rustls-pki-types", "time", - "x509-parser", + "x509-parser 0.18.1", "yasna", ] @@ -2065,7 +2249,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots", + "webpki-roots 1.0.6", ] [[package]] @@ -2155,6 +2339,8 @@ version = "0.23.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4" dependencies = [ + "aws-lc-rs", + "log", "once_cell", "ring", "rustls-pki-types", @@ -2179,6 +2365,7 @@ version = "0.103.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -2672,6 +2859,35 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls-acme" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f296d48ff72e0df96e2d7ef064ad5904d016a130869e542f00b08c8e05cc18cf" +dependencies = [ + "async-trait", + "axum-server", + "base64", + "chrono", + "futures", + "log", + "num-bigint", + "pem", + "proc-macro2", + "rcgen 0.13.2", + "reqwest", + "ring", + "rustls", + "serde", + "serde_json", + "thiserror 2.0.18", + "time", + "tokio", + "tokio-rustls", + "webpki-roots 0.26.11", + "x509-parser 0.16.0", +] + [[package]] name = "tokio-stream" version = "0.1.18" @@ -3098,6 +3314,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + 
"webpki-roots 1.0.6", +] + [[package]] name = "webpki-roots" version = "1.0.6" @@ -3537,18 +3762,35 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" +[[package]] +name = "x509-parser" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" +dependencies = [ + "asn1-rs 0.6.2", + "data-encoding", + "der-parser 9.0.0", + "lazy_static", + "nom", + "oid-registry 0.7.1", + "rusticata-macros", + "thiserror 1.0.69", + "time", +] + [[package]] name = "x509-parser" version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d43b0f71ce057da06bc0851b23ee24f3f86190b07203dd8f567d0b706a185202" dependencies = [ - "asn1-rs", + "asn1-rs 0.7.1", "data-encoding", - "der-parser", + "der-parser 10.0.0", "lazy_static", "nom", - "oid-registry", + "oid-registry 0.8.1", "ring", "rusticata-macros", "thiserror 2.0.18", diff --git a/docs/guide/testing.md b/docs/guide/testing.md index 169e19f..9d4dcf9 100644 --- a/docs/guide/testing.md +++ b/docs/guide/testing.md @@ -232,3 +232,179 @@ enables TRACE (e.g. `RUST_LOG=trace`). | `--no-fail-fast` | | Run all tests even if some fail | | `--recreate` | | Stop and recreate the VM | | `-- ` | | Extra args passed to cargo | + +## Running in CI + +If you run a `patchbay-serve` instance (see [patchbay-serve](#patchbay-serve) +below), you can push test results from GitHub Actions and get a link +posted as a PR comment. + +Set two repository secrets: `PATCHBAY_URL` (e.g. `https://patchbay.example.com`) +and `PATCHBAY_API_KEY`. 
+ +Add this to your workflow **after** the test step: + +```yaml + - name: Push patchbay results + if: always() + env: + PATCHBAY_URL: ${{ secrets.PATCHBAY_URL }} + PATCHBAY_API_KEY: ${{ secrets.PATCHBAY_API_KEY }} + run: | + set -euo pipefail + + PROJECT="${{ github.event.repository.name }}" + TESTDIR="$(cargo metadata --format-version=1 --no-deps | jq -r .target_directory)/testdir-current" + + if [ ! -d "$TESTDIR" ]; then + echo "No testdir output found, skipping push" + exit 0 + fi + + # Create run.json manifest + cat > "$TESTDIR/run.json" <> "$GITHUB_ENV" + echo "Results uploaded: $VIEW_URL" + + - name: Comment on PR + if: always() && github.event.pull_request && env.PATCHBAY_VIEW_URL + uses: actions/github-script@v7 + with: + script: | + const marker = ''; + const body = `${marker}\n**patchbay results:** ${process.env.PATCHBAY_VIEW_URL}`; + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + }); + const existing = comments.find(c => c.body.includes(marker)); + if (existing) { + await github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: existing.id, + body, + }); + } else { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + body, + }); + } +``` + +The PR comment is auto-updated on each push, so you always see the latest run. + +## patchbay-serve + +`patchbay-serve` is a standalone server for hosting run results. CI +pipelines push test output to it; the devtools UI lets you browse them. 
+ +### Install + +```bash +cargo install --git https://github.com/n0-computer/patchbay patchbay-server --bin patchbay-serve +``` + +### Quick start + +```bash +patchbay-serve \ + --accept-push \ + --api-key "$(openssl rand -hex 32)" \ + --http-bind 0.0.0.0:8080 \ + --retention 10GB +``` + +With automatic TLS: + +```bash +patchbay-serve \ + --accept-push \ + --api-key "$(openssl rand -hex 32)" \ + --acme-domain patchbay.example.com \ + --acme-email you@example.com \ + --retention 10GB +``` + +This will: +- Serve the runs index at `/runs` +- Accept pushed runs at `POST /api/push/{project}` +- Auto-provision TLS via Let's Encrypt (when `--acme-domain` is set) +- Store data in `~/.local/share/patchbay-serve/` (runs + ACME certs) +- Delete oldest runs when total size exceeds the retention limit + +### Flags + +| Flag | Description | +|------|-------------| +| `--run-dir ` | Override run storage location | +| `--data-dir ` | Override data directory (default: `~/.local/share/patchbay-serve`) | +| `--accept-push` | Enable the push API | +| `--api-key ` | Required with `--accept-push`; also reads `PATCHBAY_API_KEY` env | +| `--acme-domain ` | Enable automatic TLS for domain | +| `--acme-email ` | Contact email for Let's Encrypt (required with `--acme-domain`) | +| `--retention ` | Max total run storage (e.g. `500MB`, `10GB`) | +| `--http-bind ` | HTTP listen address (default: `0.0.0.0:8080`; redirect when ACME is active) | +| `--https-bind ` | HTTPS listen address (default: `0.0.0.0:4443`; only with `--acme-domain`) | + +### systemd + +A unit file is included at `patchbay-server/patchbay-serve.service`. 
+To install: + +```bash +# Create service user and data directory +sudo useradd -r -s /usr/sbin/nologin patchbay +sudo mkdir -p /var/lib/patchbay-serve +sudo chown patchbay:patchbay /var/lib/patchbay-serve + +# Install the binary +cargo install --git https://github.com/n0-computer/patchbay patchbay-server --bin patchbay-serve +sudo cp ~/.cargo/bin/patchbay-serve /usr/local/bin/ + +# Install and configure the unit file +sudo cp patchbay-server/patchbay-serve.service /etc/systemd/system/ +sudo systemctl edit patchbay-serve # set PATCHBAY_API_KEY, --acme-domain, --acme-email +sudo systemctl enable --now patchbay-serve +``` + +Check status: + +```bash +sudo systemctl status patchbay-serve +journalctl -u patchbay-serve -f +``` diff --git a/patchbay-server/Cargo.toml b/patchbay-server/Cargo.toml index c20a89b..cafcc03 100644 --- a/patchbay-server/Cargo.toml +++ b/patchbay-server/Cargo.toml @@ -8,12 +8,26 @@ authors.workspace = true repository.workspace = true build = "build.rs" +[[bin]] +name = "patchbay-serve" +path = "src/main.rs" + [dependencies] axum = { version = "0.8", features = ["tokio"] } -tokio = { version = "1", features = ["rt", "macros", "sync", "time", "fs", "io-util"] } +tokio = { version = "1", features = ["rt-multi-thread", "rt", "macros", "sync", "time", "fs", "io-util", "signal"] } tokio-stream = { version = "0.1", features = ["sync"] } +tokio-rustls-acme = { version = "0.7", features = ["axum"] } anyhow = "1" async-stream = "0.3" +clap = { version = "4", features = ["derive", "env"] } +dirs = "6" +flate2 = "1" serde = { version = "1", features = ["derive"] } serde_json = "1" +tar = "0.4" tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } +uuid = { version = "1", features = ["v4"] } +chrono = "0.4" +axum-server = "0.7" +rustls = "0.23" diff --git a/patchbay-server/README.md b/patchbay-server/README.md new file mode 100644 index 0000000..908671e --- /dev/null +++ b/patchbay-server/README.md @@ -0,0 +1,222 @@ +# 
patchbay-serve + +Standalone server for hosting patchbay run results. CI pipelines push +test output to it; the devtools UI lets you browse them. + +## Install + +```bash +cargo install --git https://github.com/n0-computer/patchbay patchbay-server --bin patchbay-serve +``` + +## Quick start + +```bash +patchbay-serve \ + --accept-push \ + --api-key "$(openssl rand -hex 32)" \ + --http-bind 0.0.0.0:8080 \ + --retention 10GB +``` + +With automatic TLS via Let's Encrypt: + +```bash +patchbay-serve \ + --accept-push \ + --api-key "$(openssl rand -hex 32)" \ + --acme-domain patchbay.example.com \ + --acme-email you@example.com \ + --retention 10GB +``` + +This will: + +- Serve the devtools UI at `/` with a runs index +- Accept pushed runs at `POST /api/push/{project}` +- Auto-provision TLS via Let's Encrypt (when `--acme-domain` is set) +- Store data in `~/.local/share/patchbay-serve/` (runs + ACME certs) +- Delete oldest runs when total size exceeds the retention limit + +## Push API + +``` +POST /api/push/{project} +Authorization: Bearer +Content-Type: application/gzip +Body: tar.gz of the run directory +``` + +Returns: + +```json +{"ok": true, "project": "myproject", "run": "myproject-20260320_120000-uuid", "invocation": "myproject-20260320_120000-uuid"} +``` + +The `invocation` value is used for deep linking: `https://your-server/#/inv/{invocation}` + +## Flags + +| Flag | Description | +|------|-------------| +| `--run-dir ` | Override run storage location | +| `--data-dir ` | Override data directory (default: `~/.local/share/patchbay-serve`) | +| `--accept-push` | Enable the push API | +| `--api-key ` | Required with `--accept-push`; also reads `PATCHBAY_API_KEY` env var | +| `--acme-domain ` | Enable automatic TLS for this domain | +| `--acme-email ` | Contact email for Let's Encrypt (required with `--acme-domain`) | +| `--retention ` | Max total run storage (e.g. 
`500MB`, `10GB`) | +| `--http-bind ` | HTTP listen address (default: `0.0.0.0:8080`; redirect when ACME is active) | +| `--https-bind ` | HTTPS listen address (default: `0.0.0.0:4443`; only used with `--acme-domain`) | + +## Deploy with systemd + +A unit file is included at [`patchbay-serve.service`](patchbay-serve.service). + +### 1. Create a service user and data directory + +```bash +sudo useradd -r -s /usr/sbin/nologin -d /var/lib/patchbay-serve patchbay +sudo mkdir -p /var/lib/patchbay-serve +sudo chown patchbay:patchbay /var/lib/patchbay-serve +``` + +### 2. Install the binary + +```bash +cargo install --git https://github.com/n0-computer/patchbay patchbay-server --bin patchbay-serve +sudo cp ~/.cargo/bin/patchbay-serve /usr/local/bin/ +``` + +### 3. Install and configure the unit file + +```bash +sudo cp patchbay-serve.service /etc/systemd/system/ +``` + +Edit the service to set your domain, email, and API key: + +```bash +sudo systemctl edit patchbay-serve +``` + +```ini +[Service] +ExecStart= +ExecStart=/usr/local/bin/patchbay-serve \ + --accept-push \ + --data-dir /var/lib/patchbay-serve \ + --http-bind 0.0.0.0:80 \ + --https-bind 0.0.0.0:443 \ + --acme-domain patchbay.yourcompany.com \ + --acme-email ops@yourcompany.com \ + --retention 10GB +Environment=PATCHBAY_API_KEY=your-secret-key-here +``` + +### 4. Start + +```bash +sudo systemctl daemon-reload +sudo systemctl enable --now patchbay-serve +``` + +### 5. Verify + +```bash +sudo systemctl status patchbay-serve +journalctl -u patchbay-serve -f +``` + +The service runs with hardened settings (`ProtectSystem=strict`, +`ProtectHome=true`, `NoNewPrivileges=true`) and only has write access +to `/var/lib/patchbay-serve`. + +## GitHub Actions + +Set two repository secrets: `PATCHBAY_URL` (e.g. `https://patchbay.yourcompany.com`) +and `PATCHBAY_API_KEY`. 
+ +Add this to your workflow after the test step: + +```yaml + - name: Push patchbay results + if: always() + env: + PATCHBAY_URL: ${{ secrets.PATCHBAY_URL }} + PATCHBAY_API_KEY: ${{ secrets.PATCHBAY_API_KEY }} + run: | + set -euo pipefail + + PROJECT="${{ github.event.repository.name }}" + TESTDIR="$(cargo metadata --format-version=1 --no-deps | jq -r .target_directory)/testdir-current" + + if [ ! -d "$TESTDIR" ]; then + echo "No testdir output found, skipping push" + exit 0 + fi + + cat > "$TESTDIR/run.json" <> "$GITHUB_ENV" + echo "Results uploaded: $VIEW_URL" + + - name: Comment on PR + if: always() && github.event.pull_request && env.PATCHBAY_VIEW_URL + uses: actions/github-script@v7 + with: + script: | + const marker = ''; + const body = `${marker}\n**patchbay results:** ${process.env.PATCHBAY_VIEW_URL}`; + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + }); + const existing = comments.find(c => c.body.includes(marker)); + if (existing) { + await github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: existing.id, + body, + }); + } else { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + body, + }); + } +``` + +The PR comment is auto-updated on each push so you always see the latest run. 
diff --git a/patchbay-server/patchbay-serve.service b/patchbay-server/patchbay-serve.service new file mode 100644 index 0000000..14e1945 --- /dev/null +++ b/patchbay-server/patchbay-serve.service @@ -0,0 +1,31 @@ +[Unit] +Description=patchbay-serve - patchbay run results server +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +User=patchbay +Group=patchbay +ExecStart=/usr/local/bin/patchbay-serve \ + --accept-push \ + --data-dir /var/lib/patchbay-serve \ + --http-bind 0.0.0.0:80 \ + --https-bind 0.0.0.0:443 \ + --acme-domain patchbay.example.com \ + --acme-email you@example.com \ + --retention 10GB +Environment=PATCHBAY_API_KEY=changeme +AmbientCapabilities=CAP_NET_BIND_SERVICE +Restart=on-failure +RestartSec=5 + +# Hardening +NoNewPrivileges=true +ProtectSystem=strict +ProtectHome=true +PrivateTmp=true +ReadWritePaths=/var/lib/patchbay-serve + +[Install] +WantedBy=multi-user.target diff --git a/patchbay-server/src/lib.rs b/patchbay-server/src/lib.rs index dd9d78d..8f8d191 100644 --- a/patchbay-server/src/lib.rs +++ b/patchbay-server/src/lib.rs @@ -7,17 +7,19 @@ use std::{ convert::Infallible, fs, path::{Path, PathBuf}, + sync::Arc, time::Duration, }; use axum::{ + body::Bytes, extract::{Path as AxPath, Query, State}, - http::StatusCode, + http::{HeaderMap, StatusCode}, response::{ sse::{Event, KeepAlive, Sse}, Html, IntoResponse, }, - routing::get, + routing::{get, post}, Router, }; use serde::{Deserialize, Serialize}; @@ -183,39 +185,62 @@ struct EventRecord { rest: serde_json::Value, } +// ── Push configuration ────────────────────────────────────────────── + +/// Configuration for the push endpoint. +#[derive(Clone)] +pub struct PushConfig { + /// API key required in Authorization header. + pub api_key: String, + /// Directory where pushed runs are stored. 
+ pub run_dir: PathBuf, +} + // ── Shared state ──────────────────────────────────────────────────── #[derive(Clone)] struct AppState { base: PathBuf, runs_tx: broadcast::Sender<()>, + push: Option>, } // ── Router construction ───────────────────────────────────────────── fn build_router(state: AppState) -> Router { - Router::new() + let mut r = Router::new() .route("/", get(index_html)) + .route("/runs", get(runs_index_html)) .route("/api/runs", get(get_runs)) .route("/api/runs/subscribe", get(runs_sse)) .route("/api/runs/{run}/state", get(get_run_state)) .route("/api/runs/{run}/events", get(run_events_sse)) + .route("/api/runs/{run}/events.json", get(run_events_json)) .route("/api/runs/{run}/logs", get(get_run_logs)) .route("/api/runs/{run}/logs/{*path}", get(get_run_log_file)) .route("/api/runs/{run}/files/{*path}", get(get_run_file)) .route( "/api/invocations/{name}/combined-results", get(get_invocation_combined), - ) - .with_state(state) + ); + if state.push.is_some() { + r = r.route("/api/push/{project}", post(push_run)); + } + r.with_state(state) } /// Creates an axum [`Router`] for serving a lab output directory. pub fn router(base: PathBuf) -> Router { + build_app(base, None) +} + +/// Creates an axum [`Router`] with optional push support. +pub fn build_app(base: PathBuf, push: Option) -> Router { let (runs_tx, _) = broadcast::channel(16); let state = AppState { base: base.clone(), runs_tx: runs_tx.clone(), + push: push.map(Arc::new), }; // Background run scanner: notifies SSE subscribers when new runs appear. @@ -424,6 +449,33 @@ async fn run_events_sse( .keep_alive(KeepAlive::default()) } +/// Return all events as a JSON array (non-streaming). 
+async fn run_events_json( + AxPath(run): AxPath, + State(state): State, +) -> impl IntoResponse { + let Some(run_dir) = safe_run_dir(&state.base, &run) else { + return ( + StatusCode::FORBIDDEN, + [("content-type", "application/json")], + r#"[]"#.to_string(), + ); + }; + let events_path = run_dir.join(EVENTS_JSONL); + let contents = tokio::fs::read_to_string(&events_path) + .await + .unwrap_or_default(); + let events: Vec = contents + .lines() + .filter_map(|line| serde_json::from_str(line).ok()) + .collect(); + ( + StatusCode::OK, + [("content-type", "application/json")], + serde_json::to_string(&events).unwrap_or_else(|_| "[]".to_string()), + ) +} + /// List log files in a run directory. async fn get_run_logs( AxPath(run): AxPath, @@ -746,3 +798,327 @@ async fn scan_log_files(run_dir: &Path) -> Vec { }); logs } + +// ── Run manifest (run.json) ───────────────────────────────────────── + +/// Manifest included with pushed runs, providing CI context. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct RunManifest { + /// Project name (from URL path). + #[serde(default)] + pub project: String, + /// Git branch name. + #[serde(default)] + pub branch: Option, + /// Git commit SHA. + #[serde(default)] + pub commit: Option, + /// PR number. + #[serde(default)] + pub pr: Option, + /// PR URL. + #[serde(default)] + pub pr_url: Option, + /// When this run was created. + #[serde(default)] + pub created_at: Option, + /// Human-readable run title/label. + #[serde(default)] + pub title: Option, +} + +const RUN_JSON: &str = "run.json"; + +fn read_run_json(dir: &Path) -> Option { + let text = fs::read_to_string(dir.join(RUN_JSON)).ok()?; + serde_json::from_str(&text).ok() +} + +// ── Runs index page ───────────────────────────────────────────────── + +/// Metadata for a run entry on the index page. +#[derive(Serialize)] +struct RunIndexEntry { + /// Relative path within run_dir. + path: String, + /// Project name (first path component). 
+ project: String, + /// run.json manifest if present. + manifest: Option, + /// Timestamp from directory name. + date: Option, +} + +/// Discover pushed runs for the index page. +/// Structure: run_dir/{project}-{date}-{uuid}/... +fn discover_pushed_runs(run_dir: &Path) -> Vec { + let mut entries = Vec::new(); + let Ok(dirs) = fs::read_dir(run_dir) else { + return entries; + }; + for dir_entry in dirs.flatten() { + if !dir_entry.file_type().map(|t| t.is_dir()).unwrap_or(false) { + continue; + } + let name = dir_entry.file_name().to_string_lossy().to_string(); + let manifest = read_run_json(&dir_entry.path()); + // Extract project from dirname: {project}-YYYYMMDD_HHMMSS-{uuid} + // Find the date portion (first occurrence of YYYYMMDD_) + let project = name + .find(|c: char| c.is_ascii_digit()) + .and_then(|i| { + if i > 0 { + Some(name[..i - 1].to_string()) + } else { + None + } + }) + .or_else(|| manifest.as_ref().map(|m| m.project.clone())) + .unwrap_or_else(|| name.clone()); + let date = name + .find(|c: char| c.is_ascii_digit()) + .and_then(|i| name.get(i..i + 15)) + .map(|s| s.to_string()); + entries.push(RunIndexEntry { + path: name, + project, + manifest, + date, + }); + } + entries.sort_by(|a, b| b.path.cmp(&a.path)); + entries +} + +async fn runs_index_html(State(state): State) -> Html { + let entries = discover_pushed_runs(&state.base); + + let mut html = String::from( + r#" + + + + +patchbay runs + + + +

patchbay runs

+"#, + ); + + if entries.is_empty() { + html.push_str(r#"
No runs yet. Push results using the API.
"#); + } else { + for entry in &entries { + html.push_str(&format!( + r#"
"#, + html_escape(&entry.path) + )); + html.push_str(&format!( + r#"{}"#, + html_escape(&entry.project) + )); + + html.push_str(r#"
"#); + if let Some(m) = &entry.manifest { + if let Some(branch) = &m.branch { + html.push_str(&format!( + r#"{} "#, + html_escape(branch) + )); + } + if let Some(commit) = &m.commit { + let short = &commit[..commit.len().min(7)]; + html.push_str(&format!("{short} ")); + } + if let Some(pr) = m.pr { + if let Some(url) = &m.pr_url { + html.push_str(&format!(r#"PR #{pr} "#, html_escape(url))); + } else { + html.push_str(&format!("PR #{pr} ")); + } + } + if let Some(title) = &m.title { + html.push_str(&html_escape(title)); + } + } + html.push_str("
"); + + if let Some(date) = &entry.date { + html.push_str(&format!(r#"{date}"#)); + } + + // Deep-link into the UI invocation view + html.push_str(&format!( + r#" View →"#, + html_escape(&entry.path) + )); + + html.push_str("
\n"); + } + } + + html.push_str(""); + Html(html) +} + +fn html_escape(s: &str) -> String { + s.replace('&', "&") + .replace('<', "<") + .replace('>', ">") + .replace('"', """) +} + +// ── Push endpoint ─────────────────────────────────────────────────── + +async fn push_run( + AxPath(project): AxPath, + headers: HeaderMap, + State(state): State, + body: Bytes, +) -> impl IntoResponse { + let Some(push) = &state.push else { + return (StatusCode::NOT_FOUND, "push not enabled".to_string()); + }; + + // Validate API key + let auth = headers + .get("authorization") + .and_then(|v| v.to_str().ok()) + .unwrap_or(""); + let expected = format!("Bearer {}", push.api_key); + if auth != expected { + return (StatusCode::UNAUTHORIZED, "invalid api key".to_string()); + } + + // Validate project name (alphanumeric, hyphens, underscores) + if project.is_empty() + || !project + .chars() + .all(|c| c.is_alphanumeric() || c == '-' || c == '_') + { + return (StatusCode::BAD_REQUEST, "invalid project name".to_string()); + } + + // Create run directory: {run_dir}/{project}-{date}-{uuid} + let now = chrono::Utc::now(); + let date = now.format("%Y%m%d_%H%M%S").to_string(); + let uuid = uuid::Uuid::new_v4(); + let run_name = format!("{project}-{date}-{uuid}"); + let run_dir = push.run_dir.join(&run_name); + + if let Err(e) = std::fs::create_dir_all(&run_dir) { + return ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("failed to create run dir: {e}"), + ); + } + + // Extract tar.gz + let decoder = flate2::read::GzDecoder::new(&body[..]); + let mut archive = tar::Archive::new(decoder); + if let Err(e) = archive.unpack(&run_dir) { + // Clean up on failure + let _ = std::fs::remove_dir_all(&run_dir); + return ( + StatusCode::BAD_REQUEST, + format!("failed to extract archive: {e}"), + ); + } + + // Notify subscribers about new run + let _ = state.runs_tx.send(()); + + // run_name is the invocation name (first path component for all sims inside) + let result = serde_json::json!({ + "ok": true, + 
"project": project, + "run": run_name, + "invocation": run_name, + }); + + (StatusCode::OK, serde_json::to_string(&result).unwrap()) +} + +// ── Retention watcher ─────────────────────────────────────────────── + +/// Background task that enforces a total size limit on the runs directory. +/// Deletes oldest runs (by directory name sort) when total exceeds `max_bytes`. +pub async fn retention_watcher(run_dir: PathBuf, max_bytes: u64) { + let mut interval = tokio::time::interval(Duration::from_secs(60)); + loop { + interval.tick().await; + if let Err(e) = enforce_retention(&run_dir, max_bytes) { + tracing::warn!("retention check failed: {e}"); + } + } +} + +fn enforce_retention(run_dir: &Path, max_bytes: u64) -> anyhow::Result<()> { + // Collect all run dirs with their sizes, sorted oldest first + let mut runs: Vec<(PathBuf, u64)> = Vec::new(); + + for entry in fs::read_dir(run_dir)?.flatten() { + if !entry.file_type().map(|t| t.is_dir()).unwrap_or(false) { + continue; + } + let size = dir_size(&entry.path()); + runs.push((entry.path(), size)); + } + + // Sort oldest first (by path, which includes date) + runs.sort_by(|a, b| a.0.cmp(&b.0)); + + let total: u64 = runs.iter().map(|(_, s)| s).sum(); + if total <= max_bytes { + return Ok(()); + } + + let mut to_free = total - max_bytes; + for (path, size) in &runs { + if to_free == 0 { + break; + } + tracing::info!("retention: removing {}", path.display()); + let _ = fs::remove_dir_all(path); + to_free = to_free.saturating_sub(*size); + } + + Ok(()) +} + +fn dir_size(path: &Path) -> u64 { + let mut total = 0; + if let Ok(entries) = fs::read_dir(path) { + for entry in entries.flatten() { + let ft = entry.file_type().unwrap_or_else(|_| unreachable!()); + if ft.is_file() { + total += entry.metadata().map(|m| m.len()).unwrap_or(0); + } else if ft.is_dir() { + total += dir_size(&entry.path()); + } + } + } + total +} diff --git a/patchbay-server/src/main.rs b/patchbay-server/src/main.rs new file mode 100644 index 
0000000..113d1ad --- /dev/null +++ b/patchbay-server/src/main.rs @@ -0,0 +1,229 @@ +//! Standalone patchbay server binary. +//! +//! Serves the devtools UI and optionally accepts pushed run results via HTTP. +//! Supports automatic TLS via ACME (Let's Encrypt). + +use std::{net::SocketAddr, path::PathBuf, sync::Arc}; + +use anyhow::{bail, Context, Result}; +use clap::Parser; + +#[derive(Parser)] +#[command(name = "patchbay-serve", about = "Serve patchbay run results")] +struct Cli { + /// Directory containing run results to serve. + #[arg(long)] + run_dir: Option<PathBuf>, + + /// Bind address for HTTP server (plain HTTP, or without ACME). + #[arg(long, default_value = "0.0.0.0:8080")] + http_bind: SocketAddr, + + /// Bind address for HTTPS server (only used with --acme-domain). + #[arg(long, default_value = "0.0.0.0:4443")] + https_bind: SocketAddr, + + /// Domain for automatic TLS via ACME (Let's Encrypt). + /// Serves HTTPS on --https-bind and HTTP redirect on --http-bind. + #[arg(long)] + acme_domain: Option<String>, + + /// Contact email for ACME/Let's Encrypt (required with --acme-domain). + #[arg(long)] + acme_email: Option<String>, + + /// Enable accepting pushed run results. + #[arg(long, default_value_t = false)] + accept_push: bool, + + /// API key required for push requests (Authorization: Bearer <key>). + #[arg(long, env = "PATCHBAY_API_KEY")] + api_key: Option<String>, + + /// Data directory for storing pushed runs and ACME state. + /// Defaults to platform data dir (e.g. ~/.local/share/patchbay-serve). + #[arg(long)] + data_dir: Option<PathBuf>, + + /// Maximum total size of stored runs (e.g. "10GB", "500MB"). + /// When exceeded, oldest runs are deleted. 
+ #[arg(long)] + retention: Option<String>, +} + +fn parse_size(s: &str) -> Result<u64> { + let s = s.trim(); + let (num, mult) = if let Some(n) = s.strip_suffix("TB") { + (n.trim(), 1_000_000_000_000u64) + } else if let Some(n) = s.strip_suffix("GB") { + (n.trim(), 1_000_000_000u64) + } else if let Some(n) = s.strip_suffix("MB") { + (n.trim(), 1_000_000u64) + } else if let Some(n) = s.strip_suffix("KB") { + (n.trim(), 1_000u64) + } else { + (s, 1u64) + }; + let val: f64 = num.parse().context("invalid size number")?; + Ok((val * mult as f64) as u64) +} + +#[tokio::main] +async fn main() -> Result<()> { + rustls::crypto::ring::default_provider() + .install_default() + .expect("install rustls crypto provider"); + + tracing_subscriber::fmt() + .with_env_filter( + tracing_subscriber::EnvFilter::try_from_default_env() + .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")), + ) + .init(); + + let cli = Cli::parse(); + + // Resolve data dir + let data_dir = match &cli.data_dir { + Some(d) => d.clone(), + None => dirs::data_dir() + .unwrap_or_else(|| PathBuf::from(".")) + .join("patchbay-serve"), + }; + std::fs::create_dir_all(&data_dir) + .with_context(|| format!("create data dir {}", data_dir.display()))?; + + // Resolve run dir: explicit --run-dir, or data_dir/runs if push enabled, or cwd + let run_dir = match &cli.run_dir { + Some(d) => d.clone(), + None if cli.accept_push => data_dir.join("runs"), + None => std::env::current_dir().context("resolve cwd")?, + }; + std::fs::create_dir_all(&run_dir) + .with_context(|| format!("create run dir {}", run_dir.display()))?; + + if cli.accept_push && cli.api_key.is_none() { + bail!("--accept-push requires --api-key to be set"); + } + + if cli.acme_domain.is_some() && cli.acme_email.is_none() { + bail!("--acme-domain requires --acme-email to be set"); + } + + if cli.acme_domain.is_some() { + if cli.https_bind.port() != 443 { + bail!( + "ACME TLS-ALPN-01 requires port 443, but --https-bind is {}. 
\ + Use --https-bind 0.0.0.0:443", + cli.https_bind + ); + } + if cli.http_bind.port() != 80 { + tracing::warn!( + "ACME HTTP redirect is on port {} instead of 80; \ + browsers won't follow the redirect unless port 80 is forwarded", + cli.http_bind.port() + ); + } + } + + // Parse retention + let retention_bytes = cli + .retention + .as_deref() + .map(parse_size) + .transpose() + .context("invalid --retention value")?; + + let push_config = if cli.accept_push { + Some(patchbay_server::PushConfig { + api_key: cli.api_key.clone().unwrap(), + run_dir: run_dir.clone(), + }) + } else { + None + }; + + // Start retention watcher if configured + if let Some(max_bytes) = retention_bytes { + let retention_dir = run_dir.clone(); + tokio::spawn(async move { + patchbay_server::retention_watcher(retention_dir, max_bytes).await; + }); + } + + let app = patchbay_server::build_app(run_dir, push_config); + + if let Some(domain) = &cli.acme_domain { + let email = cli.acme_email.as_deref().unwrap(); + serve_acme(app, domain, email, &data_dir, cli.http_bind, cli.https_bind).await + } else { + tracing::info!("listening on {}", cli.http_bind); + let listener = tokio::net::TcpListener::bind(cli.http_bind).await?; + axum::serve(listener, app).await?; + Ok(()) + } +} + +async fn serve_acme( + app: axum::Router, + domain: &str, + email: &str, + data_dir: &std::path::Path, + http_bind: SocketAddr, + https_bind: SocketAddr, +) -> Result<()> { + use tokio_rustls_acme::{caches::DirCache, AcmeConfig}; + use tokio_stream::StreamExt; + + let acme_dir = data_dir.join("acme"); + std::fs::create_dir_all(&acme_dir)?; + + let mut state = AcmeConfig::new([domain]) + .contact([format!("mailto:{email}")]) + .cache(DirCache::new(acme_dir)) + .directory_lets_encrypt(true) + .state(); + + let rustls_config = rustls::ServerConfig::builder() + .with_no_client_auth() + .with_cert_resolver(state.resolver()); + let acceptor = state.axum_acceptor(Arc::new(rustls_config)); + + // Spawn ACME event handler + 
tokio::spawn(async move { + loop { + match state.next().await { + Some(Ok(ok)) => tracing::info!("acme event: {:?}", ok), + Some(Err(err)) => tracing::error!("acme error: {:?}", err), + None => break, + } + } + }); + + tracing::info!("ACME TLS for {domain}: https on {https_bind}, http redirect on {http_bind}"); + + // HTTP redirect + let redirect_domain = domain.to_string(); + tokio::spawn(async move { + let redirect = + axum::Router::new().fallback(axum::routing::any(move |req: axum::extract::Request| { + let host = redirect_domain.clone(); + async move { + let uri = req.uri(); + let path = uri.path_and_query().map(|p| p.as_str()).unwrap_or("/"); + axum::response::Redirect::permanent(&format!("https://{host}{path}")) + } + })); + let listener = tokio::net::TcpListener::bind(http_bind).await.unwrap(); + let _ = axum::serve(listener, redirect).await; + }); + + // HTTPS with ACME acceptor + axum_server::bind(https_bind) + .acceptor(acceptor) + .serve(app.into_make_service()) + .await?; + + Ok(()) +} diff --git a/patchbay/src/core.rs b/patchbay/src/core.rs index 7e622e3..a30b00e 100644 --- a/patchbay/src/core.rs +++ b/patchbay/src/core.rs @@ -563,11 +563,33 @@ pub(crate) struct LabInner { pub ipv6_dad_mode: Ipv6DadMode, /// IPv6 provisioning behavior. pub ipv6_provisioning_mode: Ipv6ProvisioningMode, + /// Writer task handle (kept alive until lab is dropped). + pub writer_handle: std::sync::Mutex>>, + /// Test outcome flag shared with the writer and [`TestGuard`]. + pub test_status: Arc, + /// Accumulated lab state, shared with the background writer. Updated on + /// every event so that `Drop` can always write a complete `state.json` + /// synchronously — even if the async writer task never ran its shutdown path. + pub shared_state: Arc>, } impl Drop for LabInner { fn drop(&mut self) { self.cancel.cancel(); + + // Determine final status from the test guard. 
+ let status = match self.test_status.load(Ordering::Acquire) { + crate::writer::STATUS_SUCCESS => "success", + crate::writer::STATUS_FAILED => "failed", + _ => "stopped", + }; + + // Write the final state.json synchronously. The shared_state mutex + // holds the fully accumulated LabState (updated by the writer on every + // event), so this works even if the async task never flushed. + if let Some(ref run_dir) = self.run_dir { + crate::writer::write_final_state(run_dir, &self.shared_state, status); + } } } diff --git a/patchbay/src/event.rs b/patchbay/src/event.rs index 8e6c952..7176452 100644 --- a/patchbay/src/event.rs +++ b/patchbay/src/event.rs @@ -250,6 +250,13 @@ pub enum LabEventKind { /// Per-interface counters. counters: Vec, }, + + // ── Test lifecycle ── + /// Test completed with a result. + TestCompleted { + /// Whether the test passed. + passed: bool, + }, } /// Snapshot of a device interface at the time of an event. @@ -705,6 +712,9 @@ impl LabState { map.insert(c.iface.clone(), c.clone()); } } + LabEventKind::TestCompleted { passed } => { + self.status = if *passed { "success" } else { "failed" }.into(); + } } } } diff --git a/patchbay/src/lab.rs b/patchbay/src/lab.rs index eac0252..b8a03e9 100644 --- a/patchbay/src/lab.rs +++ b/patchbay/src/lab.rs @@ -6,7 +6,7 @@ use std::{ path::{Path, PathBuf}, process::Command, sync::{ - atomic::{AtomicU64, Ordering}, + atomic::{AtomicU64, AtomicU8, Ordering}, Arc, }, }; @@ -460,6 +460,8 @@ impl Lab { let cancel = tokio_util::sync::CancellationToken::new(); let (events_tx, _rx) = tokio::sync::broadcast::channel::(256); drop(_rx); + let test_status = Arc::new(AtomicU8::new(crate::writer::STATUS_UNKNOWN)); + let shared_state = Arc::new(std::sync::Mutex::new(crate::event::LabState::default())); let lab = Self { inner: Arc::new(LabInner { @@ -473,6 +475,9 @@ impl Lab { run_dir: run_dir.clone(), ipv6_dad_mode: opts.ipv6_dad_mode, ipv6_provisioning_mode: opts.ipv6_provisioning_mode, + writer_handle: 
std::sync::Mutex::new(None), + test_status: test_status.clone(), + shared_state: shared_state.clone(), }), }; // Initialize root namespace and IX bridge eagerly — no lazy-init race. @@ -481,14 +486,16 @@ impl Lab { .await .context("failed to set up root namespace")?; - // Spawn file writer if outdir is configured — subscribe before emitting + // Spawn file writer if outdir is configured -- subscribe before emitting // initial events so the writer captures LabCreated and IxCreated. if let Some(ref run_dir) = run_dir { - crate::writer::spawn_writer( + let handle = crate::writer::spawn_writer( run_dir.clone(), lab.inner.events_tx.subscribe(), lab.inner.cancel.clone(), + shared_state, ); + *lab.inner.writer_handle.lock().unwrap() = Some(handle); } // Emit lifecycle events. @@ -530,6 +537,21 @@ impl Lab { self.inner.label.as_deref() } + /// Returns a guard that records whether the test passed or failed. + /// + /// On drop the guard checks [`std::thread::panicking`] and writes + /// "failed" to state.json if a panic is unwinding. Call [`.ok()`](TestGuard::ok) + /// at the end of a successful test to record "success" explicitly. + /// If neither `.ok()` is called nor a panic occurs (e.g. the test returns + /// `Err`), the status defaults to "failed" -- a safe default that avoids + /// false positives. + pub fn test_guard(&self) -> TestGuard { + TestGuard { + inner: Arc::clone(&self.inner), + marked: false, + } + } + /// Parses `lab.toml`, builds the network, and returns a ready-to-use lab. pub async fn load(path: impl AsRef) -> Result { let text = std::fs::read_to_string(path).context("read lab config")?; @@ -2389,3 +2411,45 @@ impl DeviceBuilder { )) } } + +/// RAII guard that records test pass/fail into the lab's state.json. +/// +/// Created by [`Lab::test_guard`]. Defaults to "failed" on drop unless +/// [`.ok()`](TestGuard::ok) was called. 
This means the only way to get +/// "success" is to explicitly call `.ok()`, avoiding false positives from +/// early `?` returns or panics. +/// +/// Both `.ok()` and the failure path emit a [`LabEventKind::TestCompleted`] +/// event so the result is visible in the timeline. +pub struct TestGuard { + inner: Arc, + marked: bool, +} + +impl TestGuard { + /// Mark the test as successful. + /// + /// Call this at the end of a passing test, typically just before `Ok(())`. + pub fn ok(mut self) { + use std::sync::atomic::Ordering; + self.inner + .test_status + .store(crate::writer::STATUS_SUCCESS, Ordering::Release); + self.inner + .emit(LabEventKind::TestCompleted { passed: true }); + self.marked = true; + } +} + +impl Drop for TestGuard { + fn drop(&mut self) { + use std::sync::atomic::Ordering; + if !self.marked { + self.inner + .test_status + .store(crate::writer::STATUS_FAILED, Ordering::Release); + self.inner + .emit(LabEventKind::TestCompleted { passed: false }); + } + } +} diff --git a/patchbay/src/lib.rs b/patchbay/src/lib.rs index 0b470a2..af46edf 100644 --- a/patchbay/src/lib.rs +++ b/patchbay/src/lib.rs @@ -226,7 +226,7 @@ pub use lab::{ FirewallConfig, FirewallConfigBuilder, IpSupport, Ipv6DadMode, Ipv6Profile, Ipv6ProvisioningMode, Ix, Lab, LabOpts, LinkCondition, LinkLimits, Nat, NatConfig, NatConfigBuilder, NatFiltering, NatMapping, NatV6Mode, OutDir, Region, RegionLink, Router, - RouterBuilder, RouterIface, RouterPreset, + RouterBuilder, RouterIface, RouterPreset, TestGuard, }; pub use crate::{ diff --git a/patchbay/src/writer.rs b/patchbay/src/writer.rs index 8d526c7..35f9d6e 100644 --- a/patchbay/src/writer.rs +++ b/patchbay/src/writer.rs @@ -4,6 +4,7 @@ use std::{ fs, io::{BufWriter, Write}, path::{Path, PathBuf}, + sync::{Arc, Mutex}, time::Duration, }; @@ -17,15 +18,20 @@ use crate::{ /// How often events.jsonl is flushed and state.json is written. 
const FLUSH_INTERVAL: Duration = Duration::from_secs(1); +/// Test outcome communicated from [`TestGuard`] to the writer on shutdown. +pub(crate) const STATUS_UNKNOWN: u8 = 0; +pub(crate) const STATUS_SUCCESS: u8 = 1; +pub(crate) const STATUS_FAILED: u8 = 2; + /// Writes events to `events.jsonl` and maintains `state.json`. -pub(crate) struct LabWriter { +struct LabWriter { outdir: PathBuf, - state: LabState, + shared_state: Arc>, events_file: BufWriter, } impl LabWriter { - pub(crate) fn new(outdir: &Path) -> Result { + fn new(outdir: &Path, shared_state: Arc>) -> Result { fs::create_dir_all(outdir)?; let events_path = outdir.join(consts::EVENTS_JSONL); let events_file = BufWriter::new( @@ -36,17 +42,17 @@ impl LabWriter { ); Ok(Self { outdir: outdir.to_path_buf(), - state: LabState::default(), + shared_state, events_file, }) } - /// Append event to events.jsonl buffer and update in-memory state. - /// Does NOT flush — call [`flush`] to persist to disk. + /// Append event to events.jsonl buffer and update shared state. + /// Does NOT flush -- call [`flush_events`] to persist to disk. fn append_event(&mut self, event: &LabEvent) -> Result<()> { serde_json::to_writer(&mut self.events_file, event)?; self.events_file.write_all(b"\n")?; - self.state.apply(event); + self.shared_state.lock().unwrap().apply(event); Ok(()) } @@ -60,7 +66,8 @@ impl LabWriter { fn write_state(&self) -> Result<()> { let tmp = self.outdir.join(consts::STATE_JSON_TMP); let dst = self.outdir.join(consts::STATE_JSON); - fs::write(&tmp, serde_json::to_string_pretty(&self.state)?)?; + let state = self.shared_state.lock().unwrap(); + fs::write(&tmp, serde_json::to_string_pretty(&*state)?)?; fs::rename(&tmp, &dst)?; Ok(()) } @@ -69,15 +76,17 @@ impl LabWriter { /// Spawns a background task that writes events to disk. 
/// /// Events are buffered in memory and flushed to `events.jsonl` + `state.json` -/// at most once per [`FLUSH_INTERVAL`], with a final flush when the lab is -/// cancelled or the channel closes. +/// at most once per [`FLUSH_INTERVAL`]. The shared `LabState` is updated on +/// every event so that `LabInner::drop` can always write a final `state.json` +/// synchronously, even if this task never gets to run its shutdown path. pub(crate) fn spawn_writer( outdir: PathBuf, mut rx: tokio::sync::broadcast::Receiver, cancel: tokio_util::sync::CancellationToken, + shared_state: Arc>, ) -> tokio::task::JoinHandle<()> { tokio::spawn(async move { - let mut writer = match LabWriter::new(&outdir) { + let mut writer = match LabWriter::new(&outdir, shared_state) { Ok(w) => w, Err(e) => { tracing::error!("LabWriter init failed: {e}"); @@ -105,20 +114,15 @@ pub(crate) fn spawn_writer( tracing::warn!("LabWriter lagged {n} events"); } Err(tokio::sync::broadcast::error::RecvError::Closed) => { - // Channel closed means the lab was dropped — treat as stop. - writer.state.status = "stopped".into(); - dirty = true; break; } } } _ = cancel.cancelled() => { - // Lab is shutting down — drain remaining events, then stop. + // Lab is shutting down -- drain remaining events, then stop. while let Ok(event) = rx.try_recv() { let _ = writer.append_event(&event); } - writer.state.status = "stopped".into(); - dirty = true; break; } _ = interval.tick() => { @@ -135,14 +139,33 @@ pub(crate) fn spawn_writer( } } - // Final flush on close. - if dirty { - if let Err(e) = writer.flush_events() { - tracing::error!("LabWriter final events flush error: {e}"); - } - if let Err(e) = writer.write_state() { - tracing::error!("LabWriter final state write error: {e}"); - } + // Final flush of events.jsonl. State.json is written by LabInner::drop + // which sets the final status and writes synchronously. 
+ if let Err(e) = writer.flush_events() { + tracing::error!("LabWriter final events flush error: {e}"); + } + if let Err(e) = writer.write_state() { + tracing::error!("LabWriter final state write error: {e}"); } }) } + +/// Synchronously write the final `state.json` with the given status. +/// +/// Called from `LabInner::drop`. Reads the accumulated state from the shared +/// mutex, patches the status, and writes atomically. +pub(crate) fn write_final_state(run_dir: &Path, shared_state: &Mutex, status: &str) { + let tmp = run_dir.join(consts::STATE_JSON_TMP); + let dst = run_dir.join(consts::STATE_JSON); + let mut state = shared_state.lock().unwrap(); + state.status = status.into(); + match serde_json::to_string_pretty(&*state) { + Ok(json) => { + let _ = fs::write(&tmp, json); + let _ = fs::rename(&tmp, &dst); + } + Err(e) => { + tracing::error!("write_final_state serialize error: {e}"); + } + } +} diff --git a/ui/e2e/global-setup.ts b/ui/e2e/global-setup.ts index 5fd013a..c7d4622 100644 --- a/ui/e2e/global-setup.ts +++ b/ui/e2e/global-setup.ts @@ -20,4 +20,11 @@ export default function globalSetup() { stdio: 'inherit', timeout: 5 * 60_000, }) + + console.log('[setup] building patchbay-serve binary...') + execFileSync('cargo', ['build', '--bin', 'patchbay-serve'], { + cwd: REPO_ROOT, + stdio: 'inherit', + timeout: 3 * 60_000, + }) } diff --git a/ui/e2e/helpers.ts b/ui/e2e/helpers.ts index bddb789..a5a7e1d 100644 --- a/ui/e2e/helpers.ts +++ b/ui/e2e/helpers.ts @@ -18,6 +18,7 @@ function resolveTargetDir(): string { const TARGET_DIR = resolveTargetDir() export const PATCHBAY_BIN = path.join(TARGET_DIR, 'debug', 'patchbay') +export const PATCHBAY_SERVE_BIN = path.join(TARGET_DIR, 'debug', 'patchbay-serve') export async function waitForHttp(url: string, timeoutMs: number): Promise { const start = Date.now() diff --git a/ui/e2e/push.spec.ts b/ui/e2e/push.spec.ts new file mode 100644 index 0000000..7bf1907 --- /dev/null +++ b/ui/e2e/push.spec.ts @@ -0,0 +1,128 @@ 
+import { expect, test } from '@playwright/test' +import { execFileSync, execSync, spawn, type ChildProcess } from 'node:child_process' +import { mkdtempSync, rmSync, writeFileSync } from 'node:fs' +import { tmpdir } from 'node:os' +import path from 'node:path' +import { fileURLToPath } from 'node:url' +import { REPO_ROOT, PATCHBAY_BIN, PATCHBAY_SERVE_BIN, waitForHttp } from './helpers' + +const THIS_DIR = path.dirname(fileURLToPath(import.meta.url)) +const PING_TOML = path.join(THIS_DIR, 'fixtures', 'ping-e2e.toml') +const SERVE_BIND = '127.0.0.1:7433' +const SERVE_URL = `http://${SERVE_BIND}` +const API_KEY = 'test-e2e-key-123' + +test('push run results and view via deep link', async ({ page }) => { + test.setTimeout(4 * 60 * 1000) + const simWorkDir = mkdtempSync(`${tmpdir()}/patchbay-push-sim-`) + const serveDataDir = mkdtempSync(`${tmpdir()}/patchbay-push-serve-`) + let serveProc: ChildProcess | null = null + + try { + // Step 1: Run a sim to create output. + execFileSync( + PATCHBAY_BIN, + ['run', '--work-dir', simWorkDir, PING_TOML], + { + cwd: REPO_ROOT, + stdio: 'inherit', + env: process.env, + timeout: 2 * 60 * 1000, + }, + ) + + // Resolve the latest run directory (follows the "latest" symlink). + const latestDir = execSync(`readlink -f ${simWorkDir}/latest`, { + encoding: 'utf-8', + }).trim() + + // Write a run.json manifest into the output. + writeFileSync( + path.join(latestDir, 'run.json'), + JSON.stringify({ + project: 'test-project', + branch: 'feat/test', + commit: 'abc1234', + pr: 42, + pr_url: 'https://github.com/example/repo/pull/42', + title: 'E2E push test', + created_at: new Date().toISOString(), + }), + ) + + // Step 2: Start patchbay-serve with push enabled. 
+ serveProc = spawn( + PATCHBAY_SERVE_BIN, + [ + '--accept-push', + '--api-key', API_KEY, + '--data-dir', serveDataDir, + '--http-bind', SERVE_BIND, + ], + { cwd: REPO_ROOT, stdio: 'inherit' }, + ) + await waitForHttp(`${SERVE_URL}/api/runs`, 15_000) + + // Step 3: Tar+gz the run output and push it. + const tarGz = execSync(`tar -czf - -C "${latestDir}" .`) + const pushRes = await fetch(`${SERVE_URL}/api/push/test-project`, { + method: 'POST', + headers: { + 'Authorization': `Bearer ${API_KEY}`, + 'Content-Type': 'application/gzip', + }, + body: tarGz, + }) + expect(pushRes.status).toBe(200) + const pushBody = await pushRes.json() as { ok: boolean; invocation: string; project: string } + expect(pushBody.ok).toBe(true) + expect(pushBody.project).toBe('test-project') + expect(pushBody.invocation).toBeTruthy() + + // Step 4: Verify the run appears in the API. + const runsRes = await fetch(`${SERVE_URL}/api/runs`) + const runs = await runsRes.json() as Array<{ name: string; invocation: string | null }> + expect(runs.length).toBeGreaterThan(0) + // All runs should share the same invocation (the push dir name). + const inv = runs[0].invocation + expect(inv).toBe(pushBody.invocation) + + // Step 5: Open the deep link and verify the UI shows the run. + await page.goto(`${SERVE_URL}/#/inv/${pushBody.invocation}`) + + // The topbar should show "patchbay". + await expect(page.getByRole('heading', { name: 'patchbay' })).toBeVisible() + + // The sims tab should list the sim(s) from this push. + const simEntry = page.locator('.run-entry', { hasText: 'ping-e2e' }).first() + await expect(simEntry).toBeVisible({ timeout: 10_000 }) + + // Click through to an individual sim and verify topology loads. + await simEntry.click() + await expect(page.getByText('dc')).toBeVisible({ timeout: 10_000 }) + await expect(page.getByText('sender')).toBeVisible() + await expect(page.getByText('receiver')).toBeVisible() + + // Step 6: Verify push auth — request without key should fail. 
+ const noAuthRes = await fetch(`${SERVE_URL}/api/push/test-project`, { + method: 'POST', + headers: { 'Content-Type': 'application/gzip' }, + body: tarGz, + }) + expect(noAuthRes.status).toBe(401) + + // Step 7: Verify the /runs server-side index page renders. + const runsPageRes = await fetch(`${SERVE_URL}/runs`) + expect(runsPageRes.status).toBe(200) + const runsHtml = await runsPageRes.text() + expect(runsHtml).toContain('test-project') + expect(runsHtml).toContain('feat/test') + expect(runsHtml).toContain('PR #42') + } finally { + if (serveProc && !serveProc.killed) { + serveProc.kill('SIGTERM') + } + rmSync(simWorkDir, { recursive: true, force: true }) + rmSync(serveDataDir, { recursive: true, force: true }) + } +}) diff --git a/ui/e2e/runner-sim.spec.ts b/ui/e2e/runner-sim.spec.ts index 0ff0dfe..a82904f 100644 --- a/ui/e2e/runner-sim.spec.ts +++ b/ui/e2e/runner-sim.spec.ts @@ -111,6 +111,8 @@ test('multi-sim invocation shows grouped selector and combined results', async ( await expect(combinedOption).toBeAttached() await selector.selectOption({ label: await combinedOption.innerText() }) + // Switch to perf tab — invocation view defaults to sims list. + await page.getByRole('button', { name: 'perf' }).click() // Perf tab should show summary and detail tables with both sims. 
await expect(page.getByText('summary')).toBeVisible({ timeout: 5_000 }) await expect(page.getByText('all steps')).toBeVisible() diff --git a/ui/package-lock.json b/ui/package-lock.json index ce15fd8..db76d9b 100644 --- a/ui/package-lock.json +++ b/ui/package-lock.json @@ -12,7 +12,8 @@ "@xyflow/react": "^12.10.1", "dagre": "^0.8.5", "react": "^18.2.0", - "react-dom": "^18.2.0" + "react-dom": "^18.2.0", + "react-router-dom": "^7.13.1" }, "devDependencies": { "@playwright/test": "^1.51.1", @@ -56,6 +57,7 @@ "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", @@ -1234,6 +1236,7 @@ "integrity": "sha512-4K3bqJpXpqfg2XKGK9bpDTc6xO/xoUP/RBWS7AtRMug6zZFaRekiLzjVtAoZMquxoAbzBvy5nxQ7veS5eYzf8A==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~7.18.0" } @@ -1251,6 +1254,7 @@ "integrity": "sha512-z9VXpC7MWrhfWipitjNdgCauoMLRdIILQsAEV+ZesIzBq/oUlxk0m3ApZuMFCXdnS4U7KrI+l3WRUEGQ8K1QKw==", "devOptional": true, "license": "MIT", + "peer": true, "dependencies": { "@types/prop-types": "*", "csstype": "^3.2.2" @@ -1365,6 +1369,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "baseline-browser-mapping": "^2.9.0", "caniuse-lite": "^1.0.30001759", @@ -1413,6 +1418,19 @@ "dev": true, "license": "MIT" }, + "node_modules/cookie": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz", + "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, "node_modules/csstype": { "version": "3.2.3", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", @@ -1477,6 +1495,7 @@ "resolved": 
"https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", "license": "ISC", + "peer": true, "engines": { "node": ">=12" } @@ -1874,6 +1893,7 @@ "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", "license": "MIT", + "peer": true, "dependencies": { "loose-envify": "^1.1.0" }, @@ -1886,6 +1906,7 @@ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", "license": "MIT", + "peer": true, "dependencies": { "loose-envify": "^1.1.0", "scheduler": "^0.23.2" @@ -1904,12 +1925,51 @@ "node": ">=0.10.0" } }, + "node_modules/react-router": { + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.13.1.tgz", + "integrity": "sha512-td+xP4X2/6BJvZoX6xw++A2DdEi++YypA69bJUV5oVvqf6/9/9nNlD70YO1e9d3MyamJEBQFEzk6mbfDYbqrSA==", + "license": "MIT", + "dependencies": { + "cookie": "^1.0.1", + "set-cookie-parser": "^2.6.0" + }, + "engines": { + "node": ">=20.0.0" + }, + "peerDependencies": { + "react": ">=18", + "react-dom": ">=18" + }, + "peerDependenciesMeta": { + "react-dom": { + "optional": true + } + } + }, + "node_modules/react-router-dom": { + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.13.1.tgz", + "integrity": "sha512-UJnV3Rxc5TgUPJt2KJpo1Jpy0OKQr0AjgbZzBFjaPJcFOb2Y8jA5H3LT8HUJAiRLlWrEXWHbF1Z4SCZaQjWDHw==", + "license": "MIT", + "dependencies": { + "react-router": "7.13.1" + }, + "engines": { + "node": ">=20.0.0" + }, + "peerDependencies": { + "react": ">=18", + "react-dom": ">=18" + } + }, "node_modules/rollup": { "version": "4.58.0", "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.58.0.tgz", 
"integrity": "sha512-wbT0mBmWbIvvq8NeEYWWvevvxnOyhKChir47S66WCxw1SXqhw7ssIYejnQEVt7XYQpsj2y8F9PM+Cr3SNEa0gw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@types/estree": "1.0.8" }, @@ -1968,6 +2028,12 @@ "semver": "bin/semver.js" } }, + "node_modules/set-cookie-parser": { + "version": "2.7.2", + "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.2.tgz", + "integrity": "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw==", + "license": "MIT" + }, "node_modules/source-map-js": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", @@ -2058,6 +2124,7 @@ "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "^0.21.3", "postcss": "^8.4.43", diff --git a/ui/package.json b/ui/package.json index 572e1b9..4d3eb95 100644 --- a/ui/package.json +++ b/ui/package.json @@ -14,7 +14,8 @@ "@xyflow/react": "^12.10.1", "dagre": "^0.8.5", "react": "^18.2.0", - "react-dom": "^18.2.0" + "react-dom": "^18.2.0", + "react-router-dom": "^7.13.1" }, "devDependencies": { "@playwright/test": "^1.51.1", diff --git a/ui/src/App.tsx b/ui/src/App.tsx index 142a7ca..bc69328 100644 --- a/ui/src/App.tsx +++ b/ui/src/App.tsx @@ -1,4 +1,5 @@ import { useCallback, useEffect, useRef, useState } from 'react' +import { useLocation, useNavigate } from 'react-router-dom' import type { Firewall, LabEvent, @@ -14,8 +15,8 @@ import type { CombinedResults, SimResults } from './types' import { fetchRuns, fetchState, + fetchEvents, subscribeEvents, - subscribeRuns, fetchLogs, fetchResults, fetchCombinedResults, @@ -28,12 +29,9 @@ import TimelineTab from './components/TimelineTab' import TopologyGraph from './components/TopologyGraph' import NodeDetail from './components/NodeDetail' -type Tab = 'topology' | 'logs' | 'timeline' | 'perf' 
+type Tab = 'topology' | 'logs' | 'timeline' | 'perf' | 'sims' // ── Selection model ──────────────────────────────────────────────── -// The user can select either an individual sim run (by name) or an -// invocation group (to see combined results). We encode this as a -// tagged union so the rest of the component can branch cleanly. type Selection = | { kind: 'run'; name: string } @@ -44,15 +42,9 @@ function selectionKey(s: Selection | null): string { return s.kind === 'invocation' ? `inv:${s.name}` : s.name } -function parseSelectionKey(key: string, runs: RunInfo[]): Selection | null { - if (!key) return null - if (key.startsWith('inv:')) { - return { kind: 'invocation', name: key.slice(4) } - } - if (runs.some((r) => r.name === key)) { - return { kind: 'run', name: key } - } - return null +function selectionPath(s: Selection | null): string { + if (!s) return '/' + return s.kind === 'invocation' ? `/inv/${s.name}` : `/run/${s.name}` } // ── Invocation grouping ──────────────────────────────────────────── @@ -176,17 +168,30 @@ function applyEvent(state: LabState, event: LabEvent): LabState { // ── Unified App ──────────────────────────────────────────────────── -export default function App() { - // Run selection +export default function App({ mode }: { mode: 'run' | 'inv' }) { + const location = useLocation() + const navigate = useNavigate() + + // Derive selection from the URL path. + // Route is /run/* or /inv/* so everything after the prefix is the name. + const nameFromUrl = location.pathname.slice(mode === 'run' ? 5 : 5) // "/run/" or "/inv/" = 5 chars + const selection: Selection | null = nameFromUrl + ? { kind: mode === 'inv' ? 'invocation' : 'run', name: nameFromUrl } + : null + + const selectedRun = selection?.kind === 'run' ? selection.name : null + const selectedInvocation = selection?.kind === 'invocation' ? selection.name : null + + const [tab, setTab] = useState(mode === 'inv' ? 
'sims' : 'topology') + + // Run list (for the dropdown) const [runs, setRuns] = useState([]) - const [selection, setSelection] = useState(null) - const [tab, setTab] = useState('topology') - // Lab state (from SSE) + // Lab state and events const [labState, setLabState] = useState(null) const [labEvents, setLabEvents] = useState([]) const esRef = useRef(null) - const runsEsRef = useRef(null) + const lastOpidRef = useRef(0) // Log files const [logList, setLogList] = useState([]) @@ -202,34 +207,17 @@ export default function App() { // Cross-tab log jump const [logJump, setLogJump] = useState<{ node: string; path: string; timeLabel: string; nonce: number } | null>(null) - // Derived selection helpers - const selectedRun = selection?.kind === 'run' ? selection.name : null - const selectedInvocation = selection?.kind === 'invocation' ? selection.name : null - - // ── Fetch and subscribe to runs ── + // ── Poll runs list ── const refreshRuns = useCallback(async () => { const r = await fetchRuns() setRuns(r) - setSelection((prev) => { - if (r.length === 0) return null - if (prev) { - // Keep current selection if still valid - if (prev.kind === 'run' && r.some((ri) => ri.name === prev.name)) return prev - if (prev.kind === 'invocation' && r.some((ri) => ri.invocation === prev.name)) return prev - } - return { kind: 'run', name: r[0].name } - }) }, []) useEffect(() => { refreshRuns() - const es = subscribeRuns(() => refreshRuns()) - runsEsRef.current = es - return () => { - es.close() - runsEsRef.current = null - } + const id = setInterval(refreshRuns, 5_000) + return () => clearInterval(id) }, [refreshRuns]) // ── Load run data when an individual sim is selected ── @@ -246,11 +234,14 @@ export default function App() { let dead = false Promise.all([ fetchState(selectedRun), + fetchEvents(selectedRun), fetchLogs(selectedRun), fetchResults(selectedRun), - ]).then(([state, logs, results]) => { + ]).then(([state, events, logs, results]) => { if (dead) return if (state) 
setLabState(state) + setLabEvents(events) + lastOpidRef.current = events.length ? Math.max(...events.map((e) => e.opid ?? 0)) : 0 setLogList(logs) setSimResults(results) }) @@ -275,32 +266,36 @@ export default function App() { return () => { dead = true } }, [selectedInvocation]) - // ── SSE event subscription (from opid 0 to get historical + live) ── + // ── SSE for live updates (only when run is "running") ── useEffect(() => { if (!selectedRun) return - const es = subscribeEvents(selectedRun, 0, (event) => { + const runInfo = runs.find((r) => r.name === selectedRun) + if (runInfo?.status !== 'running') return + + const es = subscribeEvents(selectedRun, lastOpidRef.current, (event) => { setLabState((prev) => (prev ? applyEvent(prev, event) : prev)) setLabEvents((prev) => [...prev.slice(-999), event]) + if (event.opid != null) lastOpidRef.current = event.opid }) esRef.current = es return () => { es.close() esRef.current = null } - }, [selectedRun]) + }, [selectedRun, runs]) - // Close SSE connections on page unload/refresh. - // Firefox limits HTTP/1.1 to 6 connections per domain — stale SSE - // connections from a previous page load can exhaust the pool and block - // all subsequent fetch requests. + // Close SSE when tab becomes hidden, reconnect when visible. 
useEffect(() => { - const cleanup = () => { - runsEsRef.current?.close() - esRef.current?.close() + const onVisibility = () => { + if (document.hidden) { + esRef.current?.close() + esRef.current = null + } } - window.addEventListener('beforeunload', cleanup) - return () => window.removeEventListener('beforeunload', cleanup) + document.addEventListener('visibilitychange', onVisibility) + window.addEventListener('beforeunload', () => esRef.current?.close()) + return () => document.removeEventListener('visibilitychange', onVisibility) }, []) // ── Callbacks ── @@ -321,10 +316,15 @@ export default function App() { const isSimView = selection?.kind === 'run' const isInvocationView = selection?.kind === 'invocation' + // Runs belonging to the current invocation + const invocationRuns = isInvocationView + ? runs.filter((r) => r.invocation === selectedInvocation) + : [] + const availableTabs: Tab[] = isSimView ? ['topology', 'logs', 'timeline', ...(simResults ? (['perf'] as Tab[]) : [])] : isInvocationView - ? ['perf'] + ? ['sims', ...(combinedResults ? (['perf'] as Tab[]) : [])] : [] // When available tabs change, ensure current tab is still valid. @@ -345,15 +345,19 @@ export default function App() { return (
-

patchbay

+

patchbay