diff --git a/.gitignore b/.gitignore index fe58f6e91f..9d27c4ccb8 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ cabal.project.local~ .hpc/ *.tix .coverage + diff --git a/2026-02-04-xftp-web-persistent-connections.md b/2026-02-04-xftp-web-persistent-connections.md new file mode 100644 index 0000000000..6aa8c2f51c --- /dev/null +++ b/2026-02-04-xftp-web-persistent-connections.md @@ -0,0 +1,53 @@ +# XFTPClientAgent Pattern + +## TOC +1. Executive Summary +2. Changes: client.ts +3. Changes: agent.ts +4. Changes: test/browser.test.ts +5. Verification + +## Executive Summary + +Add `XFTPClientAgent` — a per-server connection pool matching the Haskell pattern. The agent caches `XFTPClient` instances by server URL. All orchestration functions (`uploadFile`, `downloadFile`, `deleteFile`) take `agent` as first parameter and use `getXFTPServerClient(agent, server)` instead of calling `connectXFTP` directly. Connections stay open on success; the caller creates and closes the agent. + +`connectXFTP` and `closeXFTP` stay exported (used by `XFTPWebTests.hs` Haskell tests). The `browserClients` hack, per-function `connections: Map`, and `getOrConnect` are deleted. + +## Changes: client.ts + +**Add** after types section: `XFTPClientAgent` interface, `newXFTPAgent`, `getXFTPServerClient`, `closeXFTPServerClient`, `closeXFTPAgent`. + +**Delete**: `browserClients` Map and all `isNode` browser-cache checks in `connectXFTP` and `closeXFTP`. + +**Revert `closeXFTP`** to unconditional `c.transport.close()` (browser transport.close() is already a no-op). + +`connectXFTP` stays exported (backward compat) but becomes a raw low-level function — no caching. + +## Changes: agent.ts + +**Imports**: replace `connectXFTP`/`closeXFTP` with `getXFTPServerClient`/`closeXFTPAgent` etc. + +**Re-export** from agent.ts: `newXFTPAgent`, `closeXFTPAgent`, `XFTPClientAgent`. + +**`uploadFile`**: add `agent: XFTPClientAgent` as first param. Replace `connectXFTP` → `getXFTPServerClient`. 
Remove `finally { closeXFTP }`. Pass `agent` to `uploadRedirectDescription`. + +**`uploadRedirectDescription`**: change from `(client, server, innerFd)` to `(agent, server, innerFd)`. Get client via `getXFTPServerClient`. + +**`downloadFile`**: add `agent` param. Delete local `connections: Map`. Replace `getOrConnect` → `getXFTPServerClient`. Remove finally cleanup. Pass `agent` to `downloadWithRedirect`. + +**`downloadWithRedirect`**: add `agent` param. Same replacements. Remove try/catch cleanup. Recursive call passes `agent`. + +**`deleteFile`**: add `agent` param. Same pattern. + +**Delete**: `getOrConnect` function entirely. + +## Changes: test/browser.test.ts + +Create agent before operations, pass to upload/download, close in finally. + +## Verification + +1. `npx vitest --run` — browser round-trip test passes +2. No remaining `browserClients`, `getOrConnect`, or per-function `connections: Map` locals +3. `connectXFTP` and `closeXFTP` still exported (XFTPWebTests.hs compat) +4. All orchestration functions take `agent` as first param diff --git a/contributing/CODE.md b/contributing/CODE.md index c644a16064..ab5d7efccf 100644 --- a/contributing/CODE.md +++ b/contributing/CODE.md @@ -2,7 +2,17 @@ This file provides guidance on coding style and approaches and on building the code. -## Code Style and Formatting +## Code Security + +When designing code and planning implementations: +- Apply adversarial thinking, and consider what may happen if one of the communicating parties is malicious. +- Formulate an explicit threat model for each change - who can do which undesirable things and under which circumstances. + +## Code Quality Standards + +Haskell client and server code serves as system specification, not just implementation — we use type-driven design to reflect the business domain in types. Quality, conciseness, and clarity of Haskell code are critical. + +## Code Style, Formatting and Approaches The project uses **fourmolu** for Haskell code formatting. 
Configuration is in `fourmolu.yaml`. @@ -41,6 +51,11 @@ Some files that use CPP language extension cannot be formatted as a whole, so in - Never do refactoring unless it substantially reduces cost of solving the current problem, including the cost of refactoring - Aim to minimize the code changes - do what is minimally required to solve users' problems +**Document and code structure:** +- **Never move existing code or sections around** - add new content at appropriate locations without reorganizing existing structure. +- When adding new sections to documents, continue the existing numbering scheme. +- Minimize diff size - prefer small, targeted changes over reorganization. + **Code analysis and review:** - Trace data flows end-to-end: from origin, through storage/parameters, to consumption. Flag values that are discarded and reconstructed from partial data (e.g. extracted from a URI missing original fields) — this is usually a bug. - Read implementations of called functions, not just signatures — if duplication involves a called function, check whether decomposing it resolves the duplication. diff --git a/notes-flow.txt b/notes-flow.txt deleted file mode 100644 index 93f9845092..0000000000 --- a/notes-flow.txt +++ /dev/null @@ -1,23 +0,0 @@ -common: - corrId - random BS, used as CbNonce - entityId - p2r tlsUniq - -# setup -s->p: "proxy", uri, auth? - # unless connected - p->r: "p_handshake" - p<-r: "r_key", tls-signed dh pub -s<-r: "r_key", tls-signed dh pub # reply entityId contains tlsUniq - -# working -s ; generate random dh priv, make shared secret -s->p: s2r("forward", random dh pub, SEND command blob) - p->r: p2r("forward", random dh pub, s2r("forward", ...))) - r->c@ "msg", ... 
- p<-r: p2r("r_res", s2r("ok" / "error", error)) -s<-p@ s2r("ok" / "error", error) - -# expired - p<-r@ p2r("error", "key expired") -s<-p@ "error", "key expired" -s ; reconnect \ No newline at end of file diff --git a/rfcs/2026-01-30-send-file-page.md b/rfcs/2026-01-30-send-file-page.md new file mode 100644 index 0000000000..0e35d44994 --- /dev/null +++ b/rfcs/2026-01-30-send-file-page.md @@ -0,0 +1,1081 @@ +# Send File Page — Web-based XFTP File Transfer + +## 1. Problem & Business Case + +There is no way to send or receive files using SimpleX without installing the app. A static web page that implements the XFTP protocol client-side would allow anyone with a browser to upload and download files via XFTP servers, promoting app adoption. + +**Business constraints:** +- Web page allows up to 100 MB uploads; app allows up to 1 GB. +- Page must promote app installation (e.g., banner, messaging around limits). + +**Security constraint:** +- The server hosting the page must never access file content or file descriptions. The file description is carried in the URL hash fragment (`#`), which browsers do not send to the server. +- The only way to compromise transfer security is page substitution (serving malicious JS). Mitigations: standard web security (HTTPS, CSP, SRI) and IPFS hosting with page fingerprints published in multiple independent locations. + +## 2. 
Design Overview + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Static web page (HTML + JS bundle) │ +│ ┌───────────────────────────────────────────────────────────┐ │ +│ │ TypeScript XFTP Client Library │ │ +│ │ ┌──────────┐ ┌──────────┐ ┌───────────┐ ┌─────────────┐ │ │ +│ │ │ Protocol │ │ Crypto │ │ Transport │ │ Description │ │ │ +│ │ │ Encoding │ │(libsodium│ │ (fetch │ │ (YAML parse │ │ │ +│ │ │ │ │ .js) │ │ API) │ │ + encode) │ │ │ +│ │ └──────────┘ └──────────┘ └───────────┘ └─────────────┘ │ │ +│ └───────────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────┘ + │ fetch() over HTTP/2 │ fetch() over HTTP/2 + ▼ ▼ +┌─────────────────┐ ┌─────────────────┐ +│ XFTP Server 1 │ │ XFTP Server 2 │ +│ (SNI→web cert) │ │ (SNI→web cert) │ +│ (+CORS headers) │ │ (+CORS headers) │ +└─────────────────┘ └─────────────────┘ +``` + +**Key decisions:** +- **Language:** TypeScript (readable, auditable, good ecosystem, Node.js for testing). +- **Crypto:** libsodium.js (WASM-compiled libsodium; provides XSalsa20-Poly1305, Ed25519, X25519). +- **Transport:** Browser `fetch()` API over HTTP/2 with `ReadableStream` for streaming. +- **No backend logic:** The page is entirely static. All XFTP operations happen client-side. + +## 3. Web Page UX + +### 3.1 Upload Flow + +1. **Landing state:** Drag-and-drop zone with centered upload icon and "Drop file here or click to upload" text. File size limit displayed ("Up to 100 MB — install SimpleX app for up to 1 GB"). Simple white background, no decoration. +2. **File selected:** Show file name and size. Begin upload immediately. +3. **Upload progress:** Large circular progress indicator (clockwise, starting from 3 o'clock position). Percentage in center. Cancel button below. +4. **Upload complete:** Show shareable link with copy button. QR code if link is short enough (≤ ~1000 chars). "Install SimpleX for larger files" CTA. 
+ +### 3.2 Download Flow + +1. **Link opened:** Page parses hash fragment, shows file name and size. "Download" button. +2. **Download progress:** Same circular progress indicator as upload. +3. **Download complete:** Browser save dialog triggered (via Blob + download link, or File System Access API where available). + +### 3.3 Error States + +- File too large (> 100 MB): Show limit message with app install CTA. +- Server unreachable: Retry with exponential backoff, show error after exhausting retries. +- File expired: "This file is no longer available" message. +- Decryption failure: "File corrupted or link invalid" message. + +## 4. URL Scheme + +### 4.1 Format + +``` +https://example.com/file/#<compressed, base64url-encoded file description> +``` + +- Hash fragment is never sent to the server. +- Compression: DEFLATE (raw, no gzip/zlib wrapper) — better ratio than LZW for structured text like YAML. +- Encoding: Base64url (RFC 4648 §5) — no `+`, `/`, `=`, or `%` characters. + +Alternative: LZW + base64url if DEFLATE proves problematic. Both should be evaluated. + +### 4.2 Redirect Mechanism + +For files with many chunks, the YAML file description can exceed a practical URL length. The threshold is ~600 bytes of compressed+encoded description (configurable). + +**Flow when description is too large:** +1. Serialize recipient file description to YAML. +2. Encrypt YAML using fresh key + nonce (same XSalsa20-Poly1305 as files). +3. Upload encrypted YAML as a single-chunk "file" to one randomly chosen XFTP server. +4. Create redirect description pointing to this uploaded description. +5. Encode redirect description into URL (always small — single chunk). + +**Download with redirect:** +1. Parse URL → redirect description (has `redirect` field with `size` and `digest`). +2. Download the description "file" using the single chunk reference. +3. Decrypt → get full YAML description. +4. Validate size and digest match redirect metadata. +5. Proceed with normal download using full description. 
+ +### 4.3 Estimated URL Lengths + +These estimates are preliminary and may be incorrect. + +| Scenario | Chunks | Compressed+encoded size | URL length | +|----------|--------|------------------------|------------| +| Small file (1 chunk, 1 server) | 1 | ~300 bytes | ~350 chars | +| Medium file (5 chunks, 1 server) | 5 | ~500 bytes | ~550 chars | +| Large file (25+ chunks) | 25 | Exceeds threshold → redirect | ~350 chars | + +## 5. TypeScript XFTP Client Library + +### 5.1 Module Structure + +``` +xftp-web/src/ # Separate npm project (see §12.19) +├── protocol/ +│ ├── encoding.ts # Binary encoding/decoding ← Simplex.Messaging.Encoding ✓ +│ ├── commands.ts # XFTP commands + responses ← Simplex.FileTransfer.Protocol ✓ +│ ├── transmission.ts # Transmission framing, signing, padding ✓ +│ ├── handshake.ts # XFTP handshake (standard + web) ← FileTransfer.Transport ✓ +│ ├── address.ts # XFTP server address parser ← Simplex.Messaging.Protocol ✓ +│ ├── chunks.ts # Chunk sizes + splitting ← FileTransfer.Chunks + Client.hs ✓ +│ ├── client.ts # Transport crypto (cbAuthenticate, transit encrypt/decrypt) ✓ +│ └── description.ts # Types, YAML, validation, base64url ← FileTransfer.Description ✓ +├── crypto/ +│ ├── secretbox.ts # XSalsa20-Poly1305 streaming encryption/decryption ✓ +│ ├── file.ts # File-level encryption/decryption (encryptFile, decryptChunks) ✓ +│ ├── keys.ts # Ed25519, X25519, Ed448 key generation and operations ✓ +│ ├── digest.ts # SHA-256/SHA-512 hashing ✓ +│ ├── padding.ts # Block padding/unpadding (2-byte length prefix + '#' fill) ✓ +│ └── identity.ts # Web handshake identity proof verification (Ed25519/Ed448) ✓ +├── download.ts # Download helper functions (DH, transit-decrypt, file-decrypt) ✓ +├── client.ts # HTTP/2 XFTP client ← Simplex.FileTransfer.Client +└── agent.ts # Upload/download orchestration + URI ← FileTransfer.Client.Main +``` + +### 5.2 Binary Encoding + +The XFTP wire format uses a custom binary encoding (from `Simplex.Messaging.Encoding`). 
Key patterns: + +- **Length-prefixed bytestrings:** `<1-byte length>` (`ByteString`, max 255 bytes — used for entity IDs, short fields) or `<2-byte big-endian length>` (`Large`, max 65535 bytes — used for larger data). +- **Transmission format:** `<signature> <corrId> <entityId> <command>` + - Fields separated by space (0x20). + - `signature`: Ed25519 signature over `(sessionId ++ corrId ++ entityId ++ encodedCommand)`. + - `corrId`: Correlation ID (arbitrary, echoed in response). + - `entityId`: File/chunk ID on server. + - Command: tag + space-separated fields. +- **Padding:** 2-byte big-endian length prefix + message + `#` (0x23) fill to block size (16384 bytes). + +### 5.3 Crypto Operations Catalog + +| Operation | Algorithm | Key Size | Nonce Size | Tag Size | Library | +|-----------|-----------|----------|------------|----------|---------| +| File encryption | XSalsa20-Poly1305 | 32 B | 24 B | 16 B | libsodium.js | +| File decryption | XSalsa20-Poly1305 | 32 B | 24 B | 16 B | libsodium.js | +| Transit decryption (download) | XSalsa20-Poly1305 (streaming: `cbInit` + `sbDecryptChunk`) | DH shared secret | 24 B | 16 B | libsodium.js | +| Command signing | Ed25519 | 64 B (private) | — | 64 B (sig) | libsodium.js | +| DH key exchange | X25519 | 32 B | — | — | libsodium.js | +| Chunk digest | SHA-256 | — | — | 32 B | Web Crypto API | +| File digest | SHA-512 | — | — | 64 B | Web Crypto API | +| Random bytes | ChaCha20-DRBG | — | — | — | libsodium.js `randombytes_buf` | + +**Streaming encryption detail:** + +The Haskell implementation uses a custom streaming wrapper over XSalsa20-Poly1305: +1. Initialize: `(xsalsa20_state, poly1305_state) = sbInit(key, nonce)` + - Generate 32-byte Poly1305 key from first XSalsa20 output block + - Initialize Poly1305 state with this key +2. Encrypt chunk: XOR plaintext with XSalsa20 keystream, update Poly1305 with ciphertext +3. Finalize: Compute 16-byte Poly1305 tag, append to stream + +This is NOT compatible with standard NaCl `crypto_secretbox` (see §11.2). 
The TypeScript implementation must reimplement the exact streaming logic using libsodium's low-level XSalsa20 and Poly1305 APIs. See §12.4 for the complete function mapping. + +### 5.4 Transport via fetch() + +Each XFTP command is an HTTP/2 POST request: + +```typescript +async function sendXFTPCommand( + serverUrl: string, + commandBlock: Uint8Array, // 16384 bytes, padded + fileChunk?: ReadableStream // optional, for FPUT +): Promise<{ responseBlock: Uint8Array; body?: ReadableStream }> { + + const bodyStream = fileChunk + ? concatStreams(streamFromBytes(commandBlock), fileChunk) + : streamFromBytes(commandBlock); + + const response = await fetch(serverUrl, { + method: 'POST', + body: bodyStream, + duplex: 'half', // Required for streaming request bodies + // No Content-Type header — binary protocol + }); + + const reader = response.body!.getReader(); + const responseBlock = await readExactly(reader, 16384); + const body = hasMoreData(reader) ? wrapAsStream(reader) : undefined; + + return { responseBlock, body }; +} +``` + +**Browser compatibility for streaming uploads:** +- Chrome 105+, Edge 105+: `fetch()` with `ReadableStream` body + `duplex: 'half'` +- Firefox 102+: Supported +- Safari 16.4+: Supported + +For older browsers, fall back to `ArrayBuffer` body (buffer entire chunk in memory). + +### 5.5 Upload Orchestration + +``` +1. Read file via File API (drag-drop or file picker) +2. Validate size ≤ 100 MB +3. Generate random SbKey (32 bytes) + CbNonce (24 bytes) +4. Create FileHeader { fileName } +5. Encrypt file (see §12.8 for algorithm detail): + a. Init streaming state: `sbInit(key, nonce)` + b. Encrypt `smpEncode(fileSize') <> headerBytes` where `fileSize'` = headerLen + originalFileSize + c. Encrypt file data in 65536-byte chunks (threaded state) + d. Encrypt `'#'` padding in 65536-byte chunks to fill `encSize - authTagSize - fileSize' - 8` + e. Finalize: `sbAuth(state)` → append 16-byte auth tag +6. Compute SHA-512 digest of encrypted data +7. 
Split into chunks using prepareChunkSizes algorithm: + - > 75% of 4MB → 4MB chunks + - > 75% of 1MB → 1MB + 4MB chunks + - Otherwise → 64KB + 256KB chunks +8. For each chunk (parallel, up to 8 concurrent): + a. Generate Ed25519 sender keypair + b. Generate Ed25519 recipient keypair (1 recipient for web) + c. Compute SHA-256 chunk digest + d. Connect to XFTP server (handshake if new connection) + e. Send FNEW { sndKey, size, digest } + recipient keys → receive (senderId, [recipientId]) + f. Send FPUT with chunk data → receive OK + g. Report progress +9. Build FileDescription YAML from all chunk metadata +10. If YAML size (compressed+encoded) > threshold: + a. Encrypt YAML as a file + b. Upload encrypted YAML (single chunk) → get redirect description + c. Use redirect description for URL +11. Compress + base64url encode description +12. Display URL: https://example.com/file/#<encoded description> +``` + +### 5.6 Download Orchestration + +``` +1. Parse URL hash fragment +2. Base64url decode + decompress → YAML +3. Parse YAML → FileDescription +4. Validate description (sequential chunks, sizes match) +5. If redirect field present: + a. Download redirect file (single chunk) + b. Decrypt, validate size+digest, parse inner description + c. Continue with inner description +6. For each chunk (parallel, up to 8 concurrent): + a. Generate ephemeral X25519 keypair + b. Connect to XFTP server (web handshake) + c. Send FGET { recipientDhPubKey } → receive (serverDhPubKey, cbNonce) + encrypted body + d. Compute DH shared secret + e. Transit-decrypt chunk body (XSalsa20-Poly1305 with DH secret) + f. Verify chunk digest (SHA-256) + g. Send FACK → receive OK + h. Report progress +7. Concatenate all transit-decrypted chunks (in order) → encrypted file +8. Verify file digest (SHA-512) +9. File-decrypt entire stream (XSalsa20-Poly1305 with file key + nonce) +10. Extract FileHeader → get original fileName +11. Trigger browser download (Blob + download link, or File System Access API) + +## 6. 
XFTP Server Changes + +### 6.1 SNI-Based Certificate Switching + +The SMP server already implements SNI-based certificate switching (see `Transport/Server.hs:255-269`). The same mechanism must be added to the XFTP server. + +**Current SMP implementation:** +```haskell +T.onServerNameIndication = case sniCredential of + Nothing -> \_ -> pure $ T.Credentials [credential] + Just sniCred -> \case + Nothing -> pure $ T.Credentials [credential] + Just _host -> T.Credentials [sniCred] <$ atomically (writeTVar sniCredUsed True) +``` + +**XFTP changes needed:** +1. Add `httpCredentials :: Maybe T.Credential` to `XFTPServerConfig`. +2. Add configuration section `[WEB]` to `file-server.ini` for HTTPS cert/key paths. +3. Create `TLSServerCredential` with both XFTP and web certificates. +4. Pass combined credentials to `runHTTP2Server` → `runTransportServerState_`. +5. Use `sniCredUsed` flag to distinguish web vs. native clients. + +**Certificate setup:** +- XFTP identity certificate: Existing self-signed CA chain (used for protocol identity via fingerprint). +- Web certificate: Standard CA-issued TLS certificate (e.g., Let's Encrypt) for the server's FQDN. +- Both certificates served on the same port (443). + +### 6.2 CORS Support + +Browsers enforce same-origin policy. The web page (served from `example.com`) must make cross-origin requests to XFTP servers (`xftp1.simplex.im`, etc.). + +**Required server changes:** + +1. **Handle OPTIONS preflight requests:** + ``` + OPTIONS / + Response headers: + Access-Control-Allow-Origin: * + Access-Control-Allow-Methods: POST, OPTIONS + Access-Control-Allow-Headers: Content-Type + Access-Control-Max-Age: 86400 + Response body: empty + Response status: 200 + ``` + +2. **Add CORS headers to all POST responses (when Origin header present):** + ``` + Access-Control-Allow-Origin: * + Access-Control-Expose-Headers: * + ``` + +3. **Implementation location:** In `runHTTP2Server` handler or a wrapper around the XFTP request handler. 
Detect the `Origin` header → add CORS headers. This can be conditional on web mode being enabled in config. + +**Security consideration:** `Access-Control-Allow-Origin: *` is safe here because: +- All XFTP commands require Ed25519 authentication (per-chunk keys from file description). +- No cookies or browser credentials are involved. +- File content is end-to-end encrypted. + +### 6.3 Web Handshake with Server Identity Proof + +**Both SNI and web handshake are required.** They solve different problems: + +1. **SNI certificate switching** is required because browsers reject self-signed certificates. The XFTP identity certificate is self-signed (CA chain with offline root), so the server must present a standard CA-issued web certificate (e.g., Let's Encrypt) when a browser connects. SNI is how the server detects this. + +2. **Web handshake with challenge-response** is required because browsers cannot access the TLS certificate fingerprint or the TLS-unique channel binding (`sessionId`). The native client validates XFTP identity by checking the certificate chain fingerprint against the known `keyHash` and binding it to the TLS session. The browser gets none of this — it only knows TLS succeeded with some CA-issued cert. So the XFTP identity must be proven at the protocol level. + +**Standard handshake (unchanged for native clients):** +``` +1. Client → empty POST body → Server +2. Server → padded { vRange, sessionId, CertChainPubKey } → Client +3. Client → padded { version, keyHash } → Server +4. Server → empty → Client +``` + +**Web handshake (new, when SNI is detected):** +``` +1. Client → padded { challenge: 32 random bytes } → Server +2. Server → padded { vRange, sessionId, CertChainPubKey } (header block) + + extended body { fullCertChain, signature(challenge ++ sessionId) } → Client +3. 
Client validates: + - Certificate chain CA fingerprint matches known keyHash + - Signature over (challenge ++ sessionId) is valid under cert's public key + - This proves: server controls XFTP identity key AND is live (not replay) +4. Client → padded { version, keyHash } → Server +5. Server → empty → Client +``` + +**Detection mechanism:** The server detects web clients by the `sniCredUsed` flag (already available from the TLS layer). When SNI is detected, the server expects a challenge in the first POST body (non-empty, unlike standard handshake where it is empty). No marker byte is needed — SNI presence is the discriminator. + +**Block size note:** The XFTP block size is 16384 bytes (`Protocol.hs:65`). The XFTP identity certificate chain fits within this block. The signed challenge response is sent as an extended body (streamed after the 16384-byte header block), same mechanism as file chunk data. + +### 6.4 Protocol Version and Handshake Extension + +Current XFTP versions: v1 (initial), v2 (auth commands), v3 (blocked files). These version numbers refer to wire encoding format changes, not handshake changes. + +The XFTP handshake is binary-encoded via the `Encoding` typeclass (`Transport.hs:128-142`). Both `XFTPServerHandshake` and `XFTPClientHandshake` parsers end with `Tail _compat <- smpP`, which consumes any remaining bytes. This `Tail` extension field allows adding new fields to the handshake without breaking existing parsers — old clients/servers simply ignore the extra bytes. + +**No protocol version bump is needed** for the web handshake. The web handshake is detected via SNI (transport layer), and the challenge/response extension can use the existing `Tail` field. When SNI is detected: +1. Use web TLS certificate (existing SNI mechanism). +2. Expect challenge in first POST body (non-empty body = web client). +3. Include certificate proof in response extended body. +4. Add CORS headers to all responses for this connection. 
+ +### 6.5 Serving the Static Page + +The XFTP server can optionally serve the static web page itself (similar to how SMP servers serve info pages). When a browser connects via SNI and sends a GET request (not POST), the server serves the HTML/JS/CSS bundle. + +This can be implemented identically to the SMP server's static page serving (`apps/smp-server/web/Static.hs`), using Warp to handle HTTP requests on the same TLS connection. + +Alternatively, the page is hosted on a separate web server (e.g., `files.simplex.chat`). The XFTP servers only need to handle XFTP protocol requests (POST) with CORS headers. + +## 7. Security Analysis + +### 7.1 Threat Model + +| Threat | Mitigation | Residual Risk | +|--------|-----------|---------------| +| Page substitution (malicious JS) | HTTPS, CSP, SRI; IPFS hosting with fingerprints in multiple locations | If web server is compromised and IPFS is not used, all guarantees lost. Fundamental limitation of web-based E2E crypto, mitigated by IPFS. | +| MITM between browser and XFTP server | XFTP identity verification via challenge-response handshake | Attacker can relay traffic (see §7.2) but cannot read file content due to E2E encryption. | +| File description leakage | Hash fragment (`#`) is never sent to server | If browser extension or malware reads URL bar, description is exposed. | +| Server learns file content | File encrypted client-side before upload (XSalsa20-Poly1305) | Server sees encrypted chunks only. | +| Traffic analysis | File size visible to network observers | Same as native XFTP client. | + +### 7.2 Relay Attack Analysis + +An attacker who controls the network could relay all traffic between the browser and the real XFTP server: + +1. Browser sends challenge to "attacker's server" +2. Attacker relays to real server +3. Real server signs challenge + sessionId with XFTP identity key +4. Attacker relays signed response to browser +5. 
Browser validates ✓ (signature is from the real server) + +However, the attacker **cannot read file content** because: +- File encryption key is in the hash fragment (never sent over network) +- Transit encryption uses DH key exchange (FGET) — attacker doesn't have server's DH private key +- The attacker can observe transfer sizes and timing, but this is already visible via traffic analysis + +The relay attack is equivalent to a passive network observer, which is the same threat model as native XFTP. + +### 7.3 Comparison with Native Client Security + +| Property | Native Client | Web Client | +|----------|--------------|------------| +| TLS certificate validation | XFTP identity cert via fingerprint pinning | Web CA cert via browser + XFTP identity via challenge-response | +| Session binding | TLS-unique binds to XFTP identity cert | TLS-unique binds to web cert; challenge binds to XFTP identity | +| Code integrity | Binary signed/distributed via app stores | Served over HTTPS; SRI for subresources; IPFS hosting option; vulnerable to server compromise | +| File encryption | XSalsa20-Poly1305 | Same | +| Transit encryption | DH + XSalsa20-Poly1305 | Same | + +### 7.4 Layman Security Summary (Displayed on Page) + +The web page should display a brief, non-technical security summary explaining to users: +- Files are encrypted in the browser before upload — the server never sees file contents. +- The file link (URL) contains the decryption key in the hash fragment, which the browser never sends to any server. +- Only someone with the exact link can download and decrypt the file. +- The main risk is if the web page itself is tampered with (page substitution attack). IPFS hosting mitigates this. +- For maximum security, use the SimpleX app instead. + +## 8. Implementation Approach Discussion + +### 8.1 Option 1: Haskell to WASM + +**Verdict: Not practical.** + +- Template Haskell is used extensively (`Data.Aeson.TH`, `deriveJSON`) — incompatible with GHC WASM backend. 
+- Deep dependencies on STM, IORef, SQLite (for agent) — would need extensive modification. +- GHC WASM backend is experimental, large binary output (~10+ MB). +- Hard to debug in browser context. + +### 8.2 Option 2: TypeScript Reimplementation (Recommended) + +**Verdict: Best approach.** + +- Well-understood, readable, auditable by the community. +- Rich crypto ecosystem (libsodium.js provides all needed NaCl primitives as WASM). +- Direct access to browser APIs (fetch, File, ReadableStream, Blob). +- Testable in Node.js against Haskell XFTP server. +- Small bundle size (~200 KB with libsodium WASM). + +**Risk:** Exact byte-level wire compatibility requires careful encoding implementation and thorough testing against the Haskell server. + +### 8.3 Option 3: C to WASM + +**Verdict: Viable but unnecessary.** + +- Could use libsodium C code directly for crypto (faster, reference implementation). +- But protocol encoding + YAML + orchestration still needs a higher-level language. +- Emscripten toolchain adds build complexity. +- In practice, libsodium.js already IS C-to-WASM, so Option 2 gets this benefit. + +### 8.4 Option 4: Hybrid (TypeScript + C/WASM crypto) + +**Verdict: This IS Option 2**, since libsodium.js is WASM-compiled C. The TypeScript code calls into WASM for crypto, implements protocol/transport/orchestration in TypeScript. + +## 9. Implementation Plan + +### Phase 1: TypeScript XFTP Building Blocks — DONE + +**Goal:** All per-function building blocks implemented and tested via Haskell-driven unit tests. + +**Completed** (164 tests passing across 16 test groups): +1. Binary encoding (protocol/encoding.ts) — 23 tests +2. Crypto: secretbox, keys, file, padding, digest (crypto/*.ts) — 72 tests +3. Protocol: commands, transmission (protocol/commands.ts, transmission.ts) — 40 tests +4. Handshake encoding/decoding (protocol/handshake.ts) — 18 tests +5. Identity proof verification (crypto/identity.ts) — 15 tests +6. 
File descriptions: types, YAML, validation (protocol/description.ts) — 13 tests +7. Chunk sizing: prepareChunkSizes, singleChunkSize, etc. (protocol/chunks.ts) — 4 tests +8. Transport crypto: cbAuthenticate/cbVerify, transit encrypt/decrypt (protocol/client.ts) — 10 tests +9. Server address parsing (protocol/address.ts) — 3 tests +10. Download helpers: DH, transit-decrypt, file-decrypt (download.ts) — 11 tests + +### Phase 2: XFTP Server Changes — DONE + +**Goal:** XFTP servers support web client connections. + +**Completed** (7 Haskell integration tests passing): +1. SNI certificate switching — `TLSServerCredential` mechanism for XFTP +2. CORS headers — OPTIONS handler + CORS response headers +3. Web handshake — challenge-response identity proof (Ed25519 + Ed448) +4. Integration tests — Ed25519 and Ed448 web handshake round-trips + +### Phase 3: HTTP/2 Client + Agent Orchestration + +**Goal:** Complete XFTP client that can upload and download files against a real Haskell XFTP server. + +1. **`client.ts`** ← `Simplex.FileTransfer.Client` — HTTP/2 client via `fetch()` / `node:http2`: connect + handshake, sendCommand, createChunk, uploadChunk, downloadChunk, deleteChunk, ackChunk, ping. +2. **`agent.ts`** ← `Simplex.FileTransfer.Client.Main` — Upload orchestration (encrypt → chunk → register → upload → build description), download orchestration (parse → download → verify → decrypt → ack), URL encoding with DEFLATE compression (§4.1). + +### Phase 4: Integration Testing + +**Goal:** Prove the TypeScript client is wire-compatible with the Haskell server. + +1. **Test harness** — Haskell-driven tests in `XFTPWebTests.hs` (same pattern as per-function tests). +2. **Upload test** — TypeScript uploads file → Haskell client downloads it → verify contents match. +3. **Download test** — Haskell client uploads file → TypeScript downloads it → verify contents match. +4. **Round-trip test** — TypeScript upload → TypeScript download → verify. +5. 
**Edge cases** — Single chunk, many chunks, exactly-sized chunks, redirect descriptions. + +### Phase 5: Web Page + +**Goal:** Static HTML page with upload/download UX. + +1. **Bundle TypeScript** — Compile to ES module bundle with libsodium.js WASM included. +2. **Upload UI** — Drag-drop zone, file picker, progress circle, link display. +3. **Download UI** — Parse URL, show file info, download button, progress circle. +4. **App install CTA** — Banner/messaging promoting SimpleX app for larger files. + +### Phase 6: Server-Hosted Page (Optional) + +**Goal:** XFTP servers can optionally serve the web page themselves. + +1. **Static file serving** — Similar to SMP server's `attachStaticFiles`. +2. **GET handler** — When web client sends HTTP GET (not POST), serve HTML page. +3. **Page generation** — Embed page bundle at server build time. + +## 10. Testing Strategy + +### 10.1 Per-Function Unit Tests (Haskell-driven) + +**Haskell is the test driver.** For each TypeScript function, there is one Haskell test case that: +1. Calls the Haskell function with known (or random) input → gets expected output. +2. Calls the same-named TypeScript function via `node` → gets actual output. +3. Asserts byte-identical results. + +This means **zero special test code on the TypeScript side** — node just `require`s the production module and calls the exported function. The Haskell test file is pure boilerplate. + +**Haskell helper** (defined once in the test file): +```haskell +callTS :: FilePath -> String -> ByteString -> IO ByteString +callTS modulePath funcName inputHex = do + let script = "const m = require('./" <> modulePath <> "'); " + <> "process.stdout.write(m." 
<> funcName + <> "(Buffer.from('" <> B.unpack (Base16.encode inputHex) <> "', 'hex')))" + (_, Just hout, _, ph) <- createProcess (proc "node" ["-e", script]) + {std_out = CreatePipe, cwd = Just xftpWebDir} + out <- B.hGetContents hout + void $ waitForProcess ph + pure out +``` + +**Example test cases:** +```haskell +describe "protocol/encoding" $ do + it "encodeWord16" $ do + let expected = smpEncode (42 :: Word16) + actual <- callTS "src/protocol/encoding" "encodeWord16" (smpEncode (42 :: Word16)) + actual `shouldBe` expected + +describe "crypto/secretbox" $ do + it "sbEncryptTailTag" $ do + let Right expected = LC.sbEncryptTailTag testKey testNonce testData testLen testPadLen + actual <- callTS "src/crypto/secretbox" "sbEncryptTailTag" + (smpEncode testKey <> smpEncode testNonce <> testData <> smpEncode testLen <> smpEncode testPadLen) + actual `shouldBe` LB.toStrict expected + it "sbEncryptTailTag round-trip" $ do + let Right ct = LC.sbEncryptTailTag testKey testNonce testData testLen testPadLen + actual <- callTS "src/crypto/secretbox" "sbDecryptTailTag" + (smpEncode testKey <> smpEncode testNonce <> smpEncode testPadLen <> LB.toStrict ct) + actual `shouldBe` LB.toStrict testData + +describe "crypto/padding" $ do + it "pad" $ do + let Right expected = C.pad testMsg 16384 + actual <- callTS "src/crypto/padding" "pad" (encodeTestArgs testMsg (16384 :: Int)) + actual `shouldBe` expected +``` + +**Each row in §12.1–12.17 function mapping tables becomes a test case.** The tables serve as the test case list. + +**Development workflow:** Implement one TS function → run its Haskell test → fix until it passes → move to next function. Bottom-up confidence building. No guessing what's broken. + +**Test execution:** Tests live in `tests/XFTPWebTests.hs` in the simplexmq repo, skipped by default (require compiled TS project path). 
Run with: +```bash +cabal test --ghc-options -O0 --test-option=--match="/XFTP Web Client/" +``` + +**Random inputs:** Haskell tests can use QuickCheck to generate random inputs each run, not just hardcoded values. This catches edge cases that fixed test vectors miss. + +### 10.2 Integration Tests (TS-driven, spawns Haskell server) + +**Only attempted after all per-function tests (§10.1) pass.** These are end-to-end tests that verify the full upload/download pipeline works against a real XFTP server. + +**Approach:** Node.js test (`xftp-web/test/integration.test.ts`) spawns `xftp-server` and `xftp` CLI as subprocesses. + +``` +┌────────────────────────────────────────────────────────────────┐ +│ Node.js test process (integration.test.ts) │ +│ │ +│ 1. Spawn xftp-server subprocess │ +│ 2. Run TypeScript XFTP client (under test) ──── HTTP/2 ────┐ │ +│ 3. Spawn xftp CLI to download/verify │ │ │ +│ │ │ │ +│ ┌──────────────────────┐ ┌─────────────────▼──────────┐ │ │ +│ │ xftp CLI (Haskell) │ │ xftp-server (Haskell) │ │ │ +│ │ (verify/upload) │◄───│ (subprocess) │ │ │ +│ └──────────────────────┘ └────────────────────────────┘ │ │ +└────────────────────────────────────────────────────────────────┘ +``` + +**Test scenarios:** +1. TypeScript uploads → Haskell `xftp` CLI downloads → content verified. +2. Haskell `xftp` CLI uploads → TypeScript downloads → content verified. +3. TypeScript upload + download round-trip. +4. Web handshake with challenge-response validation. +5. Redirect descriptions (large file → compressed description upload). +6. Multiple chunks across multiple servers. +7. Error cases: expired file, auth failure, digest mismatch. + +### 10.3 Browser Tests + +- Manual testing in Chrome, Firefox, Safari. +- Automated via Playwright or Puppeteer (optional, for CI). +- Focus on: streaming upload/download, progress reporting, URL parsing, CORS. 
+ +### 10.4 Test Ordering (Bottom-Up) + +The per-function tests (§10.1) must pass before attempting integration tests (§10.2). Implementation and testing order: + +1. **Encoding primitives** — `encodeWord16`, `encodeBytes`, `encodeLarge`, `pad`, `unPad` (§12.1, §12.7) +2. **Crypto primitives** — `sha256`, `sha512`, `sign`, `verify`, `dh`, key generation (§12.5, §12.6) +3. **Streaming crypto** — `sbInit`, `sbEncryptChunk`, `sbDecryptChunk`, `sbAuth` (§12.4) +4. **File crypto** — `padLazy`, `unPadLazy` (§12.7), then `encryptFile`, `decryptChunks` (§12.8 — uses streaming crypto from step 3, not padLazy) +5. **Protocol encoding** — command/response encoding, transmission framing (§12.2, §12.3) +6. **Handshake** — handshake type encoding/decoding (§12.9) +7. **Description** — YAML serialization, validation (§12.12–§12.14) +8. **Chunk sizing** — `prepareChunkSizes`, `getChunkDigest` (§12.11) +9. **Transport client** — `sendCommand`, `createChunk`, `uploadChunk`, `downloadChunk` (§12.10) +10. **Integration** — full upload/download round-trips (§10.2) + +## 11. Resolved Design Decisions + +### 11.1 Block Size + +The XFTP block size is 16384 bytes (`Protocol.hs:65`). The XFTP identity certificate chain fits within a single block. The signed challenge response for web handshake is sent as an extended body after the header block. + +### 11.2 Streaming Encryption Compatibility + +**The Haskell streaming XSalsa20-Poly1305 is NOT compatible with standard NaCl `crypto_secretbox`.** Analysis of `Crypto/Lazy.hs` confirms: + +- `SbState` (line 196) is `(XSalsa.State, Poly1305.State)` — explicit state pair. +- `sbInit` (line 202) generates a 32-byte Poly1305 key from the first XSalsa20 keystream block, then initializes both states. +- `sbEncryptChunk` (line 229) XORs plaintext with keystream and updates Poly1305 with the ciphertext. +- `sbAuth` (line 241) finalizes Poly1305 → 16-byte auth tag. 
+- **Auth tag is appended at the END** for files (`sbEncryptTailTag`, line 134), unlike standard NaCl which prepends it. +- Standard `crypto_secretbox` produces `tag ++ ciphertext`; this produces `ciphertext ++ tag`. + +The TypeScript implementation must reimplement the exact streaming logic using libsodium's low-level XSalsa20 and Poly1305 APIs. `crypto_secretbox_easy` cannot be used. + +### 11.3 Web Client Detection + +Both SNI and web handshake are mandatory (see §6.3). SNI detection (`sniCredUsed` flag) is the discriminator — when SNI is detected, the server expects the web handshake variant. + +### 11.4 URL Compression + +DEFLATE (raw, no gzip/zlib wrapper). Available in modern browsers via `DecompressionStream`. Modern browsers only — no polyfill needed. + +### 11.5 Testing Architecture + +Two levels: (1) Haskell-driven per-function tests (`tests/XFTPWebTests.hs`) that call each TS function via `node` and compare output with the Haskell equivalent — zero TS test code needed, see §10.1. (2) TS-driven integration tests (`xftp-web/test/integration.test.ts`) that spawn `xftp-server` and `xftp` CLI as subprocesses for full round-trip verification — only attempted after all per-function tests pass, see §10.2. + +### 11.6 Memory Management for 100 MB Files + +XSalsa20-Poly1305 streaming encryption/decryption is sequential — each 64KB block's state depends on the previous block, and the auth tag is computed/verified at the end. This means both upload and download have the same structure: one sequential crypto pass + one parallel network pass. + +**Upload flow:** +1. `File.stream()` → encrypt sequentially (state threading) → buffer encrypted output +2. Compute SHA-512 digest of encrypted data +3. Split into chunks, upload in parallel (up to 8 concurrent operations, see §11.9) to randomly selected servers (from the 6 default servers in `Presets.hs`) + +**Download flow:** +1. Download chunks in parallel from servers → buffer encrypted data +2. Decrypt sequentially (state threading) → verify auth tag +3. 
Trigger browser save + +Both directions buffer ~100 MB of encrypted data. The approach should be symmetric. + +**Option A — Memory buffer:** Buffer encrypted data as `ArrayBuffer`. 100 MB peak memory is feasible on modern devices. Simple implementation, no Web Worker needed. Chunk slicing is zero-copy via `ArrayBuffer.slice()`. + +**Option B — OPFS ([Origin Private File System](https://developer.mozilla.org/en-US/docs/Web/API/File_System_API/Origin_private_file_system)):** Write encrypted data to OPFS instead of holding in memory. OPFS storage quota is shared with IndexedDB/Cache API — typically hundreds of MB to several GB ([quota details](https://developer.mozilla.org/en-US/docs/Web/API/Storage_API/Storage_quotas_and_eviction_criteria)). The fast synchronous API (`createSyncAccessHandle()`) requires a [Web Worker](https://developer.mozilla.org/en-US/docs/Web/API/FileSystemFileHandle/createSyncAccessHandle) but is [3-4x faster than IndexedDB](https://web.dev/articles/origin-private-file-system). The async API (`createWritable()`) works on the main thread. + +**Decision:** Use OPFS with a Web Worker. While 100 MB fits in memory, OPFS future-proofs the implementation for raising the file size limit (250 MB, 500 MB, etc.) without code changes. The Web Worker also keeps the main thread responsive during encryption/decryption. The implementation cost is modest — a single worker that runs the sequential crypto pipeline, reading/writing OPFS files. + +### 11.7 Server Page Hosting + +Excluded from initial implementation. Added at the very end (Phase 6) as optional feature. Initial deployment serves the page from a separate web host. + +### 11.8 File Expiry Communication + +Hardcode 48 hours for standalone web page. Server-hosted page can use server-configurable TTL. The page should also display which XFTP servers were used for the upload. + +### 11.9 Concurrent Operations + +8 parallel operations in the browser. 
The Haskell CLI uses 16, but browsers have per-origin connection limits (6-8). Since chunks typically go to different servers (different origins), 8 provides good parallelism without hitting browser limits. + +## 12. Haskell-to-TypeScript Function Mapping + +This section maps every TypeScript module to the Haskell functions it must reimplement. File paths are relative to `src/`. Line numbers reference the current codebase. Each TypeScript function must produce byte-identical output to its Haskell counterpart — this is transpilation, not reimplementation. + +### 12.1 `protocol/encoding.ts` ← `Simplex/Messaging/Encoding.hs` + +Binary encoding primitives. Every XFTP type's wire format is built from these. + +| TypeScript function | Haskell function | Line | Description | +|---|---|---|---| +| `encodeWord16(n)` | `smpEncode :: Word16` | 70 | 2-byte big-endian | +| `decodeWord16(buf)` | `smpP :: Word16` | 70 | Parse 2-byte big-endian | +| `encodeWord32(n)` | `smpEncode :: Word32` | 76 | 4-byte big-endian | +| `decodeWord32(buf)` | `smpP :: Word32` | 76 | Parse 4-byte big-endian | +| `encodeInt64(n)` | `smpEncode :: Int64` | 82 | Two Word32s (high, low) | +| `decodeInt64(buf)` | `smpP :: Int64` | 82 | Parse two Word32s | +| `encodeBytes(bs)` | `smpEncode :: ByteString` | 100 | 1-byte length prefix + bytes | +| `decodeBytes(buf)` | `smpP :: ByteString` | 100 | Parse 1-byte length prefix | +| `encodeLarge(bs)` | `smpEncode :: Large` | 133 | 2-byte length prefix + bytes | +| `decodeLarge(buf)` | `smpP :: Large` | 133 | Parse 2-byte length prefix | +| `encodeTail(bs)` | `smpEncode :: Tail` | 124 | Raw bytes (no prefix) | +| `decodeTail(buf)` | `smpP :: Tail` | 124 | Take all remaining bytes | +| `encodeBool(b)` | `smpEncode :: Bool` | 58 | `'T'` or `'F'` | +| `decodeBool(buf)` | `smpP :: Bool` | 58 | Parse `'T'`/`'F'` | +| `encodeString(s)` | `smpEncode :: String` | 159 | Via ByteString encoding | +| `encodeMaybe(enc, v)` | `smpEncode :: Maybe a` | 114 | `'0'` for Nothing, 
`'1'` + value for Just | +| `decodeMaybe(dec, buf)` | `smpP :: Maybe a` | 114 | Parse optional value | +| `encodeNonEmpty(enc, xs)` | `smpEncode :: NonEmpty a` | 165 | 1-byte length + elements | +| `decodeNonEmpty(dec, buf)` | `smpP :: NonEmpty a` | 165 | Parse length-prefixed list | + +**Tuple encoding:** Tuples are encoded by concatenating encoded fields. Decoded by parsing fields sequentially. Instances at lines 172-212. + +### 12.2 `protocol/commands.ts` ← `Simplex/FileTransfer/Protocol.hs` + +XFTP commands and their wire encoding. + +| TypeScript type/function | Haskell type/function | Line | Description | +|---|---|---|---| +| `FileInfo` | `FileInfo` | 174 | `{sndKey, size :: Word32, digest :: ByteString}` | +| `encodeFNEW(info, rcvKeys, auth)` | `FNEW` encoding | 183 | `smpEncode (FNEW_)` + fields | +| `encodeFADD(rcvKeys)` | `FADD` encoding | 183 | Add recipient keys | +| `encodeFPUT()` | `FPUT` encoding | 183 | Upload marker (no fields) | +| `encodeFDEL()` | `FDEL` encoding | 183 | Delete marker | +| `encodeFGET(dhPubKey)` | `FGET` encoding | 183 | Download with DH key | +| `encodeFACK()` | `FACK` encoding | 183 | Acknowledge marker | +| `encodePING()` | `PING` encoding | 183 | Ping marker | +| `decodeFRSndIds(buf)` | `FRSndIds` parser | 285 | `(SenderId, NonEmpty RecipientId)` | +| `decodeFRRcvIds(buf)` | `FRRcvIds` parser | 285 | `NonEmpty RecipientId` | +| `decodeFRFile(buf)` | `FRFile` parser | 285 | `(RcvPublicDhKey, CbNonce)` | +| `decodeFROk()` | `FROk` parser | 285 | Success | +| `decodeFRErr(buf)` | `FRErr` parser | 285 | Error type | +| `decodeFRPong()` | `FRPong` parser | 285 | Pong | +| `XFTPErrorType` | `XFTPErrorType` | 206 | Error enumeration (Transport.hs) | + +**Command tags** (`FileCommandTag`, line 103): Each command is prefixed by its tag string (`"FNEW"`, `"FADD"`, etc.) encoded via `smpEncode`. + +### 12.3 `protocol/transmission.ts` ← `Simplex/FileTransfer/Protocol.hs` + +Transmission framing: sign, encode, pad to block size. 
+ +| TypeScript function | Haskell function | Line | Description | +|---|---|---|---| +| `xftpEncodeAuthTransmission(key, ...)` | `xftpEncodeAuthTransmission` | 340 | Sign + encode + pad to 16384 | +| `xftpDecodeTransmission(buf)` | `xftpDecodeTransmission` | 360 | Parse padded response block | +| `xftpBlockSize` | `xftpBlockSize` | 65 | `16384` constant | + +**Wire format:** ` ` padded with `#` to 16384 bytes. Signature is Ed25519 over `(sessionId ++ corrId ++ entityId ++ encodedCommand)`. + +**Padding:** Uses `Crypto.pad` (`Crypto.hs:1077`) — 2-byte big-endian length prefix + message + `#` (0x23) fill. + +### 12.4 `crypto/secretbox.ts` ← `Simplex/Messaging/Crypto.hs` + `Simplex/Messaging/Crypto/Lazy.hs` + +Streaming XSalsa20-Poly1305 encryption/decryption. + +| TypeScript function | Haskell function | File | Line | Description | +|---|---|---|---|---| +| `sbInit(key, nonce)` | `sbInit` | Crypto/Lazy.hs | 202 | Init `(XSalsa.State, Poly1305.State)` | +| `cbInit(dhSecret, nonce)` | `cbInit` | Crypto/Lazy.hs | 198 | Init from DH secret (transit) | +| `sbEncryptChunk(state, chunk)` | `sbEncryptChunk` | Crypto/Lazy.hs | 229 | XOR + Poly1305 update → `(ciphertext, newState)` | +| `sbDecryptChunk(state, chunk)` | `sbDecryptChunk` | Crypto/Lazy.hs | 235 | XOR + Poly1305 update → `(plaintext, newState)` | +| `sbAuth(state)` | `sbAuth` | Crypto/Lazy.hs | 241 | Finalize → 16-byte auth tag | +| `sbEncryptTailTag(key, nonce, data, len, padLen)` | `sbEncryptTailTag` | Crypto/Lazy.hs | 134 | Full encrypt, tag appended | +| `sbDecryptTailTag(key, nonce, paddedLen, data)` | `sbDecryptTailTag` | Crypto/Lazy.hs | 153 | Full decrypt, verify appended tag | +| `cryptoBox(key, iv, msg)` | `cryptoBox` | Crypto.hs | 1313 | XSalsa20 + Poly1305 (tag prepended) | +| `cbEncrypt(dhSecret, nonce, msg, padLen)` | `cbEncrypt` | Crypto.hs | 1286 | Crypto box with DH secret | +| `cbDecrypt(dhSecret, nonce, msg)` | `cbDecrypt` | Crypto.hs | 1320 | Crypto box decrypt | + +**Note:** `cryptoBox`, 
`cbEncrypt`, and `cbDecrypt` are included for completeness but are **not used by the web XFTP client**. They implement single-shot crypto_box (tag prepended) used for SMP protocol messages. The web client only needs `cbInit` (for transit decryption) and the streaming functions (`sbEncryptChunk`, `sbDecryptChunk`, `sbAuth`, `sbEncryptTailTag`, `sbDecryptTailTag`). + +**Internal init (`sbInit_`)** at `Crypto/Lazy.hs:210`: +1. Call `xSalsa20(key, nonce, zeroes_32)` → `(poly1305Key, xsalsaState)` +2. Initialize Poly1305 with `poly1305Key` +3. Return `(xsalsaState, poly1305State)` + +The `xSalsa20` function (`Crypto.hs:1467`) uses: `initialize 20 secret (zero8 ++ iv0)`, then `derive state0 iv1`, then `generate state1 32` for keystream, `combine state2 msg` for encryption. + +### 12.5 `crypto/keys.ts` ← `Simplex/Messaging/Crypto.hs` + +Key generation, signing, DH. + +| TypeScript function | Haskell function | Line | Description | +|---|---|---|---| +| `generateEd25519KeyPair()` | `generateAuthKeyPair` | 726 | Ed25519 keypair from CSPRNG | +| `generateX25519KeyPair()` | via `generateKeyPair` | — | X25519 keypair for DH | +| `sign(privateKey, msg)` | `sign'` | 1175 | Ed25519 signature (64 bytes) | +| `verify(publicKey, sig, msg)` | `verify'` | 1270 | Ed25519 verification | +| `dh(pubKey, privKey)` | `dh'` | 1280 | X25519 DH → shared secret | + +**Key types:** +- `SbKey` (`Crypto.hs:1411`): 32-byte symmetric key (newtype over ByteString) +- `CbNonce` (`Crypto.hs:1368`): 24-byte nonce (newtype over ByteString) +- `KeyHash` (`Crypto.hs:981`): SHA-256 of certificate public key + +### 12.6 `crypto/digest.ts` ← `Simplex/Messaging/Crypto.hs` + +Hash functions. 
+ +| TypeScript function | Haskell function | Line | Description | +|---|---|---|---| +| `sha256(data)` | `sha256Hash` | 1006 | SHA-256 digest (32 bytes) | +| `sha512(data)` | `sha512Hash` | 1011 | SHA-512 digest (64 bytes) | + +### 12.7 `crypto/padding.ts` ← `Simplex/Messaging/Crypto.hs` + `Simplex/Messaging/Crypto/Lazy.hs` + +Block padding used for protocol messages and file encryption. + +| TypeScript function | Haskell function | File | Line | Description | +|---|---|---|---|---| +| `pad(msg, blockSize)` | `pad` | Crypto.hs | 1077 | 2-byte BE length + msg + `#` fill | +| `unPad(buf)` | `unPad` | Crypto.hs | 1085 | Extract msg from padded block | +| `padLazy(msg, msgLen, padLen)` | `pad` | Crypto/Lazy.hs | 70 | 8-byte Int64 length + msg + `#` fill | +| `unPadLazy(buf)` | `unPad` | Crypto/Lazy.hs | 91 | Extract msg from lazy-padded block | + +**Strict pad format (protocol messages):** `[2-byte BE length][message][# # # ...]` +**Lazy pad format (file encryption):** `[8-byte Int64 length][message][# # # ...]` + +### 12.8 `crypto/file.ts` ← `Simplex/FileTransfer/Crypto.hs` + +File-level encryption/decryption orchestrating the streaming primitives. + +| TypeScript function | Haskell function | Line | Description | +|---|---|---|---| +| `encryptFile(source, header, key, nonce, fileSize, padSize, dest)` | `encryptFile` | 30 | Stream-encrypt file with header, 64KB chunks, appended auth tag | +| `decryptChunks(paddedSize, chunks, key, nonce)` | `decryptChunks` | 57 | Decrypt concatenated chunks, verify auth tag, extract header | +| `readChunks(paths)` | `readChunks` | 113 | Concatenate chunk files | + +**`encryptFile` algorithm** (lines 30-42): +1. Init state: `sbInit(key, nonce)` +2. Encrypt header: `sbEncryptChunk(state, smpEncode(fileSize') <> headerBytes)` — `fileSize'` = headerLen + originalFileSize; `smpEncode(fileSize')` produces the 8-byte Int64 length prefix, which is concatenated with `headerBytes` and encrypted together as one piece +3. 
Encrypt file data in 65536-byte chunks: `sbEncryptChunk(state, chunk)` → thread state through each chunk +4. Encrypt padding in 65536-byte chunks: same chunked loop as step 3 using `'#'` fill. `padLen = encSize - authTagSize - fileSize' - 8` +5. Finalize: `sbAuth(state)` → append 16-byte auth tag + +Note: `encryptFile` does NOT use `padLazy` or `sbEncryptTailTag`. It manually prepends the length, encrypts header+data+padding as separate chunk sequences, and appends the auth tag. The `sbEncryptTailTag` function (which does use `padLazy`) is used elsewhere but not by `encryptFile`. + +**`decryptChunks` algorithm** (lines 57-111) — two paths: + +**Single chunk (one file, line 60):** Calls `sbDecryptTailTag(key, nonce, encSize - authTagSize, data)` directly. This internally decrypts, verifies auth tag, and strips the 8-byte length prefix + padding via `unPad`. Returns `(authOk, content)`. Then parses `FileHeader` from content. + +**Multi-chunk (line 67):** +1. `sbInit(key, nonce)` → init state +2. Decrypt first chunk file: `sbDecryptChunkLazy(state, chunk)` → `splitLen` extracts 8-byte `expectedLen` → parse `FileHeader` +3. Decrypt middle chunk files: `sbDecryptChunkLazy(state, chunk)` loop, write to output, accumulate `len` +4. Decrypt last chunk file: split off last 16 bytes as auth tag → `sbDecryptChunkLazy(state, remaining)` → truncate padding using `expectedLen` vs accumulated `len` → verify `sbAuth(finalState) == authTag` + +**`FileHeader`** (`Types.hs:35`): `{fileName :: String, fileExtra :: Maybe String}`, parsed via `smpP`. + +### 12.9 `transport/handshake.ts` ← `Simplex/FileTransfer/Transport.hs` + +XFTP handshake types and encoding. 
+ +| TypeScript type/function | Haskell type/function | Line | Description | +|---|---|---|---| +| `XFTPServerHandshake` | `XFTPServerHandshake` | 114 | `{xftpVersionRange, sessionId, authPubKey}` | +| `encodeServerHandshake(hs)` | `smpEncode :: XFTPServerHandshake` | 136 | Binary encode | +| `decodeServerHandshake(buf)` | `smpP :: XFTPServerHandshake` | 136 | Parse with `Tail _compat` (line 142) | +| `XFTPClientHandshake` | `XFTPClientHandshake` | 121 | `{xftpVersion, keyHash}` | +| `encodeClientHandshake(hs)` | `smpEncode :: XFTPClientHandshake` | 128 | Binary encode | +| `decodeClientHandshake(buf)` | `smpP :: XFTPClientHandshake` | 128 | Parse with `Tail _compat` (line 133) | +| `XFTP_VERSION_RANGE` | `supportedFileServerVRange` | 101 | Version 1..3 | +| `CURRENT_XFTP_VERSION` | `currentXFTPVersion` | 98 | Version 3 | + +### 12.10 `protocol/client.ts` ← `Simplex/FileTransfer/Client.hs` (crypto primitives) — DONE + +Transport-level crypto for command authentication and chunk encryption/decryption. + +| TypeScript function | Haskell function | Description | Status | +|---|---|---|---| +| `cbAuthenticate(peerPub, ownPriv, nonce, msg)` | `C.cbAuthenticate` | 80-byte crypto_box authenticator | ✓ | +| `cbVerify(peerPub, ownPriv, nonce, auth, msg)` | `C.cbVerify` | Verify authenticator | ✓ | +| `encryptTransportChunk(dhSecret, nonce, plain)` | `sendEncFile` | Encrypt chunk (tag appended) | ✓ | +| `decryptTransportChunk(dhSecret, nonce, enc)` | `receiveEncFile` | Decrypt chunk (tag verified) | ✓ | + +### 12.11 `protocol/chunks.ts` ← `Simplex/FileTransfer/Chunks.hs` + `Client.hs` — DONE + +Chunk size selection and file splitting. 
+ +| TypeScript function/constant | Haskell equivalent | Status | +|---|---|---| +| `chunkSize0..3` | `chunkSize0..3` (Chunks.hs) | ✓ | +| `serverChunkSizes` | `serverChunkSizes` | ✓ | +| `prepareChunkSizes(size)` | `prepareChunkSizes` (Client.hs:322) | ✓ | +| `singleChunkSize(size)` | `singleChunkSize` (Client.hs:316) | ✓ | +| `prepareChunkSpecs(sizes)` | `prepareChunkSpecs` (Client.hs:339) | ✓ | +| `getChunkDigest(chunk)` | `getChunkDigest` (Client.hs:347) | ✓ | + +### 12.12–12.14 `protocol/description.ts` ← `Simplex/FileTransfer/Description.hs` — DONE + +Types, YAML encode/decode, base64url, FileSize, replica grouping/folding, validation — all in one file. + +| TypeScript function/type | Haskell equivalent | Status | +|---|---|---| +| `FileDescription`, `FileChunk`, `FileChunkReplica`, `RedirectFileInfo` | Matching record types | ✓ | +| `base64urlEncode/Decode` | `strEncode`/`strDecode` for `ByteString` | ✓ | +| `encodeFileSize/decodeFileSize` | `StrEncoding (FileSize a)` | ✓ | +| `encodeFileDescription(fd)` | `encodeFileDescription` (line 230) | ✓ | +| `decodeFileDescription(yaml)` | `decodeFileDescription` (line 356) | ✓ | +| `validateFileDescription(fd)` | `validateFileDescription` (line 221) | ✓ | +| `fdSeparator` | `fdSeparator` (line 111) | ✓ | +| Internal: `unfoldChunksToReplicas`, `foldReplicasToChunks`, `encodeFileReplicas` | Matching functions | ✓ | + +### 12.15 `client.ts` ← `Simplex/FileTransfer/Client.hs` (HTTP/2 operations) + +HTTP/2 XFTP client using `node:http2` (Node.js) or `fetch()` (browser). Transpilation of `Client.hs` network operations. 
+ +| TypeScript function | Haskell function | Line | Description | +|---|---|---|---| +| `connectXFTP(server, config)` | `getXFTPClient` | 111 | HTTP/2 connect + handshake → XFTPClient state | +| `sendXFTPCommand(client, key, fileId, cmd, chunk?)` | `sendXFTPCommand` | 200 | Encode auth transmission + POST + parse response | +| `createXFTPChunk(client, spKey, info, rcvKeys, auth?)` | `createXFTPChunk` | 232 | FNEW → (SenderId, RecipientId[]) | +| `addXFTPRecipients(client, spKey, fileId, rcvKeys)` | `addXFTPRecipients` | 244 | FADD → RecipientId[] | +| `uploadXFTPChunk(client, spKey, fileId, chunkData)` | `uploadXFTPChunk` | 250 | FPUT with streaming body | +| `downloadXFTPChunk(client, rpKey, fileId, chunkSize)` | `downloadXFTPChunk` | 254 | FGET → DH → transit-decrypt → Uint8Array | +| `deleteXFTPChunk(client, spKey, senderId)` | `deleteXFTPChunk` | 286 | FDEL | +| `ackXFTPChunk(client, rpKey, recipientId)` | `ackXFTPChunk` | 289 | FACK | +| `pingXFTP(client)` | `pingXFTP` | 292 | PING → FRPong | + +**XFTPClient state** (returned by `connectXFTP`): +- HTTP/2 session (node: `ClientHttp2Session`, browser: base URL for fetch) +- `thParams`: `{sessionId, blockSize, thVersion, thAuth}` from handshake +- Server address for reconnection + +**sendXFTPCommand wire format:** +1. `xftpEncodeAuthTransmission(thParams, pKey, (corrId, fId, cmd))` → padded 16KB block +2. POST to "/" with body = block + optional chunk data (streaming) +3. Response: read 16KB `bodyHead`, decode via `xftpDecodeTClient` +4. For FGET: response also has streaming body (encrypted chunk) + +### 12.16 `agent.ts` ← `Simplex/FileTransfer/Client/Main.hs` + +Upload/download orchestration and URL encoding. Combines what the RFC originally split across `agent/upload.ts`, `agent/download.ts`, and `description/uri.ts`. 
+ +**Upload functions:** + +| TypeScript function | Haskell function | Line | Description | +|---|---|---|---| +| `encryptFileForUpload(file, fileName)` | `encryptFileForUpload` | 264 | key/nonce → encrypt → digest → chunk specs | +| `uploadFile(client, chunkSpecs, servers, numRcps)` | `uploadFile` | 285 | Parallel upload (up to 16 concurrent) | +| `uploadFileChunk(client, chunkNo, spec, server)` | `uploadFileChunk` | 301 | FNEW + FPUT for one chunk | +| `createRcvFileDescriptions(fd, sentChunks)` | `createRcvFileDescriptions` | 329 | Build per-recipient descriptions | +| `createSndFileDescription(fd, sentChunks)` | `createSndFileDescription` | 361 | Build sender (deletion) description | + +**Upload call sequence** (`cliSendFileOpts`, line 243): +1. `encryptFileForUpload` — `randomSbKey` + `randomCbNonce` → `encryptFile` → `sha512Hash` digest → `prepareChunkSpecs` +2. `uploadFile` — for each chunk: generate sender/recipient key pairs, `createXFTPChunk`, `uploadXFTPChunk` +3. `createRcvFileDescriptions` — assemble `FileDescription` per recipient from sent chunks +4. `createSndFileDescription` — assemble sender description with deletion keys + +**Download functions:** + +| TypeScript function | Haskell function | Line | Description | +|---|---|---|---| +| `downloadFile(description)` | `cliReceiveFile` | 388 | Full download: parse → download → verify → decrypt | +| `downloadFileChunk(client, chunk)` | `downloadFileChunk` | 418 | FGET + transit-decrypt one chunk | +| `ackFileChunk(client, chunk)` | `acknowledgeFileChunk` | 440 | FACK one chunk | +| `deleteFile(description)` | `cliDeleteFile` | 455 | FDEL for all chunks | + +**Download call sequence** (`cliReceiveFile`, line 388): +1. Parse and validate `FileDescription` from YAML +2. Group chunks by server +3. Parallel download: `downloadXFTPChunk` per chunk (up to 16 concurrent) +4. Verify file digest (SHA-512) over concatenated encrypted chunks +5. `decryptChunks` — file-level decrypt with auth tag verification +6. 
Parallel acknowledge: `ackXFTPChunk` per chunk + +**URL encoding (§4.1):** + +| TypeScript function | Description | +|---|---| +| `encodeDescriptionURI(fd)` | DEFLATE compress YAML → base64url → URL hash fragment | +| `decodeDescriptionURI(url)` | Parse hash fragment → base64url decode → inflate → YAML parse | + +### 12.17 Transit Encryption Detail ← `Simplex/FileTransfer/Client.hs:253-275` + +`downloadXFTPChunk` performs transit decryption after FGET: + +1. Generate ephemeral X25519 keypair +2. Send `FGET(rcvDhPubKey)` → receive `FRFile(sndDhPubKey, cbNonce)` + encrypted body +3. Compute DH shared secret: `dh'(sndDhPubKey, rcvDhPrivKey)` (`Crypto.hs:1280`) +4. Transit-decrypt body via `receiveSbFile` (`Transport.hs:176`): `cbInit(dhSecret, cbNonce)` → `sbDecryptChunk` loop (`fileBlockSize` = 16384-byte blocks, `Transport/HTTP2/File.hs:14`) → `sbAuth` tag verification at end +5. Verify chunk digest (SHA-256): `getChunkDigest` (`Client.hs:346`) + +### 12.18 Per-Function Testing: Haskell Drives Node + +**Mechanism:** Haskell test file (`tests/XFTPWebTests.hs`) imports the real Haskell library functions, calls each one, then calls the corresponding TypeScript function via `node`, and asserts byte-identical output. See §10.1 for the `callTS` helper and example test cases. + +**Each row in the tables in §12.1–12.17 is one test case.** The function mapping tables serve as the exhaustive test case list. For example, §12.1 has 19 encoding functions → 19 Haskell test cases. §12.4 has 10 crypto functions → 10 test cases. Total: ~100 per-function test cases across all modules. + +**TS function contract:** Each TypeScript function exported from a module must accept a `Buffer` of serialized input arguments and return a `Buffer` of serialized output. The serialization format is simple concatenation of the same binary encoding used by the protocol (using the encoding primitives from §12.1). 
This means the TS functions can be called both from production code (with native types) and from the Haskell test harness (with raw buffers). A thin wrapper per module handles deserialization. + +**Stateful functions (streaming crypto):** `XSalsa.State` and `Poly1305.State` are opaque types in the crypton library — they cannot be serialized to bytes. Therefore `sbEncryptChunk` / `sbDecryptChunk` cannot be tested individually across the Haskell↔TS boundary. Instead, test the composite operations: +- `sbEncryptTailTag(key, nonce, data, len, padLen)` — Haskell encrypts, TS encrypts same input, compare ciphertext + tag. +- `sbDecryptTailTag(key, nonce, paddedLen, ciphertext)` — Haskell decrypts, TS decrypts, compare plaintext. +- Round-trip: Haskell encrypts → TS decrypts (and vice versa) → compare content. +- Multi-chunk: Haskell runs `sbInit` + N × `sbEncryptChunk` + `sbAuth` as one sequence, TS does the same, compare final ciphertext and tag. The `callTS` script runs the full sequence in one node invocation. + +**Development workflow:** +1. Implement `encodeWord16` in `src/protocol/encoding.ts` +2. Run `cabal test --ghc-options -O0 --test-option=--match="/XFTP Web Client/encoding/encodeWord16"` +3. If it fails: Haskell says `expected 002a, got 2a00` → immediately know it's an endianness bug +4. Fix → rerun → passes → move to `encodeWord32` +5. Repeat until all per-function tests pass +6. Then attempt integration tests (§10.2) — by this point, every building block is verified + +**Integration tests** (separate, TS-driven via Node.js spawning `xftp-server`): +1. Node.js test spawns `xftp-server` binary as subprocess. +2. TypeScript client connects, uploads file, gets description. +3. Haskell `xftp` CLI (spawned as subprocess) downloads and verifies content. +4. Reverse: Haskell CLI uploads, TypeScript downloads and verifies. +5. Round-trip: TypeScript uploads → TypeScript downloads → verify. 
+ +### 12.19 Project Structure Summary + +**TypeScript project (`xftp-web/`):** +``` +xftp-web/ # Separate npm project +├── src/ +│ ├── protocol/ +│ │ ├── encoding.ts # ← Simplex.Messaging.Encoding ✓ +│ │ ├── commands.ts # ← Simplex.FileTransfer.Protocol (commands+responses) ✓ +│ │ ├── transmission.ts # ← Simplex.FileTransfer.Protocol (framing) ✓ +│ │ ├── handshake.ts # ← Simplex.FileTransfer.Transport (handshake) ✓ +│ │ ├── address.ts # ← Simplex.Messaging.Protocol (server address) ✓ +│ │ ├── chunks.ts # ← Simplex.FileTransfer.Chunks + Client.hs (sizing) ✓ +│ │ ├── client.ts # ← Transport crypto (cbAuth, transit encrypt/decrypt) ✓ +│ │ └── description.ts # ← Simplex.FileTransfer.Description (types+yaml+val) ✓ +│ ├── crypto/ +│ │ ├── secretbox.ts # ← Simplex.Messaging.Crypto + Crypto.Lazy ✓ +│ │ ├── file.ts # ← Simplex.FileTransfer.Crypto ✓ +│ │ ├── keys.ts # ← Simplex.Messaging.Crypto (Ed25519/X25519/Ed448) ✓ +│ │ ├── digest.ts # ← Simplex.Messaging.Crypto (sha256, sha512) ✓ +│ │ ├── padding.ts # ← Simplex.Messaging.Crypto (pad/unPad) ✓ +│ │ └── identity.ts # ← Web handshake identity proof (Ed25519/Ed448) ✓ +│ ├── download.ts # Download helpers (DH, transit-decrypt, file-decrypt) ✓ +│ ├── client.ts # ← Simplex.FileTransfer.Client (HTTP/2 operations) +│ └── agent.ts # ← Simplex.FileTransfer.Client.Main (orchestration) +├── web/ # Browser UI (Phase 5) +│ ├── index.html +│ ├── upload.ts +│ ├── download.ts +│ └── progress.ts # Circular progress component +├── package.json +└── tsconfig.json +``` + +**Haskell tests (in simplexmq repo):** +``` +tests/ +├── XFTPWebTests.hs # Haskell-driven: calls each TS function via node, +│ # compares output with Haskell function (see §10.1) +│ # 164 test cases across 16 test groups +└── fixtures/ed25519/ # Ed25519 test certs for web handshake integration tests +``` + +No fixture files, no TS test harness for unit tests. The Haskell test file IS the test — it calls both Haskell and TypeScript functions directly and compares outputs. 
TS-side integration tests (`test/integration.test.ts`) are separate and only run after all per-function tests pass. diff --git a/rfcs/2026-01-30-send-file-page/2026-01-31-xftp-web-server-changes.md b/rfcs/2026-01-30-send-file-page/2026-01-31-xftp-web-server-changes.md new file mode 100644 index 0000000000..a1a2f47d54 --- /dev/null +++ b/rfcs/2026-01-30-send-file-page/2026-01-31-xftp-web-server-changes.md @@ -0,0 +1,154 @@ +# XFTP Server: SNI, CORS, and Web Support + +Implementation details for Phase 3 of `rfcs/2026-01-30-send-file-page.md` (sections 6.1-6.4). + +## 1. Overview + +The XFTP server is extended to support web browser clients by: + +1. **SNI-based TLS certificate switching** — Present a CA-issued web certificate (e.g., Let's Encrypt) to browsers, while continuing to present the self-signed XFTP identity certificate to native clients. +2. **CORS headers** — Add CORS response headers on SNI connections so browsers allow cross-origin XFTP requests. +3. **Configuration** — `[WEB]` INI section for HTTPS cert/key paths; opt-in (commented out by default). + +Web handshake (challenge-response identity proof, §6.3 of parent RFC) is not yet implemented and will be added separately. + +## 2. SNI Certificate Switching + +### 2.1 Reusing the SMP Pattern + +The SMP server already implements SNI-based certificate switching via `TLSServerCredential` and `runTransportServerState_` (see `rfcs/2024-09-15-shared-port.md`). The XFTP server applies the same pattern with one key difference: both native and web XFTP clients use HTTP/2 transport, whereas SMP switches between raw SMP protocol and HTTP entirely. + +### 2.2 Approach + +When `httpServerCreds` is configured, the XFTP server bypasses `runHTTP2Server` and uses `runTransportServerState_` directly to obtain the per-connection `sniUsed` flag. It then sets up HTTP/2 manually on each TLS connection using `withHTTP2` (same internals as `runHTTP2ServerWith_`). 
The `sniUsed` flag is captured in the closure and shared by all HTTP/2 requests on that connection. + +When `httpServerCreds` is absent, the existing `runHTTP2Server` path is unchanged. + +``` +Native client (no SNI) ──TLS──> XFTP identity cert ──HTTP/2──> processRequest (no CORS) +Browser client (SNI) ──TLS──> Web CA cert ──HTTP/2──> processRequest (+ CORS) +``` + +### 2.3 Certificate Chain + +The web certificate file (e.g., `web.crt`) must contain the full chain: leaf certificate followed by the signing CA certificate. `loadServerCredential` uses `T.credentialLoadX509Chain` which reads all PEM blocks from the file. + +The client validates the chain by comparing `idCert` fingerprint (the CA cert, second in the 2-cert chain) against the known `keyHash`. This is the same validation as for XFTP identity certificates — the CA that signed the web cert must match the XFTP server's identity. + +## 3. CORS Support + +### 3.1 Design + +CORS headers are only added when both conditions are true: +- `addCORSHeaders` is `True` in `TransportServerConfig` (set in XFTP `Main.hs`) +- `sniUsed` is `True` for the current TLS connection + +This ensures native clients never see CORS headers. + +### 3.2 Response Headers + +All POST responses on SNI connections include: +``` +Access-Control-Allow-Origin: * +Access-Control-Expose-Headers: * +``` + +### 3.3 OPTIONS Preflight + +OPTIONS requests are intercepted at the HTTP/2 dispatch level, before `processRequest`. This is necessary because `processRequest` rejects bodies that don't match `xftpBlockSize`. + +Preflight response: +``` +HTTP/2 200 +Access-Control-Allow-Origin: * +Access-Control-Allow-Methods: POST, OPTIONS +Access-Control-Allow-Headers: * +Access-Control-Max-Age: 86400 +``` + +### 3.4 Security + +`Access-Control-Allow-Origin: *` is safe because: +- All XFTP commands require Ed25519 authentication (per-chunk keys from file description). +- No cookies or browser credentials are involved. +- File content is end-to-end encrypted. 
+ +## 4. Configuration + +### 4.1 INI Template + +```ini +[WEB] +# cert: /etc/opt/simplex-xftp/web.crt +# key: /etc/opt/simplex-xftp/web.key +``` + +Commented out by default — web support is opt-in. + +### 4.2 Behavior + +- `[WEB]` section not configured: silently ignored, server operates normally for native clients only. +- `[WEB]` section configured with valid cert/key paths: SNI + CORS enabled. +- `[WEB]` section configured with missing cert files: warning + continue (non-fatal, unlike SMP where it is fatal). + +## 5. Files Modified + +### 5.1 `src/Simplex/Messaging/Transport/Server.hs` + +Added `addCORSHeaders :: Bool` field to `TransportServerConfig`. Updated `mkTransportServerConfig` to accept the new parameter. All existing SMP call sites pass `False`. + +### 5.2 `src/Simplex/Messaging/Transport/HTTP2/Server.hs` + +- Extracted `expireInactiveClient` from `runHTTP2ServerWith_`'s `where` clause to a module-level function. +- Parameterized `runHTTP2ServerWith_`: setup type changed from `((TLS p -> IO ()) -> a)` to `(((Bool, TLS p) -> IO ()) -> a)`, callback from `HTTP2ServerFunc` to `Bool -> HTTP2ServerFunc`. The `Bool` is the per-connection `sniUsed` flag, threaded through `H.run` to the callback. +- Extended `runHTTP2Server` with `Maybe T.Credential` parameter for SNI web certificate. Its setup uses `runTransportServerState_` with `TLSServerCredential`, which naturally provides `(sniUsed, tls)` pairs matching the new `runHTTP2ServerWith_` setup type. +- Adapted `runHTTP2ServerWith` (client-side HTTP/2, no SNI): wraps its setup to inject `(False, tls)` and its callback with `const`. +- Updated `getHTTP2Server` (test helper) to pass `Nothing` for httpCreds. + +### 5.3 `src/Simplex/FileTransfer/Server/Env.hs` + +- Added `httpCredentials :: Maybe ServerCredentials` to `XFTPServerConfig`. +- Added `httpServerCreds :: Maybe T.Credential` to `XFTPEnv`. +- `newXFTPServerEnv` loads HTTP credentials when configured. 
+ +### 5.4 `src/Simplex/FileTransfer/Server/Main.hs` + +- Added `[WEB]` section to INI template. +- Added `httpCredentials` parsing from INI `[WEB]` section (`cert` and `key` fields). +- Set `addCORSHeaders = isJust httpCredentials_` in transport config (conditional on web cert presence). + +### 5.5 `src/Simplex/FileTransfer/Server.hs` + +Core server changes: + +- `runServer` calls `runHTTP2Server` with `httpCreds_` and a `\sniUsed -> handleRequest (sniUsed && addCORSHeaders transportConfig)` callback. TLS params are `defaultSupportedParamsHTTPS` when web creds present, `defaultSupportedParams` otherwise. SNI routing, HTTP/2 setup, and client expiration are handled inside `runHTTP2Server`. + +- `XFTPTransportRequest` carries `addCORS :: Bool` field, threaded through to `sendXFTPResponse`. + +- `sendXFTPResponse` conditionally includes CORS headers based on `addCORS`. + +- OPTIONS requests on SNI connections return CORS preflight headers before reaching `processRequest`. + +- Helper functions: `corsHeaders` (response headers), `corsPreflightHeaders` (preflight headers). + +### 5.6 `tests/XFTPClient.hs` + +- Added `httpCredentials = Nothing` to `testXFTPServerConfig`. +- Added `testXFTPServerConfigSNI` with web cert config and `addCORSHeaders = True`. +- Added `withXFTPServerSNI` helper. + +### 5.7 `tests/XFTPServerTests.hs` + +Added SNI and CORS tests as a subsection within `xftpServerTests` (6 tests): + +1. **SNI cert selection** — Connect with SNI + `h2` ALPN, verify RSA web certificate is presented. +2. **Non-SNI cert selection** — Connect without SNI + `xftp/1` ALPN, verify Ed448 XFTP certificate is presented. +3. **CORS headers** — SNI POST request includes `Access-Control-Allow-Origin: *` and `Access-Control-Expose-Headers: *`. +4. **OPTIONS preflight** — SNI OPTIONS request returns all CORS preflight headers. +5. **No CORS without SNI** — Non-SNI POST request has no CORS headers. +6. 
**File chunk delivery** — Full XFTP file chunk upload/download through SNI-enabled server verifying no regression. + +## 6. Remaining Work + +- **Web handshake** (§6.3 of parent RFC): Challenge-response identity proof for SNI connections. The server detects web clients via the `sniUsed` flag and expects a 32-byte challenge in the first POST body (non-empty, unlike standard handshake). Response includes full cert chain + signature over `(challenge ++ sessionId)`. +- **Static page serving** (§6.5 of parent RFC): Optional serving of the web page HTML/JS bundle on GET requests. diff --git a/rfcs/2026-01-30-send-file-page/2026-02-02-xftp-web-handshake.md b/rfcs/2026-01-30-send-file-page/2026-02-02-xftp-web-handshake.md new file mode 100644 index 0000000000..de23bbf8b4 --- /dev/null +++ b/rfcs/2026-01-30-send-file-page/2026-02-02-xftp-web-handshake.md @@ -0,0 +1,246 @@ +# Web Handshake — Challenge-Response Identity Proof + +RFC §6.3: Server proves XFTP identity to web clients independently of TLS CA infrastructure. + +## 1. Protocol + +**Standard handshake** (unchanged): +``` +Client → empty POST → Server +Server → padded {vRange, sessionId, authPubKey, Nothing} → Client +Client → padded {version, keyHash, Nothing} → Server +Server → empty → Client +``` + +**Web handshake** (SNI connection, non-empty hello): +``` +Client → padded {32 random bytes} → Server +Server → padded {vRange, sessionId, authPubKey, Just sigBytes} → Client + sigBytes = signatureBytes(sign(identityLeafKey, challenge <> sessionId)) +Client validates: + 1. chainIdCaCerts(authPubKey.certChain) → CCValid {leafCert, idCert} + 2. SHA-256(idCert) == keyHash (server identity) + 3. verify(leafCert.pubKey, sigBytes, challenge <> sessionId) (challenge-response) + 4. 
verify(leafCert.pubKey, signedPubKey.signature, signedPubKey.objectDer) (DH key auth) +Client → padded {version, keyHash, Just challenge} → Server +Server verifies: echoed challenge == stored challenge from step 1 +Server → empty → Client +``` + +**Detection**: `sniUsed` per-connection flag. Non-empty hello allowed only when `sniUsed`. Empty hello with SNI → standard handshake. + +**Why both steps 3 and 4**: Native clients verify `signedPubKey` using the TLS peer certificate (`serverKey` from `getServerVerifyKey`), which is the XFTP identity cert in non-SNI connections — TLS provides this binding. Web clients cannot access TLS peer certificate data (browser API limitation; TLS presents the web CA cert but provides no API to extract it). So web clients must verify at the application layer using `authPubKey.certChain`, which always contains the XFTP identity chain regardless of which cert TLS used. Step 3 proves the server holds its identity key *right now* (freshness via random challenge). Step 4 proves the DH session key was signed by the identity key holder (prevents MITM key substitution). Together they give web clients some assurance native clients get from TLS, except channel binding for commands. + +## 2. Type Changes — `src/Simplex/FileTransfer/Transport.hs` + +### `XFTPServerHandshake` (line 114) + +Add field: `webIdentityProof :: Maybe ByteString` — raw Ed448 signature bytes (114 bytes), or `Nothing` for standard handshake. No record needed — the cert chain is already in `authPubKey.certChain`. + +### `Encoding XFTPServerHandshake` (line 136) + +- `smpEncode`: append `smpEncode webIdentityProof` +- `smpP`: `Tail compat`, if non-empty `eitherToMaybe $ smpDecode compat` + +Backward compat: old clients ignore via `Tail _compat`; new client + old server → empty compat → `Nothing`. 
+ +### `XFTPClientHandshake` (line 121) + +Add field: `webChallenge :: Maybe ByteString` + +### `Encoding XFTPClientHandshake` (line 128) + +Same `Tail compat` pattern as server handshake. + +### Export list + +Both types use `(..)` export — new fields auto-exported. + +## 3. Server Changes — `src/Simplex/FileTransfer/Server.hs` + +### `XFTPTransportRequest` (line 88) + +Add field: `sniUsed :: SNICredentialUsed` (`Bool` from `Transport.Server`). Add import. + +### `Handshake` (line 117) + +`HandshakeSent C.PrivateKeyX25519` → `HandshakeSent C.PrivateKeyX25519 (Maybe ByteString)` — stores 32-byte web challenge or `Nothing`. + +### `runServer` handler (line 145–161) + +- Pass `sniUsed` into request construction (line 154) +- SNI-first routing: when `sniUsed`, always route to `xftpServerHandshakeV1` (web ALPN `h2` would otherwise fall to `_` catch-all) + +### `xftpServerHandshakeV1` (line 162) + +- Destructure `sniUsed` from request +- Match `HandshakeSent pk challenge_` → `processClientHandshake pk challenge_` + +### `processHello` (line 171) + +- Branch `(sniUsed, B.null bodyHead)`: + - `(_, True)` → standard: `challenge_ = Nothing` + - `(True, False)` → web: unpad, verify 32 bytes, `challenge_ = Just` + - `(False, False)` → `throwE HANDSHAKE` +- Store: `HandshakeSent pk challenge_` +- Compute: `webIdentityProof = C.signatureBytes . C.sign serverSignKey . (<> sessionId) <$> challenge_` +- Construct `XFTPServerHandshake` with `webIdentityProof` + +### `processClientHandshake` (line 183) + +- Accept `challenge_` parameter +- Decode `webChallenge` from `XFTPClientHandshake` +- Add: `unless (challenge_ == webChallenge) $ throwE HANDSHAKE` + (standard: both `Nothing` → passes) + +## 4. Native Client — `src/Simplex/FileTransfer/Client.hs` + +### `xftpClientHandshakeV1` (line 142) + +Add `webChallenge = Nothing` in `sendClientHandshake` call. + +No other changes — parser handles new fields via `Tail`, native client ignores `webIdentityProof`. + +## 5. 
TypeScript Changes (DONE except Ed448) + +Sections 5.1 and 5.2 are implemented. Section 5.3 needs Ed448 support. + +## 10. Ed448 Support via `@noble/curves` + +**Problem**: Production servers use Ed448 certificates (default). `identity.ts` only supports Ed25519 via libsodium. libsodium has no Ed448 support and never will. + +**Solution**: Add `@noble/curves` dependency for Ed448 verification only. All other crypto stays with libsodium. + +### 10.1 `xftp-web/package.json` — Add dependency + +```json +"dependencies": { + "libsodium-wrappers-sumo": "^0.7.13", + "@noble/curves": "^1.9.7" +} +``` + +Use v1.x (supports both CJS and ESM). v2.x is ESM-only with `.js` extension requirement. + +### 10.2 `xftp-web/src/crypto/keys.ts` — Ed448 DER constants and decode + +Add Ed448 SPKI DER prefix (12 bytes, same prefix length as Ed25519): +``` +30 43 30 05 06 03 2b 65 71 03 3a 00 +``` + +| Property | Ed25519 | Ed448 | +|----------|---------|-------| +| OID | `2b 65 70` | `2b 65 71` | +| SPKI prefix | `30 2a ...` | `30 43 ...` | +| Raw key size | 32 bytes | 57 bytes | +| SPKI total | 44 bytes | 69 bytes | +| Signature size | 64 bytes | 114 bytes | + +New functions: +- `decodePubKeyEd448(der: Uint8Array): Uint8Array` — 69 bytes → 57 bytes raw +- `encodePubKeyEd448(raw: Uint8Array): Uint8Array` — 57 bytes → 69 bytes DER +- `verifyEd448(publicKey: Uint8Array, sig: Uint8Array, msg: Uint8Array): boolean` — uses `ed448.verify(sig, msg, publicKey)` from `@noble/curves/ed448` + +Note: `@noble/curves` parameter order is `(signature, message, publicKey)`, not `(publicKey, signature, message)`. + +### 10.3 `xftp-web/src/crypto/identity.ts` — Algorithm-agnostic verification + +Replace `extractCertEd25519Key` + hardcoded Ed25519 `verify` with algorithm detection: + +1. `extractCertPublicKeyInfo(certDer)` → SPKI DER (already exists, works for any algorithm) +2. Detect algorithm from SPKI: byte at offset 8 is `0x70` (Ed25519) or `0x71` (Ed448) +3. Extract raw key with appropriate decoder +4. 
Verify signatures with appropriate function + +```typescript +type CertKeyAlgorithm = 'ed25519' | 'ed448' + +function detectKeyAlgorithm(spki: Uint8Array): CertKeyAlgorithm { + if (spki.length === 44 && spki[8] === 0x70) return 'ed25519' + if (spki.length === 69 && spki[8] === 0x71) return 'ed448' + throw new Error("unsupported certificate key algorithm") +} +``` + +`verifyIdentityProof` changes: +- Extract SPKI from leaf cert +- Detect algorithm → choose `decodePubKeyEd25519`/`decodePubKeyEd448` and `verify`/`verifyEd448` +- Both challenge signature and DH key signature use the same leaf key + algorithm + +Remove `extractCertEd25519Key` (replaced by generic path). Keep `extractCertPublicKeyInfo` (already generic). + +### 10.4 `xftp-web/src/protocol/handshake.ts` — Comment update + +`SignedKey.signature` comment: "raw Ed25519 signature bytes (64 bytes)" → "raw signature bytes (Ed25519: 64, Ed448: 114)" + +### 10.5 Tests — `tests/XFTPWebTests.hs` + +**Integration test**: Switch from `withXFTPServerEd25519SNI` (Ed25519 fixtures) to `withXFTPServerSNI` (default Ed448 fixtures). Update fingerprint source from `tests/fixtures/ed25519/ca.crt` to `tests/fixtures/ca.crt`. + +Optionally add a second integration test with Ed25519 to cover both paths, or rely on existing unit tests for Ed25519 coverage. + +### 10.6 Implementation order + +1. `npm install @noble/curves` in `xftp-web/` +2. `keys.ts` — Ed448 constants, decode, encode, verifyEd448 +3. `identity.ts` — algorithm detection, generic verification +4. `handshake.ts` — comment fix +5. `XFTPWebTests.hs` — switch integration test to Ed448 +6. Build TS + run all tests + +## 6. Haskell Integration Test — `tests/XFTPServerTests.hs` + +Add `testWebHandshake` to "XFTP SNI and CORS" describe block. + +1. `withXFTPServerSNI` — server with web credentials +2. Connect with SNI + `h2` ALPN +3. Send padded 32-byte challenge +4. Decode `XFTPServerHandshake`, assert `webIdentityProof` is `Just` +5. 
`chainIdCaCerts` on `authPubKey.certChain` → `CCValid {leafCert, idCert}` +6. Verify `SHA-256(idCert) == keyHash` +7. Extract `leafCert` public key, verify challenge signature +8. Verify `signedPubKey` signature using `leafCert` key (DH key auth) +9. Send `XFTPClientHandshake` with `webChallenge = Just challenge` +10. Assert empty response + +Imports: `XFTPServerHandshake (..)`, `XFTPClientHandshake (..)`, `ChainCertificates (..)`, `chainIdCaCerts`. + +## 7. TS Tests — `tests/XFTPWebTests.hs` + +### Unit tests + +- **`decodeServerHandshake` with proof**: Haskell-encode with `Just sigBytes`, TS-decode, verify bytes match. +- **`encodeClientHandshake` with challenge**: TS-encode, compare with Haskell-encoded. +- **`chainIdCaCerts`**: 2/3/4-cert chains return correct positions. +- **`caFingerprint` (fixed)**: matches `sha256(idCert)` for 2 and 3-cert chains. + +### Integration test + +Node.js inline script against `withXFTPServerSNI`: +1. Connect with SNI via `http2.connect` +2. Send padded challenge, decode `XFTPServerHandshake` with TS +3. `verifyIdentityProof` — full chain validation + challenge sig + DH key sig +4. Send client handshake with echoed challenge +5. Assert empty response + +## 8. Implementation Order + +1. `Transport.hs` — `Maybe` fields + encoding instances +2. `Server.hs` — `sniUsed`, challenge in `Handshake`, `processHello`, `processClientHandshake`, SNI routing +3. `Client.hs` — `webChallenge = Nothing` +4. Build: `cabal build --ghc-options -O0` +5. Run existing SNI/CORS tests +6. `XFTPServerTests.hs` — `testWebHandshake` +7. `handshake.ts` — types, decoding, `chainIdCaCerts`, fix `caFingerprint` +8. `crypto/identity.ts` — Node.js verification functions +9. `XFTPWebTests.hs` — unit + integration tests +10. Build TS + run all tests + +## 9. Verification + +```bash +cd xftp-web && npm install && npm run build && cd .. 
+cabal test --ghc-options=-O0 --test-option='--match=/XFTP/XFTP server/XFTP SNI and CORS/' --test-show-details=streaming +cabal test --ghc-options=-O0 --test-option='--match=/XFTP Web Client/' --test-show-details=streaming +``` diff --git a/rfcs/2026-01-30-send-file-page/2026-02-03-xftp-web-browser-tests.md b/rfcs/2026-01-30-send-file-page/2026-02-03-xftp-web-browser-tests.md new file mode 100644 index 0000000000..2e08a2efb7 --- /dev/null +++ b/rfcs/2026-01-30-send-file-page/2026-02-03-xftp-web-browser-tests.md @@ -0,0 +1,208 @@ +# Plan: Browser ↔ Haskell File Transfer Tests + +## Table of Contents +1. Goal +2. Current State +3. Implementation +4. Success Criteria +5. Files +6. Order + +## 1. Goal +Run browser upload/download tests in headless Chromium via Vitest, proving fetch-based transport works in real browser environment. + +## 2. Current State +- `client.ts`: Transport abstraction done — http2 for Node, fetch for browser ✓ +- `agent.ts`: Uses `node:crypto` (randomBytes) and `node:zlib` (deflateRawSync/inflateRawSync) — **won't run in browser** +- `XFTPWebTests.hs`: Cross-language tests exist (Haskell calls TS via Node.js) ✓ + +## 3. Implementation + +### 3.1 Make agent.ts isomorphic + +| Current (Node.js only) | Isomorphic replacement | +|------------------------|------------------------| +| `import crypto from "node:crypto"` | Remove import | +| `import zlib from "node:zlib"` | `import pako from "pako"` | +| `crypto.randomBytes(32)` | `crypto.getRandomValues(new Uint8Array(32))` | +| `zlib.deflateRawSync(buf)` | `pako.deflateRaw(buf)` | +| `zlib.inflateRawSync(buf)` | `pako.inflateRaw(buf)` | + +Note: `crypto.getRandomValues` available in both browser and Node.js (globalThis.crypto). 
+
+### 3.2 Vitest browser mode setup
+
+`package.json` additions:
+```json
+"devDependencies": {
+  "vitest": "^3.0.0",
+  "@vitest/browser": "^3.0.0",
+  "playwright": "^1.50.0",
+  "@types/pako": "^2.0.3"
+},
+"dependencies": {
+  "pako": "^2.1.0"
+}
+```
+
+`vitest.config.ts`:
+```typescript
+import {defineConfig} from 'vitest/config'
+import {readFileSync} from 'fs'
+import {createHash} from 'crypto'
+
+// Compute fingerprint from ca.crt (same as Haskell's loadFileFingerprint)
+const caCert = readFileSync('../tests/fixtures/ca.crt')
+const fingerprint = createHash('sha256').update(caCert).digest('base64url')
+const serverAddr = `xftp://${fingerprint}@localhost:7000`
+
+export default defineConfig({
+  define: {
+    'import.meta.env.XFTP_SERVER': JSON.stringify(serverAddr)
+  },
+  test: {
+    browser: {
+      enabled: true,
+      provider: 'playwright',
+      instances: [{browser: 'chromium'}],
+      headless: true,
+      providerOptions: {
+        // ignoreHTTPSErrors is a BrowserContext option in Playwright
+        // (browser.newContext), not a launch option — needed because the
+        // test server presents a self-signed web certificate.
+        context: {ignoreHTTPSErrors: true}
+      }
+    },
+    globalSetup: './test/globalSetup.ts'
+  }
+})
+```
+
+### 3.3 Server startup
+
+`test/globalSetup.ts`:
+```typescript
+import {spawn, ChildProcess} from 'child_process'
+import {resolve, join} from 'path'
+import {mkdtempSync, writeFileSync, copyFileSync} from 'fs'
+import {tmpdir} from 'os'
+
+let server: ChildProcess | null = null
+
+export async function setup() {
+  const fixtures = resolve(__dirname, '../../tests/fixtures')
+
+  // Create temp directories
+  const cfgDir = mkdtempSync(join(tmpdir(), 'xftp-cfg-'))
+  const logDir = mkdtempSync(join(tmpdir(), 'xftp-log-'))
+  const filesDir = mkdtempSync(join(tmpdir(), 'xftp-files-'))
+
+  // Copy certificates to cfgDir (xftp-server expects ca.crt, server.key, server.crt there)
+  copyFileSync(join(fixtures, 'ca.crt'), join(cfgDir, 'ca.crt'))
+  copyFileSync(join(fixtures, 'server.key'), join(cfgDir, 'server.key'))
+  copyFileSync(join(fixtures, 'server.crt'), join(cfgDir, 'server.crt'))
+
+  // Write INI config file
+  const iniContent = `[STORE_LOG]
+enable: off
+
+[TRANSPORT]
+host: localhost
+port: 7000
+
+[FILES]
+path: ${filesDir}
+
+[WEB]
+cert: ${join(fixtures, 'web.crt')}
+key: ${join(fixtures, 'web.key')}
+`
+  writeFileSync(join(cfgDir, 'file-server.ini'), iniContent)
+
+  // Spawn xftp-server with env vars
+  server = spawn('cabal', ['exec', 'xftp-server', '--', 'start'], {
+    env: {
+      ...process.env,
+      XFTP_SERVER_CFG_PATH: cfgDir,
+      XFTP_SERVER_LOG_PATH: logDir
+    },
+    stdio: ['ignore', 'pipe', 'pipe']
+  })
+
+  // Wait for "Listening on port 7000..."
+  await waitForServerReady(server)
+}
+
+export async function teardown() {
+  server?.kill('SIGTERM')
+  await new Promise(r => setTimeout(r, 500))
+}
+
+function waitForServerReady(proc: ChildProcess): Promise<void> {
+  return new Promise<void>((resolve, reject) => {
+    const timeout = setTimeout(() => reject(new Error('Server start timeout')), 15000)
+    proc.stdout?.on('data', (data: Buffer) => {
+      if (data.toString().includes('Listening on port')) {
+        clearTimeout(timeout)
+        resolve()
+      }
+    })
+    proc.stderr?.on('data', (data: Buffer) => {
+      console.error('[xftp-server]', data.toString())
+    })
+    proc.on('error', reject)
+    proc.on('exit', (code) => {
+      clearTimeout(timeout)
+      if (code !== 0) reject(new Error(`Server exited with code ${code}`))
+    })
+  })
+}
+```
+
+Server env vars (from `apps/xftp-server/Main.hs` + `getEnvPath`):
+- `XFTP_SERVER_CFG_PATH` — directory containing `file-server.ini` and certs (`ca.crt`, `server.key`, `server.crt`)
+- `XFTP_SERVER_LOG_PATH` — directory for logs
+
+### 3.4 Browser test
+
+`test/browser.test.ts`:
+```typescript
+import {test, expect} from 'vitest'
+import {encryptFileForUpload, uploadFile, downloadFile} from '../src/agent.js'
+import {parseXFTPServer} from '../src/protocol/address.js'
+
+const server = parseXFTPServer(import.meta.env.XFTP_SERVER)
+
+test('browser upload + download round-trip', async () => {
+  const data = new Uint8Array(50000)
+  crypto.getRandomValues(data)
+  const encrypted = encryptFileForUpload(data, 'test.bin')
+  const
{rcvDescription} = await uploadFile(server, encrypted) + const {content} = await downloadFile(rcvDescription) + expect(content).toEqual(data) +}) +``` + +## 4. Success Criteria + +1. `npm run build` — agent.ts compiles without node: imports +2. `cabal test --test-option='--match=/XFTP Web Client/'` — existing Node.js tests still pass +3. `npm run test:browser` — browser round-trip test passes in headless Chromium + +## 5. Files to Create/Modify + +**Modify:** +- `xftp-web/package.json` — add vitest, @vitest/browser, playwright, pako, @types/pako +- `xftp-web/src/agent.ts` — replace node:crypto, node:zlib with isomorphic alternatives + +**Create:** +- `xftp-web/vitest.config.ts` — browser mode config +- `xftp-web/test/globalSetup.ts` — xftp-server lifecycle +- `xftp-web/test/browser.test.ts` — browser round-trip test + +## 6. Order of Implementation + +1. **Add pako dependency** — `npm install pako @types/pako` +2. **Make agent.ts isomorphic** — replace node:crypto, node:zlib +3. **Verify Node.js tests pass** — `cabal test --test-option='--match=/XFTP Web Client/'` +4. **Set up Vitest** — add devDeps, create vitest.config.ts +5. **Create globalSetup.ts** — write INI config, spawn xftp-server +6. **Write browser test** — upload + download round-trip +7. **Verify browser test passes** — `npm run test:browser` diff --git a/rfcs/2026-01-30-send-file-page/2026-02-04-xftp-web-browser-transport.md b/rfcs/2026-01-30-send-file-page/2026-02-04-xftp-web-browser-transport.md new file mode 100644 index 0000000000..41915bf64d --- /dev/null +++ b/rfcs/2026-01-30-send-file-page/2026-02-04-xftp-web-browser-transport.md @@ -0,0 +1,920 @@ +# Browser Transport & Web Worker Architecture + +## TOC + +1. Executive Summary +2. Transport: fetch() API +3. Architecture: Environment Abstraction +4. Web Worker Implementation +5. OPFS Implementation +6. Implementation Plan +7. Testing Strategy + +## 1. 
Executive Summary + +Adapt `client.ts` from `node:http2` to `fetch()` API for isomorphic Node.js/browser support. Add environment abstraction layer so the same upload/download pipeline works with or without Web Workers and with or without OPFS. In browsers, crypto runs in a Web Worker to keep UI responsive; in Node.js tests, crypto runs directly. + +**Key architectural constraint:** Existing crypto functions (`encryptFile`, `decryptChunks`, etc.) remain unchanged. The abstraction layer wraps them, choosing execution context (direct vs Worker) and storage (memory vs OPFS) based on environment. + +**Scope:** +- Replace `node:http2` with `fetch()` in `client.ts` +- Add `CryptoBackend` abstraction with three implementations +- Create Web Worker that calls existing crypto functions +- Add OPFS storage for large files in browser + +**Out of scope:** Web page UI (Phase 5 in main RFC). + +## 2. Transport: fetch() API + +### 2.1 Current State + +`client.ts` uses `node:http2`: +```typescript +import http2 from "node:http2" +const session = http2.connect(url) +const stream = session.request({':method': 'POST', ':path': '/'}) +stream.write(commandBlock) +stream.end(chunkData) +``` + +### 2.2 Target State + +Isomorphic `fetch()` (Node.js 18+ and browsers): +```typescript +const response = await fetch(url, { + method: 'POST', + body: concatStreams(commandBlock, chunkData), + duplex: 'half', // Required for streaming request body +}) +const reader = response.body!.getReader() +``` + +### 2.3 Key Differences + +| Aspect | node:http2 | fetch() | +|--------|-----------|---------| +| Session management | Explicit `session.connect()` / `session.close()` | Per-request (HTTP/2 connection reuse is automatic) | +| Streaming upload | `stream.write()` chunks | `ReadableStream` body + `duplex: 'half'` | +| Streaming download | `stream.on('data')` | `response.body.getReader()` | +| Connection pooling | Manual | Automatic per origin | + +### 2.4 API Changes + +```typescript +// Before 
(node:http2)
+export interface XFTPClient {
+  session: http2.ClientHttp2Session
+  thParams: THParams
+  server: XFTPServer
+}
+
+// After (fetch)
+export interface XFTPClient {
+  baseUrl: string  // "https://host:port"
+  thParams: THParams
+  server: XFTPServer
+}
+```
+
+`connectXFTP()` performs handshake via fetch, returns `XFTPClient` with `baseUrl`.
+Subsequent commands use `fetch(client.baseUrl, ...)`.
+
+### 2.5 Handshake via fetch()
+
+**TLS session binding:** Multiple fetch() requests to the same origin reuse the HTTP/2 connection, which means they share the same TLS session. The server's `sessionId` (derived from TLS channel binding) remains consistent across the handshake round-trips and subsequent commands.
+
+```typescript
+async function connectXFTP(server: XFTPServer): Promise<XFTPClient> {
+  const baseUrl = `https://${server.host}:${server.port}`
+
+  // Round-trip 1: challenge → server handshake + identity proof
+  const challenge = crypto.getRandomValues(new Uint8Array(32))
+  const req1 = pad(encodeWebClientHello(challenge), xftpBlockSize)
+  const resp1 = await fetch(baseUrl, {method: 'POST', body: req1})
+
+  const reader = resp1.body!.getReader()
+  const serverBlock = await readExactly(reader, xftpBlockSize)
+  const serverHs = decodeServerHandshake(unPad(serverBlock))
+  const proofBody = await readRemaining(reader)
+  verifyIdentityProof(server.keyHash, challenge, serverHs.sessionId, proofBody)
+
+  // Round-trip 2: client handshake → server ack
+  const clientHs = encodeClientHandshake({xftpVersion: 3, keyHash: server.keyHash})
+  const req2 = pad(clientHs, xftpBlockSize)
+  await fetch(baseUrl, {method: 'POST', body: req2})
+
+  return {baseUrl, thParams: {sessionId: serverHs.sessionId, ...}, server}
+}
+```
+
+### 2.6 Command Execution
+
+```typescript
+async function sendXFTPCommand(
+  client: XFTPClient,
+  key: Uint8Array,
+  entityId: Uint8Array,
+  cmd: Uint8Array,
+  chunkData?: Uint8Array
+): Promise<{response: Uint8Array, body?: ReadableStream}> {
+  const block =
xftpEncodeAuthTransmission(client.thParams, key, entityId, cmd) + + const reqBody = chunkData + ? concatBytes(block, chunkData) + : block + + const resp = await fetch(client.baseUrl, { + method: 'POST', + body: reqBody, + duplex: 'half', + }) + + const reader = resp.body!.getReader() + const responseBlock = await readExactly(reader, xftpBlockSize) + const parsed = xftpDecodeTransmission(responseBlock) + + // For FGET: remaining body is encrypted chunk + const hasMore = await peekReader(reader) + return { + response: parsed, + body: hasMore ? wrapAsStream(reader) : undefined + } +} +``` + +## 3. Architecture: Environment Abstraction + +### 3.1 Core Principle + +**Existing crypto functions remain unchanged.** The functions `encryptFile()`, `decryptChunks()`, `sha512()`, etc. in `crypto/file.ts` and `crypto/digest.ts` are pure computation — they take input bytes and produce output bytes. They have no knowledge of Workers, OPFS, or execution context. + +The abstraction layer sits between `agent.ts` (upload/download orchestration) and these crypto functions: + +``` +┌─────────────────────────────────────────────────────────────────────┐ +│ agent.ts (upload/download orchestration) │ +│ - Unchanged logic: encrypt → chunk → upload → build description │ +│ - Calls CryptoBackend interface, not crypto functions directly │ +├─────────────────────────────────────────────────────────────────────┤ +│ CryptoBackend interface (env.ts) │ +│ - Abstract interface for encrypt/decrypt/readChunk/writeChunk │ +│ - Factory function selects implementation based on environment │ +├──────────────┬──────────────────────┬───────────────────────────────┤ +│ DirectMemory │ WorkerMemory │ WorkerOPFS │ +│ Backend │ Backend │ Backend │ +│ (Node.js) │ (Browser, ≤50MB) │ (Browser, >50MB) │ +├──────────────┼──────────────────────┼───────────────────────────────┤ +│ Calls crypto │ Posts to Worker, │ Posts to Worker, │ +│ functions │ Worker calls crypto │ Worker calls crypto, │ +│ directly │ functions, 
returns │ streams through OPFS │ +│ │ via postMessage │ │ +├──────────────┴──────────────────────┴───────────────────────────────┤ +│ crypto/file.ts, crypto/digest.ts (unchanged) │ +│ - encryptFile(), decryptChunks(), sha512(), etc. │ +│ - Pure functions, no environment dependencies │ +└─────────────────────────────────────────────────────────────────────┘ +``` + +### 3.2 CryptoBackend Interface + +```typescript +// env.ts +export interface CryptoBackend { + // Encrypt file, store result (in memory or OPFS depending on backend) + encrypt( + data: Uint8Array, + fileName: string, + onProgress?: (done: number, total: number) => void + ): Promise + + // Decrypt from stored encrypted data + decrypt( + key: Uint8Array, + nonce: Uint8Array, + size: number, + onProgress?: (done: number, total: number) => void + ): Promise + + // Read chunk from stored encrypted data (for upload) + readChunk(offset: number, size: number): Promise + + // Write chunk to storage (for download, before decrypt) + writeChunk(data: Uint8Array, offset: number): Promise + + // Clean up temporary storage + cleanup(): Promise +} + +export interface EncryptResult { + digest: Uint8Array // SHA-512 of encrypted data + key: Uint8Array // Generated encryption key + nonce: Uint8Array // Generated nonce + chunkSizes: number[] // Chunk sizes for upload + totalSize: number // Total encrypted size +} + +export interface DecryptResult { + header: FileHeader // Extracted file header (fileName, etc.) 
+  content: Uint8Array     // Decrypted file content
+}
+```
+
+### 3.3 Backend Implementations
+
+**DirectMemoryBackend** (Node.js):
+```typescript
+class DirectMemoryBackend implements CryptoBackend {
+  private encryptedData: Uint8Array | null = null
+
+  async encrypt(
+    data: Uint8Array,
+    fileName: string,
+    onProgress?: (done: number, total: number) => void
+  ): Promise<EncryptResult> {
+    const key = randomBytes(32)
+    const nonce = randomBytes(24)
+    // Call existing crypto function directly
+    this.encryptedData = encryptFile(data, fileName, key, nonce, onProgress)
+    const digest = sha512(this.encryptedData)
+    const chunkSizes = prepareChunkSizes(this.encryptedData.length)
+    return { digest, key, nonce, chunkSizes, totalSize: this.encryptedData.length }
+  }
+
+  async decrypt(
+    key: Uint8Array,
+    nonce: Uint8Array,
+    size: number,
+    onProgress?: (done: number, total: number) => void
+  ): Promise<DecryptResult> {
+    // Call existing crypto function directly
+    return decryptChunks([this.encryptedData!], key, nonce, size, onProgress)
+  }
+
+  async readChunk(offset: number, size: number): Promise<Uint8Array> {
+    return this.encryptedData!.slice(offset, offset + size)
+  }
+
+  async writeChunk(data: Uint8Array, offset: number): Promise<void> {
+    // Grow the buffer as needed: chunks may arrive out of order, so a later
+    // chunk can extend past the initially allocated size.
+    const needed = offset + data.length
+    if (!this.encryptedData || this.encryptedData.length < needed) {
+      const grown = new Uint8Array(needed)
+      if (this.encryptedData) grown.set(this.encryptedData)
+      this.encryptedData = grown
+    }
+    this.encryptedData.set(data, offset)
+  }
+
+  async cleanup(): Promise<void> {
+    this.encryptedData = null
+  }
+}
+```
+
+**WorkerMemoryBackend** and **WorkerOPFSBackend** are similar but post messages to a Web Worker instead of calling crypto directly. The Worker then calls the same `encryptFile()`, `decryptChunks()` functions. See §4 for Worker implementation details.
+
+### 3.4 Factory Function
+
+```typescript
+// env.ts
+export function createCryptoBackend(fileSize: number): CryptoBackend {
+  const hasWorker = typeof Worker !== 'undefined'
+  // Guard `navigator` itself with typeof: `navigator?.storage` still throws
+  // ReferenceError when `navigator` is not declared at all (e.g. older Node) —
+  // optional chaining only protects property access, not the root identifier.
+  const hasOPFS = typeof navigator !== 'undefined' && typeof navigator.storage?.getDirectory === 'function'
+  const isLargeFile = fileSize > 50 * 1024 * 1024
+
+  if (hasWorker && hasOPFS && isLargeFile) {
+    return new WorkerOPFSBackend()    // Browser + large file
+  } else if (hasWorker) {
+    return new WorkerMemoryBackend()  // Browser + small file
+  } else {
+    return new DirectMemoryBackend()  // Node.js
+  }
+}
+```
+
+### 3.5 Usage in agent.ts
+
+```typescript
+// agent.ts - upload orchestration (simplified)
+export async function uploadFile(
+  server: XFTPServer,
+  fileData: Uint8Array,
+  fileName: string,
+  onProgress?: ProgressCallback
+): Promise<string> {
+  // Create backend based on environment
+  const backend = createCryptoBackend(fileData.length)
+
+  try {
+    // Encrypt (runs in Worker in browser, directly in Node)
+    const enc = await backend.encrypt(fileData, fileName, onProgress)
+
+    // Upload chunks (same code regardless of backend)
+    const client = await connectXFTP(server)
+    const sentChunks = []
+    let offset = 0
+    for (const size of enc.chunkSizes) {
+      const chunk = await backend.readChunk(offset, size)
+      const sent = await uploadChunk(client, chunk, enc.digest)
+      sentChunks.push(sent)
+      offset += size
+    }
+
+    // Build description and URI
+    const fd = buildFileDescription(enc, sentChunks)
+    return encodeFileDescriptionURI(fd)
+  } finally {
+    await backend.cleanup()
+  }
+}
+```
+
+The key point: `uploadFile()` logic is identical regardless of whether crypto runs in a Worker or directly. The `CryptoBackend` abstraction hides that detail.
+ +### 3.6 Why This Matters for Testing + +- **Layer 1 tests** (per-function): Call `encryptFile()`, `decryptChunks()` directly via Node — unchanged +- **Layer 2 tests** (full flow): Call `uploadFile()`, `downloadFile()` in Node — uses `DirectMemoryBackend`, same code path as browser except for Worker +- **Layer 3 tests** (browser): Call `uploadFile()`, `downloadFile()` in Playwright — uses `WorkerMemoryBackend` or `WorkerOPFSBackend` + +All three layers exercise the same crypto functions. The only difference is execution context. + +## 4. Web Worker Implementation + +### 4.1 Why Web Worker + +File encryption (XSalsa20-Poly1305) is sequential and CPU-bound: +- 100 MB file ≈ 1-2 seconds of continuous computation +- Running on main thread blocks UI (no progress updates, frozen page) +- Chunking into async microtasks adds complexity and still causes jank + +Web Worker runs crypto in parallel thread. Main thread stays responsive. + +### 4.2 Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Main Thread │ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────────────┐ │ +│ │ UI (upload/ │ │ Progress │ │ Network (fetch) │ │ +│ │ download) │ │ display │ │ │ │ +│ └──────┬──────┘ └──────▲──────┘ └──────────▲──────────┘ │ +│ │ │ │ │ +│ │ postMessage │ progress │ encrypted │ +│ ▼ │ events │ chunks │ +├─────────────────────────────────────────────────────────────┤ +│ Web Worker │ +│ ┌─────────────────────────────────────────────────────────┐│ +│ │ Crypto Pipeline ││ +│ │ - encryptFile() with progress callbacks ││ +│ │ - decryptChunks() with progress callbacks ││ +│ │ - OPFS read/write for temp storage ││ +│ └─────────────────────────────────────────────────────────┘│ +└─────────────────────────────────────────────────────────────┘ +``` + +### 4.3 Message Protocol + +**Main → Worker:** + +```typescript +type WorkerRequest = + // Encrypt file, store result in OPFS (large) or memory (small) + | {type: 'encrypt', file: File, fileName: string, 
useOPFS: boolean} + // Read encrypted chunk from OPFS for upload + | {type: 'readChunk', offset: number, size: number} + // Write downloaded chunk to OPFS for later decryption + | {type: 'writeChunk', data: ArrayBuffer, offset: number} + // Decrypt from OPFS or provided chunks + | {type: 'decrypt', key: Uint8Array, nonce: Uint8Array, size: number, chunks?: ArrayBuffer[]} + // Delete OPFS temp files + | {type: 'cleanup'} + | {type: 'cancel'} +``` + +**Worker → Main:** + +```typescript +type WorkerResponse = + | {type: 'progress', phase: 'encrypt' | 'decrypt', done: number, total: number} + // For OPFS: encData is empty, data lives in OPFS temp file + | {type: 'encrypted', encData: ArrayBuffer | null, digest: Uint8Array, key: Uint8Array, nonce: Uint8Array, chunkSizes: number[]} + | {type: 'chunk', data: ArrayBuffer} // Response to readChunk + | {type: 'chunkWritten'} // Response to writeChunk + | {type: 'decrypted', header: FileHeader, content: ArrayBuffer} + | {type: 'cleaned'} // Response to cleanup + | {type: 'error', message: string} +``` + +### 4.4 Worker Implementation + +```typescript +// crypto.worker.ts +import {encryptFile, encryptFileStreaming, decryptChunks, decryptFromOPFS} from './crypto/file.js' +import {sha512} from './crypto/digest.js' +import {prepareChunkSizes} from './protocol/chunks.js' + +let opfsHandle: FileSystemSyncAccessHandle | null = null + +self.onmessage = async (e: MessageEvent) => { + const req = e.data + + if (req.type === 'encrypt') { + const key = crypto.getRandomValues(new Uint8Array(32)) + const nonce = crypto.getRandomValues(new Uint8Array(24)) + + if (req.useOPFS) { + // Large file: stream through OPFS to avoid memory pressure + const root = await navigator.storage.getDirectory() + const fileHandle = await root.getFileHandle('encrypted-temp', {create: true}) + opfsHandle = await fileHandle.createSyncAccessHandle() + + // Stream encrypt: read 64KB from File, encrypt, write to OPFS + const digest = await encryptFileStreaming( + 
req.file, + req.fileName, + key, + nonce, + opfsHandle, + (done, total) => self.postMessage({type: 'progress', phase: 'encrypt', done, total}) + ) + + const encSize = opfsHandle.getSize() + const chunkSizes = prepareChunkSizes(encSize) + + self.postMessage({ + type: 'encrypted', + encData: null, // Data in OPFS, not memory + digest, key, nonce, chunkSizes + }) + } else { + // Small file: in-memory is fine + const source = new Uint8Array(await req.file.arrayBuffer()) + const encData = encryptFile(source, req.fileName, key, nonce, (done, total) => { + self.postMessage({type: 'progress', phase: 'encrypt', done, total}) + }) + + const digest = sha512(encData) + const chunkSizes = prepareChunkSizes(encData.length) + + self.postMessage({ + type: 'encrypted', + encData: encData.buffer, + digest, key, nonce, chunkSizes + }, [encData.buffer]) + } + } + + if (req.type === 'readChunk') { + // Read chunk from OPFS for upload + const chunk = new Uint8Array(req.size) + opfsHandle!.read(chunk, {at: req.offset}) + self.postMessage({type: 'chunk', data: chunk.buffer}, [chunk.buffer]) + } + + if (req.type === 'writeChunk') { + // Write downloaded chunk to OPFS + if (!opfsHandle) { + const root = await navigator.storage.getDirectory() + const fileHandle = await root.getFileHandle('download-temp', {create: true}) + opfsHandle = await fileHandle.createSyncAccessHandle() + } + opfsHandle.write(new Uint8Array(req.data), {at: req.offset}) + self.postMessage({type: 'chunkWritten'}) + } + + if (req.type === 'decrypt') { + let result + if (req.chunks) { + // Small file: chunks provided in memory + const chunks = req.chunks.map(b => new Uint8Array(b)) + result = decryptChunks(chunks, req.key, req.nonce, req.size, (done, total) => { + self.postMessage({type: 'progress', phase: 'decrypt', done, total}) + }) + } else { + // Large file: read from OPFS + result = decryptFromOPFS(opfsHandle!, req.key, req.nonce, req.size, (done, total) => { + self.postMessage({type: 'progress', phase: 'decrypt', 
done, total}) + }) + } + + self.postMessage({ + type: 'decrypted', + header: result.header, + content: result.content.buffer + }, [result.content.buffer]) + } + + if (req.type === 'cleanup') { + if (opfsHandle) { + opfsHandle.close() + opfsHandle = null + } + const root = await navigator.storage.getDirectory() + try { await root.removeEntry('encrypted-temp') } catch {} + try { await root.removeEntry('download-temp') } catch {} + self.postMessage({type: 'cleaned'}) + } +} +``` + +### 4.5 Main Thread Wrapper + +```typescript +// crypto-worker.ts (main thread) +export class CryptoWorker { + private worker: Worker + private pending: Map = new Map() + private onProgress?: (done: number, total: number) => void + + constructor() { + this.worker = new Worker(new URL('./crypto.worker.js', import.meta.url), {type: 'module'}) + this.worker.onmessage = (e) => this.handleMessage(e.data) + } + + async encrypt(file: File, onProgress?: (done: number, total: number) => void): Promise { + const useOPFS = file.size > 50 * 1024 * 1024 // 50 MB threshold + return new Promise((resolve, reject) => { + this.pending.set('encrypt', {resolve, reject}) + this.onProgress = onProgress + this.worker.postMessage({type: 'encrypt', file, fileName: file.name, useOPFS}) + }) + } + + async decrypt( + chunks: Uint8Array[], + key: Uint8Array, + nonce: Uint8Array, + size: number, + onProgress?: (done: number, total: number) => void + ): Promise { + return new Promise((resolve, reject) => { + this.pending.set('decrypt', {resolve, reject}) + this.onProgress = onProgress + this.worker.postMessage({ + type: 'decrypt', + chunks: chunks.map(c => c.buffer), + key, nonce, size + }, chunks.map(c => c.buffer)) + }) + } + + private handleMessage(msg: WorkerResponse) { + if (msg.type === 'progress') { + this.onProgress?.(msg.done, msg.total) + } else if (msg.type === 'encrypted') { + this.pending.get('encrypt')?.resolve({ + encData: msg.encData ? 
new Uint8Array(msg.encData) : null, // null when using OPFS + digest: msg.digest, + key: msg.key, + nonce: msg.nonce, + chunkSizes: msg.chunkSizes + }) + } else if (msg.type === 'decrypted') { + this.pending.get('decrypt')?.resolve({ + header: msg.header, + content: new Uint8Array(msg.content) + }) + } else if (msg.type === 'error') { + // Reject all pending + for (const p of this.pending.values()) p.reject(new Error(msg.message)) + } + } +} +``` + +## 5. OPFS Implementation + +### 5.1 Purpose + +For files approaching 100 MB, holding encrypted data in memory while uploading creates memory pressure. OPFS provides temporary file storage: +- Write encrypted data to OPFS as it's generated +- Read chunks from OPFS for upload +- Delete after upload completes + +### 5.2 When to Use + +- Files > 50 MB: Use OPFS +- Files ≤ 50 MB: In-memory (simpler, no OPFS overhead) + +Threshold is configurable. + +### 5.3 OPFS API + +```typescript +// In Web Worker (synchronous API for performance) +const root = await navigator.storage.getDirectory() +const fileHandle = await root.getFileHandle('encrypted-temp', {create: true}) +const accessHandle = await fileHandle.createSyncAccessHandle() + +// Write encrypted chunks as they're generated +accessHandle.write(encryptedChunk, {at: offset}) + +// Read chunk for upload +const chunk = new Uint8Array(chunkSize) +accessHandle.read(chunk, {at: chunkOffset}) + +// Cleanup +accessHandle.close() +await root.removeEntry('encrypted-temp') +``` + +### 5.4 Upload Flow with OPFS + +``` +1. Main: user drops file +2. Main → Worker: {type: 'encrypt', file} +3. Worker: + - Create OPFS temp file + - Encrypt 64KB at a time, write to OPFS + - Post progress every 64KB + - Compute digest + - Return {digest, key, nonce, chunkSizes} (data stays in OPFS) +4. Main: for each chunk: + - Main → Worker: {type: 'readChunk', offset, size} + - Worker: read from OPFS, return chunk + - Main: upload chunk via fetch() +5. Main → Worker: {type: 'cleanup'} +6. 
Worker: delete OPFS temp file +``` + +### 5.5 Download Flow with OPFS + +``` +1. Main: parse URL, get FileDescription +2. Main: for each chunk: + - Download via fetch() + - Main → Worker: {type: 'writeChunk', data, offset} + - Worker: write to OPFS temp file +3. Main → Worker: {type: 'decrypt', key, nonce, size} +4. Worker: + - Read from OPFS + - Decrypt, verify auth tag + - Return {header, content} +5. Main: trigger browser download +6. Main → Worker: {type: 'cleanup'} +``` + +## 6. Implementation Plan + +### 6.1 Phase A: fetch() Transport + +**Goal:** Replace `node:http2` with `fetch()` in `client.ts`. All existing Node.js tests pass. + +1. Rewrite `connectXFTP()` to use fetch() for handshake +2. Rewrite `sendXFTPCommand()` to use fetch() +3. Update `createXFTPChunk`, `uploadXFTPChunk`, `downloadXFTPChunk`, etc. +4. Remove `node:http2` import +5. Run existing Haskell integration tests — must pass + +**Files:** `client.ts` + +### 6.2 Phase B: Environment Abstraction + Web Worker + +**Goal:** Add `CryptoBackend` abstraction (§3) so the same code works in Node (direct) and browser (Worker). + +1. Create `env.ts` with `CryptoBackend` interface and `createCryptoBackend()` factory (as specified in §3) +2. Implement `DirectMemoryBackend` for Node.js +3. Create `crypto.worker.ts` that imports and calls existing crypto functions +4. Implement `WorkerMemoryBackend` for browser +5. Update `agent.ts` to use `createCryptoBackend()` instead of direct crypto calls +6. Existing tests pass (now using `DirectMemoryBackend`) + +**Files:** `env.ts`, `crypto.worker.ts`, `agent.ts` + +### 6.3 Phase C: OPFS Backend + +**Goal:** Large files (>50 MB) use OPFS for temp storage in browser. + +1. Implement `WorkerOPFSBackend` — uses OPFS sync API in worker +2. Add OPFS helpers in worker: read/write to temp file +3. Factory function now returns `WorkerOPFSBackend` for large files +4. 
Same `agent.ts` code works — only backend implementation differs + +**Files:** `env.ts`, `crypto.worker.ts` + +### 6.4 Phase D: Browser Testing + +**Goal:** Verify everything works in real browsers. + +1. Create minimal test HTML page +2. Test upload flow in Chrome, Firefox, Safari +3. Test download flow +4. Test progress reporting +5. Test cancellation +6. Test error handling (network failure, invalid file) + +## 7. Testing Strategy + +### 7.1 Test Layers + +The `CryptoBackend` abstraction (§3) enables testing at multiple levels without code duplication: + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Layer 3: Browser Integration (Playwright) │ +│ - Web Worker message passing │ +│ - OPFS read/write │ +│ - Progress UI updates │ +│ - Real browser fetch() with CORS │ +├─────────────────────────────────────────────────────────────────┤ +│ Layer 2: Full Flow (Haskell-driven, Node.js) │ +│ - fetch() transport against real xftp-server │ +│ - Upload: encrypt → chunk → upload → build description │ +│ - Download: parse → download → verify → decrypt │ +│ - Cross-language: TS upload ↔ Haskell download (and vice versa) │ +├─────────────────────────────────────────────────────────────────┤ +│ Layer 1: Per-Function (Haskell-driven, Node.js) │ +│ - 172 existing tests │ +│ - Byte-identical output vs Haskell functions │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 7.2 Layer 1: Per-Function Tests (Existing) + +Existing Haskell-driven tests in `XFTPWebTests.hs`. Each test calls a TypeScript function via Node and compares output with Haskell. + +```bash +cabal test --ghc-options -O0 --test-option='--match=/XFTP Web Client/' +``` + +All 172 tests must pass. No changes needed for browser transport work. + +### 7.3 Layer 2: Full Flow Tests (Node.js + fetch) + +Haskell-driven integration tests using Node.js native fetch(). These test the complete upload/download flow without Worker/OPFS. 
+ +```haskell +-- XFTPWebTests.hs (extends existing test file) +it "fetch transport: upload and download round-trip" $ do + withXFTPServer testXFTPServerConfigSNI $ \server -> do + -- TypeScript uploads via fetch(), returns URI + uri <- jsOut $ callTS "src/agent" "uploadFileTest" serverAddrHex <> testFileHex + -- TypeScript downloads via fetch() + content <- jsOut $ callTS "src/agent" "downloadFileTest" uriHex + content `shouldBe` testFileContent + +it "fetch transport: TS upload, Haskell download" $ do + withXFTPServer testXFTPServerConfigSNI $ \server -> do + uri <- jsOut $ callTS "src/agent" "uploadFileTest" serverAddrHex <> testFileHex + -- Haskell agent downloads using existing xftp CLI pattern + outPath <- withAgent 1 agentCfg initAgentServers testDB $ \a -> do + rfId <- xftpReceiveFile' a 1 uri Nothing + waitRfDone a + content <- B.readFile outPath + content `shouldBe` testFileContent +``` + +**What this tests:** +- fetch() handshake (challenge-response, TLS session binding) +- fetch() command execution (FNEW, FPUT, FGET, FACK) +- Streaming request/response bodies +- Full encrypt → upload → download → decrypt flow + +**What this doesn't test:** +- Web Worker message passing +- OPFS storage +- Browser-specific fetch() behavior (CORS preflight, etc.) + +### 7.4 Layer 3: Browser Integration Tests (Playwright) + +Playwright tests run in real browsers, testing browser-specific functionality. 
+ +**Test infrastructure:** + +``` +xftp-web/ +├── test/ +│ ├── browser.test.ts # Playwright test file +│ └── test-server.ts # Spawns xftp-server for tests +└── test-page/ + ├── index.html # Minimal test UI + └── test-harness.ts # Exposes test functions to window +``` + +**Running browser tests:** + +```bash +cd xftp-web +npm run test:browser # Spawns xftp-server, runs Playwright +``` + +**Test cases:** + +```typescript +// test/browser.test.ts +import { test, expect } from '@playwright/test' +import { spawn } from 'child_process' + +let serverProcess: ChildProcess + +test.beforeAll(async () => { + // Spawn xftp-server with SNI cert for browser TLS + serverProcess = spawn('xftp-server', ['start', '-c', 'test-config.ini']) + await waitForServer() +}) + +test.afterAll(async () => { + serverProcess.kill() +}) + +test('small file upload/download (in-memory)', async ({ page }) => { + await page.goto('/test-page/') + + const result = await page.evaluate(async () => { + const data = new Uint8Array(1024 * 1024) // 1 MB + crypto.getRandomValues(data) + const file = new File([data], 'small.bin') + + const uri = await window.xftp.uploadFile(file) + const downloaded = await window.xftp.downloadFile(uri) + + return { + uploadedSize: data.length, + downloadedSize: downloaded.length, + match: arraysEqual(data, downloaded), + usedOPFS: window.xftp.lastUploadUsedOPFS + } + }) + + expect(result.match).toBe(true) + expect(result.usedOPFS).toBe(false) // Small file, no OPFS +}) + +test('large file upload/download (OPFS)', async ({ page }) => { + await page.goto('/test-page/') + + const result = await page.evaluate(async () => { + const data = new Uint8Array(60 * 1024 * 1024) // 60 MB + crypto.getRandomValues(data) + const file = new File([data], 'large.bin') + + const uri = await window.xftp.uploadFile(file) + const downloaded = await window.xftp.downloadFile(uri) + + return { + match: arraysEqual(data, downloaded), + usedOPFS: window.xftp.lastUploadUsedOPFS + } + }) + + 
expect(result.match).toBe(true) + expect(result.usedOPFS).toBe(true) // Large file, used OPFS +}) + +test('progress events fire during upload', async ({ page }) => { + await page.goto('/test-page/') + + const progressEvents = await page.evaluate(async () => { + const events: number[] = [] + const data = new Uint8Array(10 * 1024 * 1024) // 10 MB + const file = new File([data], 'progress.bin') + + await window.xftp.uploadFile(file, (done, total) => { + events.push(done / total) + }) + + return events + }) + + expect(progressEvents.length).toBeGreaterThan(1) + expect(progressEvents[progressEvents.length - 1]).toBe(1) // 100% at end +}) + +test('Web Worker keeps UI responsive', async ({ page }) => { + await page.goto('/test-page/') + + // Start upload and measure main thread responsiveness + const result = await page.evaluate(async () => { + const data = new Uint8Array(50 * 1024 * 1024) // 50 MB + const file = new File([data], 'responsive.bin') + + let frameCount = 0 + let uploadDone = false + + // Count animation frames during upload + function countFrames() { + frameCount++ + if (!uploadDone) requestAnimationFrame(countFrames) + } + requestAnimationFrame(countFrames) + + const start = performance.now() + await window.xftp.uploadFile(file) + uploadDone = true + const elapsed = performance.now() - start + + // If main thread was blocked, frameCount would be very low + const expectedFrames = (elapsed / 1000) * 30 // ~30 fps minimum + return { frameCount, expectedFrames, elapsed } + }) + + // Should maintain reasonable frame rate (Worker offloaded crypto) + expect(result.frameCount).toBeGreaterThan(result.expectedFrames * 0.5) +}) +``` + +### 7.5 Cross-Browser Matrix + +| Browser | fetch streaming | Web Worker | OPFS sync | Status | +|---------|----------------|------------|-----------|--------| +| Chrome 105+ | ✓ | ✓ | ✓ | Primary target | +| Firefox 111+ | ✓ | ✓ | ✓ | Supported | +| Safari 16.4+ | ✓ | ✓ | ✓ | Supported | +| Edge 105+ | ✓ | ✓ | ✓ | Supported (Chromium) 
| + +Playwright tests run against Chrome by default. CI can run against all browsers. + +### 7.6 Test Execution Summary + +| Phase | Test Layer | Command | What's Verified | +|-------|-----------|---------|-----------------| +| A | Layer 1 + 2 | `cabal test --test-option='--match=/XFTP Web Client/'` | fetch() transport, full flow | +| B | Layer 3 | `npm run test:browser` | Worker message passing, progress | +| C | Layer 3 | `npm run test:browser` | OPFS storage for large files | +| D | Layer 3 | `npm run test:browser -- --project=firefox,webkit` | Cross-browser | diff --git a/rfcs/2026-01-30-send-file-page/2026-02-04-xftp-web-page.md b/rfcs/2026-01-30-send-file-page/2026-02-04-xftp-web-page.md new file mode 100644 index 0000000000..b69234de82 --- /dev/null +++ b/rfcs/2026-01-30-send-file-page/2026-02-04-xftp-web-page.md @@ -0,0 +1,772 @@ +# Send File Web Page — Implementation Plan + +## TOC +1. Executive Summary +2. Architecture +3. CryptoBackend & Web Worker +4. Server Configuration +5. Page Structure & UI +6. Upload Flow +7. Download Flow +8. Build & Dev Setup +9. agent.ts Changes +10. Testing +11. Files +12. Implementation Order + +## 1. Executive Summary + +Build a static web page for browser-based XFTP file transfer (Phase 5 of master RFC). The page supports upload (drag-drop → encrypt → upload → shareable link) and download (open link → download → decrypt → save). Crypto runs in a Web Worker; large files use OPFS temp storage. + +Two build variants: +- **Local**: single test server at `localhost:7000` (development/testing) +- **Production**: 12 preset XFTP servers (6 SimpleX + 6 Flux) + +Uses Vite for bundling (already a dependency via vitest). No CSS framework — plain CSS per RFC spec. + +## 2. 
Architecture + +``` +xftp-web/ +├── src/ # Library (existing, targeted changes) +│ ├── agent.ts # Modified: uploadFile readChunk, downloadFileRaw +│ ├── client.ts # Modified: downloadXFTPChunkRaw +│ ├── crypto/ # Unchanged +│ ├── download.ts # Unchanged +│ └── protocol/ +│ └── description.ts # Fix: SHA-256 → SHA-512 comment on digest field +├── web/ # Web page (new) +│ ├── index.html # Entry point (CSP meta tag) +│ ├── main.ts # Router + sodium.ready init +│ ├── upload.ts # Upload UI + orchestration +│ ├── download.ts # Download UI + orchestration +│ ├── progress.ts # Circular progress canvas component +│ ├── servers.ts # Server list (build-time configured, imports servers.json) +│ ├── servers.json # Preset server addresses (shared with vite.config.ts) +│ ├── crypto-backend.ts # CryptoBackend interface + WorkerBackend +│ ├── crypto.worker.ts # Web Worker: encrypt/decrypt/OPFS +│ └── style.css # Minimal styling +├── vite.config.ts # Page build config (new) +├── tsconfig.web.json # IDE/CI type-check for web/ (new) +├── tsconfig.worker.json # IDE/CI type-check for worker (new) +├── playwright.config.ts # Page E2E test config (new) +├── vitest.config.ts # Test config (existing) +├── .gitignore # Existing (add dist-web/) +└── test/ # Tests (existing + new page test) +``` + +Data flow: + +``` + ┌───────────────────────────────────────────┐ + │ Main Thread │ + │ │ + │ Upload: upload.ts ──► agent.ts ──► fetch()│ + │ Download: download.ts ──► agent.ts ──► fetch() + │ │ │ + │ postMessage HTTP/2 │ + │ ▼ ▼ + │ ┌─────────────────┐ ┌──────────┐│ + │ │ Web Worker │ │ XFTP ││ + │ │ crypto.worker.ts │ │ Server ││ + │ │ ┌─────────────┐ │ └──────────┘│ + │ │ │ OPFS temp │ │ │ + │ │ └─────────────┘ │ │ + │ └─────────────────┘ │ + └───────────────────────────────────────────┘ +``` + +Both upload and download use `agent.ts` for orchestration (connection pooling, parallel chunk transfers, redirect handling). Upload uses a `readChunk` callback for Worker data access. 
Download uses an `onRawChunk` callback to route raw encrypted chunks to the Worker for decryption (see §7.2). ACK is the caller's responsibility — `downloadFileRaw` returns the resolved `FileDescription` without ACKing, so the caller can verify integrity before acknowledging. + +## 3. CryptoBackend & Web Worker + +### 3.1 Interface + +```typescript +// crypto-backend.ts +export interface CryptoBackend { + // Upload: encrypt file, store encrypted data in OPFS + encrypt(data: Uint8Array, fileName: string, + onProgress?: (done: number, total: number) => void + ): Promise + + // Upload: read encrypted chunk from OPFS (called by agent.ts via readChunk callback) + readChunk(offset: number, size: number): Promise + + // Download: transit-decrypt raw chunk and store in OPFS + decryptAndStoreChunk( + dhSecret: Uint8Array, nonce: Uint8Array, + body: Uint8Array, digest: Uint8Array, chunkNo: number + ): Promise + + // Download: verify digest + file-level decrypt all stored chunks + // Only needs size/digest/key/nonce — not the full FileDescription (avoids sending private keys to Worker) + verifyAndDecrypt(params: {size: number, digest: Uint8Array, key: Uint8Array, nonce: Uint8Array} + ): Promise<{header: FileHeader, content: Uint8Array}> + + cleanup(): Promise +} + +// Structurally identical to EncryptedFileMetadata from agent.ts (§9.1). +// Kept separate to avoid crypto-backend.ts importing from agent.ts +// (which would pull in node:http2 via client.ts, breaking Worker bundling). +// TypeScript structural typing makes them assignment-compatible. 
+export interface EncryptResult { + digest: Uint8Array + key: Uint8Array + nonce: Uint8Array + chunkSizes: number[] +} +``` + +### 3.2 Factory + +```typescript +export function createCryptoBackend(): CryptoBackend { + if (typeof Worker === 'undefined') { + throw new Error('Web Workers required — update your browser') + } + return new WorkerBackend() +} +``` + +The Worker always uses OPFS for temp storage (single code path — no memory/disk branching). OPFS I/O overhead is negligible relative to crypto and network time. Each Worker session creates a unique directory in OPFS root named `session--`, containing `upload.bin` and `download.bin` as needed. `cleanup()` deletes the entire session directory. On Worker startup (before processing messages), sweep OPFS root and delete any `session-*` directories whose embedded timestamp (parsed from the name) is older than 1 hour — this handles stale files from crashed tabs. The OPFS API does not expose directory timestamps, so the name-encoded timestamp is the only reliable mechanism. This prevents cross-tab collisions and unbounded OPFS growth. + +### 3.3 Worker message protocol + +Every request carries a numeric `id`. Responses carry the same `id`. WorkerBackend maintains a `Map` to match responses to pending promises. + +Main → Worker (fields marked `†` are Transferable — arrive as `ArrayBuffer` in Worker, must be wrapped with `new Uint8Array(...)` before use): +- `{id: number, type: 'encrypt', data†: ArrayBuffer, fileName: string}` — encrypt file, store in OPFS +- `{id: number, type: 'readChunk', offset: number, size: number}` — read encrypted chunk from OPFS +- `{id: number, type: 'decryptAndStoreChunk', dhSecret: Uint8Array, nonce: Uint8Array, body†: ArrayBuffer, chunkDigest: Uint8Array, chunkNo: number}` — transit-decrypt + store in OPFS. `chunkDigest` is the per-chunk SHA-256 digest (verified by `decryptReceivedChunk`). Distinct from the file-level SHA-512 digest in `verifyAndDecrypt`. 
+- `{id: number, type: 'verifyAndDecrypt', size: number, digest: Uint8Array, key: Uint8Array, nonce: Uint8Array}` — verify digest + file-level decrypt all chunks. Only the four fields needed for verification/decryption are sent — not the full `FileDescription`, which contains private replica keys that the Worker doesn't need. +- `{id: number, type: 'cleanup'}` — delete OPFS temp files + +Worker → Main (fields marked `†` are Transferable): +- `{id: number, type: 'progress', done: number, total: number}` — encryption/decryption progress (fire-and-forget, no promise) +- `{id: number, type: 'encrypted', digest: Uint8Array, key: Uint8Array, nonce: Uint8Array, chunkSizes: number[]}` — all fields structured-cloned (not transferred) +- `{id: number, type: 'chunk', data†: ArrayBuffer}` — readChunk response +- `{id: number, type: 'stored'}` — decryptAndStore acknowledgment +- `{id: number, type: 'decrypted', header: FileHeader, content†: ArrayBuffer}` — verifyAndDecrypt response +- `{id: number, type: 'cleaned'}` +- `{id: number, type: 'error', message: string}` — rejects the pending promise for this `id` + +All messages carrying large `ArrayBuffer` payloads use `postMessage(msg, [transferables])` to transfer ownership instead of structured-clone copying. Only `ArrayBuffer` can be transferred — `Uint8Array`, `number[]`, and other types are always structured-cloned. This applies to: `encrypt` request (`data`), `readChunk` response (`data`), `decryptAndStoreChunk` request (`body`), and `verifyAndDecrypt` response (`content`). The `WorkerBackend` implementation must ensure the transferred `ArrayBuffer` covers the full `Uint8Array` — if `byteOffset !== 0` or `byteLength !== buffer.byteLength`, slice first: `data.buffer.slice(data.byteOffset, data.byteOffset + data.byteLength)`. This is required for `decryptAndStore` request bodies: `sendXFTPCommand` returns `body = fullResp.subarray(XFTP_BLOCK_SIZE)`, which has `byteOffset = XFTP_BLOCK_SIZE`. 
Other payloads are full-buffer views (§6 step 3 creates `new Uint8Array(await file.arrayBuffer())`; Worker responses allocate fresh buffers) but `WorkerBackend` should guard unconditionally. + +### 3.4 Worker internals + +**Imports:** The Worker imports directly from `libsodium-wrappers-sumo` (for `await sodium.ready`), `src/crypto/file.js` (`encryptFile`, `encodeFileHeader`, `decryptChunks`), `src/crypto/digest.js` (`sha512`), `src/protocol/chunks.js` (`prepareChunkSizes`, `fileSizeLen`, `authTagSize`), `src/protocol/encoding.js` (`concatBytes`), and `src/download.js` (`decryptReceivedChunk`). `download.js` directly imports `src/protocol/client.js` (for `decryptTransportChunk`). These transitively pull in `src/crypto/secretbox.js`, `src/crypto/keys.js`, and `src/crypto/padding.js`. None of these import `src/agent.ts` or `src/client.ts` — those pull in `node:http2` via dynamic import which would break Worker bundling. Vite tree-shakes the transitive deps automatically. Note: `download.js` → `protocol/client.js` → `crypto/keys.js` transitively pulls in `@noble/curves` (~50-80KB). This is unavoidable since `decryptTransportChunk` needs `dh` from `keys.js`. If Worker bundle size becomes a concern, `decryptReceivedChunk` could be refactored out of `download.js` into a separate module that doesn't import `protocol/client.js`. + +**ArrayBuffer → Uint8Array conversion:** All Transferable fields arrive in the Worker as `ArrayBuffer`. The Worker's message handler must wrap them before passing to library functions: `new Uint8Array(msg.data)` for encrypt, `new Uint8Array(msg.body)` for decryptAndStore. Non-transferred fields (`dhSecret`, `nonce`, `digest`, `chunkSizes`) arrive as their original types (`Uint8Array` / `number[]`) via structured clone. + +The Worker's encrypt handler calls the same functions as `encryptFileForUpload` in agent.ts (key/nonce generation → `encryptFile` → `sha512` → `prepareChunkSizes`). 
This is not reimplementation — it's calling the same library functions from a different entry point. + +**Libsodium init:** Both the Worker and the main thread must `await sodium.ready` before calling any crypto functions that use libsodium. The Worker does this once on startup before processing messages. The main thread needs it before `connectXFTP` (which uses libsodium via `verifyIdentityProof`) and before `downloadXFTPChunkRaw` (which uses libsodium via `generateX25519KeyPair` + `dh`). In practice, `main.ts` calls `await sodium.ready` at page load, before any XFTP calls. + +Encrypt (mirrors `encryptFileForUpload` in agent.ts): +1. Generate key (32B) + nonce (24B) via `crypto.getRandomValues` +2. `fileHdr = encodeFileHeader({fileName, fileExtra: null})` +3. `fileSize = BigInt(fileHdr.length + source.length)` +4. `payloadSize = Number(fileSize) + fileSizeLen + authTagSize` +5. `chunkSizes = prepareChunkSizes(payloadSize)` +6. `encSize = BigInt(chunkSizes.reduce((a, b) => a + b, 0))` +7. `encData = encryptFile(source, fileHdr, key, nonce, fileSize, encSize)` +8. `digest = sha512(encData)` — note: the `digest` field comment in `FileDescription` in `description.ts` says "SHA-256" but the actual hash is SHA-512 everywhere (`sha512` in agent.ts and download.ts). Fix the comment during implementation. +9. Open OPFS upload file via `createSyncAccessHandle`, write `encData`, flush, close handle. Null out `encData` reference. +10. Reopen the same OPFS file with `createSyncAccessHandle` as a persistent read handle (stored on the Worker module scope). This handle is used by all subsequent `readChunk` calls and closed on `cleanup`. +11. Post back `{digest, key, nonce, chunkSizes}` (no encData transfer — data stays in OPFS) + +readChunk: +- Use the persistent read handle: `handle.read(buf, {at: offset})` → return slice as transferable ArrayBuffer. OPFS allows only one `FileSystemSyncAccessHandle` per file; the persistent handle avoids per-call open/close overhead. 
+ +decryptAndStoreChunk (removes transport encryption only — stored data is still file-level encrypted): +1. `decryptReceivedChunk(dhSecret, nonce, new Uint8Array(body), chunkDigest)` → transit-decrypted chunk data (still file-level encrypted — only the transport layer is removed). Argument order matches signature `(dhSecret, cbNonce, encData, expectedDigest)` from download.ts. `body` arrives as `ArrayBuffer` via Transferable and must be wrapped; `dhSecret`, `nonce`, `chunkDigest` arrive as `Uint8Array` via structured clone. +2. On first call, open the OPFS download temp file via `createSyncAccessHandle` and store as a persistent write handle. Record `{chunkNo, size: decrypted.length}` in an in-memory `chunkMeta: Map<number, {offset: number, size: number}>` — offset is the running sum of sizes for chunks stored so far (chunks may arrive out of order with `concurrency > 1`, so offset is assigned as `currentFileOffset`, then `currentFileOffset += size`) +3. Write decrypted chunk to the persistent handle at the recorded offset + +verifyAndDecrypt (mirrors size/digest checks in agent.ts `downloadFile`): +1. Close the persistent download write handle (flush first), then reopen as a read handle. Read each chunk from OPFS into a `Uint8Array[]` array, ordered by `chunkNo`: for each entry in `chunkMeta` sorted by `chunkNo`, `handle.read(buf, {at: offset})` with the recorded offset and size +2. Concatenate for verification: `combined = concatBytes(...chunks)` +3. Verify total size: `combined.length === params.size` +4. Verify SHA-512 digest: `sha512(combined)` matches `params.digest` +5. Decrypt: `decryptChunks(BigInt(params.size), chunks, params.key, params.nonce)` — `params.size` is the encrypted file size (`fd.size` = `sum(chunkSizes)` = `decryptChunks`' first param `encSize`). Called directly instead of via `processDownloadedFile` (which expects a full `FileDescription`). Pass the original `chunks` array (not `combined`), as `decryptChunks` handles concatenation internally. +6. 
Delete OPFS download temp file +7. Return `{header, content}` via transferable ArrayBuffer + +### 3.5 Browser requirements + +The page requires a modern browser with Web Worker and OPFS support: +- Chrome 102+, Firefox 114+, Safari 15.2+ (Workers + OPFS + ES module Workers — Firefox added module Worker support in 114) +- If Worker or OPFS is unavailable, the page shows an error message rather than falling back silently. + +No `DirectBackend` is needed — the page is browser-only, and tests run in vitest browser mode (real Chromium). The existing library tests (`test/browser.test.ts`) test the crypto/upload/download pipeline directly without Workers. + +## 4. Server Configuration + +### 4.1 Server lists + +`web/servers.json` — single source of truth for preset server addresses (imported by both `servers.ts` and `vite.config.ts`): + +```json +{ + "simplex": [ + "xftp://da1aH3nOT-9G8lV7bWamhxpDYdJ1xmW7j3JpGaDR5Ug=@xftp1.simplex.im", + "xftp://5vog2Imy1ExJB_7zDZrkV1KDWi96jYFyy9CL6fndBVw=@xftp2.simplex.im", + "xftp://PYa32DdYNFWi0uZZOprWQoQpIk5qyjRJ3EF7bVpbsn8=@xftp3.simplex.im", + "xftp://k_GgQl40UZVV0Y4BX9ZTyMVqX5ZewcLW0waQIl7AYDE=@xftp4.simplex.im", + "xftp://-bIo6o8wuVc4wpZkZD3tH-rCeYaeER_0lz1ffQcSJDs=@xftp5.simplex.im", + "xftp://6nSvtY9pJn6PXWTAIMNl95E1Kk1vD7FM2TeOA64CFLg=@xftp6.simplex.im" + ], + "flux": [ + "xftp://92Sctlc09vHl_nAqF2min88zKyjdYJ9mgxRCJns5K2U=@xftp1.simplexonflux.com", + "xftp://YBXy4f5zU1CEhnbbCzVWTNVNsaETcAGmYqGNxHntiE8=@xftp2.simplexonflux.com", + "xftp://ARQO74ZSvv2OrulRF3CdgwPz_AMy27r0phtLSq5b664=@xftp3.simplexonflux.com", + "xftp://ub2jmAa9U0uQCy90O-fSUNaYCj6sdhl49Jh3VpNXP58=@xftp4.simplexonflux.com", + "xftp://Rh19D5e4Eez37DEE9hAlXDB3gZa1BdFYJTPgJWPO9OI=@xftp5.simplexonflux.com", + "xftp://0AznwoyfX8Od9T_acp1QeeKtxUi676IBIiQjXVwbdyU=@xftp6.simplexonflux.com" + ] +} +``` + +`web/servers.ts`: + +```typescript +import {parseXFTPServer, type XFTPServer} from '../src/protocol/address.js' +import presets from './servers.json' + +declare const 
__XFTP_SERVERS__: string[] + +const serverAddresses: string[] = typeof __XFTP_SERVERS__ !== 'undefined' + ? __XFTP_SERVERS__ + : [...presets.simplex, ...presets.flux] + +export function getServers(): XFTPServer[] { + return serverAddresses.map(parseXFTPServer) +} + +export function pickRandomServer(servers: XFTPServer[]): XFTPServer { + return servers[Math.floor(Math.random() * servers.length)] +} +``` + +### 4.2 Build-time injection + +`vite.config.ts` defines `__XFTP_SERVERS__`: +- `mode === 'local'`: `["xftp://@localhost:7000"]` +- `mode === 'production'`: not defined → falls through to hardcoded list + +### 4.3 Assumption + +Production XFTP servers must have `[WEB]` section configured with a CA-signed certificate for browser TLS. Without this, browsers will reject the self-signed XFTP identity cert. The local test server uses `tests/fixtures/` certs which Chromium accepts via `ignoreHTTPSErrors`. + +## 5. Page Structure & UI + +### 5.1 Routing + +`main.ts` checks `window.location.hash` once on page load: +- Hash present → download mode +- Hash absent → upload mode + +No `hashchange` listener — the shareable link opens in a new tab. Simple page-load routing. + +### 5.2 Upload UI states + +1. **Landing**: Drag-drop zone centered, file picker button, size limit note +2. **Uploading**: Circular progress (canvas), percentage, cancel button +3. **Complete**: Shareable link (input + copy button), "Install SimpleX" CTA +4. **Error**: Error message + retry button. On server-unreachable, auto-retry with exponential backoff (1s, 2s, 4s, up to 3 attempts) before showing the error state. + +### 5.3 Download UI states + +1. **Ready**: Approximate file size displayed (encrypted size from `fd.size` or `fd.redirect.size` — see §7 step 2; file name is unavailable — it's inside the encrypted content), download button +2. **Downloading**: Circular progress, percentage +3. **Complete**: Browser save dialog triggered automatically +4. 
**Error**: Error message (expired, corrupted, unreachable) + +### 5.4 Security summary (RFC §7.4) + +Both upload-complete and download-ready states display a brief non-technical security summary: +- Files are encrypted in the browser before upload — the server never sees file contents. +- The link contains the decryption key in the hash fragment, which the browser never sends to any server. +- For maximum security, use the SimpleX app. + +### 5.5 File expiry + +Display on upload-complete state: "Files are typically available for 48 hours." This is an approximation — actual expiry depends on each XFTP server's `[STORE_LOG]` retention configuration. The 48-hour figure matches the current preset server defaults. + +### 5.6 Styling + +Plain CSS, no framework. White background, centered content, responsive. Circular progress via `<canvas>` (arc drawing, percentage text in center). + +File size limit: 100MB. Displayed on upload page. + +### 5.7 CSP + +`index.html` includes a `<meta>` Content-Security-Policy tag with a build-time placeholder: + +```html +<meta http-equiv="Content-Security-Policy" content="default-src 'self'; connect-src __CSP_CONNECT_SRC__"> +``` + +Vite's `transformIndexHtml` hook (in `vite.config.ts`) replaces `__CSP_CONNECT_SRC__` at build time with origins derived from the server list: +- Local mode: `https://localhost:7000` +- Production: `https://xftp1.simplex.im:443 https://xftp2.simplex.im:443 ...` (all 12 servers) + +## 6. Upload Flow + +`web/upload.ts`: + +1. User drops/picks file → `File` object +2. Validate `file.size <= 100 * 1024 * 1024` — show error if exceeded +3. Read file: `new Uint8Array(await file.arrayBuffer())` — note: after `backend.encrypt()` transfers the buffer to the Worker, `fileData` is detached (zero-length). Peak memory is ~2× file size (main thread holds original until transfer, Worker holds encrypted copy before OPFS write). Acceptable for the 100MB limit; do not raise the limit without considering memory implications. +4. Create `CryptoBackend` via factory +5. Create `XFTPClientAgent` +6. 
`backend.encrypt(fileData, file.name, onProgress)` → `EncryptResult` + - Encryption progress shown on canvas (Worker posts progress messages) +7. Pick one random server from configured list (V1: all chunks to same server) +8. Call `uploadFile(agent, server, metadata, {onProgress, readChunk: (off, sz) => backend.readChunk(off, sz)})`: + - `metadata` = `{digest, key, nonce, chunkSizes}` from EncryptResult + - Network progress shown on canvas + - Returns `{rcvDescription, sndDescription, uri}` +9. Construct full URL: `window.location.origin + window.location.pathname + '#' + uri` +10. Display link, copy button +11. Cleanup: `backend.cleanup()`, `closeXFTPAgent(agent)` + +**Cancel:** User can abort via cancel button. Sets an `AbortController` signal that: +- Sends `{type: 'cleanup'}` to Worker +- Closes the XFTPClientAgent (drops HTTP/2 connections) +- Resets UI to landing state + +## 7. Download Flow + +`web/download.ts`: + +1. Parse `window.location.hash.slice(1)` → `decodeDescriptionURI(fragment)` → `FileDescription` +2. Display file size (`fd.size` bytes, formatted human-readable). Note: `fd.size` is the encrypted size (slightly larger than plaintext due to padding + auth tag). The plaintext size is not available until decryption — display it as an approximate file size. If `fd.redirect !== null`, size comes from `fd.redirect.size` (which is the inner encrypted size). +3. User clicks "Download" +4. Create `CryptoBackend` and `XFTPClientAgent` +5. Call `downloadFileRaw(agent, fd, onRawChunk, {onProgress, concurrency: 3})`: + - `onRawChunk` forwards each raw chunk to the Worker: `backend.decryptAndStoreChunk(raw.dhSecret, raw.nonce, raw.body, raw.digest, raw.chunkNo)` + - `downloadFileRaw` handles redirect resolution internally (see §7.1), parallel downloads, and connection pooling + - Returns the resolved `FileDescription` (inner fd for redirect case, original fd otherwise) +6. 
`backend.verifyAndDecrypt({size: resolvedFd.size, digest: resolvedFd.digest, key: resolvedFd.key, nonce: resolvedFd.nonce})` → `{header, content}` + - Verifies size + SHA-512 digest + file-level decryption inside Worker. Only the four needed fields are sent — private replica keys stay on the main thread. +7. ACK: `ackFileChunks(agent, resolvedFd)` — best-effort, after verification succeeds +8. Sanitize `header.fileName` before use: strip path separators (`/`, `\`), replace null/control characters (U+0000-U+001F, U+007F), strip Unicode bidi override characters (U+202A-U+202E, U+2066-U+2069 — prevents `doc.pdf.exe` appearing as `doc.exe.pdf`), limit length to 255 chars. The filename is user-controlled (set by the uploader) and arrives via decrypted content. Then trigger browser save: `new Blob([content])` → `<a download>` click +9. Cleanup: `backend.cleanup()`, `closeXFTPAgent(agent)` + +### 7.1 Redirect handling + +Handled inside `downloadFileRaw` in agent.ts — the web page doesn't see it. When `fd.redirect !== null`: + +1. Download redirect chunks via `downloadXFTPChunk` (parallel, same as regular chunks) +2. Transit-decrypt + verify + file-level decrypt on main thread (redirect data is always small — a few KB of YAML, so main thread decryption is fine) +3. Parse YAML → inner `FileDescription`, validate against `fd.redirect.{size, digest}` +4. ACK redirect chunks (best-effort) +5. Continue downloading inner description's chunks, calling `onRawChunk` for each + +### 7.2 Architecture note: download refactoring + +Both upload and download use `agent.ts` for orchestration. The key difference is where the crypto/network split happens: + +- **Upload**: agent.ts reads encrypted chunks from the Worker via `readChunk` callback, sends them over the network. 
+- **Download**: agent.ts receives raw encrypted responses from the network via `downloadXFTPChunkRaw` (DH key exchange + network only, no decryption), passes them to the web page via `onRawChunk` callback, which routes them to the Worker for transit decryption. + +This split keeps all expensive crypto off the main thread. Transit decryption uses a custom JS Salsa20 implementation (`xorKeystream` in secretbox.ts) that would block the UI for ~50-200ms on a 4MB chunk. File-level decryption (`decryptChunks`) is similarly expensive. Both happen in the Worker. + +The cheap operations stay on the main thread: DH key exchange (`generateX25519KeyPair` + `dh` — ~1ms via libsodium WASM), XFTP command encoding/decoding, connection management. + +## 8. Build & Dev Setup + +### 8.1 vite.config.ts (new, separate from vitest.config.ts) + +```typescript +import {defineConfig, type Plugin} from 'vite' +import {readFileSync} from 'fs' +import {createHash} from 'crypto' +import presets from './web/servers.json' + +function parseHost(addr: string): string { + const m = addr.match(/@(.+)$/) + if (!m) throw new Error('bad server address: ' + addr) + const host = m[1].split(',')[0] + return host.includes(':') ? 
host : host + ':443' +} + +function cspPlugin(servers: string[]): Plugin { + const origins = servers.map(s => 'https://' + parseHost(s)).join(' ') + return { + name: 'csp-connect-src', + transformIndexHtml: { + order: 'pre', + handler(html, ctx) { + if (ctx.server) { + // Dev mode: remove CSP meta tag entirely — Vite HMR needs inline scripts + return html.replace(/<meta[^>]*?Content-Security-Policy[\s\S]*?>/i, '') + } + return html.replace('__CSP_CONNECT_SRC__', origins) + } + } + } +} + +export default defineConfig(({mode}) => { + const define: Record<string, string> = {} + let servers: string[] + + if (mode === 'local') { + const pem = readFileSync('../tests/fixtures/ca.crt', 'utf-8') + const der = Buffer.from(pem.replace(/-----[^-]+-----/g, '').replace(/\s/g, ''), 'base64') + const fp = createHash('sha256').update(der).digest('base64') + .replace(/\+/g, '-').replace(/\//g, '_') + servers = [`xftp://${fp}@localhost:7000`] + define['__XFTP_SERVERS__'] = JSON.stringify(servers) + } else { + servers = [...presets.simplex, ...presets.flux] + } + + return { + root: 'web', + build: {outDir: '../dist-web'}, + define, + worker: {format: 'es'}, + plugins: [cspPlugin(servers)], + } +}) +``` + +### 8.2 package.json scripts + +```json +"dev": "vite --mode local", +"build:local": "vite build --mode local", +"build:prod": "vite build --mode production", +"preview": "vite preview", +"check:web": "tsc -p tsconfig.web.json --noEmit && tsc -p tsconfig.worker.json --noEmit" +``` + +Note: `check:web` type-checks `src/` twice (once per config) — acceptable for this small library. + +Add `vite` as an explicit devDependency (`^6.0.0` — matching the version vitest 3.x depends on transitively). Relying on transitive resolution is fragile across package managers. + +### 8.3 TypeScript configuration + +The existing `tsconfig.json` has `rootDir: "src"` and `include: ["src/**/*.ts"]` — this is for library compilation only (output to `dist/`). 
Vite handles `web/` TypeScript compilation independently via esbuild, so the main tsconfig is unchanged. `web/*.ts` files import from `../src/*.js` using relative paths. + +Add two tsconfigs for `web/` type-checking — split by environment to avoid type pollution between DOM and WebWorker globals: + +`tsconfig.web.json` — main-thread files (DOM globals: `document`, `window`, etc.): + +```json +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "rootDir": ".", + "noEmit": true, + "types": [], + "moduleResolution": "bundler", + "lib": ["ES2022", "DOM"] + }, + "include": ["web/**/*.ts", "src/**/*.ts"], + "exclude": ["web/crypto.worker.ts"] +} +``` + +`tsconfig.worker.json` — Worker file (`self`, `FileSystemSyncAccessHandle`, etc.): + +```json +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "rootDir": ".", + "noEmit": true, + "types": [], + "moduleResolution": "bundler", + "lib": ["ES2022", "WebWorker"] + }, + "include": ["web/crypto.worker.ts", "src/**/*.ts"] +} +``` + +Both configs set `"types": []` to prevent auto-inclusion of `@types/node` and `"moduleResolution": "bundler"` for Vite-compatible resolution (JSON imports, `.js` extension mapping). The base config's `"moduleResolution": "node"` would cause false type errors on `import ... from './servers.json'`. Both override `@types/node`, which would pollute DOM/WebWorker environments with Node.js globals (`process`, `Buffer`, etc.). This means `src/client.ts`'s dynamic `import("node:http2")` will produce a type error in these configs. This is acceptable — `src/client.ts` provides `createNodeTransport` which is never used in browser code (Vite tree-shakes it out), and full `src/` type-checking is handled by the base `tsconfig.json`. If the error is distracting, add `src/client.ts` to both configs' `exclude` arrays. + +Both extend the library tsconfig (inheriting `strict`, `module`, etc.) and include `src/**/*.ts` so imports from `../src/*.js` resolve. 
`"noEmit": true` means they're only used for type-checking — Vite handles actual compilation. The inherited `"exclude": ["node_modules", "dist", "test"]` intentionally excludes `test/` — test files are type-checked by their own vitest/playwright configs, not by `check:web`. + +### 8.4 Dev workflow + +`npm run dev` → Vite dev server at `localhost:5173`, configured for local test server. Start `xftp-server` on port 7000 separately (or via the existing globalSetup). + +Note: The CSP meta tag's `default-src 'self'` blocks Vite's injected HMR inline scripts in dev mode. The `cspPlugin` handles this by removing the entire CSP `` tag in serve mode (dev server), so HMR works without restrictions. Production builds always have the correct CSP. + +## 9. Library Changes (agent.ts + client.ts) + +Changes to support the web page: upload `readChunk` callback, download `onRawChunk` callback with parallel chunk downloads. + +### 9.1 Type changes + +Split the existing `EncryptedFileInfo` (which currently has `encData`, `digest`, `key`, `nonce`, `chunkSizes` as direct fields) into a metadata-only base and an extension: + +```typescript +// Metadata-only variant (no encData — data lives in Worker/OPFS) +export interface EncryptedFileMetadata { + digest: Uint8Array + key: Uint8Array + nonce: Uint8Array + chunkSizes: number[] +} + +// Full variant (existing, extends metadata with data) +export interface EncryptedFileInfo extends EncryptedFileMetadata { + encData: Uint8Array +} +``` + +### 9.2 uploadFile signature change + +Replace positional optional params with an options bag. Add optional `readChunk`. When provided, `encrypted.encData` is not accessed. 
+ +```typescript +export interface UploadOptions { + onProgress?: (uploaded: number, total: number) => void + redirectThreshold?: number + readChunk?: (offset: number, size: number) => Promise<Uint8Array> +} + +export async function uploadFile( + agent: XFTPClientAgent, + server: XFTPServer, + encrypted: EncryptedFileMetadata, + options?: UploadOptions +): Promise +``` + +Inside `uploadFile`: +- Chunk read: if `options?.readChunk` is provided, use it. Otherwise, verify `'encData' in encrypted` at runtime (throws `"uploadFile: readChunk required when encData is absent"` if missing), then use `(off, sz) => Promise.resolve((encrypted as EncryptedFileInfo).encData.subarray(off, off + sz))`. This guards against calling `uploadFile` with `EncryptedFileMetadata` but no `readChunk`. For each chunk, call `readChunk(offset, size)` once and use the returned `Uint8Array` for both `getChunkDigest(chunkData)` and `uploadXFTPChunk(..., chunkData)` — do not call `readChunk` twice per chunk. +- Progress total: `const total = encrypted.chunkSizes.reduce((a, b) => a + b, 0)` — replaces `encrypted.encData.length` (line 129) since `EncryptedFileMetadata` has no `encData`. The values are identical: `encData.length === sum(chunkSizes)`. +- `buildDescription` parameter type: change from `EncryptedFileInfo` to `EncryptedFileMetadata` — it only accesses `chunkSizes`, `digest`, `key`, `nonce` (not `encData`). + +`uploadRedirectDescription` (internal) is unchanged — redirect descriptions are always small and created in-memory by `encryptFileForUpload`. + +### 9.3 Backward compatibility + +The signature change from positional params `(agent, server, encrypted, onProgress?, redirectThreshold?)` to `(agent, server, encrypted, options?)` is a breaking change for callers that pass `onProgress` or `redirectThreshold`. In practice, the only callers are the browser test (which passes no options — no change needed) and the web page (new code). 
`EncryptedFileInfo` extends `EncryptedFileMetadata`, so existing callers that pass `EncryptedFileInfo` work without change. + +### 9.4 client.ts: downloadXFTPChunkRaw + +Split `downloadXFTPChunk` at the network/crypto boundary. The new function does DH key exchange and network I/O but skips transit decryption: + +```typescript +export interface RawChunkResponse { + dhSecret: Uint8Array + nonce: Uint8Array + body: Uint8Array +} + +export async function downloadXFTPChunkRaw( + c: XFTPClient, rpKey: Uint8Array, fId: Uint8Array +): Promise<RawChunkResponse> { + const {publicKey, privateKey} = generateX25519KeyPair() + const cmd = encodeFGET(encodePubKeyX25519(publicKey)) + const {response, body} = await sendXFTPCommand(c, rpKey, fId, cmd) + if (response.type !== "FRFile") throw new Error("unexpected response: " + response.type) + const dhSecret = dh(response.rcvDhKey, privateKey) + return {dhSecret, nonce: response.nonce, body} +} +``` + +`RawChunkResponse` contains only what client.ts produces (DH secret, nonce, encrypted body). The chunk metadata (`chunkNo`, `digest`) is added by agent.ts when constructing `RawDownloadedChunk` (see §9.5). + +The existing `downloadXFTPChunk` is refactored to call `downloadXFTPChunkRaw` + `decryptReceivedChunk`: + +```typescript +export async function downloadXFTPChunk( + c: XFTPClient, rpKey: Uint8Array, fId: Uint8Array, digest?: Uint8Array +): Promise<Uint8Array> { + const {dhSecret, nonce, body} = await downloadXFTPChunkRaw(c, rpKey, fId) + return decryptReceivedChunk(dhSecret, nonce, body, digest ?? null) +} +``` + +### 9.5 agent.ts: downloadFileRaw, ackFileChunks, RawDownloadedChunk + +New type combining client.ts's `RawChunkResponse` with chunk metadata from agent.ts: + +```typescript +export interface RawDownloadedChunk { + chunkNo: number + dhSecret: Uint8Array + nonce: Uint8Array + body: Uint8Array + digest: Uint8Array +} +``` + +New function providing download orchestration with a raw chunk callback. 
Handles connection pooling, parallel downloads, redirect resolution, and progress. Does **not** ACK — the caller ACKs after verification. + +```typescript +export interface DownloadRawOptions { + onProgress?: (downloaded: number, total: number) => void + concurrency?: number // max parallel chunk downloads, default 1 +} + +export async function downloadFileRaw( + agent: XFTPClientAgent, + fd: FileDescription, + onRawChunk: (chunk: RawDownloadedChunk) => Promise<void>, + options?: DownloadRawOptions +): Promise<FileDescription> +``` + +Returns the resolved `FileDescription` — for redirect files this is the inner fd, for non-redirect files this is the original fd. The caller uses this for verification and ACK. + +Internal structure: + +1. Validate `fd` via `validateFileDescription` (may double-validate if caller already validated via `decodeDescriptionURI` — harmless) +2. If `fd.redirect !== null`: resolve redirect on main thread (redirect data is small): + a. Download redirect chunks via `downloadXFTPChunk` (not raw — main thread decryption is fine for a few KB) + b. Verify size + digest, `processDownloadedFile` → YAML bytes + c. Parse inner `FileDescription`, validate against `fd.redirect.{size, digest}` + d. ACK redirect chunks (best-effort — redirect chunks are small and separate from the file chunks) + e. Replace `fd` with inner description +3. Pre-connect: call `getXFTPServerClient(agent, server)` for each unique server before launching concurrent workers. This ensures the client connection exists in the agent's map, avoiding a race condition where multiple concurrent workers all see the client as missing and each call `connectXFTP` independently (leaking all but the last connection). Known limitation: if a connection drops mid-download and multiple workers attempt reconnection simultaneously, the same TOCTOU race reappears. 
This is a pre-existing issue in `getXFTPServerClient`; a proper fix (per-key connection promise) is out of scope for this plan but should be tracked for follow-up. +4. Download file chunks in parallel (concurrency-limited via sliding window): + - Create a queue of chunk indices `[0, 1, ..., N-1]`. Launch `min(concurrency, N)` async workers, each pulling the next index from the queue until empty. Each worker loops: pull index → derive key → `getXFTPServerClient` → `downloadXFTPChunkRaw` → `await onRawChunk(...)` → update progress → next index. `await Promise.all(workers)` to wait for completion. + - For each chunk: derive key (`decodePrivKeyEd25519` → `ed25519KeyPairFromSeed`), get client (`getXFTPServerClient`), call `downloadXFTPChunkRaw`, `await onRawChunk(...)` with result + `chunkNo` + `chunk.digest` + - Each concurrency slot awaits its `onRawChunk` before starting the next download on that slot. With `concurrency > 1`, multiple `onRawChunk` calls may be in-flight concurrently (one per slot). The Worker handles this correctly — messages are queued and processed sequentially. + - Update progress after each chunk: `downloaded += chunk.chunkSize; onProgress?.(downloaded, resolvedFd.size)` — both values use encrypted sizes for consistency +5. 
Return the resolved `fd` + +New helper for ACKing after verification: + +```typescript +export async function ackFileChunks( + agent: XFTPClientAgent, fd: FileDescription +): Promise<void> { + for (const chunk of fd.chunks) { + const replica = chunk.replicas[0] + if (!replica) continue + try { + const client = await getXFTPServerClient(agent, parseXFTPServer(replica.server)) + const seed = decodePrivKeyEd25519(replica.replicaKey) + const kp = ed25519KeyPairFromSeed(seed) + await ackXFTPChunk(client, kp.privateKey, replica.replicaId) + } catch (_) {} + } +} +``` + +The existing `downloadFile` is refactored to use `downloadFileRaw` internally: + +```typescript +export async function downloadFile( + agent: XFTPClientAgent, + fd: FileDescription, + onProgress?: (downloaded: number, total: number) => void +): Promise<{header: FileHeader, content: Uint8Array}> { + const chunks: Uint8Array[] = [] + const resolvedFd = await downloadFileRaw(agent, fd, async (raw) => { + chunks[raw.chunkNo - 1] = decryptReceivedChunk( + raw.dhSecret, raw.nonce, raw.body, raw.digest + ) + }, {onProgress}) + // verify + file-level decrypt using resolvedFd (inner fd for redirect case) + const combined = chunks.length === 1 ? chunks[0] : concatBytes(...chunks) + if (combined.length !== resolvedFd.size) throw new Error("downloadFile: file size mismatch") + const digest = sha512(combined) + if (!digestEqual(digest, resolvedFd.digest)) throw new Error("downloadFile: file digest mismatch") + // processDownloadedFile re-concatenates chunks internally — this mirrors the + // existing downloadFile pattern (verify on concatenated data, then pass chunks + // array to decryptChunks which concatenates again). Acceptable overhead for + // correctness: verification must happen on transit-decrypted data before + // file-level decryption transforms it. + const result = processDownloadedFile(resolvedFd, chunks) + await ackFileChunks(agent, resolvedFd) + return result +} +``` + +Existing callers retain serial behavior (`concurrency` defaults to 1). 
The web page opts into parallelism by passing `concurrency: 3`. The browser test (`test/browser.test.ts`) continues to work unchanged. The chunks array is initialized empty (`[]`) and populated by sparse index assignment (`chunks[raw.chunkNo - 1] = ...`), so it correctly handles both redirect and non-redirect cases regardless of the outer fd's chunk count. `digestEqual` is an existing module-private helper in agent.ts (line 327) that performs constant-time byte comparison. + +### 9.6 Backward compatibility (download) + +`downloadFile` signature is unchanged — existing callers are unaffected. The refactoring adds `downloadFileRaw`, `ackFileChunks`, and `RawDownloadedChunk` as new exports from agent.ts, and `downloadXFTPChunkRaw` + `RawChunkResponse` as new exports from client.ts. + +## 10. Testing + +### 10.1 Existing tests (unchanged) + +- `npm run test:browser` — vitest browser round-trip (library-level) +- `cabal test --test-option='--match=/XFTP Web Client/'` — Haskell per-function tests + +### 10.2 New: page E2E test + +Add `test/page.spec.ts` using `@playwright/test` (not vitest browser mode — vitest tests run IN the browser and can't control page navigation; Playwright tests run in Node.js and control the browser). Add `@playwright/test` as a devDependency. + +Add `playwright.config.ts` at the project root (`xftp-web/`): +- `webServer: { command: 'vite build --mode local && vite preview', url: 'http://localhost:4173', reuseExistingServer: !process.env.CI }` — the `url` property tells Playwright to wait until the preview server is ready before running tests +- `use.ignoreHTTPSErrors: true` (test server uses self-signed cert) +- `use.launchOptions: { args: ['--ignore-certificate-errors'] }` — required because Playwright's `ignoreHTTPSErrors` only affects page navigation, not `fetch()` calls from in-page JavaScript. Without this flag, the page's `createBrowserTransport` fetch to `https://localhost:7000` would fail TLS validation. 
+- `globalSetup`: `'./test/globalSetup.ts'` (starts xftp-server, shared with vitest) + +```typescript +import {test, expect} from '@playwright/test' + +test('page upload + download round-trip', async ({page}) => { + await page.goto(PAGE_URL) + // Set file input via page.setInputFiles() + // Wait for upload link to appear: page.waitForSelector('[data-testid="share-link"]') + // Extract hash from link text + // Navigate to PAGE_URL + '#' + hash + // Wait for download complete state + // Verify file was offered for save (check download event) +}) +``` + +Add script: `"test:page": "playwright test test/page.spec.ts"` + +This tests the real bundle including Worker loading, OPFS, and CSP. The existing `test/browser.test.ts` continues to test the library-level pipeline (vitest browser mode, no Workers). + +### 10.3 Manual testing + +`npm run dev` → open `localhost:5173` in browser → drag file → get link → open link in new tab → download. Requires xftp-server running on port 7000 (local mode). + +## 11. 
Files + +**Create:** +- `xftp-web/web/index.html` — page entry point (includes CSP meta tag) +- `xftp-web/web/main.ts` — router + libsodium init +- `xftp-web/web/upload.ts` — upload UI + orchestration +- `xftp-web/web/download.ts` — download UI + orchestration +- `xftp-web/web/progress.ts` — circular progress canvas component +- `xftp-web/web/servers.json` — preset server addresses (shared by servers.ts and vite.config.ts) +- `xftp-web/web/servers.ts` — server configuration (imports servers.json) +- `xftp-web/web/crypto-backend.ts` — CryptoBackend interface + WorkerBackend + factory +- `xftp-web/web/crypto.worker.ts` — Web Worker implementation +- `xftp-web/web/style.css` — styles +- `xftp-web/vite.config.ts` — page build config (CSP generation, server list) +- `xftp-web/tsconfig.web.json` — IDE/CI type-checking for `web/` main-thread files (DOM) +- `xftp-web/tsconfig.worker.json` — IDE/CI type-checking for `web/crypto.worker.ts` (WebWorker) +- `xftp-web/playwright.config.ts` — Playwright E2E test config (webServer, globalSetup) +- `xftp-web/test/page.spec.ts` — page E2E test (Playwright) + +**Modify:** +- `xftp-web/src/agent.ts` — add `EncryptedFileMetadata` type, `uploadFile` options bag with `readChunk`, `downloadFileRaw` with `onRawChunk` callback + parallel downloads, `ackFileChunks`, `RawDownloadedChunk` type, refactor `downloadFile` on top of `downloadFileRaw`, add `import {decryptReceivedChunk} from "./download.js"` (needed by refactored `downloadFile`) +- `xftp-web/src/client.ts` — add `downloadXFTPChunkRaw`, `RawChunkResponse` type, refactor `downloadXFTPChunk` to use raw variant +- `xftp-web/package.json` — add dev/build/check:web/test:page scripts, add `vite` + `@playwright/test` devDeps +- `xftp-web/src/protocol/description.ts` — fix stale "SHA-256" comment on `FileDescription.digest` to "SHA-512" +- `xftp-web/.gitignore` — add `dist-web/` + +## 12. Implementation Order + +1. 
**Library refactoring** — `client.ts`: add `downloadXFTPChunkRaw`; `agent.ts`: add `downloadFileRaw` + parallel downloads, `uploadFile` options bag with `readChunk`; refactor existing `downloadFile` on top of `downloadFileRaw`. Run existing tests to verify no regressions. +2. **Vite config + HTML shell** — `vite.config.ts`, `index.html`, `main.ts`, verify dev server works +3. **Server config** — `servers.ts` with both local and production server lists +4. **CryptoBackend + Worker** — interface, WorkerBackend, Worker implementation, OPFS logic +5. **Upload flow** — `upload.ts` with drag-drop, encrypt via Worker, upload via agent, show link +6. **Download flow** — `download.ts` with URL parsing, download via agent `downloadFileRaw`, Worker decrypt, browser save +7. **Progress component** — `progress.ts` canvas drawing +8. **Styling** — `style.css` +9. **Testing** — page E2E test, manual browser verification +10. **Build scripts** — `build:local`, `build:prod` in package.json diff --git a/rfcs/2026-01-30-send-file-page/2026-02-04-xftp-web-persistent-connections.md b/rfcs/2026-01-30-send-file-page/2026-02-04-xftp-web-persistent-connections.md new file mode 100644 index 0000000000..6aa8c2f51c --- /dev/null +++ b/rfcs/2026-01-30-send-file-page/2026-02-04-xftp-web-persistent-connections.md @@ -0,0 +1,53 @@ +# XFTPClientAgent Pattern + +## TOC +1. Executive Summary +2. Changes: client.ts +3. Changes: agent.ts +4. Changes: test/browser.test.ts +5. Verification + +## Executive Summary + +Add `XFTPClientAgent` — a per-server connection pool matching the Haskell pattern. The agent caches `XFTPClient` instances by server URL. All orchestration functions (`uploadFile`, `downloadFile`, `deleteFile`) take `agent` as first parameter and use `getXFTPServerClient(agent, server)` instead of calling `connectXFTP` directly. Connections stay open on success; the caller creates and closes the agent. + +`connectXFTP` and `closeXFTP` stay exported (used by `XFTPWebTests.hs` Haskell tests). 
The `browserClients` hack, per-function `connections: Map`, and `getOrConnect` are deleted. + +## Changes: client.ts + +**Add** after types section: `XFTPClientAgent` interface, `newXFTPAgent`, `getXFTPServerClient`, `closeXFTPServerClient`, `closeXFTPAgent`. + +**Delete**: `browserClients` Map and all `isNode` browser-cache checks in `connectXFTP` and `closeXFTP`. + +**Revert `closeXFTP`** to unconditional `c.transport.close()` (browser transport.close() is already a no-op). + +`connectXFTP` stays exported (backward compat) but becomes a raw low-level function — no caching. + +## Changes: agent.ts + +**Imports**: replace `connectXFTP`/`closeXFTP` with `getXFTPServerClient`/`closeXFTPAgent` etc. + +**Re-export** from agent.ts: `newXFTPAgent`, `closeXFTPAgent`, `XFTPClientAgent`. + +**`uploadFile`**: add `agent: XFTPClientAgent` as first param. Replace `connectXFTP` → `getXFTPServerClient`. Remove `finally { closeXFTP }`. Pass `agent` to `uploadRedirectDescription`. + +**`uploadRedirectDescription`**: change from `(client, server, innerFd)` to `(agent, server, innerFd)`. Get client via `getXFTPServerClient`. + +**`downloadFile`**: add `agent` param. Delete local `connections: Map`. Replace `getOrConnect` → `getXFTPServerClient`. Remove finally cleanup. Pass `agent` to `downloadWithRedirect`. + +**`downloadWithRedirect`**: add `agent` param. Same replacements. Remove try/catch cleanup. Recursive call passes `agent`. + +**`deleteFile`**: add `agent` param. Same pattern. + +**Delete**: `getOrConnect` function entirely. + +## Changes: test/browser.test.ts + +Create agent before operations, pass to upload/download, close in finally. + +## Verification + +1. `npx vitest --run` — browser round-trip test passes +2. No remaining `browserClients`, `getOrConnect`, or per-function `connections: Map` locals +3. `connectXFTP` and `closeXFTP` still exported (XFTPWebTests.hs compat) +4. 
All orchestration functions take `agent` as first param diff --git a/rfcs/2026-01-30-send-file-page/2026-02-05-xftp-web-e2e-tests.md b/rfcs/2026-01-30-send-file-page/2026-02-05-xftp-web-e2e-tests.md new file mode 100644 index 0000000000..2dda76aeee --- /dev/null +++ b/rfcs/2026-01-30-send-file-page/2026-02-05-xftp-web-e2e-tests.md @@ -0,0 +1,859 @@ +# XFTP Web Page E2E Tests Plan + +## Table of Contents + +1. [Executive Summary](#1-executive-summary) +2. [Test Infrastructure](#2-test-infrastructure) +3. [Test Infrastructure - Page Objects](#3-test-infrastructure---page-objects) +4. [Upload Flow Tests](#4-upload-flow-tests) +5. [Download Flow Tests](#5-download-flow-tests) +6. [Edge Cases](#6-edge-cases) +7. [Implementation Order](#7-implementation-order) +8. [Test Utilities](#8-test-utilities) + +--- + +## 1. Executive Summary + +This document specifies comprehensive Playwright E2E tests for the XFTP web page. The existing test (`page.spec.ts`) performs a basic upload/download round-trip. This plan extends coverage to: + +- **Upload flow**: File selection (picker + drag-drop), validation, progress, cancellation, link sharing, error handling +- **Download flow**: Invalid link handling, download button, progress, file save, error states +- **Edge cases**: Boundary file sizes, special characters, network failures, multi-chunk files with redirect, UI information display + +**Key constraints**: +- Tests run against a local XFTP server (started via `globalSetup.ts`) +- Server port is dynamic (read from `/tmp/xftp-test-server.port`) +- Browser uses `--ignore-certificate-errors` for self-signed certs +- OPFS and Web Workers are required (Chromium supports both) + +**Test file location**: `/code/simplexmq/xftp-web/test/page.spec.ts` + +**Architecture**: Tests use the Page Object Model pattern to encapsulate UI interactions, making tests read as domain-specific scenarios rather than raw Playwright API calls. + +--- + +## 2. 
Test Infrastructure + +### 2.1 Current Setup + +``` +xftp-web/ +├── playwright.config.ts # Playwright config (webServer, globalSetup) +├── test/ +│ ├── globalSetup.ts # Starts xftp-server, writes port to PORT_FILE +│ ├── page.spec.ts # E2E tests (to be extended) +│ └── pages/ # Page Objects (new) +│ ├── UploadPage.ts +│ └── DownloadPage.ts +``` + +### 2.2 Prerequisites + +- `globalSetup.ts` starts the XFTP server and writes port to `PORT_FILE` +- Tests must read the port dynamically: `readFileSync(PORT_FILE, 'utf-8').trim()` +- Vite builds and serves the page at `http://localhost:4173` + +--- + +## 3. Test Infrastructure - Page Objects + +Page Objects encapsulate page-specific selectors and actions, providing a clean API for tests. This follows the standard Page Object Model pattern used in simplex-chat and most professional test suites. + +### 3.1 UploadPage + +```typescript +// test/pages/UploadPage.ts +import {Page, Locator, expect} from '@playwright/test' + +export class UploadPage { + readonly page: Page + readonly dropZone: Locator + readonly fileInput: Locator + readonly progressStage: Locator + readonly progressCanvas: Locator + readonly statusText: Locator + readonly cancelButton: Locator + readonly completeStage: Locator + readonly shareLink: Locator + readonly copyButton: Locator + readonly errorStage: Locator + readonly errorMessage: Locator + readonly retryButton: Locator + readonly expiryNote: Locator + readonly securityNote: Locator + + constructor(page: Page) { + this.page = page + this.dropZone = page.locator('#drop-zone') + this.fileInput = page.locator('#file-input') + this.progressStage = page.locator('#upload-progress') + this.progressCanvas = page.locator('#progress-container canvas') + this.statusText = page.locator('#upload-status') + this.cancelButton = page.locator('#cancel-btn') + this.completeStage = page.locator('#upload-complete') + this.shareLink = page.locator('[data-testid="share-link"]') + this.copyButton = 
page.locator('#copy-btn')
+    this.errorStage = page.locator('#upload-error')
+    this.errorMessage = page.locator('#error-msg')
+    this.retryButton = page.locator('#retry-btn')
+    this.expiryNote = page.locator('.expiry')
+    this.securityNote = page.locator('.security-note')
+  }
+
+  async goto() {
+    await this.page.goto('http://localhost:4173')
+  }
+
+  async selectFile(name: string, content: Buffer, mimeType = 'application/octet-stream') {
+    await this.fileInput.setInputFiles({name, mimeType, buffer: content})
+  }
+
+  async selectTextFile(name: string, content: string) {
+    await this.selectFile(name, Buffer.from(content, 'utf-8'), 'text/plain')
+  }
+
+  async selectLargeFile(name: string, sizeBytes: number) {
+    // Create large file in browser to avoid memory issues in test process
+    await this.page.evaluate(({name, size}) => {
+      const input = document.getElementById('file-input') as HTMLInputElement
+      const buffer = new ArrayBuffer(size)
+      new Uint8Array(buffer).fill(0x55)
+      const file = new File([buffer], name, {type: 'application/octet-stream'})
+      const dt = new DataTransfer()
+      dt.items.add(file)
+      input.files = dt.files
+      input.dispatchEvent(new Event('change', {bubbles: true}))
+    }, {name, size: sizeBytes})
+  }
+
+  async dragDropFile(name: string, content: Buffer) {
+    // Drag-drop uses same file input handler internally
+    await this.selectFile(name, content)
+  }
+
+  async waitForEncrypting(timeout = 10_000) {
+    await expect(this.statusText).toContainText('Encrypting', {timeout})
+  }
+
+  async waitForUploading(timeout = 30_000) {
+    await expect(this.statusText).toContainText('Uploading', {timeout})
+  }
+
+  async waitForShareLink(timeout = 60_000): Promise<string> {
+    await expect(this.shareLink).toBeVisible({timeout})
+    return await this.shareLink.inputValue()
+  }
+
+  async clickCopy() {
+    await this.copyButton.click()
+    await expect(this.copyButton).toContainText('Copied!')
+  }
+
+  async clickCancel() {
+    await this.cancelButton.click()
+  }
+
+  async clickRetry() {
+    await 
this.retryButton.click() + } + + async expectError(messagePattern: string | RegExp) { + await expect(this.errorStage).toBeVisible() + await expect(this.errorMessage).toContainText(messagePattern) + } + + async expectDropZoneVisible() { + await expect(this.dropZone).toBeVisible() + } + + async expectProgressVisible() { + await expect(this.progressStage).toBeVisible() + await expect(this.progressCanvas).toBeVisible() + } + + async expectCompleteWithExpiry() { + await expect(this.completeStage).toBeVisible() + await expect(this.expiryNote).toContainText('48 hours') + } + + async expectSecurityNote() { + await expect(this.securityNote).toBeVisible() + await expect(this.securityNote).toContainText('encrypted') + } + + getHashFromLink(url: string): string { + return new URL(url).hash + } +} +``` + +### 3.2 DownloadPage + +```typescript +// test/pages/DownloadPage.ts +import {Page, Locator, expect, Download} from '@playwright/test' + +export class DownloadPage { + readonly page: Page + readonly readyStage: Locator + readonly downloadButton: Locator + readonly progressStage: Locator + readonly progressCanvas: Locator + readonly statusText: Locator + readonly errorStage: Locator + readonly errorMessage: Locator + readonly retryButton: Locator + readonly securityNote: Locator + + constructor(page: Page) { + this.page = page + this.readyStage = page.locator('#dl-ready') + this.downloadButton = page.locator('#dl-btn') + this.progressStage = page.locator('#dl-progress') + this.progressCanvas = page.locator('#dl-progress-container canvas') + this.statusText = page.locator('#dl-status') + this.errorStage = page.locator('#dl-error') + this.errorMessage = page.locator('#dl-error-msg') + this.retryButton = page.locator('#dl-retry-btn') + this.securityNote = page.locator('.security-note') + } + + async goto(hash: string) { + await this.page.goto(`http://localhost:4173${hash}`) + } + + async gotoWithLink(fullUrl: string) { + const hash = new URL(fullUrl).hash + await this.goto(hash) + 
}
+
+  async expectFileReady() {
+    await expect(this.readyStage).toBeVisible()
+    await expect(this.downloadButton).toBeVisible()
+  }
+
+  async expectFileSizeDisplayed() {
+    await expect(this.readyStage).toContainText(/\d+(?:\.\d+)?\s*(?:KB|MB|B)/)
+  }
+
+  async clickDownload(): Promise<Download> {
+    const downloadPromise = this.page.waitForEvent('download')
+    await this.downloadButton.click()
+    return downloadPromise
+  }
+
+  async waitForDownloading(timeout = 30_000) {
+    await expect(this.statusText).toContainText('Downloading', {timeout})
+  }
+
+  async waitForDecrypting(timeout = 30_000) {
+    await expect(this.statusText).toContainText('Decrypting', {timeout})
+  }
+
+  async expectProgressVisible() {
+    await expect(this.progressStage).toBeVisible()
+    await expect(this.progressCanvas).toBeVisible()
+  }
+
+  async expectInitialError(messagePattern: string | RegExp) {
+    // For malformed links - error shown in card without #dl-error stage
+    await expect(this.page.locator('.card .error')).toBeVisible()
+    await expect(this.page.locator('.card .error')).toContainText(messagePattern)
+  }
+
+  async expectRuntimeError(messagePattern: string | RegExp) {
+    // For runtime download errors - uses #dl-error stage
+    await expect(this.errorStage).toBeVisible()
+    await expect(this.errorMessage).toContainText(messagePattern)
+  }
+
+  async expectSecurityNote() {
+    await expect(this.securityNote).toBeVisible()
+    await expect(this.securityNote).toContainText('encrypted')
+  }
+}
+```
+
+### 3.3 Test Fixtures
+
+```typescript
+// test/fixtures.ts
+import {test as base} from '@playwright/test'
+import {UploadPage} from './pages/UploadPage'
+import {DownloadPage} from './pages/DownloadPage'
+import {readFileSync} from 'fs'
+
+// Extend Playwright test with page objects
+export const test = base.extend<{
+  uploadPage: UploadPage
+  downloadPage: DownloadPage
+}>({
+  uploadPage: async ({page}, use) => {
+    const uploadPage = new UploadPage(page)
+    await uploadPage.goto()
+    await use(uploadPage)
+  },
+  
downloadPage: async ({page}, use) => { + await use(new DownloadPage(page)) + }, +}) + +export {expect} from '@playwright/test' + +// Test data helpers +export function createTestContent(size: number, fill = 0x41): Buffer { + return Buffer.alloc(size, fill) +} + +export function createTextContent(text: string): Buffer { + return Buffer.from(text, 'utf-8') +} + +export function uniqueFileName(base: string, ext = 'txt'): string { + return `${base}-${Date.now()}.${ext}` +} +``` + +--- + +## 4. Upload Flow Tests + +### 4.1 File Selection - File Picker Button + +**Test ID**: `upload-file-picker` + +```typescript +test('upload via file picker button', async ({uploadPage}) => { + await uploadPage.expectDropZoneVisible() + + await uploadPage.selectTextFile('picker-test.txt', 'test content ' + Date.now()) + await uploadPage.waitForEncrypting() + await uploadPage.waitForUploading() + + const link = await uploadPage.waitForShareLink() + expect(link).toMatch(/^http:\/\/localhost:\d+\/#/) +}) +``` + +### 4.2 File Selection - Drag and Drop + +**Test ID**: `upload-drag-drop` + +```typescript +test('upload via drag and drop', async ({uploadPage}) => { + await uploadPage.dragDropFile('dragdrop-test.txt', createTextContent('drag drop test')) + await uploadPage.expectProgressVisible() + + const link = await uploadPage.waitForShareLink() + expect(link).toContain('#') +}) +``` + +### 4.3 File Size Validation - Too Large + +**Test ID**: `upload-file-too-large` + +```typescript +test('upload rejects file over 100MB', async ({uploadPage}) => { + await uploadPage.selectLargeFile('large.bin', 100 * 1024 * 1024 + 1) + await uploadPage.expectError('too large') + await uploadPage.expectError('100 MB') +}) +``` + +### 4.4 File Size Validation - Empty File + +**Test ID**: `upload-file-empty` + +```typescript +test('upload rejects empty file', async ({uploadPage}) => { + await uploadPage.selectFile('empty.txt', Buffer.alloc(0)) + await uploadPage.expectError('empty') +}) +``` + +### 4.5 Progress 
Display + +**Test ID**: `upload-progress-display` + +```typescript +test('upload shows progress during encryption and upload', async ({uploadPage}) => { + await uploadPage.selectFile('progress-test.bin', createTestContent(500 * 1024)) + + await uploadPage.expectProgressVisible() + await uploadPage.waitForEncrypting() + await uploadPage.waitForUploading() + await uploadPage.waitForShareLink() +}) +``` + +### 4.6 Cancel Button + +**Test ID**: `upload-cancel` + +```typescript +test('cancel button aborts upload and returns to landing', async ({uploadPage}) => { + await uploadPage.selectFile('cancel-test.bin', createTestContent(1024 * 1024)) + await uploadPage.expectProgressVisible() + + await uploadPage.clickCancel() + + await uploadPage.expectDropZoneVisible() + await expect(uploadPage.shareLink).toBeHidden() +}) +``` + +### 4.7 Share Link Display and Copy + +**Test ID**: `upload-share-link-copy` + +```typescript +test('share link copy button works', async ({uploadPage, context}) => { + await context.grantPermissions(['clipboard-read', 'clipboard-write']) + + await uploadPage.selectTextFile('copy-test.txt', 'copy test content') + const link = await uploadPage.waitForShareLink() + + await uploadPage.clickCopy() + + // Verify clipboard (may fail in headless) + try { + const clipboardText = await uploadPage.page.evaluate(() => navigator.clipboard.readText()) + expect(clipboardText).toBe(link) + } catch { + // Clipboard API may not be available + } +}) +``` + +### 4.8 Error Handling and Retry + +**Test ID**: `upload-error-retry` + +```typescript +test('error state shows retry button', async ({uploadPage}) => { + await uploadPage.selectFile('error-test.txt', Buffer.alloc(0)) + await uploadPage.expectError('empty') + await expect(uploadPage.retryButton).toBeVisible() +}) +``` + +--- + +## 5. 
Download Flow Tests + +### 5.1 Invalid Link Handling - Malformed Hash + +**Test ID**: `download-invalid-hash-malformed` + +```typescript +test('download shows error for malformed hash', async ({downloadPage}) => { + await downloadPage.goto('#not-valid-base64!!!') + await downloadPage.expectInitialError(/[Ii]nvalid|corrupted/) + await expect(downloadPage.downloadButton).not.toBeVisible() +}) +``` + +### 5.2 Invalid Link Handling - Valid Base64 but Invalid Structure + +**Test ID**: `download-invalid-hash-structure` + +```typescript +test('download shows error for invalid structure', async ({downloadPage}) => { + await downloadPage.goto('#AAAA') + await downloadPage.expectInitialError(/[Ii]nvalid|corrupted/) +}) +``` + +### 5.3 Download Button Click + +**Test ID**: `download-button-click` + +```typescript +test('download button initiates download', async ({uploadPage, downloadPage}) => { + // Upload first + await uploadPage.selectTextFile('dl-btn-test.txt', 'download test content') + const link = await uploadPage.waitForShareLink() + + // Navigate to download + await downloadPage.gotoWithLink(link) + await downloadPage.expectFileReady() + + // Click download + const download = await downloadPage.clickDownload() + expect(download.suggestedFilename()).toBe('dl-btn-test.txt') +}) +``` + +### 5.4 Progress Display + +**Test ID**: `download-progress-display` + +```typescript +test('download shows progress', async ({uploadPage, downloadPage}) => { + await uploadPage.selectFile('dl-progress.bin', createTestContent(500 * 1024)) + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + const downloadPromise = downloadPage.clickDownload() + + await downloadPage.expectProgressVisible() + await downloadPage.waitForDownloading() + + await downloadPromise +}) +``` + +### 5.5 File Save Verification + +**Test ID**: `download-file-save` + +```typescript +test('downloaded file content matches upload', async ({uploadPage, downloadPage}) => { + const 
content = 'verification content ' + Date.now() + const fileName = 'verify.txt' + + await uploadPage.selectTextFile(fileName, content) + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + const download = await downloadPage.clickDownload() + + expect(download.suggestedFilename()).toBe(fileName) + + const path = await download.path() + if (path) { + const downloadedContent = (await import('fs')).readFileSync(path, 'utf-8') + expect(downloadedContent).toBe(content) + } +}) +``` + +--- + +## 6. Edge Cases + +### 6.1 Very Small Files + +**Test ID**: `edge-small-file` + +```typescript +test('upload and download 1-byte file', async ({uploadPage, downloadPage}) => { + await uploadPage.selectFile('tiny.bin', Buffer.from([0x42])) + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + const download = await downloadPage.clickDownload() + + expect(download.suggestedFilename()).toBe('tiny.bin') + + const path = await download.path() + if (path) { + const content = (await import('fs')).readFileSync(path) + expect(content.length).toBe(1) + expect(content[0]).toBe(0x42) + } +}) +``` + +### 6.2 Files Near 100MB Limit + +**Test ID**: `edge-near-limit` + +```typescript +test.slow() +test('upload file at exactly 100MB', async ({uploadPage}) => { + await uploadPage.selectLargeFile('exactly-100mb.bin', 100 * 1024 * 1024) + + // Should succeed (not show error) + await expect(uploadPage.errorStage).toBeHidden({timeout: 5000}) + await uploadPage.expectProgressVisible() + + // Wait for completion (may take a while) + await uploadPage.waitForShareLink(300_000) +}) +``` + +### 6.3 Special Characters in Filename + +**Test ID**: `edge-special-chars-filename` + +```typescript +test('upload and download file with unicode filename', async ({uploadPage, downloadPage}) => { + const fileName = 'test-\u4e2d\u6587-\u0420\u0443\u0441\u0441\u043a\u0438\u0439.txt' + + await uploadPage.selectTextFile(fileName, 'unicode 
filename test') + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + const download = await downloadPage.clickDownload() + + expect(download.suggestedFilename()).toBe(fileName) +}) + +test('upload and download file with spaces', async ({uploadPage, downloadPage}) => { + const fileName = 'my document (final) v2.txt' + + await uploadPage.selectTextFile(fileName, 'spaces test') + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + const download = await downloadPage.clickDownload() + + expect(download.suggestedFilename()).toBe(fileName) +}) + +test('filename with path separators is sanitized', async ({uploadPage, downloadPage}) => { + await uploadPage.selectTextFile('../../../etc/passwd', 'path traversal test') + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + const download = await downloadPage.clickDownload() + + expect(download.suggestedFilename()).not.toContain('/') + expect(download.suggestedFilename()).not.toContain('\\') +}) +``` + +### 6.4 Network Errors (Mocked) + +**Test ID**: `edge-network-error` + +```typescript +test('upload handles network error gracefully', async ({uploadPage}) => { + // Intercept and abort POST requests + await uploadPage.page.route('**/localhost:*', route => { + if (route.request().method() === 'POST') { + route.abort('failed') + } else { + route.continue() + } + }) + + await uploadPage.selectTextFile('network-error.txt', 'network error test') + await uploadPage.expectError(/.+/) // Any error message +}) +``` + +### 6.5 Binary File Content Integrity + +**Test ID**: `edge-binary-content` + +```typescript +test('binary file with all byte values', async ({uploadPage, downloadPage}) => { + // Create buffer with all 256 byte values + const buffer = Buffer.alloc(256) + for (let i = 0; i < 256; i++) buffer[i] = i + + await uploadPage.selectFile('all-bytes.bin', buffer) + const link = await 
uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + const download = await downloadPage.clickDownload() + + const path = await download.path() + if (path) { + const content = (await import('fs')).readFileSync(path) + expect(content.length).toBe(256) + for (let i = 0; i < 256; i++) { + expect(content[i]).toBe(i) + } + } +}) +``` + +### 6.6 Multiple Concurrent Downloads + +**Test ID**: `edge-concurrent-downloads` + +```typescript +test('concurrent downloads from same link', async ({browser}) => { + const context = await browser.newContext({ignoreHTTPSErrors: true}) + const page1 = await context.newPage() + const upload = new UploadPage(page1) + + await upload.goto() + await upload.selectTextFile('concurrent.txt', 'concurrent download test') + const link = await upload.waitForShareLink() + const hash = upload.getHashFromLink(link) + + // Open two tabs and download concurrently + const page2 = await context.newPage() + const page3 = await context.newPage() + const dl2 = new DownloadPage(page2) + const dl3 = new DownloadPage(page3) + + await dl2.goto(hash) + await dl3.goto(hash) + + const [download2, download3] = await Promise.all([ + dl2.clickDownload(), + dl3.clickDownload() + ]) + + expect(download2.suggestedFilename()).toBe('concurrent.txt') + expect(download3.suggestedFilename()).toBe('concurrent.txt') + + await context.close() +}) +``` + +### 6.7 Redirect File Handling (Multi-chunk) + +**Test ID**: `edge-redirect-file` + +```typescript +test.slow() +test('upload and download multi-chunk file with redirect', async ({uploadPage, downloadPage}) => { + // Use ~5MB file to get multiple chunks + await uploadPage.selectLargeFile('multi-chunk.bin', 5 * 1024 * 1024) + const link = await uploadPage.waitForShareLink(120_000) + + await downloadPage.gotoWithLink(link) + const download = await downloadPage.clickDownload() + + expect(download.suggestedFilename()).toBe('multi-chunk.bin') + + const path = await download.path() + if (path) { + const stat = 
(await import('fs')).statSync(path) + expect(stat.size).toBe(5 * 1024 * 1024) + } +}) +``` + +### 6.8 UI Information Display + +**Test ID**: `edge-ui-info` + +```typescript +test('upload complete shows expiry and security note', async ({uploadPage}) => { + await uploadPage.selectTextFile('ui-test.txt', 'ui test') + await uploadPage.waitForShareLink() + + await uploadPage.expectCompleteWithExpiry() + await uploadPage.expectSecurityNote() +}) + +test('download page shows file size and security note', async ({uploadPage, downloadPage}) => { + await uploadPage.selectFile('size-test.bin', createTestContent(1024)) + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + await downloadPage.expectFileSizeDisplayed() + await downloadPage.expectSecurityNote() +}) +``` + +--- + +## 7. Implementation Order + +### Phase 1: Core Infrastructure (Priority: High) +1. Create `test/pages/UploadPage.ts` with Page Object +2. Create `test/pages/DownloadPage.ts` with Page Object +3. Create `test/fixtures.ts` with extended test function +4. Refactor existing test to use Page Objects + +### Phase 2: Core Happy Path (Priority: High) +5. `upload-file-picker` - Basic upload via file picker +6. `download-button-click` - Basic download +7. `download-file-save` - Content verification + +### Phase 3: Validation (Priority: High) +8. `upload-file-too-large` - Size validation +9. `upload-file-empty` - Empty file validation +10. `download-invalid-hash-malformed` - Invalid link handling +11. `download-invalid-hash-structure` - Invalid structure handling + +### Phase 4: Progress and Cancel (Priority: Medium) +12. `upload-progress-display` - Progress visibility +13. `upload-cancel` - Cancel functionality +14. `download-progress-display` - Download progress + +### Phase 5: Link Sharing (Priority: Medium) +15. `upload-share-link-copy` - Copy button functionality +16. `upload-drag-drop` - Drag-drop upload + +### Phase 6: Edge Cases (Priority: Low) +17. 
`edge-small-file` - 1-byte file +18. `edge-special-chars-filename` - Unicode/special characters +19. `edge-binary-content` - Binary content integrity +20. `edge-near-limit` - 100MB file (slow test) +21. `edge-network-error` - Network error handling + +### Phase 7: Error Recovery and Advanced (Priority: Low) +22. `upload-error-retry` - Retry after error +23. `edge-concurrent-downloads` - Concurrent access +24. `edge-redirect-file` - Multi-chunk file with redirect (slow) +25. `edge-ui-info` - Expiry message, security notes + +--- + +## 8. Test Utilities + +### 8.1 Shared Test Setup + +```typescript +// test/page.spec.ts +import {test, expect, createTestContent, createTextContent, uniqueFileName} from './fixtures' + +test.describe('Upload Flow', () => { + test('upload via file picker', async ({uploadPage}) => { + // Tests use uploadPage fixture which navigates automatically + }) +}) + +test.describe('Download Flow', () => { + test('download works', async ({uploadPage, downloadPage}) => { + // Both pages available via fixtures + }) +}) + +test.describe('Edge Cases', () => { + // Edge case tests +}) +``` + +### 8.2 File Structure + +``` +xftp-web/test/ +├── fixtures.ts # Playwright fixtures with page objects +├── pages/ +│ ├── UploadPage.ts # Upload page object +│ └── DownloadPage.ts # Download page object +├── page.spec.ts # All E2E tests +└── globalSetup.ts # Server startup (existing) +``` + +--- + +## Appendix: Test Matrix + +| Test ID | Category | Priority | Estimated Time | Dependencies | +|---------|----------|----------|----------------|--------------| +| upload-file-picker | Upload | High | 30s | - | +| upload-drag-drop | Upload | Medium | 30s | - | +| upload-file-too-large | Upload | High | 5s | - | +| upload-file-empty | Upload | High | 5s | - | +| upload-progress-display | Upload | Medium | 45s | - | +| upload-cancel | Upload | Medium | 30s | - | +| upload-share-link-copy | Upload | Medium | 30s | - | +| upload-error-retry | Upload | Low | 30s | - | +| 
download-invalid-hash-malformed | Download | High | 5s | - | +| download-invalid-hash-structure | Download | High | 5s | - | +| download-button-click | Download | High | 45s | upload | +| download-progress-display | Download | Medium | 60s | upload | +| download-file-save | Download | High | 45s | upload | +| edge-small-file | Edge | Low | 30s | - | +| edge-near-limit | Edge | Low | 300s | - | +| edge-special-chars-filename | Edge | Low | 30s | - | +| edge-network-error | Edge | Low | 45s | - | +| edge-binary-content | Edge | Low | 30s | - | +| edge-concurrent-downloads | Edge | Low | 60s | upload | +| edge-redirect-file | Edge | Low | 120s | - | +| edge-ui-info | Edge | Low | 60s | upload | + +**Total estimated time**: ~18 minutes (excluding 100MB and 5MB tests) diff --git a/rfcs/2026-01-30-send-file-page/2026-02-08-xftp-web-hello-header.md b/rfcs/2026-01-30-send-file-page/2026-02-08-xftp-web-hello-header.md new file mode 100644 index 0000000000..c46f38a46f --- /dev/null +++ b/rfcs/2026-01-30-send-file-page/2026-02-08-xftp-web-hello-header.md @@ -0,0 +1,221 @@ +# XFTP Web Hello Header — Session Re-handshake for Browser Connection Reuse + +## 1. Problem Statement + +Browser HTTP/2 connection pooling reuses TLS connections across page navigations (same origin = same connection pool). The XFTP server maintains per-TLS-connection session state in `TMap SessionId Handshake` keyed by `tlsUniq tls`. When a browser navigates from the upload page to the download page (or reloads), the new page sends a fresh ClientHello on the reused HTTP/2 connection. The server is already in `HandshakeAccepted` state for that connection, so it routes the request to `processRequest`, which expects a 16384-byte command block but receives a 34-byte ClientHello → `ERR BLOCK`. 
+ +**Root cause**: The server cannot distinguish a ClientHello from a command on an already-handshaked connection because both arrive on the same HTTP/2 connection (same `tlsUniq`), and there is no content-level discriminator (ClientHello is unpadded, but the server never gets to parse it — the size check in `processRequest` rejects it first). + +**Browser limitation**: `fetch()` provides zero control over HTTP/2 connection pooling. There is no browser API to force a new connection or detect connection reuse before a request is sent. + +## 2. Solution Summary + +Add an HTTP header `xftp-web-hello` to web ClientHello requests. When the server sees this header on an already-handshaked connection (`HandshakeAccepted` state), it re-runs `processHello` **reusing the existing session keys** (same X25519 key pair from the original handshake). The client then completes the normal handshake flow (sends ClientHandshake, receives ack) and proceeds with commands. + +Key properties: +- Server reuses existing `serverPrivKey` — no new key material generated on re-handshake, so `thAuth` remains consistent with any in-flight commands on concurrent HTTP/2 streams. +- Header is only checked when `sniUsed` is true (web/browser connections). Native XFTP clients are unaffected. +- CORS preflight already allows all headers (`Access-Control-Allow-Headers: *`). +- Web clients always send this header on ClientHello — it's harmless on first connection (`Nothing` state) and enables re-handshake on reused connections (`HandshakeAccepted` state). + +## 3. Detailed Technical Design + +### 3.1 Server change: parameterize `processHello` (`src/Simplex/FileTransfer/Server.hs`) + +The entire server change is parameterizing the existing `processHello` with `Maybe C.PrivateKeyX25519`. Zero new functions. 
+ +#### Current code (lines 165-191): + +```haskell +xftpServerHandshakeV1 chain serverSignKey sessions + XFTPTransportRequest {thParams = thParams0@THandleParams {sessionId}, reqBody = HTTP2Body {bodyHead}, sendResponse, sniUsed, addCORS} = do + s <- atomically $ TM.lookup sessionId sessions + r <- runExceptT $ case s of + Nothing -> processHello + Just (HandshakeSent pk) -> processClientHandshake pk + Just (HandshakeAccepted thParams) -> pure $ Just thParams + either sendError pure r + where + processHello = do + challenge_ <- + if + | B.null bodyHead -> pure Nothing + | sniUsed -> do + XFTPClientHello {webChallenge} <- liftHS $ smpDecode bodyHead + pure webChallenge + | otherwise -> throwE HANDSHAKE + (k, pk) <- atomically . C.generateKeyPair =<< asks random + atomically $ TM.insert sessionId (HandshakeSent pk) sessions + -- ...build and send ServerHandshake... + pure Nothing +``` + +#### After (diff is ~10 lines): + +```haskell +xftpServerHandshakeV1 chain serverSignKey sessions + XFTPTransportRequest {thParams = thParams0@THandleParams {sessionId}, request, reqBody = HTTP2Body {bodyHead}, sendResponse, sniUsed, addCORS} = do +-- ^^^^^^^ bind request + s <- atomically $ TM.lookup sessionId sessions + r <- runExceptT $ case s of + Nothing -> processHello Nothing + Just (HandshakeSent pk) -> processClientHandshake pk + Just (HandshakeAccepted thParams) + | webHello -> processHello (serverPrivKey <$> thAuth thParams) + | otherwise -> pure $ Just thParams + either sendError pure r + where + webHello = sniUsed && any (\(t, _) -> tokenKey t == "xftp-web-hello") (fst $ H.requestHeaders request) + processHello pk_ = do + challenge_ <- + if + | B.null bodyHead -> pure Nothing + | sniUsed -> do + XFTPClientHello {webChallenge} <- liftHS $ smpDecode bodyHead + pure webChallenge + | otherwise -> throwE HANDSHAKE + (k, pk) <- maybe + (atomically . 
C.generateKeyPair =<< asks random) + (\pk -> pure (C.publicKey pk, pk)) + pk_ + atomically $ TM.insert sessionId (HandshakeSent pk) sessions + -- ...rest unchanged... + pure Nothing +``` + +#### What changes: + +1. **Bind `request`** in the `XFTPTransportRequest` pattern (+1 field) +2. **Add `webHello`** binding in `where` clause (1 line) — checks header only when `sniUsed` +3. **Add `pk_` parameter** to `processHello` (change signature) +4. **Replace key generation** with `maybe` that generates fresh keys when `pk_ = Nothing`, or derives public from existing private when `pk_ = Just pk` (3 lines replace 1 line) +5. **Add guard** in `HandshakeAccepted` branch (2 lines replace 1 line) +6. **Call site** `Nothing -> processHello Nothing` (+1 word) +7. **One import** added: `Network.HPACK.Token (tokenKey)` + +#### Imports to add: + +```haskell +import Network.HPACK.Token (tokenKey) +``` + +`OverloadedStrings` (already enabled in Server.hs) provides the `IsString` instance for `CI ByteString`, so `tokenKey t == "xftp-web-hello"` works without importing `Data.CaseInsensitive`. Verified on Hackage: `requestHeaders :: Request -> HeaderTable`, `tokenKey :: Token -> CI ByteString`. + +### 3.2 Re-handshake flow + +When `webHello` is true in `HandshakeAccepted` state: + +1. `processHello (serverPrivKey <$> thAuth thParams)` is called with `Just pk` (existing private key) +2. `(k, pk) <- pure (C.publicKey pk, pk)` — reuses same key pair, no generation +3. `TM.insert sessionId (HandshakeSent pk) sessions` — transitions state back to `HandshakeSent` with same `pk` +4. Server sends `ServerHandshake` response (same format as initial handshake) +5. Client sends `ClientHandshake` on next stream → enters `Just (HandshakeSent pk) -> processClientHandshake pk` → normal flow +6. 
`processClientHandshake` stores `HandshakeAccepted thParams` with same `serverPrivKey = pk` + +### 3.3 Web client change (`xftp-web/src/client.ts`) + +Add optional `headers?` parameter to `Transport.post()`, thread it through `fetch()` and `session.request()`, and pass `{"xftp-web-hello": "1"}` in the ClientHello call in `connectXFTP`. + +### 3.4 What does NOT change + +- **CORS**: Already has `Access-Control-Allow-Headers: *` (Server.hs:106). +- **Native Haskell client**: Uses `[]` headers. No header = existing behavior. +- **Protocol wire format**: ClientHello, ServerHandshake, ClientHandshake, commands — all unchanged. +- **`processRequest`**, **`processClientHandshake`**, **`sendError`**, **`encodeXftp`** — unchanged. + +### 3.5 Haskell test (`tests/XFTPServerTests.hs`) + +Add `testWebReHandshake` next to the existing `testWebHandshake` (line 504). It reuses the same SNI + HTTP/2 setup pattern, performs a full handshake, then sends a second ClientHello with the `xftp-web-hello` header on the same connection and verifies the server responds with a valid ServerHandshake (same `sessionId`), then completes the second handshake. + +```haskell +-- Register in xftpServerTests (after line 86): +it "should re-handshake on same connection with xftp-web-hello header" testWebReHandshake + +-- Test (after testWebHandshake): +testWebReHandshake :: Expectation +testWebReHandshake = + withXFTPServerSNI $ \_ -> do + Fingerprint fp <- loadFileFingerprint "tests/fixtures/ca.crt" + let keyHash = C.KeyHash fp + cfg = defaultTransportClientConfig {clientALPN = Just ["h2"], useSNI = True} + runTLSTransportClient defaultSupportedParamsHTTPS Nothing cfg Nothing "localhost" xftpTestPort (Just keyHash) $ \(tls :: TLS 'TClient) -> do + let h2cfg = HC.defaultHTTP2ClientConfig {HC.bodyHeadSize = 65536} + h2 <- either (error . 
show) pure =<< HC.attachHTTP2Client h2cfg (THDomainName "localhost") xftpTestPort mempty 65536 tls + g <- C.newRandom + -- First handshake (same as testWebHandshake) + challenge1 <- atomically $ C.randomBytes 32 g + let helloReq1 = H2.requestBuilder "POST" "/" [] $ byteString (smpEncode (XFTPClientHello {webChallenge = Just challenge1})) + resp1 <- either (error . show) pure =<< HC.sendRequest h2 helloReq1 (Just 5000000) + shs1 <- either error pure $ smpDecode =<< C.unPad (bodyHead (HC.respBody resp1)) + let XFTPServerHandshake {sessionId = sid1} = shs1 + clientHsPadded <- either (error . show) pure $ C.pad (smpEncode (XFTPClientHandshake {xftpVersion = VersionXFTP 1, keyHash})) xftpBlockSize + resp1b <- either (error . show) pure =<< HC.sendRequest h2 (H2.requestBuilder "POST" "/" [] $ byteString clientHsPadded) (Just 5000000) + B.length (bodyHead (HC.respBody resp1b)) `shouldBe` 0 + -- Second handshake on same connection with xftp-web-hello header + challenge2 <- atomically $ C.randomBytes 32 g + let helloReq2 = H2.requestBuilder "POST" "/" [("xftp-web-hello", "1")] $ byteString (smpEncode (XFTPClientHello {webChallenge = Just challenge2})) + resp2 <- either (error . show) pure =<< HC.sendRequest h2 helloReq2 (Just 5000000) + shs2 <- either error pure $ smpDecode =<< C.unPad (bodyHead (HC.respBody resp2)) + let XFTPServerHandshake {sessionId = sid2} = shs2 + sid2 `shouldBe` sid1 -- same TLS connection → same sessionId + -- Complete second handshake + resp2b <- either (error . show) pure =<< HC.sendRequest h2 (H2.requestBuilder "POST" "/" [] $ byteString clientHsPadded) (Just 5000000) + B.length (bodyHead (HC.respBody resp2b)) `shouldBe` 0 +``` + +The only difference from `testWebHandshake`: the second `helloReq2` passes `[("xftp-web-hello", "1")]` instead of `[]`. The test verifies: +1. Server responds with `ServerHandshake` (not `ERR BLOCK`) +2. Same `sessionId` (same TLS connection) +3. Second `ClientHandshake` completes with empty ACK + +## 4. 
Implementation Plan + +### Step 1: Server — parameterize `processHello` + +Apply the diff from Section 3.1 to `src/Simplex/FileTransfer/Server.hs`. + +### Step 2: Test — add `testWebReHandshake` + +Add the test from Section 3.5 to `tests/XFTPServerTests.hs`. + +### Step 3: Client — add `xftp-web-hello` header + +Add optional `headers?` to `Transport.post()`, pass `{"xftp-web-hello": "1"}` on ClientHello in `connectXFTP`. + +### Step 4: Test + +Run Haskell tests (`cabal test`) and E2E Playwright tests (`npx playwright test` in `xftp-web/`). + +## 5. Race Condition Analysis + +### Single-tab navigation (the common case) + +1. Upload page completes, all fetch() requests finish +2. Browser navigates to download page (or reloads) +3. All upload-page fetches are aborted on page unload +4. Download page sends ClientHello with `xftp-web-hello` header +5. Server is in `HandshakeAccepted` → `processHello (Just pk)` → `HandshakeSent pk` (same key) +6. No concurrent streams → no race + +**Safe.** + +### Multi-tab (edge case) + +Tab A (upload) and Tab B (download) share the same HTTP/2 connection. + +1. Tab A has active command streams (e.g., FPUT upload in progress) +2. Tab B sends ClientHello with header +3. Server reads `HandshakeAccepted` atomically for both streams +4. Tab A's stream already has its `thParams` snapshot → proceeds with `processRequest` using old `thParams` +5. Tab B's stream triggers `processHello (Just pk)` → stores `HandshakeSent pk` (same pk!) +6. Tab A's in-progress FPUT continues with snapshot `thParams` → completes normally (same `serverPrivKey`) +7. Tab A's NEXT command reads `HandshakeSent` from TMap → enters `processClientHandshake` → fails (command body ≠ ClientHandshake format) → HANDSHAKE error + +**Tab A's in-flight commands succeed. Tab A's subsequent commands fail with HANDSHAKE error.** This is the inherent multi-tab problem — unavoidable with per-connection session state and HTTP/2 connection sharing. 
The failure is clean (HANDSHAKE error, not silent corruption). + +## 6. Security Considerations + +- **No new key material**: Re-handshake reuses existing `serverPrivKey`. No opportunity for key confusion or downgrade. +- **Identity re-verification**: Server re-signs the web challenge with its long-term signing key. Client verifies identity again. +- **Header cannot escalate privileges**: The header only triggers re-handshake (which the server was already capable of doing on first connection). It does not bypass any authentication. +- **Timing**: Re-handshake takes the same code path as initial handshake, so timing side-channels are unchanged. diff --git a/rfcs/2026-01-30-send-file-page/2026-02-11-xftp-web-error-handling.md b/rfcs/2026-01-30-send-file-page/2026-02-11-xftp-web-error-handling.md new file mode 100644 index 0000000000..2802f16a59 --- /dev/null +++ b/rfcs/2026-01-30-send-file-page/2026-02-11-xftp-web-error-handling.md @@ -0,0 +1,948 @@ +# XFTP Web Error Handling and Connection Resilience + +## 1. Problem Statement + +The XFTP web client is fundamentally fragile: any transient error (browser opening a new HTTP/2 connection, network hiccup, server restart) causes an unrecoverable failure with a cryptic error message. There is no retry logic, no fetch timeout, no error categorization, and the upload uses a single server instead of distributing chunks across preset servers. This makes the app frustrating — it works most of the time but fails unpredictably, which is worse than being completely broken. + +### Confirmed root cause (from diagnostic logs) + +When the browser opens a new HTTP/2 connection mid-operation, the new connection has a different TLS SessionId with no handshake state in the server's `TMap SessionId Handshake`. The server's `Nothing` branch in `xftpServerHandshakeV1` (Server.hs:169) unconditionally calls `processHello`, which tries to decode the command body as `XFTPClientHello`, fails, and sends a raw padded "HANDSHAKE" error string. 
The client cannot parse this as a proper transmission (first byte 'H' = 72 is read as batch count), producing `"expected batch count 1, got 72"`. + +Server log confirming the SessionId change: +``` +DEBUG dispatch: Accepted+command sessId="ZSo1GGETgIvjbB7CWHbvGPpbMjx_b2IlC1eTI6aKfqc=" +...20 successful commands... +DEBUG dispatch: Nothing sessId="mJC7Sck9xxW5UsXoPGoUWduuHghSVgf6CnD6ZC6SBhU=" webHello=False +``` + +### Why re-handshake is required (cannot be made optional) + +1. **SessionId is baked into signed command data.** `encodeAuthTransmission` signs `concat(encode(sessionId), tInner)` with Ed25519. Server's `tDecodeServer` (Protocol.hs:2242) verifies `sessId == sessionId`. New connection = different sessionId = signature mismatch. +2. **Server generates per-session DH keys.** `processHello` creates fresh X25519 keypair stored in `HandshakeSent`. For SMP browser clients (future), `verifyCmdAuth` (Protocol.hs:1322) requires the matching `serverPrivKey` from `thAuth`. +3. **This applies to both XFTP and future SMP browser clients** — the session management approach is the same. + +### Why multiple preset servers cannot work + +Upload (`agent.ts:105-157`) takes a single `server: XFTPServer` parameter and uploads ALL chunks to it. `web/upload.ts:133` calls `pickRandomServer(servers)` which selects ONE random server from all presets. The multi-server preset configuration is pointless — only one server is ever used per upload. The design intent (RFC section 11.6: "upload in parallel to 8 randomly selected servers") is not implemented. This must be fixed in Phase 2 (section 3.7). + +## 2. Solution Summary + +### Phase 1: Error handling and connection resilience + +1. **Server: strict dispatch for allowed protocol combinations** — reject all invalid combinations +2. **Client: automatic retry with re-handshake** on SESSION/HANDSHAKE errors +3. **Client: fetch timeout** with configurable duration +4. 
**UI: error categorization and retry** — auto-retry temporary, human-readable permanent +5. **Client: connection state with Promise-based lock and per-server queues** — `ServerConnection` with `client: Promise` + `queue: Promise` +6. **Client: fix cache key** — include keyHash + +### Phase 2: Multi-server upload (after Phase 1) + +7. **Multi-server upload with server selection and failover** — distribute chunks across servers, retry FNEW on different server if one fails + +## 3. Detailed Technical Design + +### 3.1 Server: strict dispatch for allowed protocol combinations + +**Principle:** Everything not explicitly done by existing Haskell/TS clients is prohibited. It is better to fail on impossible combinations than to be permissive — permissiveness complicates debugging and creates attack vectors via unexpected behaviors. + +**Allowed behaviors by client type:** + +| Client | SNI | webHello header | Hello body | When | +|--------|-----|----------------|------------|------| +| Haskell | No | No | Empty | New connection only | +| Web | Yes | Yes | Non-empty (XFTPClientHello) | New OR existing connection | + +**Minimal surgical change.** The existing dispatch (Server.hs:169-189) already correctly handles `HandshakeSent` and `HandshakeAccepted` — their guards cover all valid and invalid combinations. The ONLY missing case is `Nothing` + web client sending a command on a stale session. + +`processHello` (Server.hs:194-217) already internally routes: `B.null bodyHead` → Haskell hello, `sniUsed` → web hello decode, else → HANDSHAKE. For stale web sessions, it currently tries to decode a command body as `XFTPClientHello`, fails, and throws HANDSHAKE. The fix: detect this case BEFORE calling processHello and throw SESSION instead, so the client knows to re-handshake (not that its hello was malformed). 
+ +**Change: add one guard to `Nothing` branch, remove debug logging.** + +```haskell +-- Before (1 line): +Nothing -> processHello Nothing + +-- After (3 lines): +Nothing + | sniUsed && not webHello -> throwE SESSION -- web command on stale session + | otherwise -> processHello Nothing -- normal hello (web or Haskell) +``` + +`throwE SESSION` is caught by `either sendError pure r` (line 190). `sendError` pads `smpEncode SESSION` = `"SESSION"` (Transport.hs:298) to `xftpBlockSize`. The client's padded error detection (section 3.2) catches this as a retriable error and triggers re-handshake. SESSION is a valid `XFTPErrorType` constructor (Transport.hs:225) — no new helpers needed. + +**All other branches remain unchanged.** `HandshakeSent` guards (`webHello` → processHello, `otherwise` → processClientHandshake with body size check inside) are correct. `HandshakeAccepted` guards (`webHello`, `webHandshake`, `otherwise` → command) are correct. + +### 3.2 Client: automatic retry with re-handshake + +**Location:** `sendXFTPCommand` in `client.ts` + +**Design:** Retry loop inside `sendXFTPCommand`. Maximum 3 attempts. On retriable error, close old client, re-handshake, retry. + +**Error classification:** + +| Error | Type | Retriable? | Human-readable message | +|-------|------|-----------|----------------------| +| Padded "HANDSHAKE" | Temporary | Yes (auto) | "Connection interrupted, reconnecting..." | +| Padded "SESSION" | Temporary | Yes (auto) | "Session expired, reconnecting..." | +| `FRErr SESSION` | Temporary | Yes (auto) | "Session expired, reconnecting..." | +| `FRErr HANDSHAKE` | Temporary | Yes (auto) | "Connection interrupted, reconnecting..." | +| `fetch()` TypeError | Temporary | Yes (auto) | "Network error, retrying..." | +| AbortError (timeout) | Temporary | Yes (auto) | "Server timeout, retrying..." 
| +| `FRErr AUTH` | Permanent | No | "File is invalid, expired, or has been removed" | +| `FRErr NO_FILE` | Permanent | No | "File not found — it may have expired" | +| `FRErr SIZE` | Permanent | No | "File size exceeds server limit" | +| `FRErr QUOTA` | Permanent | No | "Server storage quota exceeded" | +| `FRErr BLOCKED` | Permanent | No | "File has been blocked by server" | +| `FRErr DIGEST` | Permanent | No | "File integrity check failed" | +| `FRErr INTERNAL` | Permanent | No | "Server internal error" | +| `CMD *` | Permanent | No | "Protocol error" | + +**Retry behavior:** +- Auto-retry up to 3 times for temporary errors, transparent to user +- After 3 failures: show human-readable error with diagnosis, offer manual retry button +- Permanent errors: show human-readable error immediately, NO manual retry button (user can reload page) + +**Implementation:** + +```typescript +async function sendXFTPCommand( + agent: XFTPClientAgent, + server: XFTPServer, + privateKey: Uint8Array, + entityId: Uint8Array, + cmdBytes: Uint8Array, + chunkData?: Uint8Array, + maxRetries: number = 3 +): Promise<{response: FileResponse, body: Uint8Array}> { + let clientP = getXFTPServerClient(agent, server) + let client = await clientP + for (let attempt = 1; attempt <= maxRetries; attempt++) { + try { + return await sendXFTPCommandOnce(client, privateKey, entityId, cmdBytes, chunkData) + } catch (e) { + if (!isRetriable(e)) { + // Permanent error (AUTH, NO_FILE, etc.) — connection is fine, don't touch it + throw categorizeError(e) + } + if (attempt === maxRetries) { + // Retriable error exhausted — connection is bad, remove stale promise + removeStaleConnection(agent, server, clientP) + throw categorizeError(e) + } + clientP = reconnectClient(agent, server) + client = await clientP + } + } + throw new Error("unreachable") +} +``` + +**`sendXFTPCommandOnce`** — renamed from current `sendXFTPCommand`. Two changes: + +1. 
**Padded error detection** (before `decodeTransmission`): + +```typescript +// After getting respBlock, before decodeTransmission: +const raw = blockUnpad(respBlock) +if (raw.length < 20) { + const text = new TextDecoder().decode(raw) + if (/^[A-Z_]+$/.test(text)) { + throw new XFTPRetriableError(text) // "HANDSHAKE" or "SESSION" + } +} +``` + +2. **FRErr classification** (replaces current unconditional throw): + +```typescript +// After decodeResponse, instead of throw new Error("Server error: " + err.type): +if (response.type === "FRErr") { + const err = response.err + if (err.type === "SESSION" || err.type === "HANDSHAKE") { + throw new XFTPRetriableError(err.type) + } + throw new XFTPPermanentError(err.type, humanReadableMessage(err)) +} +``` + +### 3.3 Client: fetch timeout + +**Location:** `createBrowserTransport` and `createNodeTransport` in `client.ts` + +**Design:** `AbortController` with configurable timeout on every `fetch()`. + +```typescript +interface TransportConfig { + timeoutMs: number // default 30000, lower for tests +} + +function createBrowserTransport(baseUrl: string, config: TransportConfig): Transport { + return { + async post(body: Uint8Array, headers?: Record<string, string>): Promise<Uint8Array> { + const controller = new AbortController() + const timer = setTimeout(() => controller.abort(), config.timeoutMs) + try { + const resp = await fetch(effectiveUrl, { + method: "POST", headers, body, + signal: controller.signal + }) + if (!resp.ok) throw new Error(`Server request failed: ${resp.status}`) + return new Uint8Array(await resp.arrayBuffer()) + } finally { + clearTimeout(timer) + } + }, + close() {} + } +} +``` + +For Node.js transport, use `setTimeout` on the HTTP/2 request stream. + +Default: 30s for production, 5s for tests. Threaded through `connectXFTP` → `createTransport`. + +### 3.4 UI: error categorization and retry + +**Behavior (Option D):** + +- **Temporary errors:** Auto-retry loop (3 attempts). 
After 3 failures, show human-readable diagnosis with manual retry button. Diagnosis examples: "Server timeout — the server may be temporarily unavailable", "Connection interrupted — your network may be unstable". +- **Permanent errors:** Show human-readable error immediately, NO retry button. User can reload page if they want to retry. Examples: "File is invalid, expired, or has been removed" (AUTH), "File not found" (NO_FILE). + +**Current UI retry buttons:** +- `upload.ts:73-75` — retry calls `startUpload(pendingFile)` from scratch +- `download.ts:60` — retry calls `startDownload()` from scratch + +**Improvement:** Track uploaded/downloaded chunk indices. On manual retry, skip completed chunks: + +```typescript +// Upload: track which chunks completed +const completedChunks: Set<number> = new Set() +for (let i = 0; i < specs.length; i++) { + if (completedChunks.has(i)) continue + // ... create + upload chunk + completedChunks.add(i) +} + +// Download: already naturally resumable — each chunk is independent +``` + +### 3.5 Client: connection state with Promise-based lock and per-server queues + +**Design:** Each server gets a `ServerConnection` record containing a `Promise<XFTPClient>` (the connection lock) and a `Promise<void>` (the sequential command queue). The `XFTPClientAgent` maps server keys to these records. + +The promise IS the lock — every consumer awaits the same promise. When reconnect is needed, the promise is replaced atomically. 
+ +```typescript +interface ServerConnection { + client: Promise<XFTPClient> // resolves to connected client; replaced on reconnect + queue: Promise<void> // tail of sequential command chain +} + +interface XFTPClientAgent { + connections: Map<string, ServerConnection> +} + +function newXFTPAgent(): XFTPClientAgent { + return {connections: new Map()} +} +``` + +**Connection lifecycle — `getXFTPServerClient` and `reconnectClient`:** + +```typescript +function getXFTPServerClient(agent: XFTPClientAgent, server: XFTPServer): Promise<XFTPClient> { + const key = formatXFTPServer(server) + let conn = agent.connections.get(key) + if (!conn) { + const p = connectXFTP(server) + conn = {client: p, queue: Promise.resolve()} + agent.connections.set(key, conn) + // On connection failure, remove from map so next call retries + p.catch(() => { + const cur = agent.connections.get(key) + if (cur && cur.client === p) agent.connections.delete(key) + }) + } + return conn.client +} + +function reconnectClient(agent: XFTPClientAgent, server: XFTPServer): Promise<XFTPClient> { + const key = formatXFTPServer(server) + const old = agent.connections.get(key) + // Close old client (fire-and-forget) + old?.client.then(c => c.transport.close(), () => {}) + // Replace with new connection promise — all concurrent callers will await this + // Queue survives reconnect — pending operations stay ordered + const p = connectXFTP(server) + const conn: ServerConnection = {client: p, queue: old?.queue ?? 
Promise.resolve()} + agent.connections.set(key, conn) + p.catch(() => { + const cur = agent.connections.get(key) + if (cur && cur.client === p) agent.connections.delete(key) + }) + return p +} + +function closeXFTPServerClient(agent: XFTPClientAgent, server: XFTPServer): void { + const key = formatXFTPServer(server) + const conn = agent.connections.get(key) + if (conn) { + agent.connections.delete(key) + conn.client.then(c => c.transport.close(), () => {}) + } +} + +function closeXFTPAgent(agent: XFTPClientAgent): void { + for (const conn of agent.connections.values()) { + conn.client.then(c => c.transport.close(), () => {}) + } + agent.connections.clear() +} +``` + +**Precise semantics:** + +1. `getXFTPServerClient(agent, server)` — returns existing `conn.client` promise if present, otherwise creates a new `ServerConnection` with fresh connection and empty queue +2. When error detected, first caller calls `reconnectClient` which replaces `conn.client` with a new connection promise. The queue is preserved across reconnect. +3. All concurrent callers awaiting the OLD promise receive the error +4. They then call `getXFTPServerClient` which returns the NEW promise +5. If reconnection fails, auto-cleanup (`p.catch(() => delete)`) removes the entry so the next caller starts fresh + +**Stale error cleanup rule:** When a caller exhausts retries for a retriable error, it removes the failed entry from the map (only if no concurrent caller has already replaced it via `reconnectClient`). This prevents the next caller from receiving a stale rejected promise. Permanent errors (AUTH, NO_FILE, etc.) do NOT remove the connection — the transport is fine, only the command failed. 
+ +```typescript +function removeStaleConnection( + agent: XFTPClientAgent, server: XFTPServer, failedP: Promise<XFTPClient> +): void { + const key = formatXFTPServer(server) + const conn = agent.connections.get(key) + // Only remove if current promise is the one that failed — not if already replaced by reconnect + if (conn && conn.client === failedP) { + agent.connections.delete(key) + failedP.then(c => c.transport.close(), () => {}) + } +} +``` + +**Per-server sequential queue:** `queue` is a `Promise<void>` — the tail of the sequential operation chain. Each new operation `.then()`s onto it. It's `void` because callers hold their own typed promises; the queue only tracks completion order: + +```typescript +async function enqueueCommand<T>( + agent: XFTPClientAgent, + server: XFTPServer, + fn: () => Promise<T> // no client param — fn uses command wrappers (agent+server) +): Promise<T> { + const key = formatXFTPServer(server) + // Ensure connection exists (with auto-cleanup on failure) + await getXFTPServerClient(agent, server) + const conn = agent.connections.get(key)! // guaranteed to exist after getXFTPServerClient + // Chain onto the queue — fn runs after previous operation completes + let resolve_: (v: T) => void, reject_: (e: any) => void + const result = new Promise<T>((res, rej) => { resolve_ = res; reject_ = rej }) + conn.queue = conn.queue.then( + () => fn().then(resolve_!, reject_!), + () => fn().then(resolve_!, reject_!) + ).then(() => {}, () => {}) // swallow errors in the chain + return result +} +``` + +Commands to the same server execute one at a time via the queue. Commands to different servers execute concurrently because each has its own queue. `enqueueCommand` provides sequencing; `sendXFTPCommand` (called inside `fn` via command wrappers) provides retry. They compose as: `enqueueCommand` sequences calls to wrappers that internally use `sendXFTPCommand`. + +**Download change:** Group chunks by server, process each server's chunks sequentially, servers in parallel. 
Uses `for` loop for per-server sequencing (same pattern as Stage 2 upload). `enqueueCommand` is available for cases where different callers target the same server. + +```typescript +const byServer = new Map() +for (const chunk of resolvedFd.chunks) { + const srv = chunk.replicas[0]?.server ?? "" + if (!byServer.has(srv)) byServer.set(srv, []) + byServer.get(srv)!.push(chunk) +} +await Promise.all([...byServer.entries()].map(async ([srv, chunks]) => { + const server = parseXFTPServer(srv) + for (const chunk of chunks) { + const seed = decodePrivKeyEd25519(chunk.replicas[0].replicaKey) + const kp = ed25519KeyPairFromSeed(seed) + const raw = await downloadXFTPChunkRaw(agent, server, kp.privateKey, chunk.replicas[0].replicaId) + await onRawChunk({chunkNo: chunk.chunkNo, dhSecret: raw.dhSecret, nonce: raw.nonce, body: raw.body, digest: chunk.digest}) + downloaded += chunk.chunkSize + onProgress?.(downloaded, resolvedFd.size) + } +})) +``` + +### 3.6 Fix cache key + +**Bug:** `getXFTPServerClient` (client.ts:110) uses `"https://" + server.host + ":" + server.port` as cache key, ignoring `keyHash`. Two servers with same host:port but different keyHash share a cached connection, bypassing identity verification. + +**Fix:** Use `formatXFTPServer(server)` as cache key (includes keyHash). Already available in `protocol/address.ts:52-54`. + +```typescript +// Before: +const key = "https://" + server.host + ":" + server.port + +// After: +const key = formatXFTPServer(server) +``` + +Note: With the redesign in 3.5, the cache key fix is inherent — the `connections` Map uses `formatXFTPServer(server)` everywhere. + +### 3.7 Phase 2: Multi-server upload with server selection and failover + +**Problem:** Current upload (`agent.ts:105-157`) takes a single `server: XFTPServer` and uploads ALL chunks to it. The 12 preset servers (6 SimpleX + 6 Flux) are pointless — only one is ever used. + +**Design goal:** Distribute chunks across servers. 
Retry FNEW on a different server if one fails. Once working servers are found, prefer them (heuristic: server unlikely to fail mid-process, more likely to be broken initially due to maintenance/downtime). + +**Reference implementation:** Haskell `Agent.hs:457-486` (`createChunk` / `createWithNextSrv`) + `Client.hs:2335-2385` (`getNextServer_` / `withNextSrv`). + +#### Haskell algorithm summary + +Two-stage architecture: + +1. **Allocate stage (serial per file in Haskell):** For each chunk, call FNEW on a randomly-selected server. If FNEW fails, pick a different server and retry. Track tried hosts to avoid retrying the same server. After all chunks are assigned to servers, spawn one upload worker per server. + +2. **Upload stage (parallel per server):** Each server worker uploads its assigned chunks sequentially (FPUT). On FPUT failure, retry on the same server with backoff (because the chunk replica already exists on that server). No server failover for FPUT. + +Server selection constraints (hierarchical, `getNextServer_` Client.hs:2335-2350): +1. Prefer servers from unused operators (operator diversity) +2. Prefer servers with unused hosts (host diversity) +3. Random pick from the most-constrained candidate set +4. If all exhausted, reset tried set and start over + +#### Web client adaptation + +The web client doesn't have operators or a database. Simplified algorithm with two stages: + +**Stage 1 — Allocate:** Create chunk records on servers (FNEW). Unlike Haskell which is serial here, web FNEW runs concurrently within a concurrency limit. FNEW is a small command — concurrent FNEW on the same connection is not a problem, and concurrent FNEW across servers improves upload startup time. + +**Stage 2 — Upload:** Upload chunk data (FPUT). Parallel across servers, sequential per server (reuses per-server queues from 3.5). FPUT retries on the same server with backoff — no server rotation because the chunk replica already exists on that server. 
Stage 2 reads chunk data by offset (via `readChunk`), so `SentChunk` must be extended with `chunkOffset: number` (from ChunkSpec). + +```typescript +interface UploadState { + untriedServers: XFTPServer[] // servers not yet attempted — initially all servers + workingServers: XFTPServer[] // servers that succeeded FNEW +} + +const MAX_FNEW_ATTEMPTS = 5 // per chunk: try up to 5 different servers + +async function uploadFile( + agent: XFTPClientAgent, + allServers: XFTPServer[], + encrypted: EncryptedFileMetadata, + options?: UploadOptions +): Promise { + const state: UploadState = {untriedServers: [...allServers], workingServers: []} + const specs = prepareChunkSpecs(encrypted.chunkSizes) + const concurrency = options?.concurrency ?? 4 + + // Stage 1: Allocate — concurrent FNEW within concurrency limit + const sentChunks: SentChunk[] = new Array(specs.length) + const queue = specs.map((spec, i) => ({spec, chunkNo: i + 1, index: i})) + let idx = 0 + async function allocateWorker() { + while (idx < queue.length) { + const item = queue[idx++] + const {server, chunk} = await createChunkWithFailover( + agent, allServers, state, concurrency, item.spec, item.chunkNo + ) + sentChunks[item.index] = chunk + } + } + const allocateWorkers = Array.from( + {length: Math.min(concurrency, queue.length)}, + () => allocateWorker() + ) + await Promise.all(allocateWorkers) + + // Stage 2: Upload — parallel across servers, sequential per server + // readChunk reads from the encrypted file by offset (same as Phase 1 uploadFile) + let uploaded = 0 + const total = encrypted.chunkSizes.reduce((a, b) => a + b, 0) + const byServer = groupBy(sentChunks, c => formatXFTPServer(c.server)) + await Promise.all([...byServer.entries()].map(async ([srvKey, chunks]) => { + for (const chunk of chunks) { + const chunkData = await readChunk(chunk.chunkOffset, chunk.chunkSize) + await uploadXFTPChunk(agent, chunk.server, chunk.senderKey, chunk.senderId, chunkData) + uploaded += chunk.chunkSize + 
options?.onProgress?.(uploaded, total) + } + })) + + return buildDescriptions(encrypted, sentChunks) +} +``` + +**`createChunkWithFailover`** — server selection with per-chunk retry limit: + +```typescript +async function createChunkWithFailover( + agent: XFTPClientAgent, + allServers: XFTPServer[], + state: UploadState, + concurrency: number, + spec: ChunkSpec, + chunkNo: number +): Promise<{server: XFTPServer, chunk: SentChunk}> { + const maxAttempts = Math.min(allServers.length, MAX_FNEW_ATTEMPTS) + + for (let attempt = 0; attempt < maxAttempts; attempt++) { + const server = pickServer(allServers, state, concurrency) + try { + const chunk = await createAndPrepareChunk(agent, server, spec, chunkNo) + // Success — add to working set (if not already there) + if (!state.workingServers.some(s => formatXFTPServer(s) === formatXFTPServer(server))) { + state.workingServers.push(server) + } + return {server, chunk} + } catch (e) { + // Remove from working if it was there + state.workingServers = state.workingServers.filter( + s => formatXFTPServer(s) !== formatXFTPServer(server) + ) + if (attempt === maxAttempts - 1) throw e + } + } + throw new Error("unreachable") +} +``` + +**`pickServer`** — two-list selection: + +```typescript +function pickServer( + allServers: XFTPServer[], + state: UploadState, + concurrency: number +): XFTPServer { + // Once enough working servers found, only use those + if (state.workingServers.length >= concurrency) { + return randomPick(state.workingServers) + } + // Still exploring — pick from untried + if (state.untriedServers.length > 0) { + const idx = Math.floor(Math.random() * state.untriedServers.length) + return state.untriedServers.splice(idx, 1)[0] // remove from untried + } + // All tried — reset untried to non-working servers and retry + state.untriedServers = allServers.filter( + s => !state.workingServers.some(w => formatXFTPServer(w) === formatXFTPServer(s)) + ) + if (state.untriedServers.length > 0) { + const idx = 
Math.floor(Math.random() * state.untriedServers.length) + return state.untriedServers.splice(idx, 1)[0] + } + // Every server is working — pick any working + return randomPick(state.workingServers) +} +``` + +**Algorithm:** Two lists — `untriedServers` (initially all) and `workingServers` (initially empty). When `workingServers.length < concurrency`, pick from `untriedServers` (removing on pick). On FNEW success, add to `workingServers`. On FNEW failure, server is already removed from `untriedServers`; remove from `workingServers` if present. When `untriedServers` is empty, reset it to all non-working servers. Once `workingServers.length >= concurrency`, pick randomly only from `workingServers`. + +**Termination condition:** Each chunk tries at most `min(serverCount, 5)` different servers. If all attempts fail, the chunk fails and the upload fails with the last error. Rationale: if 5 out of 12 servers are down, something systemic is wrong and continuing is unlikely to help. Timeouts count as failures — the timed-out server is removed from working and a different server is picked next. + +**Key differences from Haskell:** +- No operator concept — just host diversity via random selection +- No database — state tracked in-memory during upload +- FNEW runs concurrently (Haskell is serial) — improves startup time +- FNEW is cheap and retried with server rotation; FPUT retries on same server + +**Download changes (also Phase 2):** Default concurrency should be 4 (matching Haskell). Download already groups by server in 3.5. If `replicas[0]` download fails, try `replicas[1]`, `replicas[2]`, etc. (fallback across replicas). + +## 4. Implementation Plan + +### Phase 1: Error handling and connection resilience + +Steps are ordered by dependency and should be implemented one by one. 
+
+#### Step 1: Fix cache key (3.6)
+- Change cache key to `formatXFTPServer(server)` in `getXFTPServerClient` and `closeXFTPServerClient`
+- Add import for `formatXFTPServer`
+- Run existing tests to verify no regression
+
+#### Step 2: Typed error detection for padded server errors (3.2 client-side)
+- Add `XFTPRetriableError` class
+- In `sendXFTPCommand`, detect padded error strings before `decodeTransmission`
+- Classify `FRErr` responses as retriable or permanent with human-readable messages
+- Run existing tests
+
+#### Step 3: Fetch timeout (3.3)
+- Add `TransportConfig` with `timeoutMs`
+- Thread config through `createTransport` → `connectXFTP` → command wrappers
+- Add `AbortController` to browser `fetch()` and `setTimeout` to Node.js HTTP/2
+- Add vitest test: timeout triggers after configured duration
+- Run existing tests
+
+#### Step 4: Connection state with Promise-based lock and per-server queues (3.5)
+- Introduce `ServerConnection` record: `{client: Promise<XFTPClient>, queue: Promise<void>}`
+- Replace `XFTPClientAgent.clients: Map` with `connections: Map<string, ServerConnection>`
+- Implement `reconnectClient` — replaces `conn.client` with new promise, preserves queue
+- Implement `enqueueCommand` — chains operation onto server's queue
+- Implement `removeStaleConnection` — removes entry only if current promise is the failed one
+- Auto-cleanup: `p.catch(() => delete)` removes failed connections so next caller starts fresh
+- Adapt `closeXFTPServerClient` and `closeXFTPAgent`
+- Add vitest tests:
+  - Concurrent calls to same server produce single connection
+  - Failed promise is cleaned up, next caller gets fresh connection
+
+#### Step 5: Automatic retry in sendXFTPCommand (3.2)
+- Add retry loop with reconnect
+- Change `sendXFTPCommand` signature: takes `agent + server` instead of `client`; export it (needed by tests and by agent.ts callers)
+- Rename current `sendXFTPCommand` → `sendXFTPCommandOnce` (private); add padded error detection + FRErr classification (throw 
`XFTPRetriableError` for SESSION/HANDSHAKE, `XFTPPermanentError` for AUTH/NO_FILE/etc.) +- All command wrappers (`createXFTPChunk`, `uploadXFTPChunk`, etc.) pass agent + server +- Update agent.ts call sites: remove `getXFTPServerClient` calls before command wrappers (in `uploadFile`, `uploadRedirectDescription`, `downloadFileRaw`, `resolveRedirect`, `deleteFile`) +- Max 3 retries for retriable errors, immediate throw for permanent +- On retriable error: call `reconnectClient` and retry. On retriable error exhausted: call `removeStaleConnection` to clean up. On permanent error: throw immediately without touching connection +- Add vitest tests: + - Server started with delay → first attempt fails, retry succeeds + - 3 retries exhausted → error propagates with human-readable message + - Non-retriable error (AUTH) → no retry, immediate failure + +#### Step 6: Server-side stale session handling (3.1) +- Add one guard to `Nothing` branch: `sniUsed && not webHello -> throwE SESSION` +- Remove debug `hPutStrLn stderr` lines (all 6 occurrences in dispatch) +- All other branches unchanged +- Run Haskell tests + Playwright tests + +#### Step 7: Download with per-server grouping +- Modify `downloadFileRaw` to group chunks by server, sequential within each server (`for` loop), parallel across servers (`Promise.all`) +- Add vitest test: concurrent downloads from different servers run in parallel + +#### Step 8: UI error improvements (3.4) +- Temporary errors: auto-retry loop (3 attempts), then show human-readable diagnosis + manual retry button +- Permanent errors: show human-readable error, NO retry button +- Manual retry resumes from last successful chunk (not full restart) + +#### Step 9: Remove debug logging +- Remove all `console.log('[DEBUG ...]')` and `hPutStrLn stderr "DEBUG ..."` lines +- Keep `console.error('[XFTP] ...')` error logging + +### Phase 2: Multi-server upload + +Implement after Phase 1 is complete and tested. 
+
+#### Step 10: Multi-server upload with failover (3.7)
+- Extend `SentChunk` with `chunkOffset: number` (from ChunkSpec) and `server: XFTPServer` (assigned during allocate) — Stage 2 reads data by offset and groups chunks by server
+- Change `uploadFile` signature: takes `allServers: XFTPServer[]` instead of single `server`
+- Implement `UploadState` with `untriedServers` and `workingServers`
+- Implement `createChunkWithFailover` and `pickServer`: two-list selection (untried → working once enough found), max `min(serverCount, 5)` attempts per chunk
+- Allocate stage: concurrent FNEW within concurrency limit (default 4)
+- Upload stage: parallel across servers, sequential per server (reuse per-server queues from Step 4)
+- Update `web/upload.ts`: pass `getServers()` instead of `pickRandomServer(getServers())`
+- Update description building: each chunk references its actual server
+- Add vitest tests:
+  - File split across N servers (verify different servers in description)
+  - One server down → chunks redistributed to others
+  - All servers down → error after exhausting 5 attempts per chunk
+
+#### Step 11: Download concurrency and replica fallback
+- Change default download concurrency from 1 to 4
+- If `replicas[0]` download fails, try `replicas[1]`, `replicas[2]`, etc.
+- Uses per-server queues from Step 4
+
+## 5. Testing Plan
+
+### Principle
+
+Prefer low-level vitest tests over Playwright E2E. Each new function gets one focused test. Pure functions tested without mocks; connection management tested with mock `connectXFTP`; server behavior tested with real server. Total: 13 tests across 4 files.
+
+Tests A-C run in browser context (`@vitest/browser` with Chromium headless), configured in `vitest.config.ts`. Test D (integration) requires a separate Node.js vitest config since it uses `node:http2`. Existing `globalSetup.ts` provides a real XFTP server for integration tests. 
+ +### Test file A: `test/errors.test.ts` — pure, no server + +Tests error classification and padded error detection (Steps 2, 5). + +**T1. `isRetriable` classifies errors correctly** +```typescript +// Retriable: +expect(isRetriable(new XFTPRetriableError("SESSION"))).toBe(true) +expect(isRetriable(new XFTPRetriableError("HANDSHAKE"))).toBe(true) +expect(isRetriable(new TypeError("fetch failed"))).toBe(true) // network error +expect(isRetriable(Object.assign(new Error(), {name: "AbortError"}))).toBe(true) // timeout +// Not retriable: +expect(isRetriable(new XFTPPermanentError("AUTH", "..."))).toBe(false) +expect(isRetriable(new XFTPPermanentError("NO_FILE", "..."))).toBe(false) +expect(isRetriable(new XFTPPermanentError("INTERNAL", "..."))).toBe(false) +``` + +**T2. `categorizeError` produces human-readable messages** +```typescript +// categorizeError receives thrown errors (from sendXFTPCommandOnce or transport) +const e = categorizeError(new XFTPPermanentError("AUTH", "File is invalid, expired, or has been removed")) +expect(e.message).toContain("expired") +// Verify every permanent error type maps to a non-empty human-readable message +for (const errType of ["AUTH", "NO_FILE", "SIZE", "QUOTA", "BLOCKED", "DIGEST", "INTERNAL"]) { + expect(humanReadableMessage({type: errType}).length).toBeGreaterThan(0) +} +// Retriable errors also get human-readable messages after exhaustion +const re = categorizeError(new XFTPRetriableError("SESSION")) +expect(re.message).toContain("expired") // "Session expired, reconnecting..." +``` + +**T3. 
Padded error detection extracts error string from padded block** +```typescript +import {blockPad, blockUnpad} from '../src/protocol/transmission.js' +// Simulate server sending padded "SESSION" +const padded = blockPad(new TextEncoder().encode("SESSION")) +const raw = blockUnpad(padded) +expect(raw.length).toBeLessThan(20) +expect(new TextDecoder().decode(raw)).toBe("SESSION") +// Normal transmission block (batch count + large-encoded data) is NOT a short string +const sessionId = new Uint8Array(32) // dummy +const normalBlock = encodeTransmission(sessionId, new Uint8Array(0), new Uint8Array(0), encodePING()) +const normalRaw = blockUnpad(normalBlock) +expect(normalRaw.length).toBeGreaterThan(20) // not mistaken for padded error +``` + +### Test file B: `test/connection.test.ts` — mock connectXFTP, no server + +Tests connection management functions (Steps 4, 5). Uses `vi.mock` to replace `connectXFTP` with a controllable promise factory. + +**T4. `getXFTPServerClient` coalesces concurrent calls** +```typescript +// Mock connectXFTP to return a deferred promise +const {promise, resolve} = promiseWithResolvers() +vi.mocked(connectXFTP).mockReturnValueOnce(promise) +const agent = newXFTPAgent() +const p1 = getXFTPServerClient(agent, server) +const p2 = getXFTPServerClient(agent, server) +expect(p1).toBe(p2) // same promise, single connection +resolve(mockClient) +expect(await p1).toBe(mockClient) +``` + +**T5. 
`getXFTPServerClient` auto-cleans failed connections** +```typescript +vi.mocked(connectXFTP).mockReturnValueOnce(Promise.reject(new Error("down"))) +const agent = newXFTPAgent() +const p1 = getXFTPServerClient(agent, server) +await expect(p1).rejects.toThrow("down") +// After microtask, entry is removed +await new Promise(r => setTimeout(r, 0)) +expect(agent.connections.has(formatXFTPServer(server))).toBe(false) +// Next call creates fresh connection +vi.mocked(connectXFTP).mockReturnValueOnce(Promise.resolve(mockClient)) +const p2 = getXFTPServerClient(agent, server) +expect(p2).not.toBe(p1) +``` + +**T6. `removeStaleConnection` respects promise identity** +```typescript +const agent = newXFTPAgent() +const p1 = Promise.resolve(mockClient) +agent.connections.set(key, {client: p1, queue: Promise.resolve()}) +// Replace with reconnect +const p2 = Promise.resolve(mockClient2) +agent.connections.set(key, {client: p2, queue: Promise.resolve()}) +// removeStaleConnection with old promise does NOT remove new entry +removeStaleConnection(agent, server, p1) +expect(agent.connections.has(key)).toBe(true) +expect(agent.connections.get(key)!.client).toBe(p2) +// removeStaleConnection with current promise removes it +removeStaleConnection(agent, server, p2) +expect(agent.connections.has(key)).toBe(false) +``` + +**T7. `reconnectClient` replaces promise but preserves queue** +```typescript +const agent = newXFTPAgent() +const origQueue = Promise.resolve() +agent.connections.set(key, {client: Promise.resolve(mockClient), queue: origQueue}) +vi.mocked(connectXFTP).mockReturnValueOnce(Promise.resolve(mockClient2)) +reconnectClient(agent, server) +const conn = agent.connections.get(key)! +expect(await conn.client).toBe(mockClient2) // new client +expect(conn.queue).toBe(origQueue) // queue preserved +``` + +**T8. Retry loop: retriable error triggers reconnect, permanent error does not** + +Mock approach: `vi.mock('../src/client.js')` to mock `connectXFTP` (exported). 
`reconnectClient` is not exported — its behavior is controlled indirectly via `connectXFTP` mock (it calls `connectXFTP` internally). Verify retry count via `connectXFTP` call count. Note: vitest module mocking may need adjustment depending on ESM transform behavior — if intra-module calls bypass the mock, extract `connectXFTP` to a separate module or use dependency injection for testing. + +```typescript +// Script: first connectXFTP returns client whose post throws retriable, +// second connectXFTP (from reconnect) returns client whose post succeeds +vi.mocked(connectXFTP) + .mockResolvedValueOnce({ + ...mockClient, + transport: { post: async () => { throw new XFTPRetriableError("SESSION") }, close: () => {} } + }) + .mockResolvedValueOnce({ + ...mockClient, + transport: { post: async () => okResponseBlock, close: () => {} } + }) + +const agent = newXFTPAgent() +const result = await sendXFTPCommand(agent, server, dummyKey, dummyId, encodePING()) +expect(result.response.type).toBe("FROk") +expect(vi.mocked(connectXFTP)).toHaveBeenCalledTimes(2) // initial + 1 reconnect + +// Reset — all 3 retries exhausted: connectXFTP called 3 times (initial + 2 reconnects) +vi.mocked(connectXFTP).mockClear() +vi.mocked(connectXFTP).mockResolvedValue({ + ...mockClient, + transport: { post: async () => { throw new XFTPRetriableError("SESSION") }, close: () => {} } +}) +const agent2 = newXFTPAgent() +await expect(sendXFTPCommand(agent2, server, dummyKey, dummyId, encodePING())) + .rejects.toThrow(/reconnecting|expired/) +expect(vi.mocked(connectXFTP)).toHaveBeenCalledTimes(3) // initial + 2 reconnects + +// Reset — permanent error: connectXFTP called once (initial only, no reconnect) +vi.mocked(connectXFTP).mockClear() +vi.mocked(connectXFTP).mockResolvedValue({ + ...mockClient, + transport: { post: async () => authErrorBlock, close: () => {} } +}) +const agent3 = newXFTPAgent() +await expect(sendXFTPCommand(agent3, server, dummyKey, dummyId, encodePING())) + 
.rejects.toThrow(/expired/) +expect(vi.mocked(connectXFTP)).toHaveBeenCalledTimes(1) // initial only, no reconnect +``` + +### Test file C: `test/server-selection.test.ts` — pure, no server + +Tests `pickServer` state machine (Step 10). Determinism: seed `Math.random` or test invariants not specific picks. + +**T9. `pickServer` picks from untried when working < concurrency** +```typescript +const servers = [s1, s2, s3, s4, s5] +const state: UploadState = {untriedServers: [...servers], workingServers: []} +const picked = pickServer(servers, state, 4) +// picked is from untried, and was removed from untried +expect(state.untriedServers.length).toBe(4) +expect(state.untriedServers).not.toContainEqual(picked) +``` + +**T10. `pickServer` picks only from working when working >= concurrency** +```typescript +const state: UploadState = { + untriedServers: [s5], // still has untried + workingServers: [s1, s2, s3, s4] +} +const picked = pickServer(servers, state, 4) +// Must pick from working, NOT from untried +expect([s1, s2, s3, s4]).toContainEqual(picked) +expect(state.untriedServers.length).toBe(1) // untried unchanged +``` + +**T11. `pickServer` resets untried when exhausted** +```typescript +const state: UploadState = { + untriedServers: [], // all tried + workingServers: [s1, s2] // only 2 working, concurrency=4 +} +const picked = pickServer(servers, state, 4) +// Should have reset untried to non-working servers and picked from them +expect([s3, s4, s5]).toContainEqual(picked) +expect(state.untriedServers.length).toBe(2) // 3 non-working minus 1 picked +``` + +### Test file D: `test/integration.test.ts` — real server, Node.js mode + +Requires separate vitest config with `browser: {enabled: false}` since these tests use `node:http2` directly. Alternatively, add `test/vitest.node.config.ts` that includes only `test/integration.test.ts` and runs in Node.js. + +**T12. 
Stale session returns padded SESSION error (requires Step 6)**
+```typescript
+import http2 from 'node:http2'
+// Connect and handshake normally via the client
+const client = await connectXFTP(server)
+// Create a raw HTTP/2 session (new TLS SessionId, no handshake state on server)
+const session = http2.connect(client.baseUrl, {rejectUnauthorized: false})
+// Build a dummy command block using the old client's sessionId.
+// Content doesn't matter — server detects stale session before parsing command.
+const dummyKey = new Uint8Array(64) // Ed25519 private key (dummy)
+const dummyId = new Uint8Array(24) // entity ID (dummy)
+const cmdBlock = encodeAuthTransmission(client.sessionId, new Uint8Array(0), dummyId, encodePING(), dummyKey)
+const resp = await new Promise<Uint8Array>((resolve, reject) => {
+  const req = session.request({":method": "POST", ":path": "/"})
+  const chunks: Buffer[] = []
+  req.on("data", (c: Buffer) => chunks.push(c))
+  req.on("end", () => resolve(new Uint8Array(Buffer.concat(chunks))))
+  req.on("error", reject)
+  req.end(Buffer.from(cmdBlock))
+})
+// Server should return padded "SESSION" (not crash, not "HANDSHAKE")
+const raw = blockUnpad(resp.subarray(0, XFTP_BLOCK_SIZE))
+expect(new TextDecoder().decode(raw)).toBe("SESSION")
+session.close()
+closeXFTP(client)
+```
+
+**T13. 
Fetch timeout fires within configured duration** +```typescript +// connectXFTP with 1ms timeout — handshake requires multiple round trips, +// so even on localhost it will exceed 1ms and trigger abort +await expect( + connectXFTP(server, {timeoutMs: 1}) +).rejects.toThrow(/abort|timeout/i) +``` + +### What existing tests already cover (no new tests needed) + +| Behavior | Covered by | +|----------|-----------| +| Cache key fix (Step 1) | Existing round-trip test — uses `formatXFTPServer` after refactor | +| Basic upload/download | 24 Playwright tests + 1 vitest browser test | +| File size limits, unicode filenames | Playwright edge case tests | +| Server startup/teardown | `globalSetup.ts` / `globalTeardown.ts` | +| Handshake + identity verification | `connectXFTP` in existing round-trip test | + +### Test ordering + +Tests must be added alongside their implementation step: +- **Step 2**: Add T1, T2, T3 (test/errors.test.ts) +- **Step 3**: Add T13 (test/integration.test.ts) — requires Node.js vitest config +- **Step 4**: Add T4, T5, T6, T7 (test/connection.test.ts) +- **Step 5**: Add T8 (test/connection.test.ts) +- **Step 6**: Add T12 (test/integration.test.ts) — requires server change + Node.js vitest config +- **Step 10**: Add T9, T10, T11 (test/server-selection.test.ts) + +## 6. 
Context for Implementation Sessions + +### Files to re-read on session start + +**TypeScript (xftp-web/src/):** +- `client.ts` — `XFTPClient`, `XFTPClientAgent`, `getXFTPServerClient`, `closeXFTPServerClient`, `connectXFTP`, `sendXFTPCommand`, `createBrowserTransport`, `createNodeTransport`, all command wrappers +- `agent.ts` — `uploadFile`, `downloadFileRaw`, `downloadFile`, `resolveRedirect`, `encryptFileForUpload` +- `protocol/transmission.ts` — `encodeAuthTransmission`, `decodeTransmission`, `blockPad`, `blockUnpad` +- `protocol/commands.ts` — `XFTPErrorType`, `FileResponse`, `decodeResponse`, `decodeXFTPError` +- `protocol/handshake.ts` — `decodeServerHandshake` (padded error detection heuristic) +- `protocol/address.ts` — `XFTPServer`, `parseXFTPServer`, `formatXFTPServer` +- `web/upload.ts` — UI error handling, retry button +- `web/download.ts` — UI error handling, retry button +- `web/servers.ts` — `getServers`, `pickRandomServer` + +**TypeScript (xftp-web/test/):** +- `browser.test.ts` — vitest Node.js test template (uses real Haskell server) +- `globalSetup.ts` — server startup, config generation, port file +- `page.spec.ts` — Playwright page tests + +**Haskell (reference for multi-server):** +- `src/Simplex/FileTransfer/Agent.hs` — `createChunk` (lines 457-486, allocate stage), `runXFTPSndPrepareWorker` (lines 391-430, serial allocate in Haskell), `runXFTPSndWorker` (lines 494-548, per-server upload worker) +- `src/Simplex/Messaging/Agent/Client.hs` — `getNextServer_` (lines 2335-2350), `withNextSrv` (lines 2366-2385), `pickServer` (lines 2309-2314) + +**Haskell (server):** +- `src/Simplex/FileTransfer/Server.hs` — `xftpServerHandshakeV1` (lines 165-244), `processRequest` (lines 403-435) +- `src/Simplex/Messaging/Protocol.hs` — `tDecodeServer` (lines 2239-2265) — sessionId verification at line 2242 + +### Key design constraints + +1. 
`tDecodeServer` (Protocol.hs:2242) verifies `sessId == sessionId` — commands signed with old sessionId WILL fail on new connection +2. Server generates per-session DH key in `processHello` (Server.hs:207) — cannot be shared across sessions +3. `fetch()` provides zero control over HTTP/2 connection reuse — browser decides +4. `xftp-web-hello` header is only checked in dispatch (Server.hs:192), NOT inside `processHello` +5. Handshake-phase errors are raw padded strings; command-phase errors are proper ERR transmissions +6. Ed25519 signature verification (`TASignature` path, Protocol.hs:1314) does NOT use `thAuth` — but SMP will +7. Reconnect must re-handshake to get new sessionId AND new server DH key +8. The new `throwE SESSION` guard (Step 6) sends a raw padded "SESSION" string — no sessionId framing. Client detects this via padded error heuristic (section 3.2), not via sessionId mismatch +9. FNEW is cheap (creates chunk record on server) — retry with different server on failure +10. FPUT retries on same server (chunk replica already exists there) — close connection + backoff + +## 7. Plan Maintenance + +This plan must be updated as implementation proceeds: +- Mark completed steps with date +- Record any deviations from the plan with rationale +- Add new issues discovered during implementation +- Update file references if code moves diff --git a/rfcs/2026-01-30-send-file-page/2026-02-12-xftp-cli-web-link-compat.md b/rfcs/2026-01-30-send-file-page/2026-02-12-xftp-cli-web-link-compat.md new file mode 100644 index 0000000000..14b7187e90 --- /dev/null +++ b/rfcs/2026-01-30-send-file-page/2026-02-12-xftp-cli-web-link-compat.md @@ -0,0 +1,327 @@ +# CLI-Web Link Compatibility + +## Problem + +CLI and web clients are isolated: CLI outputs `.xftp` description files, web outputs +`https://host/#` links. A file uploaded via one cannot be downloaded via the other. 
+
+## Solution Summary
+
+Make CLI produce and consume web-compatible links so that:
+- CLI `send` always outputs a web link (in addition to `.xftp` files)
+- CLI `recv` accepts a web link URL as input (alternative to `.xftp` file path)
+- Browser can download files uploaded by CLI and vice versa
+
+The web page host is derived from the XFTP server address - the server that hosts the file
+also hosts the download page. Making XFTP servers actually serve the web page is a separate
+concern (not covered here), but the link format anticipates it.
+
+The YAML file description format is already identical between CLI and web.
+The only gap is the URI encoding layer: DEFLATE-raw compression + base64url + URL structure.
+
+## Current State
+
+### Web link format
+
+```
+https://<host>/#<base64url-encoded description>
+```
+
+Encoding chain (agent.ts:64-68):
+1. `encodeFileDescription(fd)` -> YAML string
+2. `TextEncoder.encode(yaml)` -> bytes
+3. `pako.deflateRaw(bytes)` -> compressed
+4. `base64urlEncode(compressed)` -> URI fragment (no `#`)
+
+For multi-chunk files exceeding ~400 chars in URI, a redirect description is uploaded:
+the real file description is encrypted, uploaded as a separate XFTP file, and a smaller
+"redirect" description (pointing to it) is put in the URI.
+
+### CLI file format
+
+```
+xftp send FILE -> writes rcv1.xftp (raw YAML), snd.xftp.private
+xftp recv FILE.xftp -> reads raw YAML from file
+```
+
+No URI support. No compression. No redirect descriptions.
+
+### Existing Haskell `FileDescriptionURI`
+
+`Description.hs:243-266` defines a `simplex:/file#/?desc=<url-encoded YAML>` format.
+This is the SimpleX Chat app format - NOT the web page format. It uses URL-encoded raw YAML
+(no DEFLATE compression), and has a different URL structure.
+
+## Detailed Tech Design
+
+### 1. File Header (Filename) Compatibility
+
+The filename is carried **inside the encrypted file data**, not in the file description YAML.
+Both CLI and web use the same `FileHeader` structure and binary encoding - full interop. 
+ +#### FileHeader type + +Haskell (`Types.hs:36-46`): +```haskell +data FileHeader = FileHeader { fileName :: Text, fileExtra :: Maybe Text } +instance Encoding FileHeader where + smpEncode FileHeader {fileName, fileExtra} = smpEncode (fileName, fileExtra) +``` + +TypeScript (`crypto/file.ts:11-24`): +```typescript +interface FileHeader { fileName: string; fileExtra: string | null } +function encodeFileHeader(hdr: FileHeader): Uint8Array { + return concatBytes(encodeString(hdr.fileName), encodeMaybe(encodeString, hdr.fileExtra)) +} +``` + +Both produce identical binary: `[1-byte UTF-8 length][fileName bytes]['0']` (for null fileExtra). +Max filename: 255 UTF-8 bytes (1-byte length prefix). + +#### Encrypted file structure + +Both CLI and web produce the same encrypted stream: +``` +XSalsa20-Poly1305 encrypted: + [8-byte Int64 fileSize] [FileHeader] [file content] ['#' padding] + + [16-byte auth tag] + +Where fileSize = len(FileHeader) + len(file content) +``` + +The 8-byte length prefix and padding are handled identically: +- Haskell: `Crypto.hs:43-56` (`encryptFile`) / `Crypto.hs:81-87` (`decryptFirstChunk`) +- TypeScript: `crypto/file.ts:51-70` (`encryptFile`) / `crypto/file.ts:81-94` (`decryptChunks`) + +On decryption, `unPadLazy`/`splitLen` strips the 8-byte length prefix, then `parseFileHeader` +extracts the filename from the remaining decrypted bytes (up to 1024 bytes examined, both sides). + +#### CLI upload: sets real filename (ok) + +`Client/Main.hs:246-247,273`: +```haskell +let (_, fileNameStr) = splitFileName filePath + fileName = T.pack fileNameStr +... + fileHdr = smpEncode FileHeader {fileName, fileExtra = Nothing} +``` + +Extracts the actual filename from the path and embeds it in the encrypted header. 
+
+#### CLI download: uses filename from header (ok)
+
+`Crypto.hs:62-66` (single chunk) / `Crypto.hs:72-74` (multi-chunk):
+```haskell
+(FileHeader {fileName}, rest) <- parseFileHeader decryptedContent
+destFile <- withExceptT FTCEFileIOError $ getDestFile fileName
+```
+
+`Client/Main.hs:435-441` (`getFilePath`):
+- If output dir specified: saves to `<dir>/<fileName>`
+- If no dir: saves to `~/Downloads/<fileName>`
+
+The filename from the decrypted header determines the output file name.
+
+#### Web upload: sets real filename (ok)
+
+`upload.ts:121` -> `agent.ts:86`:
+```typescript
+const fileHdr = encodeFileHeader({fileName, fileExtra: null})
+```
+
+Where `fileName` comes from `file.name` (browser File API).
+
+#### Web download: uses filename from header (ok)
+
+`download.ts:97,102`:
+```typescript
+const fileName = sanitizeFileName(header.fileName)
+a.download = encodeURIComponent(fileName)
+```
+
+The web client additionally sanitizes the filename (strips path separators, control chars,
+bidi overrides, limits to 255 chars).
+
+#### Web redirect description: empty filename (correct)
+
+`agent.ts:193`: `encryptFileForUpload(yamlBytes, "")` - redirect descriptions use empty filename
+because they are internal artifacts, not user files. This is handled correctly on both sides:
+the redirect content is decrypted and parsed as YAML, not saved as a file.
+
+#### Cross-client interop: fully compatible (ok)
+
+| Scenario | Filename flow | Status |
+|----------|--------------|--------|
+| CLI upload -> CLI download | `splitFileName` -> header -> `getDestFile` | Works |
+| Web upload -> Web download | `File.name` -> header -> `sanitizeFileName` | Works |
+| CLI upload -> Web download | `splitFileName` -> header -> `sanitizeFileName` | **Compatible** |
+| Web upload -> CLI download | `File.name` -> header -> `getDestFile` | **Compatible** |
+
+The binary encoding is identical (smpEncode). No changes needed for filename interop.
+The CLI should consider adding filename sanitization similar to the web client for safety.
+
+### 2. Web Link Host Derivation
+
+The web page URL domain comes from the XFTP server address, not from a CLI flag:
+
+- **Non-redirected description**: use the server host of the first chunk's first replica.
+  E.g., `xftp://abc=@xftp1.simplex.im` -> `https://xftp1.simplex.im/#<fragment>`
+
+- **Redirected description**: use the server host of the redirect chunk (the outer description's
+  chunk that stores the encrypted inner description).
+
+The server address format is `xftp://<fingerprint>@<host>[,<host>,...][:<port>]`.
+The web link uses `https://<host>` (port 443 implied).
+
+This means the CLI does not need a `--web-url` flag - the server address fully determines
+the link. The XFTP server serving the web page is a separate deployment concern.
+
+### 3. Web URI Encoding/Decoding in Haskell
+
+Add two functions (new module or in `Description.hs`):
+
+```haskell
+-- Encode file description as web URI fragment (no leading #)
+encodeWebURI :: FileDescription 'FRecipient -> ByteString
+-- 1. Y.encode . encodeFileDescription -> YAML bytes
+-- 2. deflateRaw (raw DEFLATE, no zlib/gzip header) via zlib package
+-- 3. base64url encode (with padding, matching Data.ByteString.Base64.URL)
+
+-- Decode web URI fragment (no leading #) to file description
+decodeWebURI :: ByteString -> Either String (ValidFileDescription 'FRecipient)
+-- 1. base64url decode
+-- 2. inflateRaw (raw DEFLATE decompress)
+-- 3. Y.decodeEither' -> YAMLFileDescription -> FileDescription
+-- 4. validateFileDescription
+
+-- Build full web link from file description
+-- Extracts server host from first chunk replica (or redirect chunk)
+fileWebLink :: FileDescription 'FRecipient -> Maybe (ByteString, ByteString)
+-- Returns Just (webHost, uriFragment); Nothing if the description has no chunk replicas
+-- Caller assembles: "https://" <> webHost <> "/#" <> uriFragment
+```
+
+**Dependency**: Add `zlib` to `simplexmq.cabal` (for raw DEFLATE).
+The codebase already has `zstd` for message compression - `zlib` is standard and small.
+
+The `zlib` Haskell package provides `Codec.Compression.Zlib.Raw` for raw DEFLATE
+(no header/trailer), matching `pako.deflateRaw()` / `pako.inflateRaw()`.
+
+### 4. Redirect Description Support
+
+The CLI currently does NOT create redirect descriptions. For single-server single-recipient
+uploads, most file descriptions fit in a reasonable URI even for multi-chunk files. But for
+large files (many chunks x long server hostnames), the URI can exceed practical limits.
+
+**Approach**: Match the web client threshold.
+- After encoding the URI, if `length > 400` and chunks > 1, upload a redirect description.
+- The redirect upload uses the same XFTP upload flow: encrypt YAML -> upload as file -> create
+  outer description pointing to it.
+- This matches `agent.ts:152-155` exactly.
+- The redirect chunk's server becomes the web link host.
+
+For CLI download from a redirect URI, the existing `cliReceiveFile` needs extension:
+- After decoding the file description, check `redirect` field.
+- If present: download and decrypt the redirect chunks first to get the inner description,
+  then download the actual file using the inner description.
+- The web client already does this (`resolveRedirect` in agent.ts:320-346).
+
+### 5. CLI Command Changes
+
+#### `xftp send` - always output web link
+
+```
+xftp send FILE [DIR] [-n COUNT] [-s SERVERS]
+```
+
+- Upload file as usual
+- Generate web link: `https://<host>/#<fragment>`
+- If URI exceeds threshold, upload redirect description first
+- Print web link to stdout (in addition to `.xftp` file paths)
+- Only generates link for the first recipient (web links are single-recipient)
+
+**Output change**:
+```
+Sender file description: ./file.xftp/snd.xftp.private
+Pass file descriptions to the recipient(s):
+./file.xftp/rcv1.xftp
+
+Web link:
+https://xftp1.simplex.im/#eJy0VduO2zYQ...
+```
+
+#### `xftp recv` - accept URL as input
+
+```
+xftp recv <FILE.xftp | URL> [DIR]
+```
+
+- If input starts with `http://` or `https://`, extract hash fragment after `#`
+- Decode: base64url -> inflateRaw -> YAML -> FileDescription
+- Resolve redirect if present
+- Download and decrypt as usual
+
+The URL must be quoted on the command line (`"https://...#..."`) because `#` is a shell
+comment character when unquoted.
+
+Implementation: modify `receiveP` parser to accept URL, add `decodeWebURI` path in
+`cliReceiveFile` alongside existing `getFileDescription'`.
+
+### 6. YAML Format Compatibility
+
+Already identical. The web `description.ts` explicitly matches Haskell `Data.Yaml` output:
+- Same field names (alphabetical key order)
+- Same base64url encoding for binary fields (with `=` padding)
+- Same server replica colon-delimited format: `chunkNo:replicaId:replicaKey[:digest][:chunkSize]`
+- Same size encoding (`kb`/`mb`/`gb` suffixes)
+- Same redirect structure
+
+**Verification**: The Playwright test suite already tests upload->download round-trips.
+Adding a cross-client test (CLI upload -> web download, or web upload -> CLI download) would
+validate interop end-to-end.
+
+### 7. Server Compatibility
+
+No server changes needed. Both clients use the same XFTP protocol (FGET, FPUT, FNEW, FACK, FDEL).
+The web client adds `xftp-web-hello: 1` header for the hello handshake, but the actual file
+operations are identical wire-format.
+
+The only consideration: CLI uses native HTTP/2 (via `http2` Haskell package), web uses
+browser `fetch()` API over HTTP/2. Both produce identical XFTP protocol frames.
+
+**Note**: Making XFTP servers actually serve the web download page at `https://<host>/` is a
+separate deployment/infrastructure task. This plan only establishes the link format convention
+so that links are ready to work once servers serve the page.
+
+## Implementation Plan
+
+### Phase 1: Web URI codec in Haskell
+
+1. Add `zlib` dependency to `simplexmq.cabal`
+2. 
Add `encodeWebURI` / `decodeWebURI` / `fileWebLink` to `Simplex.FileTransfer.Description` + (or a new `Simplex.FileTransfer.Description.WebURI` module) +3. `fileWebLink` extracts host from first chunk's first replica server address +4. Add unit tests: encode a known FileDescription, verify output matches web client encoding +5. Add round-trip test: encode -> decode -> compare + +### Phase 2: CLI `recv` accepts URL + +1. Modify `ReceiveOptions` to accept `Either FilePath WebURL` for `fileDescription` +2. In `cliReceiveFile`: if URL, extract fragment after `#`, call `decodeWebURI` +3. Add redirect resolution: if `redirect /= Nothing`, download redirect chunks, + decrypt, parse inner description, then proceed with download +4. Test: upload via web page -> copy link -> `xftp recv ` + +### Phase 3: CLI `send` outputs web link + +1. After upload, call `fileWebLink` to get (host, fragment) +2. If fragment exceeds threshold, upload redirect description first, rebuild link +3. Print `https:///#` to stdout +4. Test: `xftp send FILE` -> open link in browser -> download + +### Phase 4: Cross-client integration test + +1. Add test: CLI send -> extract link from stdout -> Playwright browser download -> verify +2. Add test: Playwright browser upload -> extract link -> CLI recv -> verify +3. 
These can be shell-script or Haskell test-suite tests that spawn both clients diff --git a/simplexmq.cabal b/simplexmq.cabal index 2ff5de8002..69d6ffefc0 100644 --- a/simplexmq.cabal +++ b/simplexmq.cabal @@ -347,6 +347,7 @@ library , process ==1.6.* , temporary ==1.3.* , websockets ==0.12.* + , zlib >=0.6 && <0.8 if flag(client_postgres) || flag(server_postgres) build-depends: postgresql-libpq >=0.10.0.0 @@ -497,6 +498,7 @@ test-suite simplexmq-test XFTPCLI XFTPClient XFTPServerTests + XFTPWebTests Static Static.Embedded Paths_simplexmq @@ -526,6 +528,7 @@ test-suite simplexmq-test , async , base64-bytestring , bytestring + , case-insensitive ==1.2.* , containers , crypton , crypton-x509 diff --git a/src/Simplex/FileTransfer/Agent.hs b/src/Simplex/FileTransfer/Agent.hs index ff271240b9..a8b220327c 100644 --- a/src/Simplex/FileTransfer/Agent.hs +++ b/src/Simplex/FileTransfer/Agent.hs @@ -47,7 +47,7 @@ import Data.Map.Strict (Map) import qualified Data.Map.Strict as M import Data.Maybe (fromMaybe, mapMaybe) import qualified Data.Set as S -import Data.Text (Text) +import Data.Text (Text, pack) import Data.Time.Clock (getCurrentTime) import Data.Time.Format (defaultTimeLocale, formatTime) import Simplex.FileTransfer.Chunks (toKB) @@ -433,7 +433,7 @@ runXFTPSndPrepareWorker c Worker {doWork} = do encryptFileForUpload :: SndFile -> FilePath -> AM (FileDigest, [(XFTPChunkSpec, FileDigest)]) encryptFileForUpload SndFile {key, nonce, srcFile, redirect} fsEncPath = do let CryptoFile {filePath} = srcFile - fileName = takeFileName filePath + fileName = pack $ takeFileName filePath fileSize <- liftIO $ fromInteger <$> CF.getFileContentsSize srcFile when (fileSize > maxFileSizeHard) $ throwE $ FILE FT.SIZE let fileHdr = smpEncode FileHeader {fileName, fileExtra = Nothing} diff --git a/src/Simplex/FileTransfer/Client.hs b/src/Simplex/FileTransfer/Client.hs index 62f06b7d31..4c35780d39 100644 --- a/src/Simplex/FileTransfer/Client.hs +++ b/src/Simplex/FileTransfer/Client.hs @@ 
-40,11 +40,11 @@ import Simplex.Messaging.Client NetworkRequestMode (..), ProtocolClientError (..), TransportSession, - netTimeoutInt, chooseTransportHost, + clientSocksCredentials, defaultNetworkConfig, + netTimeoutInt, transportClientConfig, - clientSocksCredentials, unexpectedResponse, useWebPort, ) @@ -54,13 +54,13 @@ import Simplex.Messaging.Encoding (smpDecode, smpEncode) import Simplex.Messaging.Encoding.String import Simplex.Messaging.Protocol ( BasicAuth, + NetworkError (..), Protocol (..), ProtocolServer (..), RecipientId, SenderId, - pattern NoEntity, - NetworkError (..), toNetworkError, + pattern NoEntity, ) import Simplex.Messaging.Transport (ALPN, CertChainPubKey (..), HandshakeError (..), THandleAuth (..), THandleParams (..), TransportError (..), TransportPeer (..), defaultSupportedParams) import Simplex.Messaging.Transport.Client (TransportClientConfig (..), TransportHost) @@ -126,8 +126,9 @@ getXFTPClient transportSession@(_, srv, _) config@XFTPClientConfig {clientALPN, thParams0 = THandleParams {sessionId, blockSize = xftpBlockSize, thVersion = v, thServerVRange, thAuth = Nothing, implySessId = False, encryptBlock = Nothing, batch = True, serviceAuth = False} logDebug $ "Client negotiated handshake protocol: " <> tshow sessionALPN thParams@THandleParams {thVersion} <- case sessionALPN of - Just alpn | alpn == xftpALPNv1 || alpn == httpALPN11 -> - xftpClientHandshakeV1 serverVRange keyHash http2Client thParams0 + Just alpn + | alpn == xftpALPNv1 || alpn == httpALPN11 -> + xftpClientHandshakeV1 serverVRange keyHash http2Client thParams0 _ -> pure thParams0 logDebug $ "Client negotiated protocol: " <> tshow thVersion let c = XFTPClient {http2Client, thParams, transportSession, config} @@ -212,7 +213,7 @@ sendXFTPTransmission XFTPClient {config, thParams, http2Client} t chunkSpec_ = d HTTP2Response {respBody = body@HTTP2Body {bodyHead}} <- withExceptT xftpClientError . 
ExceptT $ sendRequest http2Client req (Just reqTimeout) when (B.length bodyHead /= xftpBlockSize) $ throwE $ PCEResponseError BLOCK -- TODO validate that the file ID is the same as in the request? - (_, _fId, respOrErr) <-liftEither $ first PCEResponseError $ xftpDecodeTClient thParams bodyHead + (_, _fId, respOrErr) <- liftEither $ first PCEResponseError $ xftpDecodeTClient thParams bodyHead case respOrErr of Right r -> case protocolError r of Just e -> throwE $ PCEProtocolError e diff --git a/src/Simplex/FileTransfer/Client/Main.hs b/src/Simplex/FileTransfer/Client/Main.hs index c73cac6376..b5fa112cda 100644 --- a/src/Simplex/FileTransfer/Client/Main.hs +++ b/src/Simplex/FileTransfer/Client/Main.hs @@ -16,6 +16,9 @@ module Simplex.FileTransfer.Client.Main xftpClientCLI, cliSendFile, cliSendFileOpts, + encodeWebURI, + decodeWebURI, + fileWebLink, singleChunkSize, prepareChunkSizes, prepareChunkSpecs, @@ -23,6 +26,7 @@ module Simplex.FileTransfer.Client.Main ) where +import qualified Codec.Compression.Zlib.Raw as Z import Control.Logger.Simple import Control.Monad import Control.Monad.Except @@ -30,17 +34,19 @@ import Control.Monad.Trans.Except import Crypto.Random (ChaChaDRG) import qualified Data.Attoparsec.ByteString.Char8 as A import Data.Bifunctor (first) +import qualified Data.ByteString.Base64.URL as U import qualified Data.ByteString.Char8 as B import qualified Data.ByteString.Lazy.Char8 as LB import Data.Char (toLower) import Data.Either (partitionEithers) import Data.Int (Int64) -import Data.List (foldl', sortOn) +import Data.List (foldl', isPrefixOf, sortOn) import Data.List.NonEmpty (NonEmpty (..), nonEmpty) import qualified Data.List.NonEmpty as L import Data.Map.Strict (Map) import qualified Data.Map.Strict as M import Data.Maybe (fromMaybe) +import Data.Text (Text) import qualified Data.Text as T import Data.Word (Word32) import GHC.Records (HasField (getField)) @@ -62,7 +68,7 @@ import qualified Simplex.Messaging.Crypto.Lazy as LC import 
Simplex.Messaging.Encoding import Simplex.Messaging.Encoding.String (StrEncoding (..)) import Simplex.Messaging.Parsers (parseAll) -import Simplex.Messaging.Protocol (ProtoServerWithAuth (..), SenderId, SndPrivateAuthKey, XFTPServer, XFTPServerWithAuth) +import Simplex.Messaging.Protocol (ProtoServerWithAuth (..), ProtocolServer (..), SenderId, SndPrivateAuthKey, XFTPServer, XFTPServerWithAuth) import Simplex.Messaging.Server.CLI (getCliCommand') import Simplex.Messaging.Util (groupAllOn, ifM, tshow, whenM) import System.Exit (exitFailure) @@ -242,7 +248,8 @@ cliSendFile opts = cliSendFileOpts opts True $ printProgress "Uploaded" cliSendFileOpts :: SendOptions -> Bool -> (Int64 -> Int64 -> IO ()) -> ExceptT CLIError IO () cliSendFileOpts SendOptions {filePath, outputDir, numRecipients, xftpServers, retryCount, tempPath, verbose} printInfo notifyProgress = do - let (_, fileName) = splitFileName filePath + let (_, fileNameStr) = splitFileName filePath + fileName = T.pack fileNameStr liftIO $ when printInfo $ printNoNewLine "Encrypting file..." g <- liftIO C.newRandom (encPath, fdRcv, fdSnd, chunkSpecs, encSize) <- encryptFileForUpload g fileName @@ -254,14 +261,18 @@ cliSendFileOpts SendOptions {filePath, outputDir, numRecipients, xftpServers, re liftIO $ do let fdRcvs = createRcvFileDescriptions fdRcv sentChunks fdSnd' = createSndFileDescription fdSnd sentChunks - (fdRcvPaths, fdSndPath) <- writeFileDescriptions fileName fdRcvs fdSnd' + (fdRcvPaths, fdSndPath) <- writeFileDescriptions fileNameStr fdRcvs fdSnd' when printInfo $ do printNoNewLine "File uploaded!" 
putStrLn $ "\nSender file description: " <> fdSndPath putStrLn "Pass file descriptions to the recipient(s):" forM_ fdRcvPaths putStrLn + when printInfo $ case fdRcvs of + rcvFd : _ -> forM_ (fileWebLink rcvFd) $ \(host, fragment) -> + putStrLn $ "\nWeb link:\nhttps://" <> B.unpack host <> "/#" <> B.unpack fragment + _ -> pure () where - encryptFileForUpload :: TVar ChaChaDRG -> String -> ExceptT CLIError IO (FilePath, FileDescription 'FRecipient, FileDescription 'FSender, [XFTPChunkSpec], Int64) + encryptFileForUpload :: TVar ChaChaDRG -> Text -> ExceptT CLIError IO (FilePath, FileDescription 'FRecipient, FileDescription 'FSender, [XFTPChunkSpec], Int64) encryptFileForUpload g fileName = do fileSize <- fromInteger <$> getFileSize filePath when (fileSize > maxFileSize) $ throwE $ CLIError $ "Files bigger than " <> maxFileSizeStr <> " are not supported" @@ -387,10 +398,16 @@ cliSendFileOpts SendOptions {filePath, outputDir, numRecipients, xftpServers, re cliReceiveFile :: ReceiveOptions -> ExceptT CLIError IO () cliReceiveFile ReceiveOptions {fileDescription, filePath, retryCount, tempPath, verbose, yes} = - getFileDescription' fileDescription >>= receive + getInputFileDescription >>= receive 1 where - receive :: ValidFileDescription 'FRecipient -> ExceptT CLIError IO () - receive (ValidFileDescription FileDescription {size, digest, key, nonce, chunks}) = do + getInputFileDescription + | "http://" `isPrefixOf` fileDescription || "https://" `isPrefixOf` fileDescription = do + let fragment = B.pack $ drop 1 $ dropWhile (/= '#') fileDescription + when (B.null fragment) $ throwE $ CLIError "Invalid URL: no fragment" + either (throwE . CLIError . 
("Invalid web link: " <>)) pure $ decodeWebURI fragment + | otherwise = getFileDescription' fileDescription + receive :: Int -> ValidFileDescription 'FRecipient -> ExceptT CLIError IO () + receive depth (ValidFileDescription FileDescription {size, digest, key, nonce, chunks, redirect}) = do encPath <- getEncPath tempPath "xftp" createDirectory encPath a <- liftIO $ newXFTPAgent defaultXFTPClientAgentConfig @@ -408,13 +425,26 @@ cliReceiveFile ReceiveOptions {fileDescription, filePath, retryCount, tempPath, when (encDigest /= unFileDigest digest) $ throwE $ CLIError "File digest mismatch" encSize <- liftIO $ foldM (\s path -> (s +) . fromIntegral <$> getFileSize path) 0 chunkPaths when (FileSize encSize /= size) $ throwE $ CLIError "File size mismatch" - liftIO $ printNoNewLine "Decrypting file..." - CryptoFile path _ <- withExceptT cliCryptoError $ decryptChunks encSize chunkPaths key nonce $ fmap CF.plain . getFilePath - forM_ chunks $ acknowledgeFileChunk a - whenM (doesPathExist encPath) $ removeDirectoryRecursive encPath - liftIO $ do - printNoNewLine $ "File downloaded: " <> path - removeFD yes fileDescription + case redirect of + Just _ + | depth > 0 -> do + CryptoFile tmpFile _ <- withExceptT cliCryptoError $ decryptChunks encSize chunkPaths key nonce $ \_ -> + fmap CF.plain $ uniqueCombine encPath "redirect.yaml" + forM_ chunks $ acknowledgeFileChunk a + yaml <- liftIO $ B.readFile tmpFile + whenM (doesPathExist encPath) $ removeDirectoryRecursive encPath + innerVfd <- either (throwE . CLIError . ("Redirect: invalid file description: " <>)) pure $ strDecode yaml + receive 0 innerVfd + | otherwise -> throwE $ CLIError "Redirect chain too long" + Nothing -> do + liftIO $ printNoNewLine "Decrypting file..." + CryptoFile path _ <- withExceptT cliCryptoError $ decryptChunks encSize chunkPaths key nonce $ fmap CF.plain . 
getFilePath + forM_ chunks $ acknowledgeFileChunk a + whenM (doesPathExist encPath) $ removeDirectoryRecursive encPath + liftIO $ do + printNoNewLine $ "File downloaded: " <> path + unless ("http://" `isPrefixOf` fileDescription || "https://" `isPrefixOf` fileDescription) $ + removeFD yes fileDescription downloadFileChunk :: TVar ChaChaDRG -> XFTPClientAgent -> FilePath -> FileSize Int64 -> TVar [Int64] -> FileChunk -> ExceptT CLIError IO (Int, FilePath) downloadFileChunk g a encPath (FileSize encSize) downloadedChunks FileChunk {chunkNo, chunkSize, digest, replicas = replica : _} = do let FileChunkReplica {server, replicaId, replicaKey} = replica @@ -430,13 +460,14 @@ cliReceiveFile ReceiveOptions {fileDescription, filePath, retryCount, tempPath, when verbose $ putStrLn "" pure (chunkNo, chunkPath) downloadFileChunk _ _ _ _ _ _ = throwE $ CLIError "chunk has no replicas" - getFilePath :: String -> ExceptT String IO FilePath - getFilePath name = - case filePath of - Just path -> - ifM (doesDirectoryExist path) (uniqueCombine path name) $ - ifM (doesFileExist path) (throwE "File already exists") (pure path) - _ -> (`uniqueCombine` name) . ( "Downloads") =<< getHomeDirectory + getFilePath :: Text -> ExceptT String IO FilePath + getFilePath name = case filePath of + Just path -> + ifM (doesDirectoryExist path) (uniqueCombine path name') $ + ifM (doesFileExist path) (throwE "File already exists") (pure path) + _ -> (`uniqueCombine` name') . ( "Downloads") =<< getHomeDirectory + where + name' = T.unpack name acknowledgeFileChunk :: XFTPClientAgent -> FileChunk -> ExceptT CLIError IO () acknowledgeFileChunk a FileChunk {replicas = replica : _} = do let FileChunkReplica {server, replicaId, replicaKey} = replica @@ -552,3 +583,24 @@ cliRandomFile RandomFileOptions {filePath, fileSize = FileSize size} = do B.hPut h bytes when (sz > mb') $ saveRandomFile h (sz - mb') mb' = mb 1 + +-- | Encode file description as web-compatible URI fragment. 
+-- Result is base64url(deflateRaw(YAML)), no leading '#'. +encodeWebURI :: FileDescription 'FRecipient -> B.ByteString +encodeWebURI fd = U.encode $ LB.toStrict $ Z.compress $ LB.fromStrict $ strEncode fd + +-- | Decode web URI fragment to validated file description. +-- Input is base64url-encoded DEFLATE-compressed YAML, no leading '#'. +decodeWebURI :: B.ByteString -> Either String (ValidFileDescription 'FRecipient) +decodeWebURI fragment = do + compressed <- U.decode fragment + let yaml = LB.toStrict $ Z.decompress $ LB.fromStrict compressed + strDecode yaml >>= validateFileDescription + +-- | Extract web link host and URI fragment from a file description. +-- Returns (hostname, uriFragment) for https://hostname/#uriFragment. +fileWebLink :: FileDescription 'FRecipient -> Maybe (B.ByteString, B.ByteString) +fileWebLink fd@FileDescription {chunks} = case chunks of + (FileChunk {replicas = FileChunkReplica {server = ProtocolServer {host}} : _} : _) -> + Just (strEncode (L.head host), encodeWebURI fd) + _ -> Nothing diff --git a/src/Simplex/FileTransfer/Crypto.hs b/src/Simplex/FileTransfer/Crypto.hs index 72344f3c02..6c9039e0cd 100644 --- a/src/Simplex/FileTransfer/Crypto.hs +++ b/src/Simplex/FileTransfer/Crypto.hs @@ -16,6 +16,7 @@ import Data.ByteString.Char8 (ByteString) import qualified Data.ByteString.Char8 as B import qualified Data.ByteString.Lazy.Char8 as LB import Data.Int (Int64) +import Data.Text (Text) import Simplex.FileTransfer.Types (FileHeader (..), authTagSize) import qualified Simplex.Messaging.Crypto as C import Simplex.Messaging.Crypto.File (CryptoFile (..), FTCryptoError (..)) @@ -54,7 +55,7 @@ encryptFile srcFile fileHdr key nonce fileSize' encSize encFile = do liftIO $ B.hPut w ch' encryptChunks_ get w (sb', len - chSize) -decryptChunks :: Int64 -> [FilePath] -> C.SbKey -> C.CbNonce -> (String -> ExceptT String IO CryptoFile) -> ExceptT FTCryptoError IO CryptoFile +decryptChunks :: Int64 -> [FilePath] -> C.SbKey -> C.CbNonce -> (Text -> 
ExceptT String IO CryptoFile) -> ExceptT FTCryptoError IO CryptoFile decryptChunks _ [] _ _ _ = throwE $ FTCEInvalidHeader "empty" decryptChunks encSize (chPath : chPaths) key nonce getDestFile = case reverse chPaths of [] -> do diff --git a/src/Simplex/FileTransfer/Server.hs b/src/Simplex/FileTransfer/Server.hs index 25de49afc8..b007cbe29e 100644 --- a/src/Simplex/FileTransfer/Server.hs +++ b/src/Simplex/FileTransfer/Server.hs @@ -40,6 +40,7 @@ import GHC.IO.Handle (hSetNewlineMode) import GHC.IORef (atomicSwapIORef) import GHC.Stats (getRTSStats) import qualified Network.HTTP.Types as N +import Network.HPACK.Token (tokenKey) import qualified Network.HTTP2.Server as H import Network.Socket import Simplex.FileTransfer.Protocol @@ -63,12 +64,12 @@ import Simplex.Messaging.Server.Stats import Simplex.Messaging.SystemTime import Simplex.Messaging.TMap (TMap) import qualified Simplex.Messaging.TMap as TM -import Simplex.Messaging.Transport (CertChainPubKey (..), SessionId, THandleAuth (..), THandleParams (..), TransportPeer (..), defaultSupportedParams) +import Simplex.Messaging.Transport (CertChainPubKey (..), SessionId, THandleAuth (..), THandleParams (..), TransportPeer (..), defaultSupportedParams, defaultSupportedParamsHTTPS) import Simplex.Messaging.Transport.Buffer (trimCR) import Simplex.Messaging.Transport.HTTP2 import Simplex.Messaging.Transport.HTTP2.File (fileBlockSize) -import Simplex.Messaging.Transport.HTTP2.Server -import Simplex.Messaging.Transport.Server (runLocalTCPServer) +import Simplex.Messaging.Transport.HTTP2.Server (runHTTP2Server) +import Simplex.Messaging.Transport.Server (SNICredentialUsed, TransportServerConfig (..), runLocalTCPServer) import Simplex.Messaging.Util import Simplex.Messaging.Version import System.Environment (lookupEnv) @@ -89,9 +90,24 @@ data XFTPTransportRequest = XFTPTransportRequest { thParams :: THandleParamsXFTP 'TServer, reqBody :: HTTP2Body, request :: H.Request, - sendResponse :: H.Response -> IO () + sendResponse :: 
H.Response -> IO (), + sniUsed :: SNICredentialUsed, + addCORS :: Bool } +corsHeaders :: Bool -> [N.Header] +corsHeaders addCORS + | addCORS = [("Access-Control-Allow-Origin", "*"), ("Access-Control-Expose-Headers", "*")] + | otherwise = [] + +corsPreflightHeaders :: [N.Header] +corsPreflightHeaders = + [ ("Access-Control-Allow-Origin", "*"), + ("Access-Control-Allow-Methods", "POST, OPTIONS"), + ("Access-Control-Allow-Headers", "*"), + ("Access-Control-Max-Age", "86400") + ] + runXFTPServer :: XFTPServerConfig -> IO () runXFTPServer cfg = do started <- newEmptyTMVarIO @@ -120,45 +136,73 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira runServer :: M () runServer = do srvCreds@(chain, pk) <- asks tlsServerCreds + httpCreds_ <- asks httpServerCreds signKey <- liftIO $ case C.x509ToPrivate' pk of Right pk' -> pure pk' Left e -> putStrLn ("Server has no valid key: " <> show e) >> exitFailure env <- ask sessions <- liftIO TM.emptyIO let cleanup sessionId = atomically $ TM.delete sessionId sessions - liftIO . 
runHTTP2Server started xftpPort defaultHTTP2BufferSize defaultSupportedParams srvCreds transportConfig inactiveClientExpiration cleanup $ \sessionId sessionALPN r sendResponse -> do - reqBody <- getHTTP2Body r xftpBlockSize - let v = VersionXFTP 1 - thServerVRange = versionToRange v - thParams0 = THandleParams {sessionId, blockSize = xftpBlockSize, thVersion = v, thServerVRange, thAuth = Nothing, implySessId = False, encryptBlock = Nothing, batch = True, serviceAuth = False} - req0 = XFTPTransportRequest {thParams = thParams0, request = r, reqBody, sendResponse} - flip runReaderT env $ case sessionALPN of - Nothing -> processRequest req0 - Just alpn | alpn == xftpALPNv1 || alpn == httpALPN11 -> - xftpServerHandshakeV1 chain signKey sessions req0 >>= \case - Nothing -> pure () -- handshake response sent - Just thParams -> processRequest req0 {thParams} -- proceed with new version (XXX: may as well switch the request handler here) - _ -> liftIO . sendResponse $ H.responseNoBody N.ok200 [] -- shouldn't happen: means server picked handshake protocol it doesn't know about + srvParams = if isJust httpCreds_ then defaultSupportedParamsHTTPS else defaultSupportedParams + liftIO . 
runHTTP2Server started xftpPort defaultHTTP2BufferSize srvParams srvCreds httpCreds_ transportConfig inactiveClientExpiration cleanup $ \sniUsed sessionId sessionALPN r sendResponse -> do + let addCORS' = sniUsed && addCORSHeaders transportConfig + if addCORS' && H.requestMethod r == Just "OPTIONS" + then sendResponse $ H.responseNoBody N.ok200 corsPreflightHeaders + else do + reqBody <- getHTTP2Body r xftpBlockSize + let v = VersionXFTP 1 + thServerVRange = versionToRange v + thParams0 = THandleParams {sessionId, blockSize = xftpBlockSize, thVersion = v, thServerVRange, thAuth = Nothing, implySessId = False, encryptBlock = Nothing, batch = True, serviceAuth = False} + req0 = XFTPTransportRequest {thParams = thParams0, request = r, reqBody, sendResponse, sniUsed, addCORS = addCORS'} + flip runReaderT env $ case sessionALPN of + Nothing -> processRequest req0 + Just alpn + | alpn == xftpALPNv1 || alpn == httpALPN11 || (sniUsed && alpn == "h2") -> + xftpServerHandshakeV1 chain signKey sessions req0 >>= \case + Nothing -> pure () + Just thParams -> processRequest req0 {thParams} + | otherwise -> liftIO . 
sendResponse $ H.responseNoBody N.ok200 (corsHeaders addCORS') xftpServerHandshakeV1 :: X.CertificateChain -> C.APrivateSignKey -> TMap SessionId Handshake -> XFTPTransportRequest -> M (Maybe (THandleParams XFTPVersion 'TServer)) - xftpServerHandshakeV1 chain serverSignKey sessions XFTPTransportRequest {thParams = thParams0@THandleParams {sessionId}, reqBody = HTTP2Body {bodyHead}, sendResponse} = do + xftpServerHandshakeV1 chain serverSignKey sessions XFTPTransportRequest {thParams = thParams0@THandleParams {sessionId}, request, reqBody = HTTP2Body {bodyHead}, sendResponse, sniUsed, addCORS} = do s <- atomically $ TM.lookup sessionId sessions r <- runExceptT $ case s of - Nothing -> processHello - Just (HandshakeSent pk) -> processClientHandshake pk - Just (HandshakeAccepted thParams) -> pure $ Just thParams + Nothing + | sniUsed && not webHello -> throwE SESSION + | otherwise -> processHello Nothing + Just (HandshakeSent pk) + | webHello -> processHello (Just pk) + | otherwise -> processClientHandshake pk + Just (HandshakeAccepted thParams) + | webHello -> processHello (serverPrivKey <$> thAuth thParams) + | webHandshake, Just auth <- thAuth thParams -> processClientHandshake (serverPrivKey auth) + | otherwise -> pure $ Just thParams either sendError pure r where - processHello = do - unless (B.null bodyHead) $ throwE HANDSHAKE - (k, pk) <- atomically . 
C.generateKeyPair =<< asks random - atomically $ TM.insert sessionId (HandshakeSent pk) sessions + webHello = sniUsed && any (\(t, _) -> tokenKey t == "xftp-web-hello") (fst $ H.requestHeaders request) + webHandshake = sniUsed && any (\(t, _) -> tokenKey t == "xftp-handshake") (fst $ H.requestHeaders request) + processHello pk_ = do + challenge_ <- + if + | B.null bodyHead -> pure Nothing + | sniUsed -> do + body <- liftHS $ C.unPad bodyHead + XFTPClientHello {webChallenge} <- liftHS $ first show (smpDecode body) + pure webChallenge + | otherwise -> throwE HANDSHAKE + rng <- asks random + k <- atomically $ TM.lookup sessionId sessions >>= \case + Just (HandshakeSent pk') -> pure $ C.publicKey pk' + _ -> do + kp <- maybe (C.generateKeyPair rng) (\p -> pure (C.publicKey p, p)) pk_ + fst kp <$ TM.insert sessionId (HandshakeSent $ snd kp) sessions let authPubKey = CertChainPubKey chain (C.signX509 serverSignKey $ C.publicToX509 k) - let hs = XFTPServerHandshake {xftpVersionRange = xftpServerVRange, sessionId, authPubKey} + webIdentityProof = C.sign serverSignKey . (<> sessionId) <$> challenge_ + let hs = XFTPServerHandshake {xftpVersionRange = xftpServerVRange, sessionId, authPubKey, webIdentityProof} shs <- encodeXftp hs #ifdef slow_servers lift randomDelay #endif - liftIO . sendResponse $ H.responseBuilder N.ok200 [] shs + liftIO . sendResponse $ H.responseBuilder N.ok200 (corsHeaders addCORS) shs pure Nothing processClientHandshake pk = do unless (B.length bodyHead == xftpBlockSize) $ throwE HANDSHAKE @@ -174,13 +218,13 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira #ifdef slow_servers lift randomDelay #endif - liftIO . sendResponse $ H.responseNoBody N.ok200 [] + liftIO . sendResponse $ H.responseNoBody N.ok200 (corsHeaders addCORS) pure Nothing Nothing -> throwE HANDSHAKE sendError :: XFTPErrorType -> M (Maybe (THandleParams XFTPVersion 'TServer)) sendError err = do runExceptT (encodeXftp err) >>= \case - Right bs -> liftIO . 
sendResponse $ H.responseBuilder N.ok200 [] bs + Right bs -> liftIO . sendResponse $ H.responseBuilder N.ok200 (corsHeaders addCORS) bs Left _ -> logError $ "Error encoding handshake error: " <> tshow err pure Nothing encodeXftp :: Encoding a => a -> ExceptT XFTPErrorType (ReaderT XFTPEnv IO) Builder @@ -346,7 +390,7 @@ data ServerFile = ServerFile } processRequest :: XFTPTransportRequest -> M () -processRequest XFTPTransportRequest {thParams, reqBody = body@HTTP2Body {bodyHead}, sendResponse} +processRequest XFTPTransportRequest {thParams, reqBody = body@HTTP2Body {bodyHead}, sendResponse, addCORS} | B.length bodyHead /= xftpBlockSize = sendXFTPResponse ("", NoEntity, FRErr BLOCK) Nothing | otherwise = case xftpDecodeTServer thParams bodyHead of @@ -365,7 +409,7 @@ processRequest XFTPTransportRequest {thParams, reqBody = body@HTTP2Body {bodyHea #ifdef slow_servers randomDelay #endif - liftIO $ sendResponse $ H.responseStreaming N.ok200 [] $ streamBody t_ + liftIO $ sendResponse $ H.responseStreaming N.ok200 (corsHeaders addCORS) $ streamBody t_ where streamBody t_ send done = do case t_ of diff --git a/src/Simplex/FileTransfer/Server/Env.hs b/src/Simplex/FileTransfer/Server/Env.hs index 206a5b3753..389296a8f5 100644 --- a/src/Simplex/FileTransfer/Server/Env.hs +++ b/src/Simplex/FileTransfer/Server/Env.hs @@ -57,6 +57,7 @@ data XFTPServerConfig = XFTPServerConfig -- | time after which inactive clients can be disconnected and check interval, seconds inactiveClientExpiration :: Maybe ExpirationConfig, xftpCredentials :: ServerCredentials, + httpCredentials :: Maybe ServerCredentials, -- | XFTP client-server protocol version range xftpServerVRange :: VersionRangeXFTP, -- stats config - see SMP server config @@ -84,6 +85,7 @@ data XFTPEnv = XFTPEnv random :: TVar ChaChaDRG, serverIdentity :: C.KeyHash, tlsServerCreds :: T.Credential, + httpServerCreds :: Maybe T.Credential, serverStats :: FileServerStats } @@ -98,7 +100,7 @@ defaultFileExpiration = } newXFTPServerEnv 
:: XFTPServerConfig -> IO XFTPEnv -newXFTPServerEnv config@XFTPServerConfig {storeLogFile, fileSizeQuota, xftpCredentials} = do +newXFTPServerEnv config@XFTPServerConfig {storeLogFile, fileSizeQuota, xftpCredentials, httpCredentials} = do random <- C.newRandom store <- newFileStore storeLog <- mapM (`readWriteFileStore` store) storeLogFile @@ -108,9 +110,10 @@ newXFTPServerEnv config@XFTPServerConfig {storeLogFile, fileSizeQuota, xftpCrede logNote $ "Total / available storage: " <> tshow quota <> " / " <> tshow (quota - used) when (quota < used) $ logWarn "WARNING: storage quota is less than used storage, no files can be uploaded!" tlsServerCreds <- loadServerCredential xftpCredentials + httpServerCreds <- mapM loadServerCredential httpCredentials Fingerprint fp <- loadFingerprint xftpCredentials serverStats <- newFileServerStats =<< getCurrentTime - pure XFTPEnv {config, store, storeLog, random, tlsServerCreds, serverIdentity = C.KeyHash fp, serverStats} + pure XFTPEnv {config, store, storeLog, random, tlsServerCreds, httpServerCreds, serverIdentity = C.KeyHash fp, serverStats} countUsedStorage :: M.Map k FileRec -> Int64 countUsedStorage = M.foldl' (\acc FileRec {fileInfo = FileInfo {size}} -> acc + fromIntegral size) 0 diff --git a/src/Simplex/FileTransfer/Server/Main.hs b/src/Simplex/FileTransfer/Server/Main.hs index 944df1ca0e..1c09762f8f 100644 --- a/src/Simplex/FileTransfer/Server/Main.hs +++ b/src/Simplex/FileTransfer/Server/Main.hs @@ -12,7 +12,7 @@ import Data.Either (fromRight) import Data.Functor (($>)) import Data.Ini (lookupValue, readIniFile) import Data.Int (Int64) -import Data.Maybe (fromMaybe) +import Data.Maybe (fromMaybe, isJust) import qualified Data.Text as T import qualified Data.Text.IO as T import Network.Socket (HostName) @@ -21,7 +21,7 @@ import Simplex.FileTransfer.Chunks import Simplex.FileTransfer.Description (FileSize (..)) import Simplex.FileTransfer.Server (runXFTPServer) import Simplex.FileTransfer.Server.Env (XFTPServerConfig 
(..), defFileExpirationHours, defaultFileExpiration, defaultInactiveClientExpiration) -import Simplex.FileTransfer.Transport (supportedFileServerVRange, alpnSupportedXFTPhandshakes) +import Simplex.FileTransfer.Transport (alpnSupportedXFTPhandshakes, supportedFileServerVRange) import qualified Simplex.Messaging.Crypto as C import Simplex.Messaging.Encoding.String import Simplex.Messaging.Protocol (ProtoServerWithAuth (..), pattern XFTPServer) @@ -29,7 +29,7 @@ import Simplex.Messaging.Server.CLI import Simplex.Messaging.Server.Expiration import Simplex.Messaging.Transport.Client (TransportHost (..)) import Simplex.Messaging.Transport.HTTP2 (httpALPN) -import Simplex.Messaging.Transport.Server (ServerCredentials (..), mkTransportServerConfig) +import Simplex.Messaging.Transport.Server (ServerCredentials (..), TransportServerConfig (..), mkTransportServerConfig) import Simplex.Messaging.Util (eitherToMaybe, safeDecodeUtf8, tshow) import System.Directory (createDirectoryIfMissing, doesFileExist) import System.FilePath (combine) @@ -124,6 +124,10 @@ xftpServerCLI cfgPath logPath = do \disconnect: off\n" <> ("# ttl: " <> tshow (ttl defaultInactiveClientExpiration) <> "\n") <> ("# check_interval: " <> tshow (checkInterval defaultInactiveClientExpiration) <> "\n") + <> "\n\ + \[WEB]\n\ + \# cert: /etc/opt/simplex-xftp/web.crt\n\ + \# key: /etc/opt/simplex-xftp/web.key\n" runServer ini = do hSetBuffering stdout LineBuffering hSetBuffering stderr LineBuffering @@ -155,6 +159,17 @@ xftpServerCLI cfgPath logPath = do else "NOT allowed." putStrLn $ "Listening on port " <> xftpPort <> "..." 
+ httpCredentials_ = + eitherToMaybe $ do + cert <- T.unpack <$> lookupValue "WEB" "cert" ini + key <- T.unpack <$> lookupValue "WEB" "key" ini + pure + ServerCredentials + { caCertificateFile = Nothing, + certificateFile = cert, + privateKeyFile = key + } + serverConfig = XFTPServerConfig { xftpPort = T.unpack $ strictIni "TRANSPORT" "port" ini, @@ -186,6 +201,7 @@ xftpServerCLI cfgPath logPath = do privateKeyFile = c serverKeyFile, certificateFile = c serverCrtFile }, + httpCredentials = httpCredentials_, xftpServerVRange = supportedFileServerVRange, logStatsInterval = logStats $> 86400, -- seconds logStatsStartTime = 0, -- seconds from 00:00 UTC @@ -194,10 +210,12 @@ xftpServerCLI cfgPath logPath = do prometheusInterval = eitherToMaybe $ read . T.unpack <$> lookupValue "STORE_LOG" "prometheus_interval" ini, prometheusMetricsFile = combine logPath "xftp-server-metrics.txt", transportConfig = - mkTransportServerConfig - (fromMaybe False $ iniOnOff "TRANSPORT" "log_tls_errors" ini) - (Just $ alpnSupportedXFTPhandshakes <> httpALPN) - False, + let cfg = + mkTransportServerConfig + (fromMaybe False $ iniOnOff "TRANSPORT" "log_tls_errors" ini) + (Just $ alpnSupportedXFTPhandshakes <> httpALPN) + False + in cfg {addCORSHeaders = isJust httpCredentials_}, responseDelay = 0 } @@ -229,11 +247,14 @@ cliCommandP cfgPath logPath iniFile = initP :: Parser InitOptions initP = do enableStoreLog <- - flag' False + flag' + False ( long "disable-store-log" <> help "Disable store log for persistence (enabled by default)" ) - <|> flag True True + <|> flag + True + True ( long "store-log" <> short 'l' <> help "Enable store log for persistence (DEPRECATED, enabled by default)" diff --git a/src/Simplex/FileTransfer/Transport.hs b/src/Simplex/FileTransfer/Transport.hs index b7746f1cbe..d55b251483 100644 --- a/src/Simplex/FileTransfer/Transport.hs +++ b/src/Simplex/FileTransfer/Transport.hs @@ -19,6 +19,7 @@ module Simplex.FileTransfer.Transport -- xftpClientHandshake, 
XFTPServerHandshake (..), -- xftpServerHandshake, + XFTPClientHello (..), THandleXFTP, THandleParamsXFTP, VersionXFTP, @@ -35,6 +36,7 @@ module Simplex.FileTransfer.Transport ) where +import Control.Applicative (optional) import qualified Control.Exception as E import Control.Logger.Simple import Control.Monad @@ -60,7 +62,7 @@ import Simplex.Messaging.Parsers import Simplex.Messaging.Protocol (BlockingInfo, CommandError) import Simplex.Messaging.Transport (ALPN, CertChainPubKey, ServiceCredentials, SessionId, THandle (..), THandleParams (..), TransportError (..), TransportPeer (..)) import Simplex.Messaging.Transport.HTTP2.File -import Simplex.Messaging.Util (bshow, tshow) +import Simplex.Messaging.Util (bshow, tshow, (<$?>)) import Simplex.Messaging.Version import Simplex.Messaging.Version.Internal import System.IO (Handle, IOMode (..), withFile) @@ -111,11 +113,18 @@ alpnSupportedXFTPhandshakes = [xftpALPNv1] xftpALPNv1 :: ALPN xftpALPNv1 = "xftp/1" +data XFTPClientHello = XFTPClientHello + { -- | a random string sent by the client to the server to prove that server has identity certificate + webChallenge :: Maybe ByteString + } + data XFTPServerHandshake = XFTPServerHandshake { xftpVersionRange :: VersionRangeXFTP, sessionId :: SessionId, -- | pub key to agree shared secrets for command authorization and entity ID encryption. 
- authPubKey :: CertChainPubKey + authPubKey :: CertChainPubKey, + -- | signed identity challenge from XFTPClientHello + webIdentityProof :: Maybe C.ASignature } data XFTPClientHandshake = XFTPClientHandshake @@ -125,6 +134,14 @@ data XFTPClientHandshake = XFTPClientHandshake keyHash :: C.KeyHash } +instance Encoding XFTPClientHello where + smpEncode XFTPClientHello {webChallenge} = smpEncode webChallenge + smpP = do + webChallenge <- smpP + forM_ webChallenge $ \challenge -> unless (B.length challenge == 32) $ fail "bad XFTPClientHello webChallenge" + Tail _compat <- smpP + pure XFTPClientHello {webChallenge} + instance Encoding XFTPClientHandshake where smpEncode XFTPClientHandshake {xftpVersion, keyHash} = smpEncode (xftpVersion, keyHash) @@ -134,13 +151,13 @@ instance Encoding XFTPClientHandshake where pure XFTPClientHandshake {xftpVersion, keyHash} instance Encoding XFTPServerHandshake where - smpEncode XFTPServerHandshake {xftpVersionRange, sessionId, authPubKey} = - smpEncode (xftpVersionRange, sessionId, authPubKey) + smpEncode XFTPServerHandshake {xftpVersionRange, sessionId, authPubKey, webIdentityProof} = + smpEncode (xftpVersionRange, sessionId, authPubKey, C.signatureBytes webIdentityProof) smpP = do - (xftpVersionRange, sessionId) <- smpP - authPubKey <- smpP + (xftpVersionRange, sessionId, authPubKey) <- smpP + webIdentityProof <- optional $ C.decodeSignature <$?> smpP Tail _compat <- smpP - pure XFTPServerHandshake {xftpVersionRange, sessionId, authPubKey} + pure XFTPServerHandshake {xftpVersionRange, sessionId, authPubKey, webIdentityProof} sendEncFile :: Handle -> (Builder -> IO ()) -> LC.SbState -> Word32 -> IO () sendEncFile h send = go diff --git a/src/Simplex/FileTransfer/Types.hs b/src/Simplex/FileTransfer/Types.hs index aa465a12ec..8c48b25c46 100644 --- a/src/Simplex/FileTransfer/Types.hs +++ b/src/Simplex/FileTransfer/Types.hs @@ -10,6 +10,7 @@ import qualified Data.Aeson.TH as J import qualified Data.Attoparsec.ByteString.Char8 as A import 
Data.ByteString.Char8 (ByteString) import Data.Int (Int64) +import Data.Text (Text) import qualified Data.Text as T import Data.Text.Encoding (encodeUtf8) import Data.Word (Word32) @@ -33,8 +34,8 @@ authTagSize = fromIntegral C.authTagSize -- fileExtra is added to allow header extension in future versions data FileHeader = FileHeader - { fileName :: String, - fileExtra :: Maybe String + { fileName :: Text, + fileExtra :: Maybe Text } deriving (Eq, Show) diff --git a/src/Simplex/Messaging/Encoding.hs b/src/Simplex/Messaging/Encoding.hs index ef0033dfb6..d069e5518a 100644 --- a/src/Simplex/Messaging/Encoding.hs +++ b/src/Simplex/Messaging/Encoding.hs @@ -24,6 +24,8 @@ import Data.Bits (shiftL, shiftR, (.|.)) import Data.ByteString.Char8 (ByteString) import qualified Data.ByteString.Char8 as B import Data.ByteString.Internal (c2w, w2c) +import Data.Text (Text) +import Data.Text.Encoding (decodeUtf8', encodeUtf8) import Data.Int (Int64) import qualified Data.List.NonEmpty as L import Data.Time.Clock.System (SystemTime (..)) @@ -156,6 +158,12 @@ smpEncodeList xs = B.cons (lenEncode $ length xs) . B.concat $ map smpEncode xs smpListP :: Encoding a => Parser [a] smpListP = (`A.count` smpP) =<< lenP +instance Encoding Text where + smpEncode = smpEncode . encodeUtf8 + {-# INLINE smpEncode #-} + smpP = either (fail . show) pure . decodeUtf8' =<< smpP + {-# INLINE smpP #-} + instance Encoding String where smpEncode = smpEncode . 
B.pack {-# INLINE smpEncode #-} diff --git a/src/Simplex/Messaging/Transport/HTTP2/Server.hs b/src/Simplex/Messaging/Transport/HTTP2/Server.hs index 7152eb5a9e..8ece9488b2 100644 --- a/src/Simplex/Messaging/Transport/HTTP2/Server.hs +++ b/src/Simplex/Messaging/Transport/HTTP2/Server.hs @@ -16,7 +16,7 @@ import Numeric.Natural (Natural) import Simplex.Messaging.Server.Expiration import Simplex.Messaging.Transport (ALPN, SessionId, TLS, closeConnection, tlsALPN, tlsUniq) import Simplex.Messaging.Transport.HTTP2 -import Simplex.Messaging.Transport.Server (ServerCredentials, TransportServerConfig (..), loadServerCredential, runTransportServer) +import Simplex.Messaging.Transport.Server (SNICredentialUsed, ServerCredentials, TLSServerCredential (..), TransportServerConfig (..), loadServerCredential, newSocketState, runTransportServerState_) import Simplex.Messaging.Util (threadDelay') import UnliftIO (finally) import UnliftIO.Concurrent (forkIO, killThread) @@ -54,7 +54,7 @@ getHTTP2Server HTTP2ServerConfig {qSize, http2Port, bufferSize, bodyHeadSize, se started <- newEmptyTMVarIO reqQ <- newTBQueueIO qSize action <- async $ - runHTTP2Server started http2Port bufferSize serverSupported srvCreds transportConfig Nothing (const $ pure ()) $ \sessionId sessionALPN r sendResponse -> do + runHTTP2Server started http2Port bufferSize serverSupported srvCreds Nothing transportConfig Nothing (const $ pure ()) $ \_sniUsed sessionId sessionALPN r sendResponse -> do reqBody <- getHTTP2Body r bodyHeadSize atomically $ writeTBQueue reqQ HTTP2Request {sessionId, sessionALPN, request = r, reqBody, sendResponse} void . atomically $ takeTMVar started @@ -63,24 +63,33 @@ getHTTP2Server HTTP2ServerConfig {qSize, http2Port, bufferSize, bodyHeadSize, se closeHTTP2Server :: HTTP2Server -> IO () closeHTTP2Server = uninterruptibleCancel . 
action -runHTTP2Server :: TMVar Bool -> ServiceName -> BufferSize -> T.Supported -> T.Credential -> TransportServerConfig -> Maybe ExpirationConfig -> (SessionId -> IO ()) -> HTTP2ServerFunc -> IO () -runHTTP2Server started port bufferSize srvSupported srvCreds transportConfig expCfg_ clientFinished = runHTTP2ServerWith_ expCfg_ clientFinished bufferSize setup +runHTTP2Server :: TMVar Bool -> ServiceName -> BufferSize -> T.Supported -> T.Credential -> Maybe T.Credential -> TransportServerConfig -> Maybe ExpirationConfig -> (SessionId -> IO ()) -> (SNICredentialUsed -> HTTP2ServerFunc) -> IO () +runHTTP2Server started port bufferSize srvSupported srvCreds httpCreds_ transportConfig expCfg_ clientFinished = runHTTP2ServerWith_ expCfg_ clientFinished bufferSize setup where - setup = runTransportServer started port srvSupported srvCreds transportConfig + setup handler = do + ss <- newSocketState + let combinedCreds = TLSServerCredential {credential = srvCreds, sniCredential = httpCreds_} + runTransportServerState_ ss started port srvSupported combinedCreds transportConfig $ \_ -> handler -- HTTP2 server can be run on both client and server TLS connections. 
runHTTP2ServerWith :: BufferSize -> ((TLS p -> IO ()) -> a) -> HTTP2ServerFunc -> a -runHTTP2ServerWith = runHTTP2ServerWith_ Nothing (\_sessId -> pure ()) +runHTTP2ServerWith bufferSize tlsSetup http2Server = + runHTTP2ServerWith_ + Nothing + (\_sessId -> pure ()) + bufferSize + (\handler -> tlsSetup $ \tls -> handler (False, tls)) + (const http2Server) -runHTTP2ServerWith_ :: Maybe ExpirationConfig -> (SessionId -> IO ()) -> BufferSize -> ((TLS p -> IO ()) -> a) -> HTTP2ServerFunc -> a -runHTTP2ServerWith_ expCfg_ clientFinished bufferSize setup http2Server = setup $ \tls -> do +runHTTP2ServerWith_ :: Maybe ExpirationConfig -> (SessionId -> IO ()) -> BufferSize -> (((SNICredentialUsed, TLS p) -> IO ()) -> a) -> (SNICredentialUsed -> HTTP2ServerFunc) -> a +runHTTP2ServerWith_ expCfg_ clientFinished bufferSize setup http2Server = setup $ \(sniUsed, tls) -> do activeAt <- newTVarIO =<< getSystemTime tid_ <- mapM (forkIO . expireInactiveClient tls activeAt) expCfg_ - withHTTP2 bufferSize (run tls activeAt) (clientFinished $ tlsUniq tls) tls `finally` mapM_ killThread tid_ + withHTTP2 bufferSize (run sniUsed tls activeAt) (clientFinished $ tlsUniq tls) tls `finally` mapM_ killThread tid_ where - run tls activeAt cfg = H.run cfg $ \req _aux sendResp -> do + run sniUsed tls activeAt cfg = H.run cfg $ \req _aux sendResp -> do getSystemTime >>= atomically . 
writeTVar activeAt - http2Server (tlsUniq tls) (tlsALPN tls) req (`sendResp` []) + http2Server sniUsed (tlsUniq tls) (tlsALPN tls) req (`sendResp` []) expireInactiveClient tls activeAt expCfg = loop where loop = do diff --git a/src/Simplex/Messaging/Transport/Server.hs b/src/Simplex/Messaging/Transport/Server.hs index 00b94ddc5b..cdfc300b71 100644 --- a/src/Simplex/Messaging/Transport/Server.hs +++ b/src/Simplex/Messaging/Transport/Server.hs @@ -11,6 +11,7 @@ module Simplex.Messaging.Transport.Server ( TransportServerConfig (..), ServerCredentials (..), TLSServerCredential (..), + SNICredentialUsed, AddHTTP, mkTransportServerConfig, runTransportServerState, @@ -62,6 +63,7 @@ data TransportServerConfig = TransportServerConfig { logTLSErrors :: Bool, serverALPN :: Maybe [ALPN], askClientCert :: Bool, + addCORSHeaders :: Bool, tlsSetupTimeout :: Int, transportTimeout :: Int } @@ -91,6 +93,7 @@ mkTransportServerConfig logTLSErrors serverALPN askClientCert = { logTLSErrors, serverALPN, askClientCert, + addCORSHeaders = False, tlsSetupTimeout = 60000000, transportTimeout = 40000000 } @@ -274,9 +277,10 @@ paramsAskClientCert clientCert params = { T.serverWantClientCert = True, T.serverHooks = (T.serverHooks params) - { T.onClientCertificate = \cc -> validateClientCertificate cc >>= \case - Just reason -> T.CertificateUsageReject reason <$ atomically (tryPutTMVar clientCert Nothing) - Nothing -> T.CertificateUsageAccept <$ atomically (tryPutTMVar clientCert $ Just cc) + { T.onClientCertificate = \cc -> + validateClientCertificate cc >>= \case + Just reason -> T.CertificateUsageReject reason <$ atomically (tryPutTMVar clientCert Nothing) + Nothing -> T.CertificateUsageAccept <$ atomically (tryPutTMVar clientCert $ Just cc) } } diff --git a/tests/Test.hs b/tests/Test.hs index 2ed0bda9e6..dcc5de3fb1 100644 --- a/tests/Test.hs +++ b/tests/Test.hs @@ -35,6 +35,7 @@ import Util import XFTPAgent import XFTPCLI import XFTPServerTests (xftpServerTests) +import XFTPWebTests 
(xftpWebTests) #if defined(dbPostgres) import Fixtures @@ -149,6 +150,7 @@ main = do describe "XFTP file description" fileDescriptionTests describe "XFTP CLI" xftpCLITests describe "XFTP agent" xftpAgentTests + describe "XFTP Web Client" xftpWebTests describe "XRCP" remoteControlTests describe "Server CLIs" cliTests diff --git a/tests/XFTPCLI.hs b/tests/XFTPCLI.hs index 0f308b61a7..d6c97d73ce 100644 --- a/tests/XFTPCLI.hs +++ b/tests/XFTPCLI.hs @@ -48,12 +48,14 @@ testXFTPCLISendReceive = withXFTPServer $ do fdSnd = filePath <> ".xftp" "snd.xftp.private" progress : sendResult <- xftpCLI ["send", filePath, senderFiles, "-n", "2", "-s", testXFTPServerStr, "--tmp=tests/tmp"] progress `shouldSatisfy` uploadProgress - sendResult + let (sendInfo, sendRest) = splitAt 4 sendResult + sendInfo `shouldBe` [ "Sender file description: " <> fdSnd, "Pass file descriptions to the recipient(s):", fdRcv1, fdRcv2 ] + sendRest `shouldSatisfy` any ("https://" `isPrefixOf`) testInfoFile fdRcv1 "Recipient" testReceiveFile fdRcv1 "testfile" file testInfoFile fdRcv2 "Recipient" @@ -82,12 +84,14 @@ testXFTPCLISendReceive2servers = withXFTPServer . withXFTPServer2 $ do fdSnd = filePath <> ".xftp" "snd.xftp.private" progress : sendResult <- xftpCLI ["send", filePath, senderFiles, "-n", "2", "-s", testXFTPServerStr <> ";" <> testXFTPServerStr2, "--tmp=tests/tmp"] progress `shouldSatisfy` uploadProgress - sendResult + let (sendInfo, sendRest) = splitAt 4 sendResult + sendInfo `shouldBe` [ "Sender file description: " <> fdSnd, "Pass file descriptions to the recipient(s):", fdRcv1, fdRcv2 ] + sendRest `shouldSatisfy` any ("https://" `isPrefixOf`) testReceiveFile fdRcv1 "testfile" file testReceiveFile fdRcv2 "testfile_1" file where @@ -118,12 +122,14 @@ testXFTPCLIDelete = withXFTPServer . 
withXFTPServer2 $ do fdSnd = filePath <> ".xftp" "snd.xftp.private" progress : sendResult <- xftpCLI ["send", filePath, senderFiles, "-n", "2", "-s", testXFTPServerStr <> ";" <> testXFTPServerStr2, "--tmp=tests/tmp"] progress `shouldSatisfy` uploadProgress - sendResult + let (sendInfo, sendRest) = splitAt 4 sendResult + sendInfo `shouldBe` [ "Sender file description: " <> fdSnd, "Pass file descriptions to the recipient(s):", fdRcv1, fdRcv2 ] + sendRest `shouldSatisfy` any ("https://" `isPrefixOf`) xftpCLI ["del", fdRcv1] `shouldThrow` anyException progress1 : recvResult <- xftpCLI ["recv", fdRcv1, recipientFiles, "--tmp=tests/tmp", "-y"] diff --git a/tests/XFTPClient.hs b/tests/XFTPClient.hs index bd62afa068..f0d1e3a61d 100644 --- a/tests/XFTPClient.hs +++ b/tests/XFTPClient.hs @@ -15,8 +15,9 @@ import Simplex.FileTransfer.Client import Simplex.FileTransfer.Description import Simplex.FileTransfer.Server (runXFTPServerBlocking) import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..), defaultFileExpiration, defaultInactiveClientExpiration) -import Simplex.FileTransfer.Transport (supportedFileServerVRange, alpnSupportedXFTPhandshakes) +import Simplex.FileTransfer.Transport (alpnSupportedXFTPhandshakes, supportedFileServerVRange) import Simplex.Messaging.Protocol (XFTPServer) +import Simplex.Messaging.Transport.HTTP2 (httpALPN) import Simplex.Messaging.Transport.Server import Test.Hspec hiding (fit, it) @@ -125,6 +126,7 @@ testXFTPServerConfig = privateKeyFile = "tests/fixtures/server.key", certificateFile = "tests/fixtures/server.crt" }, + httpCredentials = Nothing, xftpServerVRange = supportedFileServerVRange, logStatsInterval = Nothing, logStatsStartTime = 0, @@ -148,3 +150,44 @@ testXFTPClientWith cfg client = do getXFTPClient (1, testXFTPServer, Nothing) cfg [] ts (\_ -> pure ()) >>= \case Right c -> client c Left e -> error $ show e + +testXFTPServerConfigSNI :: XFTPServerConfig +testXFTPServerConfigSNI = + testXFTPServerConfig + { httpCredentials = + Just 
+ ServerCredentials + { caCertificateFile = Nothing, + privateKeyFile = "tests/fixtures/web.key", + certificateFile = "tests/fixtures/web.crt" + }, + transportConfig = + (mkTransportServerConfig True (Just $ alpnSupportedXFTPhandshakes <> httpALPN) False) + { addCORSHeaders = True + } + } + +withXFTPServerSNI :: HasCallStack => (HasCallStack => ThreadId -> IO a) -> IO a +withXFTPServerSNI = withXFTPServerCfg testXFTPServerConfigSNI + +testXFTPServerConfigEd25519SNI :: XFTPServerConfig +testXFTPServerConfigEd25519SNI = + testXFTPServerConfig + { xftpCredentials = + ServerCredentials + { caCertificateFile = Just "tests/fixtures/ed25519/ca.crt", + privateKeyFile = "tests/fixtures/ed25519/server.key", + certificateFile = "tests/fixtures/ed25519/server.crt" + }, + httpCredentials = + Just + ServerCredentials + { caCertificateFile = Nothing, + privateKeyFile = "tests/fixtures/web.key", + certificateFile = "tests/fixtures/web.crt" + }, + transportConfig = + (mkTransportServerConfig True (Just $ alpnSupportedXFTPhandshakes <> httpALPN) False) + { addCORSHeaders = True + } + } diff --git a/tests/XFTPServerTests.hs b/tests/XFTPServerTests.hs index c1d34177f3..0af3d7ecaa 100644 --- a/tests/XFTPServerTests.hs +++ b/tests/XFTPServerTests.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DataKinds #-} {-# LANGUAGE DuplicateRecordFields #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE OverloadedLists #-} @@ -13,23 +14,37 @@ import Control.Exception (SomeException) import Control.Monad import Control.Monad.Except import Control.Monad.IO.Unlift +import qualified Crypto.PubKey.RSA as RSA import qualified Data.ByteString.Base64.URL as B64 +import Data.ByteString.Builder (byteString) import Data.ByteString.Char8 (ByteString) import qualified Data.ByteString.Char8 as B import qualified Data.ByteString.Lazy.Char8 as LB -import Data.List (isInfixOf) +import qualified Data.CaseInsensitive as CI +import Data.List (find, isInfixOf) import Data.Time.Clock (getCurrentTime) +import qualified Data.X509 as X 
+import Data.X509.Validation (Fingerprint (..), getFingerprint) +import Network.HPACK.Token (tokenKey) +import qualified Network.HTTP2.Client as H2 import ServerTests (logSize) import Simplex.FileTransfer.Client import Simplex.FileTransfer.Description (kb) -import Simplex.FileTransfer.Protocol (FileInfo (..), XFTPFileId) +import Simplex.FileTransfer.Protocol (FileInfo (..), XFTPFileId, xftpBlockSize) import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..)) -import Simplex.FileTransfer.Transport (XFTPErrorType (..), XFTPRcvChunkSpec (..)) +import Simplex.FileTransfer.Transport (XFTPClientHandshake (..), XFTPClientHello (..), XFTPErrorType (..), XFTPRcvChunkSpec (..), XFTPServerHandshake (..), pattern VersionXFTP) import Simplex.Messaging.Client (ProtocolClientError (..)) import qualified Simplex.Messaging.Crypto as C import qualified Simplex.Messaging.Crypto.Lazy as LC +import Simplex.Messaging.Encoding (smpDecode, smpEncode) import Simplex.Messaging.Protocol (BasicAuth, EntityId (..), pattern NoEntity) import Simplex.Messaging.Server.Expiration (ExpirationConfig (..)) +import Simplex.Messaging.Transport (CertChainPubKey (..), TLS (..), TransportPeer (..), defaultSupportedParams, defaultSupportedParamsHTTPS) +import Simplex.Messaging.Transport.Client (TransportClientConfig (..), TransportHost (..), defaultTransportClientConfig, runTLSTransportClient) +import Simplex.Messaging.Transport.HTTP2 (HTTP2Body (..)) +import qualified Simplex.Messaging.Transport.HTTP2.Client as HC +import Simplex.Messaging.Transport.Server (loadFileFingerprint) +import Simplex.Messaging.Transport.Shared (ChainCertificates (..), chainIdCaCerts) import System.Directory (createDirectoryIfMissing, removeDirectoryRecursive, removeFile) import System.FilePath (()) import Test.Hspec hiding (fit, it) @@ -39,10 +54,8 @@ import XFTPClient xftpServerTests :: Spec xftpServerTests = - before_ (createDirectoryIfMissing False xftpServerFiles) - . after_ (removeDirectoryRecursive xftpServerFiles) - . 
describe "XFTP file chunk delivery" - $ do + before_ (createDirectoryIfMissing False xftpServerFiles) . after_ (removeDirectoryRecursive xftpServerFiles) $ do + describe "XFTP file chunk delivery" $ do it "should create, upload and receive file chunk (1 client)" testFileChunkDelivery it "should create, upload and receive file chunk (2 clients)" testFileChunkDelivery2 it "should create, add recipients, upload and receive file chunk" testFileChunkDeliveryAddRecipients @@ -63,6 +76,16 @@ xftpServerTests = it "allowed with correct basic auth" $ testFileBasicAuth True (Just "pwd") (Just "pwd") True it "allowed with auth on server without auth" $ testFileBasicAuth True Nothing (Just "any") True it "should not change content for uploaded and committed files" testFileSkipCommitted + describe "XFTP SNI and CORS" $ do + it "should select web certificate when SNI is used" testSNICertSelection + it "should select XFTP certificate when SNI is not used" testNoSNICertSelection + it "should add CORS headers when SNI is used" testCORSHeaders + it "should respond to OPTIONS preflight with CORS headers" testCORSPreflight + it "should not add CORS headers without SNI" testNoCORSWithoutSNI + it "should upload and receive file chunk through SNI-enabled server" testFileChunkDeliverySNI + it "should complete web handshake with challenge-response" testWebHandshake + it "should re-handshake on same connection with xftp-web-hello header" testWebReHandshake + it "should return padded SESSION error for stale web session" testStaleWebSession chSize :: Integral a => a chSize = kb 128 @@ -395,3 +418,183 @@ testFileSkipCommitted = uploadXFTPChunk c spKey sId chunkSpec -- upload again to get FROk without getting stuck downloadXFTPChunk g c rpKey rId $ XFTPRcvChunkSpec "tests/tmp/received_chunk" chSize digest liftIO $ B.readFile "tests/tmp/received_chunk" `shouldReturn` bytes -- new chunk content got ignored + +-- SNI and CORS tests + +lookupResponseHeader :: B.ByteString -> H2.Response -> Maybe 
B.ByteString +lookupResponseHeader name resp = + snd <$> find (\(t, _) -> tokenKey t == CI.mk name) (fst $ H2.responseHeaders resp) + +getCerts :: TLS 'TClient -> [X.Certificate] +getCerts tls = + let X.CertificateChain cc = tlsPeerCert tls + in map (X.signedObject . X.getSigned) cc + +testSNICertSelection :: Expectation +testSNICertSelection = + withXFTPServerSNI $ \_ -> do + Fingerprint fpHTTP <- loadFileFingerprint "tests/fixtures/web_ca.crt" + let caHTTP = C.KeyHash fpHTTP + cfg = defaultTransportClientConfig {clientALPN = Just ["h2"], useSNI = True} + runTLSTransportClient defaultSupportedParamsHTTPS Nothing cfg Nothing "localhost" xftpTestPort (Just caHTTP) $ \(tls :: TLS 'TClient) -> do + tlsALPN tls `shouldBe` Just "h2" + case getCerts tls of + X.Certificate {X.certPubKey = X.PubKeyRSA rsa} : _ -> RSA.public_size rsa `shouldSatisfy` (> 0) + leaf : _ -> expectationFailure $ "Expected RSA cert, got: " <> show (X.certPubKey leaf) + [] -> expectationFailure "Empty certificate chain" + +testNoSNICertSelection :: Expectation +testNoSNICertSelection = + withXFTPServerSNI $ \_ -> do + Fingerprint fpXFTP <- loadFileFingerprint "tests/fixtures/ca.crt" + let caXFTP = C.KeyHash fpXFTP + cfg = defaultTransportClientConfig {clientALPN = Just ["xftp/1"], useSNI = False} + runTLSTransportClient defaultSupportedParams Nothing cfg Nothing "localhost" xftpTestPort (Just caXFTP) $ \(tls :: TLS 'TClient) -> do + tlsALPN tls `shouldBe` Just "xftp/1" + case getCerts tls of + X.Certificate {X.certPubKey = X.PubKeyEd448 _} : _ -> pure () + leaf : _ -> expectationFailure $ "Expected Ed448 cert, got: " <> show (X.certPubKey leaf) + [] -> expectationFailure "Empty certificate chain" + +testCORSHeaders :: Expectation +testCORSHeaders = + withXFTPServerSNI $ \_ -> do + Fingerprint fpHTTP <- loadFileFingerprint "tests/fixtures/web_ca.crt" + let caHTTP = C.KeyHash fpHTTP + cfg = defaultTransportClientConfig {clientALPN = Just ["h2"], useSNI = True} + runTLSTransportClient 
defaultSupportedParamsHTTPS Nothing cfg Nothing "localhost" xftpTestPort (Just caHTTP) $ \(tls :: TLS 'TClient) -> do + let h2cfg = HC.defaultHTTP2ClientConfig {HC.bodyHeadSize = 65536} + h2 <- either (error . show) pure =<< HC.attachHTTP2Client h2cfg (THDomainName "localhost") xftpTestPort mempty 65536 tls + let req = H2.requestNoBody "POST" "/" [] + HC.HTTP2Response {HC.response} <- either (error . show) pure =<< HC.sendRequest h2 req (Just 5000000) + lookupResponseHeader "access-control-allow-origin" response `shouldBe` Just "*" + lookupResponseHeader "access-control-expose-headers" response `shouldBe` Just "*" + +testCORSPreflight :: Expectation +testCORSPreflight = + withXFTPServerSNI $ \_ -> do + Fingerprint fpHTTP <- loadFileFingerprint "tests/fixtures/web_ca.crt" + let caHTTP = C.KeyHash fpHTTP + cfg = defaultTransportClientConfig {clientALPN = Just ["h2"], useSNI = True} + runTLSTransportClient defaultSupportedParamsHTTPS Nothing cfg Nothing "localhost" xftpTestPort (Just caHTTP) $ \(tls :: TLS 'TClient) -> do + let h2cfg = HC.defaultHTTP2ClientConfig {HC.bodyHeadSize = 65536} + h2 <- either (error . show) pure =<< HC.attachHTTP2Client h2cfg (THDomainName "localhost") xftpTestPort mempty 65536 tls + let req = H2.requestNoBody "OPTIONS" "/" [] + HC.HTTP2Response {HC.response} <- either (error . 
show) pure =<< HC.sendRequest h2 req (Just 5000000) + lookupResponseHeader "access-control-allow-origin" response `shouldBe` Just "*" + lookupResponseHeader "access-control-allow-methods" response `shouldBe` Just "POST, OPTIONS" + lookupResponseHeader "access-control-allow-headers" response `shouldBe` Just "*" + lookupResponseHeader "access-control-max-age" response `shouldBe` Just "86400" + +testNoCORSWithoutSNI :: Expectation +testNoCORSWithoutSNI = + withXFTPServerSNI $ \_ -> do + Fingerprint fpXFTP <- loadFileFingerprint "tests/fixtures/ca.crt" + let caXFTP = C.KeyHash fpXFTP + cfg = defaultTransportClientConfig {clientALPN = Just ["xftp/1"], useSNI = False} + runTLSTransportClient defaultSupportedParams Nothing cfg Nothing "localhost" xftpTestPort (Just caXFTP) $ \(tls :: TLS 'TClient) -> do + let h2cfg = HC.defaultHTTP2ClientConfig {HC.bodyHeadSize = 65536} + h2 <- either (error . show) pure =<< HC.attachHTTP2Client h2cfg (THDomainName "localhost") xftpTestPort mempty 65536 tls + let req = H2.requestNoBody "POST" "/" [] + HC.HTTP2Response {HC.response} <- either (error . show) pure =<< HC.sendRequest h2 req (Just 5000000) + lookupResponseHeader "access-control-allow-origin" response `shouldBe` Nothing + +testFileChunkDeliverySNI :: Expectation +testFileChunkDeliverySNI = + withXFTPServerSNI $ \_ -> testXFTPClient $ \c -> runRight_ $ runTestFileChunkDelivery c c + +testWebHandshake :: Expectation +testWebHandshake = + withXFTPServerSNI $ \_ -> do + Fingerprint fpWeb <- loadFileFingerprint "tests/fixtures/web_ca.crt" + Fingerprint fpXFTP <- loadFileFingerprint "tests/fixtures/ca.crt" + let webCaHash = C.KeyHash fpWeb + keyHash = C.KeyHash fpXFTP + cfg = defaultTransportClientConfig {clientALPN = Just ["h2"], useSNI = True} + runTLSTransportClient defaultSupportedParamsHTTPS Nothing cfg Nothing "localhost" xftpTestPort (Just webCaHash) $ \(tls :: TLS 'TClient) -> do + let h2cfg = HC.defaultHTTP2ClientConfig {HC.bodyHeadSize = 65536} + h2 <- either (error . 
show) pure =<< HC.attachHTTP2Client h2cfg (THDomainName "localhost") xftpTestPort mempty 65536 tls + -- Send web challenge as XFTPClientHello + g <- C.newRandom + challenge <- atomically $ C.randomBytes 32 g + helloBody <- either (error . show) pure $ C.pad (smpEncode (XFTPClientHello {webChallenge = Just challenge})) xftpBlockSize + let helloReq = H2.requestBuilder "POST" "/" [("xftp-web-hello", "1")] $ byteString helloBody + resp1 <- either (error . show) pure =<< HC.sendRequest h2 helloReq (Just 5000000) + let serverHsBody = bodyHead (HC.respBody resp1) + -- Decode server handshake + serverHsDecoded <- either (error . show) pure $ C.unPad serverHsBody + XFTPServerHandshake {sessionId, authPubKey = CertChainPubKey {certChain, signedPubKey}, webIdentityProof} <- + either error pure $ smpDecode serverHsDecoded + sig <- maybe (error "expected webIdentityProof") pure webIdentityProof + -- Verify cert chain identity + (leafCert, idCert) <- case chainIdCaCerts certChain of + CCValid {leafCert, idCert} -> pure (leafCert, idCert) + _ -> error "expected CCValid chain" + let Fingerprint idCertFP = getFingerprint idCert X.HashSHA256 + C.KeyHash idCertFP `shouldBe` keyHash + -- Verify challenge signature (identity proof) + leafPubKey <- either error pure $ C.x509ToPublic' $ X.certPubKey $ X.signedObject $ X.getSigned leafCert + C.verify leafPubKey sig (challenge <> sessionId) `shouldBe` True + -- Verify signedPubKey (DH key auth) + void $ either error pure $ C.verifyX509 leafPubKey signedPubKey + -- Send client handshake to complete the exchange (challenge already proven via webIdentityProof above) + let clientHs = XFTPClientHandshake {xftpVersion = VersionXFTP 1, keyHash} + clientHsPadded <- either (error . show) pure $ C.pad (smpEncode clientHs) xftpBlockSize + let clientHsReq = H2.requestBuilder "POST" "/" [] $ byteString clientHsPadded + resp2 <- either (error . 
show) pure =<< HC.sendRequest h2 clientHsReq (Just 5000000) + let ackBody = bodyHead (HC.respBody resp2) + B.length ackBody `shouldBe` 0 + +testWebReHandshake :: Expectation +testWebReHandshake = + withXFTPServerSNI $ \_ -> do + Fingerprint fpWeb <- loadFileFingerprint "tests/fixtures/web_ca.crt" + Fingerprint fpXFTP <- loadFileFingerprint "tests/fixtures/ca.crt" + let webCaHash = C.KeyHash fpWeb + keyHash = C.KeyHash fpXFTP + cfg = defaultTransportClientConfig {clientALPN = Just ["h2"], useSNI = True} + runTLSTransportClient defaultSupportedParamsHTTPS Nothing cfg Nothing "localhost" xftpTestPort (Just webCaHash) $ \(tls :: TLS 'TClient) -> do + let h2cfg = HC.defaultHTTP2ClientConfig {HC.bodyHeadSize = 65536} + h2 <- either (error . show) pure =<< HC.attachHTTP2Client h2cfg (THDomainName "localhost") xftpTestPort mempty 65536 tls + g <- C.newRandom + -- First handshake + challenge1 <- atomically $ C.randomBytes 32 g + helloBody1 <- either (error . show) pure $ C.pad (smpEncode (XFTPClientHello {webChallenge = Just challenge1})) xftpBlockSize + let helloReq1 = H2.requestBuilder "POST" "/" [("xftp-web-hello", "1")] $ byteString helloBody1 + resp1 <- either (error . show) pure =<< HC.sendRequest h2 helloReq1 (Just 5000000) + serverHs1 <- either (error . show) pure $ C.unPad (bodyHead (HC.respBody resp1)) + XFTPServerHandshake {sessionId = sid1} <- either error pure $ smpDecode serverHs1 + clientHsPadded <- either (error . show) pure $ C.pad (smpEncode (XFTPClientHandshake {xftpVersion = VersionXFTP 1, keyHash})) xftpBlockSize + resp1b <- either (error . show) pure =<< HC.sendRequest h2 (H2.requestBuilder "POST" "/" [] $ byteString clientHsPadded) (Just 5000000) + B.length (bodyHead (HC.respBody resp1b)) `shouldBe` 0 + -- Re-handshake on same connection with xftp-web-hello header + challenge2 <- atomically $ C.randomBytes 32 g + helloBody2 <- either (error . 
show) pure $ C.pad (smpEncode (XFTPClientHello {webChallenge = Just challenge2})) xftpBlockSize + let helloReq2 = H2.requestBuilder "POST" "/" [("xftp-web-hello", "1")] $ byteString helloBody2 + resp2 <- either (error . show) pure =<< HC.sendRequest h2 helloReq2 (Just 5000000) + serverHs2 <- either (error . show) pure $ C.unPad (bodyHead (HC.respBody resp2)) + XFTPServerHandshake {sessionId = sid2} <- either error pure $ smpDecode serverHs2 + sid2 `shouldBe` sid1 + -- Complete re-handshake + resp2b <- either (error . show) pure =<< HC.sendRequest h2 (H2.requestBuilder "POST" "/" [] $ byteString clientHsPadded) (Just 5000000) + B.length (bodyHead (HC.respBody resp2b)) `shouldBe` 0 + +testStaleWebSession :: Expectation +testStaleWebSession = + withXFTPServerSNI $ \_ -> do + Fingerprint fpWeb <- loadFileFingerprint "tests/fixtures/web_ca.crt" + let webCaHash = C.KeyHash fpWeb + cfg = defaultTransportClientConfig {clientALPN = Just ["h2"], useSNI = True} + runTLSTransportClient defaultSupportedParamsHTTPS Nothing cfg Nothing "localhost" xftpTestPort (Just webCaHash) $ \(tls :: TLS 'TClient) -> do + let h2cfg = HC.defaultHTTP2ClientConfig {HC.bodyHeadSize = 65536} + h2 <- either (error . show) pure =<< HC.attachHTTP2Client h2cfg (THDomainName "localhost") xftpTestPort mempty 65536 tls + -- Send a command on web connection without doing hello (no xftp-web-hello header) + dummyBody <- either (error . show) pure $ C.pad "PING" xftpBlockSize + let req = H2.requestBuilder "POST" "/" [] $ byteString dummyBody + resp <- either (error . show) pure =<< HC.sendRequest h2 req (Just 5000000) + let respBody = bodyHead (HC.respBody resp) + -- Server should return padded SESSION error + B.length respBody `shouldBe` xftpBlockSize + decoded <- either (error . 
show) pure $ C.unPad respBody + decoded `shouldBe` smpEncode SESSION + diff --git a/tests/XFTPWebTests.hs b/tests/XFTPWebTests.hs new file mode 100644 index 0000000000..fa402ad6f2 --- /dev/null +++ b/tests/XFTPWebTests.hs @@ -0,0 +1,3247 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE ScopedTypeVariables #-} + +-- | Per-function tests for the xftp-web TypeScript XFTP client library. +-- Each test calls the Haskell function and the corresponding TypeScript function +-- via node, then asserts byte-identical output. +-- +-- Prerequisites: cd xftp-web && npm install && npm run build +-- Run: cabal test --test-option=--match="/XFTP Web Client/" +module XFTPWebTests (xftpWebTests) where + +import Control.Concurrent (forkIO, newEmptyMVar, putMVar, takeMVar) +import Control.Monad (replicateM, when) +import Crypto.Error (throwCryptoError) +import qualified Crypto.PubKey.Curve25519 as X25519 +import qualified Crypto.PubKey.Ed25519 as Ed25519 +import qualified Data.ByteArray as BA +import qualified Data.ByteString as B +import qualified Data.ByteString.Lazy as LB +import Data.Int (Int64) +import Data.List (intercalate) +import qualified Data.List.NonEmpty as NE +import Data.Word (Word8, Word16, Word32) +import System.Random (randomIO) +import Data.X509.Validation (Fingerprint (..)) +import Simplex.FileTransfer.Client (prepareChunkSizes) +import Simplex.FileTransfer.Client.Main (decodeWebURI, encodeWebURI) +import Simplex.FileTransfer.Description (FileDescription (..), FileSize (..), ValidFileDescription, pattern ValidFileDescription) +import Simplex.FileTransfer.Protocol (FileParty (..), xftpBlockSize) +import Simplex.FileTransfer.Transport (XFTPClientHello (..)) +import Simplex.FileTransfer.Types (FileHeader (..)) +import qualified Simplex.Messaging.Crypto as C +import qualified Simplex.Messaging.Crypto.Lazy as LC +import Simplex.Messaging.Encoding +import 
Simplex.Messaging.Encoding.String (strDecode, strEncode) +import Simplex.Messaging.Transport.Server (loadFileFingerprint) +import System.Directory (createDirectoryIfMissing, doesDirectoryExist, removeDirectoryRecursive) +import System.Environment (getEnvironment) +import System.Exit (ExitCode (..)) +import System.Process (CreateProcess (..), StdStream (..), createProcess, proc, waitForProcess) +import Test.Hspec hiding (fit, it) +import Util +import Simplex.FileTransfer.Server.Env (XFTPServerConfig) +import XFTPClient (testXFTPServerConfigEd25519SNI, testXFTPServerConfigSNI, withXFTPServerCfg, xftpTestPort) +import AgentTests.FunctionalAPITests (rfGet, runRight, runRight_, sfGet, withAgent) +import Simplex.Messaging.Agent (AgentClient, xftpReceiveFile, xftpSendFile, xftpStartWorkers) +import Simplex.Messaging.Agent.Protocol (AEvent (..)) +import SMPAgentClient (agentCfg, initAgentServers, testDB) +import XFTPCLI (recipientFiles, senderFiles) +import qualified Simplex.Messaging.Crypto.File as CF + +xftpWebDir :: FilePath +xftpWebDir = "xftp-web" + +-- | Redirect console.log/warn to stderr so library debug output doesn't pollute stdout binary data. +redirectConsole :: String +redirectConsole = "console.log = console.warn = (...a) => process.stderr.write(a.map(String).join(' ') + '\\n');" + +-- | Run an inline ES module script via node, return stdout as ByteString. 
+callNode :: String -> IO B.ByteString +callNode script = do + baseEnv <- getEnvironment + let nodeEnv = ("NODE_TLS_REJECT_UNAUTHORIZED", "0") : baseEnv + (_, Just hout, Just herr, ph) <- + createProcess + (proc "node" ["--input-type=module", "-e", redirectConsole <> script]) + { std_out = CreatePipe, + std_err = CreatePipe, + cwd = Just xftpWebDir, + env = Just nodeEnv + } + errVar <- newEmptyMVar + _ <- forkIO $ B.hGetContents herr >>= putMVar errVar + out <- B.hGetContents hout + err <- takeMVar errVar + ec <- waitForProcess ph + when (ec /= ExitSuccess) $ + expectationFailure $ + "node " <> show ec <> "\nstderr: " <> map (toEnum . fromIntegral) (B.unpack err) + pure out + +-- | Format a ByteString as a JS Uint8Array constructor. +jsUint8 :: B.ByteString -> String +jsUint8 bs = "new Uint8Array([" <> intercalate "," (map show (B.unpack bs)) <> "])" + +-- Import helpers for inline scripts. +impEnc, impPad, impDig, impKey, impSb :: String +impEnc = "import * as E from './dist/protocol/encoding.js';" +impPad = "import * as P from './dist/crypto/padding.js';" +impDig = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as D from './dist/crypto/digest.js';" + <> "await sodium.ready;" +impKey = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as K from './dist/crypto/keys.js';" + <> "await sodium.ready;" +impSb = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as S from './dist/crypto/secretbox.js';" + <> "await sodium.ready;" +impFile :: String +impFile = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as F from './dist/crypto/file.js';" + <> "await sodium.ready;" +impCmd :: String +impCmd = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as E from './dist/protocol/encoding.js';" + <> "import * as Cmd from './dist/protocol/commands.js';" + <> "await sodium.ready;" +impTx :: String +impTx = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as E from 
'./dist/protocol/encoding.js';" + <> "import * as K from './dist/crypto/keys.js';" + <> "import * as Tx from './dist/protocol/transmission.js';" + <> "await sodium.ready;" +impHs :: String +impHs = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as E from './dist/protocol/encoding.js';" + <> "import * as K from './dist/crypto/keys.js';" + <> "import * as Hs from './dist/protocol/handshake.js';" + <> "await sodium.ready;" +impId :: String +impId = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as E from './dist/protocol/encoding.js';" + <> "import * as K from './dist/crypto/keys.js';" + <> "import * as Id from './dist/crypto/identity.js';" + <> "await sodium.ready;" +impDesc :: String +impDesc = "import * as Desc from './dist/protocol/description.js';" +impChk :: String +impChk = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as Desc from './dist/protocol/description.js';" + <> "import * as Chk from './dist/protocol/chunks.js';" + <> "await sodium.ready;" +impCli :: String +impCli = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as K from './dist/crypto/keys.js';" + <> "import * as Cli from './dist/protocol/client.js';" + <> "await sodium.ready;" +impDl :: String +impDl = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as K from './dist/crypto/keys.js';" + <> "import * as F from './dist/crypto/file.js';" + <> "import * as Cli from './dist/protocol/client.js';" + <> "import * as Dl from './dist/download.js';" + <> "import * as Cmd from './dist/protocol/commands.js';" + <> "import * as Tx from './dist/protocol/transmission.js';" + <> "await sodium.ready;" + +impAddr :: String +impAddr = "import * as Addr from './dist/protocol/address.js';" + +-- | Wrap expression in process.stdout.write(Buffer.from(...)). 
+jsOut :: String -> String +jsOut expr = "process.stdout.write(Buffer.from(" <> expr <> "));" + +xftpWebTests :: Spec +xftpWebTests = do + distExists <- runIO $ doesDirectoryExist (xftpWebDir <> "/dist") + if distExists + then do + tsEncodingTests + tsPaddingTests + tsDigestTests + tsKeyTests + tsSecretboxTests + tsFileCryptoTests + tsCommandTests + tsTransmissionTests + tsHandshakeTests + tsIdentityTests + tsDescriptionTests + tsChunkTests + tsClientTests + tsDownloadTests + tsAddressTests + tsIntegrationTests + else + it "skipped (run 'cd xftp-web && npm install && npm run build' first)" $ + pendingWith "TS project not compiled" + +-- ── protocol/encoding ────────────────────────────────────────────── + +tsEncodingTests :: Spec +tsEncodingTests = describe "protocol/encoding" $ do + describe "encode" $ do + it "encodeWord16" $ do + let val = 42 :: Word16 + actual <- callNode $ impEnc <> jsOut ("E.encodeWord16(" <> show val <> ")") + actual `shouldBe` smpEncode val + + it "encodeWord16 max" $ do + let val = 65535 :: Word16 + actual <- callNode $ impEnc <> jsOut ("E.encodeWord16(" <> show val <> ")") + actual `shouldBe` smpEncode val + + it "encodeWord32" $ do + let val = 100000 :: Word32 + actual <- callNode $ impEnc <> jsOut ("E.encodeWord32(" <> show val <> ")") + actual `shouldBe` smpEncode val + + it "encodeInt64" $ do + let val = 1234567890123456789 :: Int64 + actual <- callNode $ impEnc <> jsOut ("E.encodeInt64(" <> show val <> "n)") + actual `shouldBe` smpEncode val + + it "encodeInt64 negative" $ do + let val = -42 :: Int64 + actual <- callNode $ impEnc <> jsOut ("E.encodeInt64(" <> show val <> "n)") + actual `shouldBe` smpEncode val + + it "encodeInt64 zero" $ do + let val = 0 :: Int64 + actual <- callNode $ impEnc <> jsOut ("E.encodeInt64(" <> show val <> "n)") + actual `shouldBe` smpEncode val + + it "encodeBytes" $ do + let val = "hello" :: B.ByteString + actual <- callNode $ impEnc <> jsOut ("E.encodeBytes(" <> jsUint8 val <> ")") + actual `shouldBe` 
smpEncode val + + it "encodeBytes empty" $ do + let val = "" :: B.ByteString + actual <- callNode $ impEnc <> jsOut ("E.encodeBytes(" <> jsUint8 val <> ")") + actual `shouldBe` smpEncode val + + it "encodeLarge" $ do + let val = "test data for large encoding" :: B.ByteString + actual <- callNode $ impEnc <> jsOut ("E.encodeLarge(" <> jsUint8 val <> ")") + actual `shouldBe` smpEncode (Large val) + + it "encodeTail" $ do + let val = "raw tail bytes" :: B.ByteString + actual <- callNode $ impEnc <> jsOut ("E.encodeTail(" <> jsUint8 val <> ")") + actual `shouldBe` smpEncode (Tail val) + + it "encodeBool True" $ do + actual <- callNode $ impEnc <> jsOut "E.encodeBool(true)" + actual `shouldBe` smpEncode True + + it "encodeBool False" $ do + actual <- callNode $ impEnc <> jsOut "E.encodeBool(false)" + actual `shouldBe` smpEncode False + + it "encodeString" $ do + let val = "hello" :: String + actual <- callNode $ impEnc <> jsOut "E.encodeString('hello')" + actual `shouldBe` smpEncode val + + it "encodeMaybe Nothing" $ do + actual <- callNode $ impEnc <> jsOut "E.encodeMaybe(E.encodeBytes, null)" + actual `shouldBe` smpEncode (Nothing :: Maybe B.ByteString) + + it "encodeMaybe Just" $ do + let val = "hello" :: B.ByteString + actual <- callNode $ impEnc <> jsOut ("E.encodeMaybe(E.encodeBytes, " <> jsUint8 val <> ")") + actual `shouldBe` smpEncode (Just val) + + it "encodeList" $ do + let vals = ["ab", "cd", "ef"] :: [B.ByteString] + actual <- + callNode $ + impEnc + <> "const xs = [" + <> intercalate "," (map jsUint8 vals) + <> "];" + <> jsOut "E.encodeList(E.encodeBytes, xs)" + actual `shouldBe` smpEncodeList vals + + it "encodeList empty" $ do + let vals = [] :: [B.ByteString] + actual <- + callNode $ + impEnc <> jsOut "E.encodeList(E.encodeBytes, [])" + actual `shouldBe` smpEncodeList vals + + it "encodeNonEmpty" $ do + let vals = ["ab", "cd"] :: [B.ByteString] + actual <- + callNode $ + impEnc + <> "const xs = [" + <> intercalate "," (map jsUint8 vals) + <> "];" + <> 
jsOut "E.encodeNonEmpty(E.encodeBytes, xs)" + actual `shouldBe` smpEncode (NE.fromList vals) + + describe "decode round-trips" $ do + it "decodeWord16" $ do + let encoded = smpEncode (42 :: Word16) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeWord16(E.decodeWord16(d))" + actual `shouldBe` encoded + + it "decodeWord32" $ do + let encoded = smpEncode (100000 :: Word32) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeWord32(E.decodeWord32(d))" + actual `shouldBe` encoded + + it "decodeInt64" $ do + let encoded = smpEncode (1234567890123456789 :: Int64) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeInt64(E.decodeInt64(d))" + actual `shouldBe` encoded + + it "decodeInt64 negative" $ do + let encoded = smpEncode (-42 :: Int64) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeInt64(E.decodeInt64(d))" + actual `shouldBe` encoded + + it "decodeBytes" $ do + let encoded = smpEncode ("hello" :: B.ByteString) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeBytes(E.decodeBytes(d))" + actual `shouldBe` encoded + + it "decodeLarge" $ do + let encoded = smpEncode (Large "large data") + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeLarge(E.decodeLarge(d))" + actual `shouldBe` encoded + + it "decodeBool" $ do + let encoded = smpEncode True + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeBool(E.decodeBool(d))" + actual `shouldBe` encoded + + it "decodeString" $ do + let encoded = smpEncode ("hello" :: String) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + 
<> jsOut "E.encodeString(E.decodeString(d))" + actual `shouldBe` encoded + + it "decodeMaybe Just" $ do + let encoded = smpEncode (Just ("hello" :: B.ByteString)) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeMaybe(E.encodeBytes, E.decodeMaybe(E.decodeBytes, d))" + actual `shouldBe` encoded + + it "decodeMaybe Nothing" $ do + let encoded = smpEncode (Nothing :: Maybe B.ByteString) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeMaybe(E.encodeBytes, E.decodeMaybe(E.decodeBytes, d))" + actual `shouldBe` encoded + + it "decodeList" $ do + let encoded = smpEncodeList (["ab", "cd", "ef"] :: [B.ByteString]) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeList(E.encodeBytes, E.decodeList(E.decodeBytes, d))" + actual `shouldBe` encoded + +-- ── crypto/padding ───────────────────────────────────────────────── + +tsPaddingTests :: Spec +tsPaddingTests = describe "crypto/padding" $ do + it "pad" $ do + let msg = "hello" :: B.ByteString + paddedLen = 256 :: Int + expected = either (error . show) id $ C.pad msg paddedLen + actual <- callNode $ impPad <> jsOut ("P.pad(" <> jsUint8 msg <> ", " <> show paddedLen <> ")") + actual `shouldBe` expected + + it "pad minimal" $ do + let msg = "ab" :: B.ByteString + paddedLen = 16 :: Int + expected = either (error . show) id $ C.pad msg paddedLen + actual <- callNode $ impPad <> jsOut ("P.pad(" <> jsUint8 msg <> ", " <> show paddedLen <> ")") + actual `shouldBe` expected + + it "Haskell pad -> TS unPad" $ do + let msg = "cross-language test" :: B.ByteString + paddedLen = 128 :: Int + padded = either (error . 
show) id $ C.pad msg paddedLen + actual <- callNode $ impPad <> jsOut ("P.unPad(" <> jsUint8 padded <> ")") + actual `shouldBe` msg + + it "TS pad -> Haskell unPad" $ do + let msg = "ts to haskell" :: B.ByteString + paddedLen = 64 :: Int + tsPadded <- callNode $ impPad <> jsOut ("P.pad(" <> jsUint8 msg <> ", " <> show paddedLen <> ")") + let actual = either (error . show) id $ C.unPad tsPadded + actual `shouldBe` msg + + it "padLazy" $ do + let msg = "hello" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 64 :: Int64 + expected = either (error . show) id $ LC.pad (LB.fromStrict msg) msgLen paddedLen + actual <- + callNode $ + impPad <> jsOut ("P.padLazy(" <> jsUint8 msg <> ", " <> show msgLen <> "n, " <> show paddedLen <> "n)") + actual `shouldBe` LB.toStrict expected + + it "Haskell padLazy -> TS unPadLazy" $ do + let msg = "cross-language lazy" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 64 :: Int64 + padded = either (error . show) id $ LC.pad (LB.fromStrict msg) msgLen paddedLen + actual <- callNode $ impPad <> jsOut ("P.unPadLazy(" <> jsUint8 (LB.toStrict padded) <> ")") + actual `shouldBe` msg + + it "TS padLazy -> Haskell unPadLazy" $ do + let msg = "ts to haskell lazy" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 128 :: Int64 + tsPadded <- + callNode $ + impPad <> jsOut ("P.padLazy(" <> jsUint8 msg <> ", " <> show msgLen <> "n, " <> show paddedLen <> "n)") + let actual = either (error . show) id $ LC.unPad (LB.fromStrict tsPadded) + actual `shouldBe` LB.fromStrict msg + + it "splitLen" $ do + let msg = "test content" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 64 :: Int64 + padded = either (error . 
show) id $ LC.pad (LB.fromStrict msg) msgLen paddedLen + actual <- + callNode $ + impEnc + <> impPad + <> "const r = P.splitLen(" + <> jsUint8 (LB.toStrict padded) + <> ");" + <> "const len = E.encodeInt64(r.len);" + <> jsOut "E.concatBytes(len, r.content)" + let (expectedLen, expectedContent) = either (error . show) id $ LC.splitLen padded + expectedBytes = smpEncode expectedLen <> LB.toStrict expectedContent + actual `shouldBe` expectedBytes + +-- ── crypto/digest ────────────────────────────────────────────────── + +tsDigestTests :: Spec +tsDigestTests = describe "crypto/digest" $ do + it "sha256" $ do + let input = "hello world" :: B.ByteString + actual <- callNode $ impDig <> jsOut ("D.sha256(" <> jsUint8 input <> ")") + actual `shouldBe` C.sha256Hash input + + it "sha256 empty" $ do + let input = "" :: B.ByteString + actual <- callNode $ impDig <> jsOut ("D.sha256(" <> jsUint8 input <> ")") + actual `shouldBe` C.sha256Hash input + + it "sha512" $ do + let input = "hello world" :: B.ByteString + actual <- callNode $ impDig <> jsOut ("D.sha512(" <> jsUint8 input <> ")") + actual `shouldBe` C.sha512Hash input + + it "sha512 empty" $ do + let input = "" :: B.ByteString + actual <- callNode $ impDig <> jsOut ("D.sha512(" <> jsUint8 input <> ")") + actual `shouldBe` C.sha512Hash input + + it "sha256 binary" $ do + let input = B.pack [0, 1, 2, 255, 254, 128] + actual <- callNode $ impDig <> jsOut ("D.sha256(" <> jsUint8 input <> ")") + actual `shouldBe` C.sha256Hash input + +-- ── crypto/keys ────────────────────────────────────────────────── + +tsKeyTests :: Spec +tsKeyTests = describe "crypto/keys" $ do + describe "DER encoding" $ do + it "encodePubKeyEd25519" $ do + let rawPub = B.pack [1 .. 
32] + derPrefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x21, 0x00] + expectedDer = derPrefix <> rawPub + actual <- callNode $ impKey <> jsOut ("K.encodePubKeyEd25519(" <> jsUint8 rawPub <> ")") + actual `shouldBe` expectedDer + + it "decodePubKeyEd25519" $ do + let rawPub = B.pack [1 .. 32] + derPrefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x21, 0x00] + der = derPrefix <> rawPub + actual <- callNode $ impKey <> jsOut ("K.decodePubKeyEd25519(" <> jsUint8 der <> ")") + actual `shouldBe` rawPub + + it "encodePubKeyX25519" $ do + let rawPub = B.pack [1 .. 32] + derPrefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + expectedDer = derPrefix <> rawPub + actual <- callNode $ impKey <> jsOut ("K.encodePubKeyX25519(" <> jsUint8 rawPub <> ")") + actual `shouldBe` expectedDer + + it "encodePrivKeyEd25519" $ do + let seed = B.pack [1 .. 32] + derPrefix = B.pack [0x30, 0x2e, 0x02, 0x01, 0x00, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x04, 0x22, 0x04, 0x20] + expectedDer = derPrefix <> seed + actual <- + callNode $ + impKey + <> "const kp = K.ed25519KeyPairFromSeed(" + <> jsUint8 seed + <> ");" + <> jsOut "K.encodePrivKeyEd25519(kp.privateKey)" + actual `shouldBe` expectedDer + + it "encodePrivKeyX25519" $ do + let rawPriv = B.pack [1 .. 
32] + derPrefix = B.pack [0x30, 0x2e, 0x02, 0x01, 0x00, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x04, 0x22, 0x04, 0x20] + expectedDer = derPrefix <> rawPriv + actual <- callNode $ impKey <> jsOut ("K.encodePrivKeyX25519(" <> jsUint8 rawPriv <> ")") + actual `shouldBe` expectedDer + + it "DER round-trip Ed25519 pubkey" $ do + actual <- + callNode $ + impKey + <> "const kp = K.generateEd25519KeyPair();" + <> "const der = K.encodePubKeyEd25519(kp.publicKey);" + <> "const decoded = K.decodePubKeyEd25519(der);" + <> "const match = decoded.length === kp.publicKey.length && decoded.every((b,i) => b === kp.publicKey[i]);" + <> jsOut "new Uint8Array([match ? 1 : 0])" + actual `shouldBe` B.pack [1] + + it "encodePubKeyEd25519 matches Haskell" $ do + let seed = B.pack [1 .. 32] + sk = throwCryptoError $ Ed25519.secretKey seed + pk = Ed25519.toPublic sk + rawPub = BA.convert pk :: B.ByteString + haskellDer = C.encodePubKey (C.PublicKeyEd25519 pk) + tsDer <- callNode $ impKey <> jsOut ("K.encodePubKeyEd25519(" <> jsUint8 rawPub <> ")") + tsDer `shouldBe` haskellDer + + it "encodePubKeyX25519 matches Haskell" $ do + let rawPriv = B.pack [1 .. 32] + sk = throwCryptoError $ X25519.secretKey rawPriv + pk = X25519.toPublic sk + rawPub = BA.convert pk :: B.ByteString + haskellDer = C.encodePubKey (C.PublicKeyX25519 pk) + tsDer <- callNode $ impKey <> jsOut ("K.encodePubKeyX25519(" <> jsUint8 rawPub <> ")") + tsDer `shouldBe` haskellDer + + it "encodePrivKeyEd25519 matches Haskell" $ do + let seed = B.pack [1 .. 32] + sk = throwCryptoError $ Ed25519.secretKey seed + haskellDer = C.encodePrivKey (C.PrivateKeyEd25519 sk) + tsDer <- + callNode $ + impKey + <> "const kp = K.ed25519KeyPairFromSeed(" + <> jsUint8 seed + <> ");" + <> jsOut "K.encodePrivKeyEd25519(kp.privateKey)" + tsDer `shouldBe` haskellDer + + it "encodePrivKeyX25519 matches Haskell" $ do + let rawPriv = B.pack [1 .. 
32] + sk = throwCryptoError $ X25519.secretKey rawPriv + haskellDer = C.encodePrivKey (C.PrivateKeyX25519 sk) + tsDer <- callNode $ impKey <> jsOut ("K.encodePrivKeyX25519(" <> jsUint8 rawPriv <> ")") + tsDer `shouldBe` haskellDer + + describe "Ed25519 sign/verify" $ do + it "sign determinism" $ do + let seed = B.pack [1 .. 32] + sk = throwCryptoError $ Ed25519.secretKey seed + pk = Ed25519.toPublic sk + msg = "deterministic test" :: B.ByteString + sig = Ed25519.sign sk pk msg + rawSig = BA.convert sig :: B.ByteString + actual <- + callNode $ + impKey + <> "const kp = K.ed25519KeyPairFromSeed(" + <> jsUint8 seed + <> ");" + <> jsOut ("K.sign(kp.privateKey, " <> jsUint8 msg <> ")") + actual `shouldBe` rawSig + + it "Haskell sign -> TS verify" $ do + let seed = B.pack [1 .. 32] + sk = throwCryptoError $ Ed25519.secretKey seed + pk = Ed25519.toPublic sk + msg = "cross-language sign test" :: B.ByteString + sig = Ed25519.sign sk pk msg + rawPub = BA.convert pk :: B.ByteString + rawSig = BA.convert sig :: B.ByteString + actual <- + callNode $ + impKey + <> "const ok = K.verify(" + <> jsUint8 rawPub + <> ", " + <> jsUint8 rawSig + <> ", " + <> jsUint8 msg + <> ");" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + actual `shouldBe` B.pack [1] + + it "TS sign -> Haskell verify" $ do + let seed = B.pack [1 .. 32] + sk = throwCryptoError $ Ed25519.secretKey seed + pk = Ed25519.toPublic sk + msg = "ts-to-haskell sign" :: B.ByteString + rawSig <- + callNode $ + impKey + <> "const kp = K.ed25519KeyPairFromSeed(" + <> jsUint8 seed + <> ");" + <> jsOut ("K.sign(kp.privateKey, " <> jsUint8 msg <> ")") + let sig = throwCryptoError $ Ed25519.signature rawSig + Ed25519.verify pk msg sig `shouldBe` True + + it "verify rejects wrong message" $ do + let seed = B.pack [1 .. 
32] + sk = throwCryptoError $ Ed25519.secretKey seed + pk = Ed25519.toPublic sk + msg = "original message" :: B.ByteString + wrongMsg = "wrong message" :: B.ByteString + sig = Ed25519.sign sk pk msg + rawPub = BA.convert pk :: B.ByteString + rawSig = BA.convert sig :: B.ByteString + actual <- + callNode $ + impKey + <> "const ok = K.verify(" + <> jsUint8 rawPub + <> ", " + <> jsUint8 rawSig + <> ", " + <> jsUint8 wrongMsg + <> ");" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + actual `shouldBe` B.pack [0] + + describe "X25519 DH" $ do + it "DH cross-language" $ do + let seed1 = B.pack [1 .. 32] + seed2 = B.pack [33 .. 64] + sk1 = throwCryptoError $ X25519.secretKey seed1 + sk2 = throwCryptoError $ X25519.secretKey seed2 + pk2 = X25519.toPublic sk2 + dhHs = X25519.dh pk2 sk1 + rawPk2 = BA.convert pk2 :: B.ByteString + rawDh = BA.convert dhHs :: B.ByteString + actual <- + callNode $ + impKey <> jsOut ("K.dh(" <> jsUint8 rawPk2 <> ", " <> jsUint8 seed1 <> ")") + actual `shouldBe` rawDh + + it "DH commutativity" $ do + let seed1 = B.pack [1 .. 32] + seed2 = B.pack [33 .. 64] + sk1 = throwCryptoError $ X25519.secretKey seed1 + pk1 = X25519.toPublic sk1 + sk2 = throwCryptoError $ X25519.secretKey seed2 + pk2 = X25519.toPublic sk2 + rawPk1 = BA.convert pk1 :: B.ByteString + rawPk2 = BA.convert pk2 :: B.ByteString + dh1 <- + callNode $ + impKey <> jsOut ("K.dh(" <> jsUint8 rawPk2 <> ", " <> jsUint8 seed1 <> ")") + dh2 <- + callNode $ + impKey <> jsOut ("K.dh(" <> jsUint8 rawPk1 <> ", " <> jsUint8 seed2 <> ")") + dh1 `shouldBe` dh2 + + describe "keyHash" $ do + it "keyHash matches Haskell sha256Hash of DER" $ do + let rawPub = B.pack [1 .. 
32] + derPrefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x21, 0x00] + der = derPrefix <> rawPub + expectedHash = C.sha256Hash der + actual <- + callNode $ + impKey + <> "const der = K.encodePubKeyEd25519(" + <> jsUint8 rawPub + <> ");" + <> jsOut "K.keyHash(der)" + actual `shouldBe` expectedHash + +-- ── crypto/secretbox ────────────────────────────────────────────── + +tsSecretboxTests :: Spec +tsSecretboxTests = describe "crypto/secretbox" $ do + let key32 = B.pack [1 .. 32] + nonce24 = B.pack [1 .. 24] + cbNonceVal = C.cbNonce nonce24 + sbKeyVal = C.unsafeSbKey key32 + + describe "NaCl secretbox (tag prepended)" $ do + it "cbEncrypt matches Haskell sbEncrypt_" $ do + let msg = "hello NaCl secretbox" :: B.ByteString + paddedLen = 256 :: Int + hsResult = either (error . show) id $ C.sbEncrypt_ key32 cbNonceVal msg paddedLen + tsResult <- + callNode $ + impSb <> jsOut ("S.cbEncrypt(" <> jsUint8 key32 <> "," <> jsUint8 nonce24 <> "," <> jsUint8 msg <> "," <> show paddedLen <> ")") + tsResult `shouldBe` hsResult + + it "Haskell sbEncrypt_ -> TS cbDecrypt" $ do + let msg = "cross-language decrypt" :: B.ByteString + paddedLen = 128 :: Int + cipher = either (error . show) id $ C.sbEncrypt_ key32 cbNonceVal msg paddedLen + tsResult <- + callNode $ + impSb <> jsOut ("S.cbDecrypt(" <> jsUint8 key32 <> "," <> jsUint8 nonce24 <> "," <> jsUint8 cipher <> ")") + tsResult `shouldBe` msg + + it "TS cbEncrypt -> Haskell sbDecrypt_" $ do + let msg = "ts-to-haskell NaCl" :: B.ByteString + paddedLen = 64 :: Int + tsCipher <- + callNode $ + impSb <> jsOut ("S.cbEncrypt(" <> jsUint8 key32 <> "," <> jsUint8 nonce24 <> "," <> jsUint8 msg <> "," <> show paddedLen <> ")") + let hsResult = either (error . 
show) id $ C.sbDecrypt_ key32 cbNonceVal tsCipher + hsResult `shouldBe` msg + + describe "streaming tail-tag" $ do + it "sbEncryptTailTag matches Haskell" $ do + let msg = "hello streaming" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 64 :: Int64 + hsResult = + either (error . show) id $ + LC.sbEncryptTailTag sbKeyVal cbNonceVal (LB.fromStrict msg) msgLen paddedLen + tsResult <- + callNode $ + impSb + <> jsOut + ( "S.sbEncryptTailTag(" + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 msg + <> "," + <> show msgLen + <> "n," + <> show paddedLen + <> "n)" + ) + tsResult `shouldBe` LB.toStrict hsResult + + it "Haskell encrypt -> TS decrypt (tail tag)" $ do + let msg = "haskell-to-ts streaming" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 128 :: Int64 + cipher = + either (error . show) id $ + LC.sbEncryptTailTag sbKeyVal cbNonceVal (LB.fromStrict msg) msgLen paddedLen + tsResult <- + callNode $ + impSb + <> "const r = S.sbDecryptTailTag(" + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "," + <> show paddedLen + <> "n," + <> jsUint8 (LB.toStrict cipher) + <> ");" + <> jsOut "new Uint8Array([r.valid ? 1 : 0, ...r.content])" + let (validByte, content) = B.splitAt 1 tsResult + validByte `shouldBe` B.pack [1] + content `shouldBe` msg + + it "TS encrypt -> Haskell decrypt (tail tag)" $ do + let msg = "ts-to-haskell streaming" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 64 :: Int64 + tsCipher <- + callNode $ + impSb + <> jsOut + ( "S.sbEncryptTailTag(" + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 msg + <> "," + <> show msgLen + <> "n," + <> show paddedLen + <> "n)" + ) + let (valid, plaintext) = + either (error . 
show) id $ + LC.sbDecryptTailTag sbKeyVal cbNonceVal paddedLen (LB.fromStrict tsCipher) + valid `shouldBe` True + LB.toStrict plaintext `shouldBe` msg + + it "tag tampering detection" $ do + let msg = "tamper test" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 64 :: Int64 + tsResult <- + callNode $ + impSb + <> "const enc = S.sbEncryptTailTag(" + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 msg + <> "," + <> show msgLen + <> "n," + <> show paddedLen + <> "n);" + <> "enc[enc.length - 1] ^= 1;" + <> "const r = S.sbDecryptTailTag(" + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "," + <> show paddedLen + <> "n, enc);" + <> jsOut "new Uint8Array([r.valid ? 1 : 0])" + tsResult `shouldBe` B.pack [0] + + describe "internal consistency" $ do + it "streaming matches NaCl secretbox (TS-only)" $ do + let msg = "salsa20 validation" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 64 :: Int64 + tsResult <- + callNode $ + impPad + <> impSb + <> "const msg = " + <> jsUint8 msg + <> ";" + <> "const key = " + <> jsUint8 key32 + <> ";" + <> "const nonce = " + <> jsUint8 nonce24 + <> ";" + <> "const padded = P.padLazy(msg, " + <> show msgLen + <> "n, " + <> show paddedLen + <> "n);" + <> "const nacl = S.cryptoBox(key, nonce, padded);" + <> "const stream = S.sbEncryptTailTag(key, nonce, msg, " + <> show msgLen + <> "n, " + <> show paddedLen + <> "n);" + <> "const naclTag = nacl.subarray(0, 16);" + <> "const naclCipher = nacl.subarray(16);" + <> "const streamCipher = stream.subarray(0, " + <> show paddedLen + <> ");" + <> "const streamTag = stream.subarray(" + <> show paddedLen + <> ");" + <> "const cipherMatch = naclCipher.length === streamCipher.length && naclCipher.every((b,i) => b === streamCipher[i]);" + <> "const tagMatch = naclTag.length === streamTag.length && naclTag.every((b,i) => b === streamTag[i]);" + <> jsOut "new Uint8Array([cipherMatch ? 1 : 0, tagMatch ? 
1 : 0])" + tsResult `shouldBe` B.pack [1, 1] + + it "multi-chunk matches single-shot (TS-only)" $ do + let msg = B.pack [1 .. 200] + tsResult <- + callNode $ + impSb + <> "const key = " + <> jsUint8 key32 + <> ";" + <> "const nonce = " + <> jsUint8 nonce24 + <> ";" + <> "const msg = " + <> jsUint8 msg + <> ";" + <> "const st1 = S.sbInit(key, nonce);" + <> "const c1 = S.sbEncryptChunk(st1, msg);" + <> "const t1 = S.sbAuth(st1);" + <> "const st2 = S.sbInit(key, nonce);" + <> "const parts = [msg.subarray(0,50), msg.subarray(50,100), msg.subarray(100,150), msg.subarray(150)];" + <> "const c2parts = parts.map(p => S.sbEncryptChunk(st2, p));" + <> "const c2 = new Uint8Array(200); let off = 0; c2parts.forEach(p => { c2.set(p, off); off += p.length; });" + <> "const t2 = S.sbAuth(st2);" + <> "const cipherMatch = c1.length === c2.length && c1.every((b,i) => b === c2[i]);" + <> "const tagMatch = t1.length === t2.length && t1.every((b,i) => b === t2[i]);" + <> jsOut "new Uint8Array([cipherMatch ? 1 : 0, tagMatch ? 1 : 0])" + tsResult `shouldBe` B.pack [1, 1] + +-- ── crypto/file ───────────────────────────────────────────────── + +tsFileCryptoTests :: Spec +tsFileCryptoTests = describe "crypto/file" $ do + let key32 = B.pack [1 .. 32] + nonce24 = B.pack [1 .. 
24] + cbNonceVal = C.cbNonce nonce24 + sbKeyVal = C.unsafeSbKey key32 + + describe "FileHeader encoding" $ do + it "encodeFileHeader matches Haskell" $ do + let hdr = FileHeader "test.txt" Nothing + hsEncoded = smpEncode hdr + tsEncoded <- callNode $ impFile <> jsOut "F.encodeFileHeader({fileName: 'test.txt', fileExtra: null})" + tsEncoded `shouldBe` hsEncoded + + it "encodeFileHeader with fileExtra" $ do + let hdr = FileHeader "document.pdf" (Just "v2") + hsEncoded = smpEncode hdr + tsEncoded <- callNode $ impFile <> jsOut "F.encodeFileHeader({fileName: 'document.pdf', fileExtra: 'v2'})" + tsEncoded `shouldBe` hsEncoded + + it "Haskell encode -> TS parseFileHeader" $ do + let hdr = FileHeader "photo.jpg" (Just "extra") + encoded = smpEncode hdr + trailing = B.pack [10, 20, 30, 40, 50] + input = encoded <> trailing + tsResult <- + callNode $ + impFile + <> "const r = F.parseFileHeader(" + <> jsUint8 input + <> ");" + <> "const hdrBytes = F.encodeFileHeader(r.header);" + <> jsOut "new Uint8Array([...hdrBytes, ...r.rest])" + tsResult `shouldBe` input + + describe "file encryption" $ do + it "encryptFile matches Haskell" $ do + let source = "Hello, this is test file content!" :: B.ByteString + hdr = FileHeader "test.txt" Nothing + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 256 :: Int64 + sb = either (error . 
show) id $ LC.sbInit sbKeyVal cbNonceVal + lenStr = smpEncode fileSize' + (hdrEnc, sb1) = LC.sbEncryptChunk sb (lenStr <> fileHdr) + (srcEnc, sb2) = LC.sbEncryptChunk sb1 source + padLen = encSize - 16 - fileSize' - 8 + padding = B.replicate (fromIntegral padLen) 0x23 + (padEnc, sb3) = LC.sbEncryptChunk sb2 padding + tag = BA.convert (LC.sbAuth sb3) :: B.ByteString + hsEncrypted = B.concat [hdrEnc, srcEnc, padEnc, tag] + tsEncrypted <- + callNode $ + impFile + <> "const source = " + <> jsUint8 source + <> ";" + <> "const fileHdr = F.encodeFileHeader({fileName: 'test.txt', fileExtra: null});" + <> jsOut + ( "F.encryptFile(source, fileHdr, " + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "," + <> show fileSize' + <> "n," + <> show encSize + <> "n)" + ) + tsEncrypted `shouldBe` hsEncrypted + + it "Haskell encrypt -> TS decryptChunks" $ do + let source = "cross-language file test data" :: B.ByteString + hdr = FileHeader "data.bin" (Just "meta") + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 128 :: Int64 + sb = either (error . 
show) id $ LC.sbInit sbKeyVal cbNonceVal + lenStr = smpEncode fileSize' + (hdrEnc, sb1) = LC.sbEncryptChunk sb (lenStr <> fileHdr) + (srcEnc, sb2) = LC.sbEncryptChunk sb1 source + padLen = encSize - 16 - fileSize' - 8 + padding = B.replicate (fromIntegral padLen) 0x23 + (padEnc, sb3) = LC.sbEncryptChunk sb2 padding + tag = BA.convert (LC.sbAuth sb3) :: B.ByteString + encrypted = B.concat [hdrEnc, srcEnc, padEnc, tag] + tsResult <- + callNode $ + impFile + <> "const r = F.decryptChunks(" + <> show encSize + <> "n, [" + <> jsUint8 encrypted + <> "], " + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> ");" + <> "const hdrBytes = F.encodeFileHeader(r.header);" + <> jsOut "new Uint8Array([...hdrBytes, ...r.content])" + tsResult `shouldBe` (fileHdr <> source) + + it "TS encryptFile -> Haskell decrypt" $ do + let source = "ts-to-haskell file" :: B.ByteString + hdr = FileHeader "note.txt" Nothing + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 128 :: Int64 + paddedLen = encSize - 16 + tsEncrypted <- + callNode $ + impFile + <> "const source = " + <> jsUint8 source + <> ";" + <> "const fileHdr = F.encodeFileHeader({fileName: 'note.txt', fileExtra: null});" + <> jsOut + ( "F.encryptFile(source, fileHdr, " + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "," + <> show fileSize' + <> "n," + <> show encSize + <> "n)" + ) + let (valid, plaintext) = + either (error . show) id $ + LC.sbDecryptTailTag sbKeyVal cbNonceVal paddedLen (LB.fromStrict tsEncrypted) + valid `shouldBe` True + LB.toStrict plaintext `shouldBe` (fileHdr <> source) + + it "multi-chunk decrypt" $ do + let source = "multi-chunk file content" :: B.ByteString + hdr = FileHeader "multi.bin" Nothing + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 128 :: Int64 + sb = either (error . 
show) id $ LC.sbInit sbKeyVal cbNonceVal + lenStr = smpEncode fileSize' + (hdrEnc, sb1) = LC.sbEncryptChunk sb (lenStr <> fileHdr) + (srcEnc, sb2) = LC.sbEncryptChunk sb1 source + padLen = encSize - 16 - fileSize' - 8 + padding = B.replicate (fromIntegral padLen) 0x23 + (padEnc, sb3) = LC.sbEncryptChunk sb2 padding + tag = BA.convert (LC.sbAuth sb3) :: B.ByteString + encrypted = B.concat [hdrEnc, srcEnc, padEnc, tag] + (chunk1, rest) = B.splitAt 50 encrypted + (chunk2, chunk3) = B.splitAt 50 rest + tsResult <- + callNode $ + impFile + <> "const r = F.decryptChunks(" + <> show encSize + <> "n, [" + <> jsUint8 chunk1 + <> "," + <> jsUint8 chunk2 + <> "," + <> jsUint8 chunk3 + <> "], " + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> ");" + <> "const hdrBytes = F.encodeFileHeader(r.header);" + <> jsOut "new Uint8Array([...hdrBytes, ...r.content])" + tsResult `shouldBe` (fileHdr <> source) + + it "auth tag tampering detection" $ do + let source = "tamper detection file" :: B.ByteString + hdr = FileHeader "secret.dat" Nothing + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 128 :: Int64 + sb = either (error . 
show) id $ LC.sbInit sbKeyVal cbNonceVal + lenStr = smpEncode fileSize' + (hdrEnc, sb1) = LC.sbEncryptChunk sb (lenStr <> fileHdr) + (srcEnc, sb2) = LC.sbEncryptChunk sb1 source + padLen = encSize - 16 - fileSize' - 8 + padding = B.replicate (fromIntegral padLen) 0x23 + (padEnc, sb3) = LC.sbEncryptChunk sb2 padding + tag = BA.convert (LC.sbAuth sb3) :: B.ByteString + encrypted = B.concat [hdrEnc, srcEnc, padEnc, tag] + tsResult <- + callNode $ + impFile + <> "const enc = " + <> jsUint8 encrypted + <> ";" + <> "enc[enc.length - 1] ^= 1;" + <> "let ok = 0;" + <> "try { F.decryptChunks(" + <> show encSize + <> "n, [enc], " + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "); ok = 1; } catch(e) { ok = 0; }" + <> jsOut "new Uint8Array([ok])" + tsResult `shouldBe` B.pack [0] + +-- ── protocol/commands ──────────────────────────────────────────── + +tsCommandTests :: Spec +tsCommandTests = describe "protocol/commands" $ do + let sndKey = B.pack [1 .. 8] + rcvKey1 = B.pack [11 .. 18] + rcvKey2 = B.pack [21 .. 28] + digest = B.pack [31 .. 38] + size32 = 12345 :: Word32 + authKey = B.pack [41 .. 48] + dhKey = B.pack [51 .. 
58] + + describe "encode" $ do + it "encodeFileInfo" $ do + let expected = smpEncode sndKey <> smpEncode size32 <> smpEncode digest + tsResult <- + callNode $ + impCmd + <> "const fi = {sndKey: " + <> jsUint8 sndKey + <> ", size: " + <> show size32 + <> ", digest: " + <> jsUint8 digest + <> "};" + <> jsOut "Cmd.encodeFileInfo(fi)" + tsResult `shouldBe` expected + + it "encodeFNEW with auth" $ do + let fileInfo = smpEncode sndKey <> smpEncode size32 <> smpEncode digest + rcvKeys = smpEncodeList [rcvKey1, rcvKey2] + auth = B.singleton 0x31 <> smpEncode authKey + expected = "FNEW " <> fileInfo <> rcvKeys <> auth + tsResult <- + callNode $ + impCmd + <> "const fi = {sndKey: " + <> jsUint8 sndKey + <> ", size: " + <> show size32 + <> ", digest: " + <> jsUint8 digest + <> "};" + <> "const rks = [" + <> jsUint8 rcvKey1 + <> "," + <> jsUint8 rcvKey2 + <> "];" + <> jsOut ("Cmd.encodeFNEW(fi, rks, " <> jsUint8 authKey <> ")") + tsResult `shouldBe` expected + + it "encodeFNEW without auth" $ do + let fileInfo = smpEncode sndKey <> smpEncode size32 <> smpEncode digest + rcvKeys = smpEncodeList [rcvKey1] + expected = "FNEW " <> fileInfo <> rcvKeys <> "0" + tsResult <- + callNode $ + impCmd + <> "const fi = {sndKey: " + <> jsUint8 sndKey + <> ", size: " + <> show size32 + <> ", digest: " + <> jsUint8 digest + <> "};" + <> "const rks = [" + <> jsUint8 rcvKey1 + <> "];" + <> jsOut "Cmd.encodeFNEW(fi, rks, null)" + tsResult `shouldBe` expected + + it "encodeFADD" $ do + let expected = "FADD " <> smpEncodeList [rcvKey1, rcvKey2] + tsResult <- + callNode $ + impCmd + <> jsOut ("Cmd.encodeFADD([" <> jsUint8 rcvKey1 <> "," <> jsUint8 rcvKey2 <> "])") + tsResult `shouldBe` expected + + it "encodeFPUT" $ do + tsResult <- callNode $ impCmd <> jsOut "Cmd.encodeFPUT()" + tsResult `shouldBe` "FPUT" + + it "encodeFDEL" $ do + tsResult <- callNode $ impCmd <> jsOut "Cmd.encodeFDEL()" + tsResult `shouldBe` "FDEL" + + it "encodeFGET" $ do + let expected = "FGET " <> smpEncode dhKey + tsResult <- 
+ callNode $ + impCmd <> jsOut ("Cmd.encodeFGET(" <> jsUint8 dhKey <> ")") + tsResult `shouldBe` expected + + it "encodePING" $ do + tsResult <- callNode $ impCmd <> jsOut "Cmd.encodePING()" + tsResult `shouldBe` "PING" + + describe "decode" $ do + it "decodeResponse OK" $ do + tsResult <- + callNode $ + impCmd + <> "const r = Cmd.decodeResponse(" + <> jsUint8 ("OK" :: B.ByteString) + <> ");" + <> jsOut "new Uint8Array([r.type === 'FROk' ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + it "decodeResponse PONG" $ do + tsResult <- + callNode $ + impCmd + <> "const r = Cmd.decodeResponse(" + <> jsUint8 ("PONG" :: B.ByteString) + <> ");" + <> jsOut "new Uint8Array([r.type === 'FRPong' ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + it "decodeResponse ERR AUTH" $ do + tsResult <- + callNode $ + impCmd + <> "const r = Cmd.decodeResponse(" + <> jsUint8 ("ERR AUTH" :: B.ByteString) + <> ");" + <> jsOut "new Uint8Array([r.type === 'FRErr' && r.err.type === 'AUTH' ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + it "decodeResponse ERR CMD SYNTAX" $ do + tsResult <- + callNode $ + impCmd + <> "const r = Cmd.decodeResponse(" + <> jsUint8 ("ERR CMD SYNTAX" :: B.ByteString) + <> ");" + <> jsOut "new Uint8Array([r.type === 'FRErr' && r.err.type === 'CMD' && r.err.cmdErr === 'SYNTAX' ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + it "decodeResponse SIDS" $ do + let senderId = B.pack [1 .. 24] + rId1 = B.pack [25 .. 48] + rId2 = B.pack [49 .. 72] + sidsBytes = "SIDS " <> smpEncode senderId <> smpEncodeList [rId1, rId2] + tsResult <- + callNode $ + impCmd + <> "const r = Cmd.decodeResponse(" + <> jsUint8 sidsBytes + <> ");" + <> "if (r.type !== 'FRSndIds') throw new Error('wrong type');" + <> jsOut "E.concatBytes(r.senderId, ...r.recipientIds)" + tsResult `shouldBe` (senderId <> rId1 <> rId2) + + it "decodeResponse RIDS" $ do + let rId1 = B.pack [1 .. 16] + rId2 = B.pack [17 .. 
32] + ridsBytes = "RIDS " <> smpEncodeList [rId1, rId2] + tsResult <- + callNode $ + impCmd + <> "const r = Cmd.decodeResponse(" + <> jsUint8 ridsBytes + <> ");" + <> "if (r.type !== 'FRRcvIds') throw new Error('wrong type');" + <> jsOut "E.concatBytes(...r.recipientIds)" + tsResult `shouldBe` (rId1 <> rId2) + + it "decodeResponse FILE" $ do + let rawPub = B.pack [1 .. 32] + x25519Der = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + derKey = x25519Der <> rawPub + nonce = B.pack [201 .. 224] + fileBytes = "FILE " <> smpEncode derKey <> nonce + tsResult <- + callNode $ + impCmd + <> "const r = Cmd.decodeResponse(" + <> jsUint8 fileBytes + <> ");" + <> "if (r.type !== 'FRFile') throw new Error('wrong type: ' + r.type);" + <> jsOut "E.concatBytes(r.rcvDhKey, r.nonce)" + tsResult `shouldBe` (rawPub <> nonce) + +-- ── protocol/transmission ────────────────────────────────────────── + +tsTransmissionTests :: Spec +tsTransmissionTests = describe "protocol/transmission" $ do + describe "blockPad / blockUnpad" $ do + it "blockPad matches C.pad" $ do + let msg = "hello pad test" :: B.ByteString + blockSize = 256 :: Int + hsPadded = either (error . show) id $ C.pad msg blockSize + tsPadded <- + callNode $ + impTx <> jsOut ("Tx.blockPad(" <> jsUint8 msg <> ", " <> show blockSize <> ")") + tsPadded `shouldBe` hsPadded + + it "Haskell C.pad -> TS blockUnpad" $ do + let msg = "cross-language unpad" :: B.ByteString + blockSize = 128 :: Int + hsPadded = either (error . show) id $ C.pad msg blockSize + tsResult <- + callNode $ + impTx <> jsOut ("Tx.blockUnpad(" <> jsUint8 hsPadded <> ")") + tsResult `shouldBe` msg + + it "TS blockPad -> Haskell C.unPad" $ do + let msg = "ts-to-haskell pad" :: B.ByteString + blockSize = 128 :: Int + tsPadded <- + callNode $ + impTx <> jsOut ("Tx.blockPad(" <> jsUint8 msg <> ", " <> show blockSize <> ")") + let hsResult = either (error . 
show) id $ C.unPad tsPadded + hsResult `shouldBe` msg + + describe "transmission encoding" $ do + it "encodeTransmission unsigned (PING)" $ do + let sessionId = B.pack [201 .. 232] + corrId = "abc" :: B.ByteString + entityId = "" :: B.ByteString + cmdBytes = "PING" :: B.ByteString + -- implySessId = False: sessionId on wire + tWire = smpEncode sessionId <> smpEncode corrId <> smpEncode entityId <> cmdBytes + authenticator = smpEncode ("" :: B.ByteString) + encoded = authenticator <> tWire + batch = B.singleton 1 <> smpEncode (Large encoded) + expected = either (error . show) id $ C.pad batch 16384 + tsResult <- + callNode $ + impTx + <> jsOut + ( "Tx.encodeTransmission(" + <> jsUint8 sessionId + <> ", " + <> jsUint8 corrId + <> ", " + <> jsUint8 entityId + <> ", " + <> jsUint8 cmdBytes + <> ")" + ) + tsResult `shouldBe` expected + + it "encodeAuthTransmission signed" $ do + let seed = B.pack [1 .. 32] + sk = throwCryptoError $ Ed25519.secretKey seed + pk = Ed25519.toPublic sk + sessionId = B.pack [101 .. 132] + corrId = "xyz" :: B.ByteString + entityId = B.pack [1 .. 24] + cmdBytes = "FPUT" :: B.ByteString + tInner = smpEncode corrId <> smpEncode entityId <> cmdBytes + tForAuth = smpEncode sessionId <> tInner + sig = Ed25519.sign sk pk tForAuth + rawSig = BA.convert sig :: B.ByteString + authenticator = smpEncode rawSig + -- implySessId = False: tToSend = tForAuth (sessionId on wire) + encoded = authenticator <> tForAuth + batch = B.singleton 1 <> smpEncode (Large encoded) + expected = either (error . show) id $ C.pad batch 16384 + tsResult <- + callNode $ + impTx + <> "const kp = K.ed25519KeyPairFromSeed(" + <> jsUint8 seed + <> ");" + <> jsOut + ( "Tx.encodeAuthTransmission(" + <> jsUint8 sessionId + <> ", " + <> jsUint8 corrId + <> ", " + <> jsUint8 entityId + <> ", " + <> jsUint8 cmdBytes + <> ", kp.privateKey)" + ) + tsResult `shouldBe` expected + + it "decodeTransmission" $ do + let sessionId = B.pack [201 .. 
232] + corrId = "r01" :: B.ByteString + entityId = B.pack [1 .. 16] + cmdBytes = "OK" :: B.ByteString + -- implySessId = False: sessionId on wire + tWire = smpEncode sessionId <> smpEncode corrId <> smpEncode entityId <> cmdBytes + authenticator = smpEncode ("" :: B.ByteString) + encoded = authenticator <> tWire + batch = B.singleton 1 <> smpEncode (Large encoded) + block = either (error . show) id $ C.pad batch 256 + tsResult <- + callNode $ + impTx + <> "const t = Tx.decodeTransmission(" + <> jsUint8 sessionId + <> ", " + <> jsUint8 block + <> ");" + <> jsOut "E.concatBytes(t.corrId, t.entityId, t.command)" + tsResult `shouldBe` (corrId <> entityId <> cmdBytes) + +-- ── protocol/handshake ──────────────────────────────────────────── + +tsHandshakeTests :: Spec +tsHandshakeTests = describe "protocol/handshake" $ do + describe "version range" $ do + it "encodeVersionRange" $ do + let expected = smpEncode (1 :: Word16) <> smpEncode (3 :: Word16) + tsResult <- + callNode $ + impHs + <> jsOut "Hs.encodeVersionRange({minVersion: 1, maxVersion: 3})" + tsResult `shouldBe` expected + + it "decodeVersionRange" $ do + let vrBytes = smpEncode (2 :: Word16) <> smpEncode (5 :: Word16) + tsResult <- + callNode $ + impHs + <> "const d = new E.Decoder(" + <> jsUint8 vrBytes + <> ");" + <> "const vr = Hs.decodeVersionRange(d);" + <> jsOut "E.concatBytes(E.encodeWord16(vr.minVersion), E.encodeWord16(vr.maxVersion))" + tsResult `shouldBe` vrBytes + + it "compatibleVRange (compatible)" $ do + -- intersection of [1,3] and [2,5] = [2,3] + let expected = smpEncode (2 :: Word16) <> smpEncode (3 :: Word16) + tsResult <- + callNode $ + impHs + <> "const r = Hs.compatibleVRange({minVersion:1,maxVersion:3},{minVersion:2,maxVersion:5});" + <> "if (!r) throw new Error('expected compatible');" + <> jsOut "Hs.encodeVersionRange(r)" + tsResult `shouldBe` expected + + it "compatibleVRange (incompatible)" $ do + tsResult <- + callNode $ + impHs + <> "const r = 
Hs.compatibleVRange({minVersion:1,maxVersion:2},{minVersion:3,maxVersion:5});" + <> jsOut "new Uint8Array([r === null ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + describe "client handshake" $ do + it "encodeClientHandshake" $ do + let kh = B.pack [1 .. 32] + body = smpEncode (3 :: Word16) <> smpEncode kh + expected = either (error . show) id $ C.pad body 16384 + tsResult <- + callNode $ + impHs + <> jsOut ("Hs.encodeClientHandshake({xftpVersion:3,keyHash:" <> jsUint8 kh <> "})") + tsResult `shouldBe` expected + + describe "client hello" $ do + it "encodeClientHello (Nothing)" $ do + let expected = smpEncode (XFTPClientHello {webChallenge = Nothing}) + tsResult <- + callNode $ + impHs + <> jsOut "Hs.encodeClientHello({webChallenge: null})" + tsResult `shouldBe` expected + + it "encodeClientHello (Just challenge)" $ do + let challenge = B.pack [1 .. 32] + expected = either (error . show) id $ C.pad (smpEncode (XFTPClientHello {webChallenge = Just challenge})) xftpBlockSize + tsResult <- + callNode $ + impHs + <> jsOut ("Hs.encodeClientHello({webChallenge:" <> jsUint8 challenge <> "})") + tsResult `shouldBe` expected + + describe "server handshake" $ do + it "decodeServerHandshake" $ do + let sessId = B.pack [1 .. 32] + cert1 = B.pack [101 .. 200] -- 100 bytes + cert2 = B.pack [201 .. 232] -- 32 bytes + signedKeyBytes = B.pack [1 .. 120] + -- Encode server handshake body matching Haskell wire format: + -- smpEncode (versionRange, sessionId, certChainPubKey) + -- where certChainPubKey = (NonEmpty Large certChain, Large signedKey) + body = + smpEncode (1 :: Word16) + <> smpEncode (3 :: Word16) + <> smpEncode sessId + <> smpEncode (NE.fromList [Large cert1, Large cert2]) + <> smpEncode (Large signedKeyBytes) + serverBlock = either (error . 
show) id $ C.pad body 16384 + tsResult <- + callNode $ + impHs + <> "const hs = Hs.decodeServerHandshake(" + <> jsUint8 serverBlock + <> ");" + <> jsOut + ( "E.concatBytes(" + <> "E.encodeWord16(hs.xftpVersionRange.minVersion)," + <> "E.encodeWord16(hs.xftpVersionRange.maxVersion)," + <> "hs.sessionId," + <> "...hs.certChainDer," + <> "hs.signedKeyDer)" + ) + -- Expected: vmin(2) + vmax(2) + sessId(32) + cert1(100) + cert2(32) + signedKey(120) = 288 bytes + tsResult + `shouldBe` ( smpEncode (1 :: Word16) + <> smpEncode (3 :: Word16) + <> sessId + <> cert1 + <> cert2 + <> signedKeyBytes + ) + + it "decodeServerHandshake with webIdentityProof" $ do + let sessId = B.pack [1 .. 32] + cert1 = B.pack [101 .. 200] + cert2 = B.pack [201 .. 232] + signedKeyBytes = B.pack [1 .. 120] + sigBytes = B.pack [1 .. 64] + body = + smpEncode (1 :: Word16) + <> smpEncode (3 :: Word16) + <> smpEncode sessId + <> smpEncode (NE.fromList [Large cert1, Large cert2]) + <> smpEncode (Large signedKeyBytes) + <> smpEncode sigBytes + serverBlock = either (error . show) id $ C.pad body 16384 + tsResult <- + callNode $ + impHs + <> "const hs = Hs.decodeServerHandshake(" + <> jsUint8 serverBlock + <> ");" + <> jsOut "hs.webIdentityProof || new Uint8Array(0)" + tsResult `shouldBe` sigBytes + + it "decodeServerHandshake without webIdentityProof" $ do + let sessId = B.pack [1 .. 32] + cert1 = B.pack [101 .. 200] + cert2 = B.pack [201 .. 232] + signedKeyBytes = B.pack [1 .. 120] + body = + smpEncode (1 :: Word16) + <> smpEncode (3 :: Word16) + <> smpEncode sessId + <> smpEncode (NE.fromList [Large cert1, Large cert2]) + <> smpEncode (Large signedKeyBytes) + <> smpEncode ("" :: B.ByteString) + serverBlock = either (error . show) id $ C.pad body 16384 + tsResult <- + callNode $ + impHs + <> "const hs = Hs.decodeServerHandshake(" + <> jsUint8 serverBlock + <> ");" + <> jsOut "new Uint8Array([hs.webIdentityProof === null ? 
1 : 0])" + tsResult `shouldBe` B.pack [1] + + describe "certificate utilities" $ do + it "caFingerprint" $ do + let cert1 = B.pack [101 .. 200] + cert2 = B.pack [201 .. 232] + expected = C.sha256Hash cert2 + tsResult <- + callNode $ + impHs + <> "const chain = [" + <> jsUint8 cert1 + <> "," + <> jsUint8 cert2 + <> "];" + <> jsOut "Hs.caFingerprint(chain)" + tsResult `shouldBe` expected + + it "caFingerprint 3 certs" $ do + let cert1 = B.pack [1 .. 10] + cert2 = B.pack [11 .. 20] + cert3 = B.pack [21 .. 30] + expected = C.sha256Hash cert2 + tsResult <- + callNode $ + impHs + <> "const chain = [" + <> jsUint8 cert1 + <> "," + <> jsUint8 cert2 + <> "," + <> jsUint8 cert3 + <> "];" + <> jsOut "Hs.caFingerprint(chain)" + tsResult `shouldBe` expected + + it "chainIdCaCerts 2 certs" $ do + let cert1 = B.pack [1 .. 10] + cert2 = B.pack [11 .. 20] + tsResult <- + callNode $ + impHs + <> "const cc = Hs.chainIdCaCerts([" + <> jsUint8 cert1 + <> "," + <> jsUint8 cert2 + <> "]);" + <> "if (cc.type !== 'valid') throw new Error('expected valid');" + <> jsOut "E.concatBytes(cc.leafCert, cc.idCert, cc.caCert)" + tsResult `shouldBe` (cert1 <> cert2 <> cert2) + + it "chainIdCaCerts 3 certs" $ do + let cert1 = B.pack [1 .. 10] + cert2 = B.pack [11 .. 20] + cert3 = B.pack [21 .. 30] + tsResult <- + callNode $ + impHs + <> "const cc = Hs.chainIdCaCerts([" + <> jsUint8 cert1 + <> "," + <> jsUint8 cert2 + <> "," + <> jsUint8 cert3 + <> "]);" + <> "if (cc.type !== 'valid') throw new Error('expected valid');" + <> jsOut "E.concatBytes(cc.leafCert, cc.idCert, cc.caCert)" + tsResult `shouldBe` (cert1 <> cert2 <> cert3) + + it "chainIdCaCerts 4 certs" $ do + let cert1 = B.pack [1 .. 10] + cert2 = B.pack [11 .. 20] + cert3 = B.pack [21 .. 30] + cert4 = B.pack [31 .. 
40] + tsResult <- + callNode $ + impHs + <> "const cc = Hs.chainIdCaCerts([" + <> jsUint8 cert1 + <> "," + <> jsUint8 cert2 + <> "," + <> jsUint8 cert3 + <> "," + <> jsUint8 cert4 + <> "]);" + <> "if (cc.type !== 'valid') throw new Error('expected valid');" + <> jsOut "E.concatBytes(cc.leafCert, cc.idCert, cc.caCert)" + tsResult `shouldBe` (cert1 <> cert2 <> cert4) + + describe "SignedExact parsing" $ do + it "extractSignedKey" $ do + -- Generate signing key (Ed25519) + let signSeed = B.pack [1 .. 32] + signSk = throwCryptoError $ Ed25519.secretKey signSeed + signPk = Ed25519.toPublic signSk + signPkRaw = BA.convert signPk :: B.ByteString + -- Generate DH key (X25519) + dhSeed = B.pack [41 .. 72] + dhSk = throwCryptoError $ X25519.secretKey dhSeed + dhPk = X25519.toPublic dhSk + dhPkRaw = BA.convert dhPk :: B.ByteString + -- SubjectPublicKeyInfo DER for X25519 (44 bytes) + x25519Prefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + spkiDer = x25519Prefix <> dhPkRaw + -- Sign the SPKI with Ed25519 + sig = Ed25519.sign signSk signPk spkiDer + sigRaw = BA.convert sig :: B.ByteString + -- AlgorithmIdentifier for Ed25519 (7 bytes) + algId = B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70] + -- BIT STRING wrapper (3 + 64 = 67 bytes) + bitString = B.pack [0x03, 0x41, 0x00] <> sigRaw + -- Outer SEQUENCE: content = 44 + 7 + 67 = 118 = 0x76 + content = spkiDer <> algId <> bitString + signedExactDer = B.pack [0x30, 0x76] <> content + tsResult <- + callNode $ + impHs + <> "const sk = Hs.extractSignedKey(" + <> jsUint8 signedExactDer + <> ");" + <> jsOut "E.concatBytes(sk.dhKey, sk.signature)" + -- dhKey (32) + signature (64) = 96 bytes + tsResult `shouldBe` (dhPkRaw <> sigRaw) + + it "extractSignedKey signature verifies" $ do + let signSeed = B.pack [1 .. 32] + signSk = throwCryptoError $ Ed25519.secretKey signSeed + signPk = Ed25519.toPublic signSk + signPkRaw = BA.convert signPk :: B.ByteString + dhSeed = B.pack [41 .. 
72] + dhSk = throwCryptoError $ X25519.secretKey dhSeed + dhPk = X25519.toPublic dhSk + dhPkRaw = BA.convert dhPk :: B.ByteString + x25519Prefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + spkiDer = x25519Prefix <> dhPkRaw + sig = Ed25519.sign signSk signPk spkiDer + sigRaw = BA.convert sig :: B.ByteString + algId = B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70] + bitString = B.pack [0x03, 0x41, 0x00] <> sigRaw + content = spkiDer <> algId <> bitString + signedExactDer = B.pack [0x30, 0x76] <> content + tsResult <- + callNode $ + impHs + <> "const sk = Hs.extractSignedKey(" + <> jsUint8 signedExactDer + <> ");" + <> "const ok = K.verify(" + <> jsUint8 signPkRaw + <> ", sk.signature, sk.objectDer);" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + +-- ── crypto/identity ────────────────────────────────────────────── + +-- Construct a minimal X.509 certificate DER with an Ed25519 public key. +-- Structurally valid for DER navigation but not a real certificate. +mkFakeCertDer :: B.ByteString -> B.ByteString +mkFakeCertDer pubKey32 = + let spki = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x21, 0x00] <> pubKey32 + tbsContents = + B.concat + [ B.pack [0xa0, 0x03, 0x02, 0x01, 0x02], + B.pack [0x02, 0x01, 0x01], + B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70], + B.pack [0x30, 0x00], + B.pack [0x30, 0x00], + B.pack [0x30, 0x00], + spki + ] + tbs = B.pack [0x30, fromIntegral $ B.length tbsContents] <> tbsContents + certContents = + B.concat + [ tbs, + B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70], + B.pack [0x03, 0x41, 0x00] <> B.replicate 64 0 + ] + certLen = B.length certContents + in B.pack [0x30, 0x81, fromIntegral certLen] <> certContents + +tsIdentityTests :: Spec +tsIdentityTests = describe "crypto/identity" $ do + describe "extractCertPublicKeyInfo" $ do + it "extracts SPKI from X.509 DER" $ do + let pubKey = B.pack [1 .. 
32] + certDer = mkFakeCertDer pubKey + expectedSpki = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x21, 0x00] <> pubKey + tsResult <- + callNode $ + impId + <> jsOut ("Id.extractCertPublicKeyInfo(" <> jsUint8 certDer <> ")") + tsResult `shouldBe` expectedSpki + + it "extractCertPublicKeyInfo + decodePubKey returns raw 32-byte key" $ do + let pubKey = B.pack [1 .. 32] + certDer = mkFakeCertDer pubKey + tsResult <- + callNode $ + impId + <> jsOut ("K.decodePubKeyEd25519(Id.extractCertPublicKeyInfo(" <> jsUint8 certDer <> "))") + tsResult `shouldBe` pubKey + + describe "verifyIdentityProof" $ do + it "valid proof returns true" $ do + let signSeed = B.pack [1 .. 32] + signSk = throwCryptoError $ Ed25519.secretKey signSeed + signPk = Ed25519.toPublic signSk + signPkRaw = BA.convert signPk :: B.ByteString + leafCertDer = mkFakeCertDer signPkRaw + idCertDer = B.pack [1 .. 50] + keyHash = C.sha256Hash idCertDer + -- DH key SignedExact + dhSeed = B.pack [41 .. 72] + dhSk = throwCryptoError $ X25519.secretKey dhSeed + dhPk = X25519.toPublic dhSk + dhPkRaw = BA.convert dhPk :: B.ByteString + x25519Prefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + spkiDer = x25519Prefix <> dhPkRaw + dhSig = Ed25519.sign signSk signPk spkiDer + dhSigRaw = BA.convert dhSig :: B.ByteString + algId = B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70] + bitString = B.pack [0x03, 0x41, 0x00] <> dhSigRaw + signedKeyDer = B.pack [0x30, 0x76] <> spkiDer <> algId <> bitString + -- Challenge signature + challenge = B.pack [101 .. 132] + sessionId = B.pack [201 .. 
232] + challengeSig = Ed25519.sign signSk signPk (challenge <> sessionId) + challengeSigRaw = BA.convert challengeSig :: B.ByteString + tsResult <- + callNode $ + impId + <> "const ok = Id.verifyIdentityProof({" + <> "certChainDer: [" + <> jsUint8 leafCertDer + <> "," + <> jsUint8 idCertDer + <> "]," + <> "signedKeyDer: " + <> jsUint8 signedKeyDer + <> "," + <> "sigBytes: " + <> jsUint8 challengeSigRaw + <> "," + <> "challenge: " + <> jsUint8 challenge + <> "," + <> "sessionId: " + <> jsUint8 sessionId + <> "," + <> "keyHash: " + <> jsUint8 keyHash + <> "});" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + it "wrong keyHash returns false" $ do + let signSeed = B.pack [1 .. 32] + signSk = throwCryptoError $ Ed25519.secretKey signSeed + signPk = Ed25519.toPublic signSk + signPkRaw = BA.convert signPk :: B.ByteString + leafCertDer = mkFakeCertDer signPkRaw + idCertDer = B.pack [1 .. 50] + wrongKeyHash = B.replicate 32 0xff + -- DH key SignedExact + dhSeed = B.pack [41 .. 72] + dhSk = throwCryptoError $ X25519.secretKey dhSeed + dhPk = X25519.toPublic dhSk + dhPkRaw = BA.convert dhPk :: B.ByteString + x25519Prefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + spkiDer = x25519Prefix <> dhPkRaw + dhSig = Ed25519.sign signSk signPk spkiDer + dhSigRaw = BA.convert dhSig :: B.ByteString + algId = B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70] + bitString = B.pack [0x03, 0x41, 0x00] <> dhSigRaw + signedKeyDer = B.pack [0x30, 0x76] <> spkiDer <> algId <> bitString + challenge = B.pack [101 .. 132] + sessionId = B.pack [201 .. 
232] + challengeSig = Ed25519.sign signSk signPk (challenge <> sessionId) + challengeSigRaw = BA.convert challengeSig :: B.ByteString + tsResult <- + callNode $ + impId + <> "const ok = Id.verifyIdentityProof({" + <> "certChainDer: [" + <> jsUint8 leafCertDer + <> "," + <> jsUint8 idCertDer + <> "]," + <> "signedKeyDer: " + <> jsUint8 signedKeyDer + <> "," + <> "sigBytes: " + <> jsUint8 challengeSigRaw + <> "," + <> "challenge: " + <> jsUint8 challenge + <> "," + <> "sessionId: " + <> jsUint8 sessionId + <> "," + <> "keyHash: " + <> jsUint8 wrongKeyHash + <> "});" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + tsResult `shouldBe` B.pack [0] + + it "wrong challenge sig returns false" $ do + let signSeed = B.pack [1 .. 32] + signSk = throwCryptoError $ Ed25519.secretKey signSeed + signPk = Ed25519.toPublic signSk + signPkRaw = BA.convert signPk :: B.ByteString + leafCertDer = mkFakeCertDer signPkRaw + idCertDer = B.pack [1 .. 50] + keyHash = C.sha256Hash idCertDer + -- DH key SignedExact + dhSeed = B.pack [41 .. 72] + dhSk = throwCryptoError $ X25519.secretKey dhSeed + dhPk = X25519.toPublic dhSk + dhPkRaw = BA.convert dhPk :: B.ByteString + x25519Prefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + spkiDer = x25519Prefix <> dhPkRaw + dhSig = Ed25519.sign signSk signPk spkiDer + dhSigRaw = BA.convert dhSig :: B.ByteString + algId = B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70] + bitString = B.pack [0x03, 0x41, 0x00] <> dhSigRaw + signedKeyDer = B.pack [0x30, 0x76] <> spkiDer <> algId <> bitString + challenge = B.pack [101 .. 132] + sessionId = B.pack [201 .. 232] + wrongChallenge = B.pack [1 .. 
32] + wrongSig = Ed25519.sign signSk signPk (wrongChallenge <> sessionId) + wrongSigRaw = BA.convert wrongSig :: B.ByteString + tsResult <- + callNode $ + impId + <> "const ok = Id.verifyIdentityProof({" + <> "certChainDer: [" + <> jsUint8 leafCertDer + <> "," + <> jsUint8 idCertDer + <> "]," + <> "signedKeyDer: " + <> jsUint8 signedKeyDer + <> "," + <> "sigBytes: " + <> jsUint8 wrongSigRaw + <> "," + <> "challenge: " + <> jsUint8 challenge + <> "," + <> "sessionId: " + <> jsUint8 sessionId + <> "," + <> "keyHash: " + <> jsUint8 keyHash + <> "});" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + tsResult `shouldBe` B.pack [0] + + it "wrong DH key sig returns false" $ do + let signSeed = B.pack [1 .. 32] + signSk = throwCryptoError $ Ed25519.secretKey signSeed + signPk = Ed25519.toPublic signSk + signPkRaw = BA.convert signPk :: B.ByteString + leafCertDer = mkFakeCertDer signPkRaw + idCertDer = B.pack [1 .. 50] + keyHash = C.sha256Hash idCertDer + -- DH key signed by a DIFFERENT key + otherSeed = B.pack [51 .. 82] + otherSk = throwCryptoError $ Ed25519.secretKey otherSeed + otherPk = Ed25519.toPublic otherSk + dhSeed = B.pack [41 .. 72] + dhSk = throwCryptoError $ X25519.secretKey dhSeed + dhPk = X25519.toPublic dhSk + dhPkRaw = BA.convert dhPk :: B.ByteString + x25519Prefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + spkiDer = x25519Prefix <> dhPkRaw + dhSig = Ed25519.sign otherSk otherPk spkiDer + dhSigRaw = BA.convert dhSig :: B.ByteString + algId = B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70] + bitString = B.pack [0x03, 0x41, 0x00] <> dhSigRaw + signedKeyDer = B.pack [0x30, 0x76] <> spkiDer <> algId <> bitString + challenge = B.pack [101 .. 132] + sessionId = B.pack [201 .. 
232] + challengeSig = Ed25519.sign signSk signPk (challenge <> sessionId) + challengeSigRaw = BA.convert challengeSig :: B.ByteString + tsResult <- + callNode $ + impId + <> "const ok = Id.verifyIdentityProof({" + <> "certChainDer: [" + <> jsUint8 leafCertDer + <> "," + <> jsUint8 idCertDer + <> "]," + <> "signedKeyDer: " + <> jsUint8 signedKeyDer + <> "," + <> "sigBytes: " + <> jsUint8 challengeSigRaw + <> "," + <> "challenge: " + <> jsUint8 challenge + <> "," + <> "sessionId: " + <> jsUint8 sessionId + <> "," + <> "keyHash: " + <> jsUint8 keyHash + <> "});" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + tsResult `shouldBe` B.pack [0] + +-- ── protocol/description ────────────────────────────────────────── + +tsDescriptionTests :: Spec +tsDescriptionTests = describe "protocol/description" $ do + describe "base64url" $ do + it "encode matches Haskell strEncode" $ do + let bs = B.pack [0 .. 31] + tsResult <- + callNode $ + impDesc + <> jsOut ("new TextEncoder().encode(Desc.base64urlEncode(" <> jsUint8 bs <> "))") + tsResult `shouldBe` strEncode bs + + it "decode recovers original" $ do + let bs = B.pack [0 .. 31] + encoded = strEncode bs + tsResult <- + callNode $ + impDesc + <> "const s = new TextDecoder().decode(" + <> jsUint8 encoded + <> ");" + <> jsOut "Desc.base64urlDecode(s)" + tsResult `shouldBe` bs + + it "round-trip 256 bytes" $ do + let bs = B.pack [0 .. 255] + tsResult <- + callNode $ + impDesc + <> "const data = " + <> jsUint8 bs + <> ";" + <> "const encoded = Desc.base64urlEncode(data);" + <> jsOut "Desc.base64urlDecode(encoded)" + tsResult `shouldBe` bs + + describe "FileSize" $ do + it "encodeFileSize" $ do + let sizes = [500, 1024, 2048, 1048576, 8388608, 1073741824, 27262976 :: Int64] + expected = B.intercalate "," $ map (strEncode . 
FileSize) sizes + tsResult <- + callNode $ + impDesc + <> "const sizes = [500, 1024, 2048, 1048576, 8388608, 1073741824, 27262976];" + <> jsOut "new TextEncoder().encode(sizes.map(Desc.encodeFileSize).join(','))" + tsResult `shouldBe` expected + + it "decodeFileSize" $ do + tsResult <- + callNode $ + impDesc + <> "const strs = ['500','1kb','2kb','1mb','8mb','1gb'];" + <> jsOut "new TextEncoder().encode(strs.map(s => String(Desc.decodeFileSize(s))).join(','))" + tsResult `shouldBe` "500,1024,2048,1048576,8388608,1073741824" + + describe "FileDescription" $ do + it "fixture YAML round-trip" $ do + fixture <- B.readFile "tests/fixtures/file_description.yaml" + tsResult <- + callNode $ + impDesc + <> "const yaml = new TextDecoder().decode(" + <> jsUint8 fixture + <> ");" + <> "const fd = Desc.decodeFileDescription(yaml);" + <> "const reEncoded = Desc.encodeFileDescription(fd);" + <> jsOut "new TextEncoder().encode(reEncoded)" + tsResult `shouldBe` fixture + + it "fixture parsed structure" $ do + fixture <- B.readFile "tests/fixtures/file_description.yaml" + tsResult <- + callNode $ + impDesc + <> "const yaml = new TextDecoder().decode(" + <> jsUint8 fixture + <> ");" + <> "const fd = Desc.decodeFileDescription(yaml);" + <> "const r = [" + <> "fd.party," + <> "String(fd.size)," + <> "String(fd.chunkSize)," + <> "String(fd.chunks.length)," + <> "String(fd.chunks[0].replicas.length)," + <> "String(fd.chunks[3].chunkSize)," + <> "fd.redirect === null ? 
'null' : 'redirect'" + <> "].join(',');" + <> jsOut "new TextEncoder().encode(r)" + tsResult `shouldBe` "recipient,27262976,8388608,4,2,2097152,null" + + it "encode with redirect round-trips" $ do + tsResult <- + callNode $ + impDesc + <> "const fd = {" + <> " party: 'sender'," + <> " size: 1024," + <> " digest: new Uint8Array([1,2,3])," + <> " key: new Uint8Array(32)," + <> " nonce: new Uint8Array(24)," + <> " chunkSize: 1024," + <> " chunks: [{chunkNo: 1, chunkSize: 1024, digest: new Uint8Array([4,5,6])," + <> " replicas: [{server: 'xftp://abc=@example.com', replicaId: new Uint8Array([7,8,9])," + <> " replicaKey: new Uint8Array([10,11,12])}]}]," + <> " redirect: {size: 512, digest: new Uint8Array([13,14,15])}" + <> "};" + <> "const yaml = Desc.encodeFileDescription(fd);" + <> "const fd2 = Desc.decodeFileDescription(yaml);" + <> "const r = [" + <> "fd2.party," + <> "String(fd2.redirect !== null)," + <> "String(fd2.redirect?.size)," + <> "Desc.base64urlEncode(fd2.redirect?.digest || new Uint8Array())" + <> "].join(',');" + <> jsOut "new TextEncoder().encode(r)" + tsResult `shouldBe` "sender,true,512,DQ4P" + + it "fdSeparator" $ do + tsResult <- + callNode $ + impDesc + <> jsOut "new TextEncoder().encode(Desc.fdSeparator)" + tsResult `shouldBe` "################################\n" + + it "web URI encoding matches Haskell" $ do + yaml <- B.readFile "tests/fixtures/file_description.yaml" + fd <- either fail pure (strDecode yaml :: Either String (FileDescription 'FRecipient)) + let hsEncoded = encodeWebURI fd + -- Haskell round-trip + ValidFileDescription fd' <- either (fail . 
("decode: " <>)) pure $ decodeWebURI hsEncoded + fd `shouldBe` fd' + -- Cross-language: TS reads same fixture, encodes, should match + tsEncoded <- + callNode $ + "import {readFileSync} from 'node:fs';\ + \import * as Agent from './dist/agent.js';\ + \import * as Desc from './dist/protocol/description.js';\ + \const yaml = readFileSync('../tests/fixtures/file_description.yaml', 'utf8');\ + \const fd = Desc.decodeFileDescription(yaml);\ + \const uri = Agent.encodeDescriptionURI(fd);\ + \process.stdout.write(Buffer.from(uri));" + hsEncoded `shouldBe` tsEncoded + + describe "validation" $ do + it "valid description" $ do + fixture <- B.readFile "tests/fixtures/file_description.yaml" + tsResult <- + callNode $ + impDesc + <> "const yaml = new TextDecoder().decode(" + <> jsUint8 fixture + <> ");" + <> "const fd = Desc.decodeFileDescription(yaml);" + <> "const r = Desc.validateFileDescription(fd);" + <> jsOut "new TextEncoder().encode(r === null ? 'ok' : r)" + tsResult `shouldBe` "ok" + + it "non-sequential chunks" $ do + fixture <- B.readFile "tests/fixtures/file_description.yaml" + tsResult <- + callNode $ + impDesc + <> "const yaml = new TextDecoder().decode(" + <> jsUint8 fixture + <> ");" + <> "const fd = Desc.decodeFileDescription(yaml);" + <> "fd.chunks[1].chunkNo = 5;" + <> "const r = Desc.validateFileDescription(fd);" + <> jsOut "new TextEncoder().encode(r || 'ok')" + tsResult `shouldBe` "chunk numbers are not sequential" + + it "mismatched size" $ do + fixture <- B.readFile "tests/fixtures/file_description.yaml" + tsResult <- + callNode $ + impDesc + <> "const yaml = new TextDecoder().decode(" + <> jsUint8 fixture + <> ");" + <> "const fd = Desc.decodeFileDescription(yaml);" + <> "fd.size = 999;" + <> "const r = Desc.validateFileDescription(fd);" + <> jsOut "new TextEncoder().encode(r || 'ok')" + tsResult `shouldBe` "chunks total size is different than file size" + +-- ── protocol/chunks ─────────────────────────────────────────────── + +tsChunkTests :: Spec 
+tsChunkTests = describe "protocol/chunks" $ do + describe "prepareChunkSizes" $ do + it "matches Haskell for various sizes" $ do + let sizes = [100, 65536, 130000, 200000, 500000, 800000, 5000000, 27262976 :: Int64] + hsResults = map prepareChunkSizes sizes + expected = B.intercalate "|" $ map (\cs -> B.intercalate "," $ map (strEncode . FileSize) cs) hsResults + tsResult <- + callNode $ + impChk + <> "const sizes = [100, 65536, 130000, 200000, 500000, 800000, 5000000, 27262976];" + <> "const results = sizes.map(s => Chk.prepareChunkSizes(s).map(Desc.encodeFileSize).join(','));" + <> jsOut "new TextEncoder().encode(results.join('|'))" + tsResult `shouldBe` expected + + it "zero size" $ do + tsResult <- + callNode $ + impChk + <> jsOut "new TextEncoder().encode(Chk.prepareChunkSizes(0).join(','))" + tsResult `shouldBe` "" + + describe "singleChunkSize" $ do + it "finds smallest fitting chunk size" $ do + tsResult <- + callNode $ + impChk + <> "const sizes = [100, 65536, 262144, 300000, 1048576, 4194304, 5000000];" + <> "const results = sizes.map(s => {" + <> " const r = Chk.singleChunkSize(s);" + <> " return r === null ? 'null' : Desc.encodeFileSize(r);" + <> "});" + <> jsOut "new TextEncoder().encode(results.join(','))" + tsResult `shouldBe` "64kb,64kb,256kb,1mb,1mb,4mb,null" + + describe "prepareChunkSpecs" $ do + it "generates correct offsets" $ do + tsResult <- + callNode $ + impChk + <> "const specs = Chk.prepareChunkSpecs([4194304, 4194304, 1048576]);" + <> "const r = specs.map(s => s.chunkOffset + ':' + s.chunkSize).join(',');" + <> jsOut "new TextEncoder().encode(r)" + tsResult `shouldBe` "0:4194304,4194304:4194304,8388608:1048576" + + describe "getChunkDigest" $ do + it "matches Haskell sha256Hash" $ do + let chunk = B.pack [0 .. 
63] + expected = C.sha256Hash chunk + tsResult <- + callNode $ + impChk + <> jsOut ("Chk.getChunkDigest(" <> jsUint8 chunk <> ")") + tsResult `shouldBe` expected + + describe "constants" $ do + it "serverChunkSizes" $ do + tsResult <- + callNode $ + impChk + <> jsOut "new TextEncoder().encode(Chk.serverChunkSizes.map(Desc.encodeFileSize).join(','))" + tsResult `shouldBe` "64kb,256kb,1mb,4mb" + + it "fileSizeLen and authTagSize" $ do + tsResult <- + callNode $ + impChk + <> jsOut "new TextEncoder().encode(Chk.fileSizeLen + ',' + Chk.authTagSize)" + tsResult `shouldBe` "8,16" + +-- ── protocol/client ───────────────────────────────────────────── + +tsClientTests :: Spec +tsClientTests = describe "protocol/client" $ do + -- Fixed X25519 key pairs for deterministic tests + let privARaw = B.pack [1 .. 32] + privA = throwCryptoError $ X25519.secretKey privARaw + pubA = X25519.toPublic privA + pubARaw = BA.convert pubA :: B.ByteString + privBRaw = B.pack [33 .. 64] + privB = throwCryptoError $ X25519.secretKey privBRaw + pubB = X25519.toPublic privB + pubBRaw = BA.convert pubB :: B.ByteString + nonce24 = B.pack [0 .. 
23] + + describe "cbAuthenticate" $ do + it "matches Haskell output" $ do + let msg = "hello world authenticator test" + C.CbAuthenticator expected = + C.cbAuthenticate + (C.PublicKeyX25519 pubA) + (C.PrivateKeyX25519 privB) + (C.cbNonce nonce24) + msg + tsResult <- + callNode $ + impCli + <> "const auth = Cli.cbAuthenticate(" + <> jsUint8 pubARaw + <> "," + <> jsUint8 privBRaw + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 msg + <> ");" + <> jsOut "auth" + tsResult `shouldBe` expected + + it "is 80 bytes" $ do + let msg = "size test" + C.CbAuthenticator expected = + C.cbAuthenticate + (C.PublicKeyX25519 pubA) + (C.PrivateKeyX25519 privB) + (C.cbNonce nonce24) + msg + B.length expected `shouldBe` 80 + + describe "cbVerify" $ do + it "validates Haskell authenticator" $ do + let msg = "test message for verify" + C.CbAuthenticator authBytes_ = + C.cbAuthenticate + (C.PublicKeyX25519 pubA) + (C.PrivateKeyX25519 privB) + (C.cbNonce nonce24) + msg + tsResult <- + callNode $ + impCli + <> "const valid = Cli.cbVerify(" + <> jsUint8 pubBRaw + <> "," + <> jsUint8 privARaw + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 authBytes_ + <> "," + <> jsUint8 msg + <> ");" + <> jsOut "new Uint8Array([valid ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + it "rejects wrong message" $ do + let msg = "correct message" + wrongMsg = "wrong message" + C.CbAuthenticator authBytes_ = + C.cbAuthenticate + (C.PublicKeyX25519 pubA) + (C.PrivateKeyX25519 privB) + (C.cbNonce nonce24) + msg + tsResult <- + callNode $ + impCli + <> "const valid = Cli.cbVerify(" + <> jsUint8 pubBRaw + <> "," + <> jsUint8 privARaw + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 authBytes_ + <> "," + <> jsUint8 wrongMsg + <> ");" + <> jsOut "new Uint8Array([valid ? 
1 : 0])" + tsResult `shouldBe` B.pack [0] + + it "round-trip: TS authenticate, Haskell verify" $ do + let msg = "round trip test" + tsAuth <- + callNode $ + impCli + <> "const auth = Cli.cbAuthenticate(" + <> jsUint8 pubARaw + <> "," + <> jsUint8 privBRaw + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 msg + <> ");" + <> jsOut "auth" + let hsValid = + C.cbVerify + (C.PublicKeyX25519 pubB) + (C.PrivateKeyX25519 privA) + (C.cbNonce nonce24) + (C.CbAuthenticator tsAuth) + msg + hsValid `shouldBe` True + + describe "transport chunk encryption" $ do + let dhSecret = C.dh' (C.PublicKeyX25519 pubA) (C.PrivateKeyX25519 privB) + dhSecretBytes = case dhSecret of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + + it "encryptTransportChunk matches Haskell" $ do + let plaintext = B.pack [100 .. 199] + state0 = either (error . show) id $ LC.cbInit dhSecret (C.cbNonce nonce24) + (cipher, state1) = LC.sbEncryptChunk state0 plaintext + tag = BA.convert $ LC.sbAuth state1 :: B.ByteString + expected = cipher <> tag + tsResult <- + callNode $ + impCli + <> "const enc = Cli.encryptTransportChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 plaintext + <> ");" + <> jsOut "enc" + tsResult `shouldBe` expected + + it "decryptTransportChunk decrypts Haskell-encrypted data" $ do + let plaintext = B.pack ([200 .. 255] <> [0 .. 99]) + state0 = either (error . 
show) id $ LC.cbInit dhSecret (C.cbNonce nonce24) + (cipher, state1) = LC.sbEncryptChunk state0 plaintext + tag = BA.convert $ LC.sbAuth state1 :: B.ByteString + encData = cipher <> tag + tsResult <- + callNode $ + impCli + <> "const r = Cli.decryptTransportChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 encData + <> ");" + <> "if (!r.valid) throw new Error('invalid');" + <> jsOut "r.content" + tsResult `shouldBe` plaintext + + it "round-trip encrypt then decrypt" $ do + let plaintext = B.pack [42, 42, 42, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + tsResult <- + callNode $ + impCli + <> "const plain = " + <> jsUint8 plaintext + <> ";" + <> "const enc = Cli.encryptTransportChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> ",plain);" + <> "const r = Cli.decryptTransportChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> ",enc);" + <> "if (!r.valid) throw new Error('invalid');" + <> jsOut "r.content" + tsResult `shouldBe` plaintext + + it "rejects tampered ciphertext" $ do + let plaintext = B.pack [10 .. 40] + tsResult <- + callNode $ + impCli + <> "const enc = Cli.encryptTransportChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 plaintext + <> ");" + <> "enc[0] ^= 0xff;" + <> "const r = Cli.decryptTransportChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> ",enc);" + <> jsOut "new Uint8Array([r.valid ? 1 : 0])" + tsResult `shouldBe` B.pack [0] + + describe "constants" $ do + it "cbAuthenticatorSize" $ do + tsResult <- + callNode $ + impCli <> jsOut "new TextEncoder().encode(String(Cli.cbAuthenticatorSize))" + tsResult `shouldBe` "80" + +-- ── download (integration) ────────────────────────────────────────── + +tsDownloadTests :: Spec +tsDownloadTests = describe "download" $ do + -- Fixed X25519 key pairs (same as client tests) + let privARaw = B.pack [1 .. 
32] + privA = throwCryptoError $ X25519.secretKey privARaw + pubA = X25519.toPublic privA + pubARaw = BA.convert pubA :: B.ByteString + privBRaw = B.pack [33 .. 64] + privB = throwCryptoError $ X25519.secretKey privBRaw + pubB = X25519.toPublic privB + pubBRaw = BA.convert pubB :: B.ByteString + nonce24 = B.pack [0 .. 23] + -- File-level key/nonce (different from transport) + fileKey32 = B.pack [1 .. 32] + fileNonce24 = B.pack [1 .. 24] + fileCbNonce = C.cbNonce fileNonce24 + fileSbKey = C.unsafeSbKey fileKey32 + + describe "processFileResponse" $ do + it "derives DH secret matching Haskell" $ do + -- Simulate: client has privA, server sends pubB + let hsDhSecret = C.dh' (C.PublicKeyX25519 pubB) (C.PrivateKeyX25519 privA) + hsDhBytes = case hsDhSecret of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + tsDhSecret <- + callNode $ + impDl + <> "const dh = Dl.processFileResponse(" + <> jsUint8 privARaw + <> "," + <> jsUint8 pubBRaw + <> ");" + <> jsOut "dh" + tsDhSecret `shouldBe` hsDhBytes + + describe "decryptReceivedChunk" $ do + it "transport decrypt with digest verification" $ do + -- Haskell: transport-encrypt a chunk + let dhSecret = C.dh' (C.PublicKeyX25519 pubA) (C.PrivateKeyX25519 privB) + dhSecretBytes = case dhSecret of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + chunkData = B.pack [50 .. 149] + chunkDigest = C.sha256Hash chunkData + state0 = either (error . 
show) id $ LC.cbInit dhSecret (C.cbNonce nonce24) + (cipher, state1) = LC.sbEncryptChunk state0 chunkData + tag = BA.convert (LC.sbAuth state1) :: B.ByteString + encData = cipher <> tag + tsResult <- + callNode $ + impDl + <> "const r = Dl.decryptReceivedChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 encData + <> "," + <> jsUint8 chunkDigest + <> ");" + <> jsOut "r" + tsResult `shouldBe` chunkData + + it "rejects wrong digest" $ do + let dhSecret = C.dh' (C.PublicKeyX25519 pubA) (C.PrivateKeyX25519 privB) + dhSecretBytes = case dhSecret of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + chunkData = B.pack [50 .. 149] + wrongDigest = B.replicate 32 0xff + state0 = either (error . show) id $ LC.cbInit dhSecret (C.cbNonce nonce24) + (cipher, state1) = LC.sbEncryptChunk state0 chunkData + tag = BA.convert (LC.sbAuth state1) :: B.ByteString + encData = cipher <> tag + tsResult <- + callNode $ + impDl + <> "let ok = false; try { Dl.decryptReceivedChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 encData + <> "," + <> jsUint8 wrongDigest + <> "); } catch(e) { ok = e.message.includes('digest'); }" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + it "allows null digest (skip verification)" $ do + let dhSecret = C.dh' (C.PublicKeyX25519 pubA) (C.PrivateKeyX25519 privB) + dhSecretBytes = case dhSecret of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + chunkData = B.pack [10 .. 50] + state0 = either (error . 
show) id $ LC.cbInit dhSecret (C.cbNonce nonce24) + (cipher, state1) = LC.sbEncryptChunk state0 chunkData + tag = BA.convert (LC.sbAuth state1) :: B.ByteString + encData = cipher <> tag + tsResult <- + callNode $ + impDl + <> "const r = Dl.decryptReceivedChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 encData + <> ",null);" + <> jsOut "r" + tsResult `shouldBe` chunkData + + describe "full pipeline" $ do + it "Haskell file-encrypt + transport-encrypt -> TS transport-decrypt + file-decrypt" $ do + -- Step 1: file-level encryption (matches Haskell encryptFile) + let source = "Integration test: full download pipeline!" :: B.ByteString + hdr = FileHeader "pipeline.txt" Nothing + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 256 :: Int64 + sb = either (error . show) id $ LC.sbInit fileSbKey fileCbNonce + lenStr = smpEncode fileSize' + (hdrEnc, sb1) = LC.sbEncryptChunk sb (lenStr <> fileHdr) + (srcEnc, sb2) = LC.sbEncryptChunk sb1 source + padLen = encSize - 16 - fileSize' - 8 + padding = B.replicate (fromIntegral padLen) 0x23 + (padEnc, sb3) = LC.sbEncryptChunk sb2 padding + fileTag = BA.convert (LC.sbAuth sb3) :: B.ByteString + fileEncrypted = B.concat [hdrEnc, srcEnc, padEnc, fileTag] + -- Step 2: transport-level encryption (simulates server sending chunk) + let dhSecret = C.dh' (C.PublicKeyX25519 pubA) (C.PrivateKeyX25519 privB) + dhSecretBytes = case dhSecret of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + ts0 = either (error . 
show) id $ LC.cbInit dhSecret (C.cbNonce nonce24) + (transportCipher, ts1) = LC.sbEncryptChunk ts0 fileEncrypted + transportTag = BA.convert (LC.sbAuth ts1) :: B.ByteString + transportEncData = transportCipher <> transportTag + -- Step 3: TS decrypts transport, then file-level + tsResult <- + callNode $ + impDl + <> "const chunk = Dl.decryptReceivedChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 transportEncData + <> ",null);" + <> "const r = F.decryptChunks(" + <> show encSize + <> "n,[chunk]," + <> jsUint8 fileKey32 + <> "," + <> jsUint8 fileNonce24 + <> ");" + <> "const hdrBytes = F.encodeFileHeader(r.header);" + <> jsOut "new Uint8Array([...hdrBytes, ...r.content])" + tsResult `shouldBe` (fileHdr <> source) + + it "multi-chunk file: Haskell encrypt -> TS decrypt" $ do + -- File content that spans two chunks when file-encrypted + let source = B.pack (take 200 $ cycle [0 .. 255]) + hdr = FileHeader "multi.bin" Nothing + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 512 :: Int64 + sb = either (error . 
show) id $ LC.sbInit fileSbKey fileCbNonce + lenStr = smpEncode fileSize' + (hdrEnc, sb1) = LC.sbEncryptChunk sb (lenStr <> fileHdr) + (srcEnc, sb2) = LC.sbEncryptChunk sb1 source + padLen = encSize - 16 - fileSize' - 8 + padding = B.replicate (fromIntegral padLen) 0x23 + (padEnc, sb3) = LC.sbEncryptChunk sb2 padding + fileTag = BA.convert (LC.sbAuth sb3) :: B.ByteString + fileEncrypted = B.concat [hdrEnc, srcEnc, padEnc, fileTag] + -- Split file-encrypted data into two "chunks" and transport-encrypt each + let splitPt = B.length fileEncrypted `div` 2 + fileChunk1 = B.take splitPt fileEncrypted + fileChunk2 = B.drop splitPt fileEncrypted + -- Transport encrypt chunk 1 (with separate DH / nonce per chunk) + dhSecret1 = C.dh' (C.PublicKeyX25519 pubA) (C.PrivateKeyX25519 privB) + dhSecret1Bytes = case dhSecret1 of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + nonce1 = nonce24 + t1s0 = either (error . show) id $ LC.cbInit dhSecret1 (C.cbNonce nonce1) + (t1cipher, t1s1) = LC.sbEncryptChunk t1s0 fileChunk1 + t1tag = BA.convert (LC.sbAuth t1s1) :: B.ByteString + transportEnc1 = t1cipher <> t1tag + -- Transport encrypt chunk 2 (different nonce) + nonce2 = B.pack [24 .. 47] + dhSecret2 = C.dh' (C.PublicKeyX25519 pubB) (C.PrivateKeyX25519 privA) + dhSecret2Bytes = case dhSecret2 of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + t2s0 = either (error . 
show) id $ LC.cbInit dhSecret2 (C.cbNonce nonce2) + (t2cipher, t2s1) = LC.sbEncryptChunk t2s0 fileChunk2 + t2tag = BA.convert (LC.sbAuth t2s1) :: B.ByteString + transportEnc2 = t2cipher <> t2tag + -- TS: transport-decrypt each chunk, then file-level decrypt the concatenation + tsResult <- + callNode $ + impDl + <> "const c1 = Dl.decryptReceivedChunk(" + <> jsUint8 dhSecret1Bytes + <> "," + <> jsUint8 nonce1 + <> "," + <> jsUint8 transportEnc1 + <> ",null);" + <> "const c2 = Dl.decryptReceivedChunk(" + <> jsUint8 dhSecret2Bytes + <> "," + <> jsUint8 nonce2 + <> "," + <> jsUint8 transportEnc2 + <> ",null);" + <> "const r = F.decryptChunks(" + <> show encSize + <> "n,[c1,c2]," + <> jsUint8 fileKey32 + <> "," + <> jsUint8 fileNonce24 + <> ");" + <> "const hdrBytes = F.encodeFileHeader(r.header);" + <> jsOut "new Uint8Array([...hdrBytes, ...r.content])" + tsResult `shouldBe` (fileHdr <> source) + + describe "FGET + FRFile round-trip" $ do + it "encode FGET -> decode FRFile -> process -> transport decrypt" $ do + -- Client side: generate FGET command + let dhSecret = C.dh' (C.PublicKeyX25519 pubA) (C.PrivateKeyX25519 privB) + chunkData = "FGET round-trip test data" :: B.ByteString + state0 = either (error . 
show) id $ LC.cbInit dhSecret (C.cbNonce nonce24) + (cipher, state1) = LC.sbEncryptChunk state0 chunkData + tag = BA.convert (LC.sbAuth state1) :: B.ByteString + encData = cipher <> tag + -- Simulate server response: FILE + -- Server sends pubA (client has privB to do DH) + serverPubDer = C.encodePubKey (C.PublicKeyX25519 pubA) + fileResponseBytes = "FILE " <> smpEncode serverPubDer <> nonce24 + -- TS: parse FRFile response, derive DH secret, decrypt transport chunk + tsResult <- + callNode $ + impDl + <> "const resp = Cmd.decodeResponse(" + <> jsUint8 fileResponseBytes + <> ");" + <> "if (resp.type !== 'FRFile') throw new Error('expected FRFile');" + <> "const dhSecret = Dl.processFileResponse(" + <> jsUint8 privBRaw + <> ",resp.rcvDhKey);" + <> "const r = Dl.decryptReceivedChunk(dhSecret," + <> "resp.nonce," + <> jsUint8 encData + <> ",null);" + <> jsOut "r" + tsResult `shouldBe` chunkData + + describe "processDownloadedFile" $ do + it "decrypts file from transport-decrypted chunks" $ do + let source = "processDownloadedFile test" :: B.ByteString + hdr = FileHeader "download.txt" (Just "v1") + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 256 :: Int64 + sb = either (error . 
show) id $ LC.sbInit fileSbKey fileCbNonce + lenStr = smpEncode fileSize' + (hdrEnc, sb1) = LC.sbEncryptChunk sb (lenStr <> fileHdr) + (srcEnc, sb2) = LC.sbEncryptChunk sb1 source + padLen = encSize - 16 - fileSize' - 8 + padding = B.replicate (fromIntegral padLen) 0x23 + (padEnc, sb3) = LC.sbEncryptChunk sb2 padding + fileTag = BA.convert (LC.sbAuth sb3) :: B.ByteString + fileEncrypted = B.concat [hdrEnc, srcEnc, padEnc, fileTag] + -- TS: call processDownloadedFile with a minimal FileDescription-like object + tsResult <- + callNode $ + impDl + <> "const fd = {size: " + <> show encSize + <> "," + <> "key: " + <> jsUint8 fileKey32 + <> "," + <> "nonce: " + <> jsUint8 fileNonce24 + <> "};" + <> "const r = Dl.processDownloadedFile(fd, [" + <> jsUint8 fileEncrypted + <> "]);" + <> "const hdrBytes = F.encodeFileHeader(r.header);" + <> jsOut "new Uint8Array([...hdrBytes, ...r.content])" + tsResult `shouldBe` (fileHdr <> source) + +-- ── protocol/address ────────────────────────────────────────────── + +tsAddressTests :: Spec +tsAddressTests = describe "protocol/address" $ do + it "parseXFTPServer with port" $ do + let addr = "xftp://LcJUMfVhwD8yxjAiSaDzzGF3-kLG4Uh0Fl_ZIjrRwjI=@localhost:8000" :: String + expectedKH :: B.ByteString + expectedKH = either error id $ strDecode "LcJUMfVhwD8yxjAiSaDzzGF3-kLG4Uh0Fl_ZIjrRwjI=" + result <- + callNode $ + impAddr + <> "const s = Addr.parseXFTPServer('" + <> addr + <> "');" + <> jsOut "new Uint8Array([...s.keyHash, ...new TextEncoder().encode(s.host + ':' + s.port)])" + let (kh, hostPort) = B.splitAt 32 result + kh `shouldBe` expectedKH + hostPort `shouldBe` "localhost:8000" + + it "parseXFTPServer default port" $ do + result <- + callNode $ + impAddr + <> "const s = Addr.parseXFTPServer('xftp://LcJUMfVhwD8yxjAiSaDzzGF3-kLG4Uh0Fl_ZIjrRwjI=@example.com');" + <> jsOut "new TextEncoder().encode(s.host + ':' + s.port)" + result `shouldBe` "example.com:443" + + it "parseXFTPServer multi-host takes first" $ do + result <- + callNode $ + 
impAddr + <> "const s = Addr.parseXFTPServer('xftp://LcJUMfVhwD8yxjAiSaDzzGF3-kLG4Uh0Fl_ZIjrRwjI=@host1.com:5000,host2.com');" + <> jsOut "new TextEncoder().encode(s.host + ':' + s.port)" + result `shouldBe` "host1.com:5000" + +-- ── integration ─────────────────────────────────────────────────── + +tsIntegrationTests :: Spec +tsIntegrationTests = describe "integration" $ do + it "web handshake with Ed25519 identity verification" $ + webHandshakeTest testXFTPServerConfigEd25519SNI "tests/fixtures/ed25519/ca.crt" + it "web handshake with Ed448 identity verification" $ + webHandshakeTest testXFTPServerConfigSNI "tests/fixtures/ca.crt" + it "connectXFTP + pingXFTP" $ + pingTest testXFTPServerConfigEd25519SNI "tests/fixtures/ed25519/ca.crt" + it "full round-trip: create, upload, download, ack, addRecipients, delete" $ + fullRoundTripTest testXFTPServerConfigEd25519SNI "tests/fixtures/ed25519/ca.crt" + it "agent URI round-trip" agentURIRoundTripTest + it "agent upload + download round-trip" $ + agentUploadDownloadTest testXFTPServerConfigEd25519SNI "tests/fixtures/ed25519/ca.crt" + it "agent delete + verify gone" $ + agentDeleteTest testXFTPServerConfigEd25519SNI "tests/fixtures/ed25519/ca.crt" + it "agent redirect: upload with redirect, download" $ + agentRedirectTest testXFTPServerConfigEd25519SNI "tests/fixtures/ed25519/ca.crt" + it "cross-language: TS upload, Haskell download" $ + tsUploadHaskellDownloadTest testXFTPServerConfigSNI "tests/fixtures/ca.crt" + it "cross-language: TS upload with redirect, Haskell download" $ + tsUploadRedirectHaskellDownloadTest testXFTPServerConfigSNI "tests/fixtures/ca.crt" + it "cross-language: Haskell upload, TS download" $ + haskellUploadTsDownloadTest testXFTPServerConfigSNI + +webHandshakeTest :: XFTPServerConfig -> FilePath -> Expectation +webHandshakeTest cfg caFile = do + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . 
fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + result <- + callNode $ + "import http2 from 'node:http2';\ + \import crypto from 'node:crypto';\ + \import sodium from 'libsodium-wrappers-sumo';\ + \import * as Addr from './dist/protocol/address.js';\ + \import * as Hs from './dist/protocol/handshake.js';\ + \import * as Id from './dist/crypto/identity.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const readBody = s => new Promise((ok, err) => {\ + \const c = [];\ + \s.on('data', d => c.push(d));\ + \s.on('end', () => ok(Buffer.concat(c)));\ + \s.on('error', err);\ + \});\ + \const client = http2.connect('https://' + server.host + ':' + server.port, {rejectUnauthorized: false});\ + \const challenge = new Uint8Array(crypto.randomBytes(32));\ + \const s1 = client.request({':method': 'POST', ':path': '/', 'xftp-web-hello': '1'});\ + \s1.end(Buffer.from(Hs.encodeClientHello({webChallenge: challenge})));\ + \const hs = Hs.decodeServerHandshake(new Uint8Array(await readBody(s1)));\ + \const idOk = hs.webIdentityProof\ + \ ? Id.verifyIdentityProof({certChainDer: hs.certChainDer, signedKeyDer: hs.signedKeyDer,\ + \sigBytes: hs.webIdentityProof, challenge, sessionId: hs.sessionId, keyHash: server.keyHash})\ + \ : false;\ + \const ver = hs.xftpVersionRange.maxVersion;\ + \const s2 = client.request({':method': 'POST', ':path': '/', 'xftp-handshake': '1'});\ + \s2.end(Buffer.from(Hs.encodeClientHandshake({xftpVersion: ver, keyHash: server.keyHash})));\ + \const ack = await readBody(s2);\ + \client.close();" + <> jsOut "new Uint8Array([idOk ? 1 : 0, ack.length === 0 ? 1 : 0])" + result `shouldBe` B.pack [1, 1] + +pingTest :: XFTPServerConfig -> FilePath -> Expectation +pingTest cfg caFile = do + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . 
fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + result <- + callNode $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import * as Addr from './dist/protocol/address.js';\ + \import {newXFTPAgent, closeXFTPAgent} from './dist/client.js';\ + \import {pingXFTP} from './dist/client.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const agent = newXFTPAgent();\ + \await pingXFTP(agent, server);\ + \closeXFTPAgent(agent);" + <> jsOut "new Uint8Array([1])" + result `shouldBe` B.pack [1] + +fullRoundTripTest :: XFTPServerConfig -> FilePath -> Expectation +fullRoundTripTest cfg caFile = do + createDirectoryIfMissing False "tests/tmp/xftp-server-files" + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + result <- + callNode $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import crypto from 'node:crypto';\ + \import * as Addr from './dist/protocol/address.js';\ + \import * as K from './dist/crypto/keys.js';\ + \import {sha256} from './dist/crypto/digest.js';\ + \import {newXFTPAgent, closeXFTPAgent, createXFTPChunk, uploadXFTPChunk, downloadXFTPChunk,\ + \ addXFTPRecipients, deleteXFTPChunk} from './dist/client.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const agent = newXFTPAgent();\ + \const sndKp = K.generateEd25519KeyPair();\ + \const rcvKp1 = K.generateEd25519KeyPair();\ + \const rcvKp2 = K.generateEd25519KeyPair();\ + \const chunkData = new Uint8Array(crypto.randomBytes(65536));\ + \const digest = sha256(chunkData);\ + \const file = {\ + \ sndKey: K.encodePubKeyEd25519(sndKp.publicKey),\ + \ size: chunkData.length,\ + \ digest\ + \};\ + \const rcvKeys = [K.encodePubKeyEd25519(rcvKp1.publicKey)];\ + \const {senderId, recipientIds} = await 
createXFTPChunk(agent, server, sndKp.privateKey, file, rcvKeys, null);\ + \await uploadXFTPChunk(agent, server, sndKp.privateKey, senderId, chunkData);\ + \const dl1 = await downloadXFTPChunk(agent, server, rcvKp1.privateKey, recipientIds[0], digest);\ + \const match1 = dl1.length === chunkData.length && dl1.every((b, i) => b === chunkData[i]);\ + \const newIds = await addXFTPRecipients(agent, server, sndKp.privateKey, senderId,\ + \ [K.encodePubKeyEd25519(rcvKp2.publicKey)]);\ + \const dl2 = await downloadXFTPChunk(agent, server, rcvKp2.privateKey, newIds[0], digest);\ + \const match2 = dl2.length === chunkData.length && dl2.every((b, i) => b === chunkData[i]);\ + \await deleteXFTPChunk(agent, server, sndKp.privateKey, senderId);\ + \closeXFTPAgent(agent);" + <> jsOut "new Uint8Array([match1 ? 1 : 0, match2 ? 1 : 0])" + result `shouldBe` B.pack [1, 1] + +agentURIRoundTripTest :: Expectation +agentURIRoundTripTest = do + result <- + callNode $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import * as Agent from './dist/agent.js';\ + \import * as Desc from './dist/protocol/description.js';\ + \await sodium.ready;\ + \const fd = {\ + \ party: 'recipient',\ + \ size: 65536,\ + \ digest: new Uint8Array(64).fill(0xab),\ + \ key: new Uint8Array(32).fill(0x01),\ + \ nonce: new Uint8Array(24).fill(0x02),\ + \ chunkSize: 65536,\ + \ chunks: [{\ + \ chunkNo: 1,\ + \ chunkSize: 65536,\ + \ digest: new Uint8Array(32).fill(0xcd),\ + \ replicas: [{\ + \ server: 'xftp://AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=@example.com:443',\ + \ replicaId: new Uint8Array([1,2,3]),\ + \ replicaKey: new Uint8Array([48,46,2,1,0,48,5,6,3,43,101,112,4,34,4,32,\ + \ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32])\ + \ }]\ + \ }],\ + \ redirect: null\ + \};\ + \const uri = Agent.encodeDescriptionURI(fd);\ + \const fd2 = Agent.decodeDescriptionURI(uri);\ + \const yaml1 = Desc.encodeFileDescription(fd);\ + \const yaml2 = 
Desc.encodeFileDescription(fd2);\ + \const match = yaml1 === yaml2 ? 1 : 0;" + <> jsOut "new Uint8Array([match])" + result `shouldBe` B.pack [1] + +agentUploadDownloadTest :: XFTPServerConfig -> FilePath -> Expectation +agentUploadDownloadTest cfg caFile = do + createDirectoryIfMissing False "tests/tmp/xftp-server-files" + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + result <- + callNode $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import crypto from 'node:crypto';\ + \import * as Addr from './dist/protocol/address.js';\ + \import * as Agent from './dist/agent.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const agent = Agent.newXFTPAgent();\ + \const originalData = new Uint8Array(crypto.randomBytes(50000));\ + \const encrypted = Agent.encryptFileForUpload(originalData, 'test-file.bin');\ + \const {rcvDescription, sndDescription, uri} = await Agent.uploadFile(agent, [server], encrypted);\ + \const fd = Agent.decodeDescriptionURI(uri);\ + \const {header, content} = await Agent.downloadFile(agent, fd);\ + \Agent.closeXFTPAgent(agent);\ + \const nameMatch = header.fileName === 'test-file.bin' ? 1 : 0;\ + \const sizeMatch = content.length === originalData.length ? 1 : 0;\ + \let dataMatch = 1;\ + \for (let i = 0; i < content.length; i++) {\ + \ if (content[i] !== originalData[i]) { dataMatch = 0; break; }\ + \};" + <> jsOut "new Uint8Array([nameMatch, sizeMatch, dataMatch])" + result `shouldBe` B.pack [1, 1, 1] + +agentDeleteTest :: XFTPServerConfig -> FilePath -> Expectation +agentDeleteTest cfg caFile = do + createDirectoryIfMissing False "tests/tmp/xftp-server-files" + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . 
fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + result <- + callNode $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import crypto from 'node:crypto';\ + \import * as Addr from './dist/protocol/address.js';\ + \import * as Agent from './dist/agent.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const agent = Agent.newXFTPAgent();\ + \const originalData = new Uint8Array(crypto.randomBytes(50000));\ + \const encrypted = Agent.encryptFileForUpload(originalData, 'del-test.bin');\ + \const {rcvDescription, sndDescription} = await Agent.uploadFile(agent, [server], encrypted);\ + \await Agent.deleteFile(agent, sndDescription);\ + \let deleted = 0;\ + \try {\ + \ await Agent.downloadFile(agent, rcvDescription);\ + \} catch (e) {\ + \ deleted = 1;\ + \}\ + \Agent.closeXFTPAgent(agent);" + <> jsOut "new Uint8Array([deleted])" + result `shouldBe` B.pack [1] + +agentRedirectTest :: XFTPServerConfig -> FilePath -> Expectation +agentRedirectTest cfg caFile = do + createDirectoryIfMissing False "tests/tmp/xftp-server-files" + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . 
fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + result <- + callNode $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import crypto from 'node:crypto';\ + \import * as Addr from './dist/protocol/address.js';\ + \import * as Agent from './dist/agent.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const agent = Agent.newXFTPAgent();\ + \const originalData = new Uint8Array(crypto.randomBytes(100000));\ + \const encrypted = Agent.encryptFileForUpload(originalData, 'redirect-test.bin');\ + \const {rcvDescription, uri} = await Agent.uploadFile(agent, [server], encrypted, {redirectThreshold: 50});\ + \const fd = Agent.decodeDescriptionURI(uri);\ + \const hasRedirect = fd.redirect !== null ? 1 : 0;\ + \const {header, content} = await Agent.downloadFile(agent, fd);\ + \Agent.closeXFTPAgent(agent);\ + \const nameMatch = header.fileName === 'redirect-test.bin' ? 1 : 0;\ + \const sizeMatch = content.length === originalData.length ? 1 : 0;\ + \let dataMatch = 1;\ + \for (let i = 0; i < content.length; i++) {\ + \ if (content[i] !== originalData[i]) { dataMatch = 0; break; }\ + \};" + <> jsOut "new Uint8Array([hasRedirect, nameMatch, sizeMatch, dataMatch])" + result `shouldBe` B.pack [1, 1, 1, 1] + +tsUploadHaskellDownloadTest :: XFTPServerConfig -> FilePath -> Expectation +tsUploadHaskellDownloadTest cfg caFile = do + createDirectoryIfMissing False "tests/tmp/xftp-server-files" + createDirectoryIfMissing False recipientFiles + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . 
fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + (yamlDesc, originalData) <- + callNode2 $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import crypto from 'node:crypto';\ + \import * as Addr from './dist/protocol/address.js';\ + \import * as Agent from './dist/agent.js';\ + \import {encodeFileDescription} from './dist/protocol/description.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const agent = Agent.newXFTPAgent();\ + \const originalData = new Uint8Array(crypto.randomBytes(50000));\ + \const encrypted = Agent.encryptFileForUpload(originalData, 'ts-to-hs.bin');\ + \const {rcvDescription} = await Agent.uploadFile(agent, [server], encrypted);\ + \Agent.closeXFTPAgent(agent);\ + \const yaml = encodeFileDescription(rcvDescription);" + <> jsOut2 "Buffer.from(yaml)" "Buffer.from(originalData)" + let vfd :: ValidFileDescription 'FRecipient = either error id $ strDecode yamlDesc + withAgent 1 agentCfg initAgentServers testDB $ \rcp -> do + runRight_ $ xftpStartWorkers rcp (Just recipientFiles) + _ <- runRight $ xftpReceiveFile rcp 1 vfd Nothing True + rfProgress rcp 50000 + (_, _, RFDONE outPath) <- rfGet rcp + downloadedData <- B.readFile outPath + downloadedData `shouldBe` originalData + +tsUploadRedirectHaskellDownloadTest :: XFTPServerConfig -> FilePath -> Expectation +tsUploadRedirectHaskellDownloadTest cfg caFile = do + createDirectoryIfMissing False "tests/tmp/xftp-server-files" + createDirectoryIfMissing False recipientFiles + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . 
fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + (yamlDesc, originalData) <- + callNode2 $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import crypto from 'node:crypto';\ + \import * as Addr from './dist/protocol/address.js';\ + \import * as Agent from './dist/agent.js';\ + \import {encodeFileDescription} from './dist/protocol/description.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const agent = Agent.newXFTPAgent();\ + \const originalData = new Uint8Array(crypto.randomBytes(100000));\ + \const encrypted = Agent.encryptFileForUpload(originalData, 'ts-redirect-to-hs.bin');\ + \const {rcvDescription} = await Agent.uploadFile(agent, [server], encrypted, {redirectThreshold: 50});\ + \Agent.closeXFTPAgent(agent);\ + \const yaml = encodeFileDescription(rcvDescription);" + <> jsOut2 "Buffer.from(yaml)" "Buffer.from(originalData)" + let vfd@(ValidFileDescription fd) :: ValidFileDescription 'FRecipient = either error id $ strDecode yamlDesc + redirect fd `shouldSatisfy` (/= Nothing) + withAgent 1 agentCfg initAgentServers testDB $ \rcp -> do + runRight_ $ xftpStartWorkers rcp (Just recipientFiles) + _ <- runRight $ xftpReceiveFile rcp 1 vfd Nothing True + outPath <- waitRfDone rcp + downloadedData <- B.readFile outPath + downloadedData `shouldBe` originalData + +haskellUploadTsDownloadTest :: XFTPServerConfig -> Expectation +haskellUploadTsDownloadTest cfg = do + createDirectoryIfMissing False "tests/tmp/xftp-server-files" + createDirectoryIfMissing False senderFiles + let filePath = senderFiles <> "/hs-to-ts.bin" + originalData <- B.pack <$> replicateM 50000 (randomIO :: IO Word8) + B.writeFile filePath originalData + withXFTPServerCfg cfg $ \_ -> do + vfd <- withAgent 1 agentCfg initAgentServers testDB $ \sndr -> do + runRight_ $ xftpStartWorkers sndr (Just senderFiles) + _ <- runRight $ xftpSendFile sndr 1 (CF.plain filePath) 1 + sfProgress sndr 50000 + (_, 
_, SFDONE _ [rfd]) <- sfGet sndr + pure rfd + let yamlDesc = strEncode vfd + tmpYaml = "tests/tmp/hs-to-ts-desc.yaml" + tmpData = "tests/tmp/hs-to-ts-data.bin" + B.writeFile tmpYaml yamlDesc + B.writeFile tmpData originalData + result <- + callNode $ + "import fs from 'node:fs';\ + \import sodium from 'libsodium-wrappers-sumo';\ + \import * as Agent from './dist/agent.js';\ + \import {decodeFileDescription, validateFileDescription} from './dist/protocol/description.js';\ + \await sodium.ready;\ + \const yaml = fs.readFileSync('../tests/tmp/hs-to-ts-desc.yaml', 'utf-8');\ + \const expected = new Uint8Array(fs.readFileSync('../tests/tmp/hs-to-ts-data.bin'));\ + \const fd = decodeFileDescription(yaml);\ + \const err = validateFileDescription(fd);\ + \if (err) throw new Error(err);\ + \const agent = Agent.newXFTPAgent();\ + \const {header, content} = await Agent.downloadFile(agent, fd);\ + \Agent.closeXFTPAgent(agent);\ + \const nameMatch = header.fileName === 'hs-to-ts.bin' ? 1 : 0;\ + \const sizeMatch = content.length === expected.length ? 
1 : 0;\ + \let dataMatch = 1;\ + \for (let i = 0; i < content.length; i++) {\ + \ if (content[i] !== expected[i]) { dataMatch = 0; break; }\ + \};" + <> jsOut "new Uint8Array([nameMatch, sizeMatch, dataMatch])" + result `shouldBe` B.pack [1, 1, 1] + +rfProgress :: AgentClient -> Int64 -> IO () +rfProgress c _expected = loop 0 + where + loop prev = do + (_, _, RFPROG rcvd total) <- rfGet c + when (rcvd < total && rcvd > prev) $ loop rcvd + +sfProgress :: AgentClient -> Int64 -> IO () +sfProgress c _expected = loop 0 + where + loop prev = do + (_, _, SFPROG sent total) <- sfGet c + when (sent < total && sent > prev) $ loop sent + +waitRfDone :: AgentClient -> IO FilePath +waitRfDone c = do + ev <- rfGet c + case ev of + (_, _, RFDONE outPath) -> pure outPath + (_, _, RFPROG _ _) -> waitRfDone c + (_, _, RFERR e) -> error $ "RFERR: " <> show e + _ -> error $ "Unexpected event: " <> show ev + +callNode2 :: String -> IO (B.ByteString, B.ByteString) +callNode2 script = do + out <- callNode script + let (len1Bytes, rest1) = B.splitAt 4 out + len1 = fromIntegral (B.index len1Bytes 0) + fromIntegral (B.index len1Bytes 1) * 256 + fromIntegral (B.index len1Bytes 2) * 65536 + fromIntegral (B.index len1Bytes 3) * 16777216 + (data1, rest2) = B.splitAt len1 rest1 + (len2Bytes, rest3) = B.splitAt 4 rest2 + len2 = fromIntegral (B.index len2Bytes 0) + fromIntegral (B.index len2Bytes 1) * 256 + fromIntegral (B.index len2Bytes 2) * 65536 + fromIntegral (B.index len2Bytes 3) * 16777216 + data2 = B.take len2 rest3 + pure (data1, data2) + +jsOut2 :: String -> String -> String +jsOut2 a b = "const __a = " <> a <> "; const __b = " <> b <> "; const __buf = Buffer.alloc(8 + __a.length + __b.length); __buf.writeUInt32LE(__a.length, 0); __a.copy(__buf, 4); __buf.writeUInt32LE(__b.length, 4 + __a.length); __b.copy(__buf, 8 + __a.length); process.stdout.write(__buf);" diff --git a/tests/fixtures/ca.srl b/tests/fixtures/ca.srl new file mode 100644 index 0000000000..1b461e9f37 --- /dev/null +++ 
b/tests/fixtures/ca.srl @@ -0,0 +1 @@ +6395D75F2A7A37CA274B8BE766187EA9ECC64665 diff --git a/tests/fixtures/ed25519/ca.crt b/tests/fixtures/ed25519/ca.crt new file mode 100644 index 0000000000..d487b0de60 --- /dev/null +++ b/tests/fixtures/ed25519/ca.crt @@ -0,0 +1,10 @@ +-----BEGIN CERTIFICATE----- +MIIBazCCAR2gAwIBAgIUSTqS4QptGQWYoukUUuYqC6iV5TMwBQYDK2VwMCoxFjAU +BgNVBAMMDVNNUCBzZXJ2ZXIgQ0ExEDAOBgNVBAoMB1NpbXBsZVgwIBcNMjYwMjAy +MDkxMTM1WhgPMjEyNjAxMDkwOTExMzVaMCoxFjAUBgNVBAMMDVNNUCBzZXJ2ZXIg +Q0ExEDAOBgNVBAoMB1NpbXBsZVgwKjAFBgMrZXADIQAv7I91vFk1tu6bj7J8HfkA +c7vjTnae9LFz+fXXtjkJVqNTMFEwHQYDVR0OBBYEFJSRDsRRvAyWhRMrXfW0Apsw +FbIHMB8GA1UdIwQYMBaAFJSRDsRRvAyWhRMrXfW0ApswFbIHMA8GA1UdEwEB/wQF +MAMBAf8wBQYDK2VwA0EAa9btje9yq4avTR8AOOkLHvGG0F6CskcGUFCkEbdCU+7I +9Qx1E8TlK6SwtLAKGi+qoK89dsdKL7rY2KbSP3SMAg== +-----END CERTIFICATE----- diff --git a/tests/fixtures/ed25519/ca.key b/tests/fixtures/ed25519/ca.key new file mode 100644 index 0000000000..45ca424a1e --- /dev/null +++ b/tests/fixtures/ed25519/ca.key @@ -0,0 +1,3 @@ +-----BEGIN PRIVATE KEY----- +MC4CAQAwBQYDK2VwBCIEINrfCroxhwopILZmG394xna73ethj6Z6IJSdBY2KjmW2 +-----END PRIVATE KEY----- diff --git a/tests/fixtures/ed25519/server.crt b/tests/fixtures/ed25519/server.crt new file mode 100644 index 0000000000..feaa345de5 --- /dev/null +++ b/tests/fixtures/ed25519/server.crt @@ -0,0 +1,10 @@ +-----BEGIN CERTIFICATE----- +MIIBcTCCASOgAwIBAgIUGMY4bIefHdfLBMptm/MOtg3ekGEwBQYDK2VwMCoxFjAU +BgNVBAMMDVNNUCBzZXJ2ZXIgQ0ExEDAOBgNVBAoMB1NpbXBsZVgwIBcNMjYwMjAy +MDkxMTM1WhgPMjEyNjAxMDkwOTExMzVaMBQxEjAQBgNVBAMMCWxvY2FsaG9zdDAq +MAUGAytlcAMhANYHFcaIJ540sL66lt5GmPrd0HX3mogATKrnWHPWQaGmo28wbTAJ +BgNVHRMEAjAAMAsGA1UdDwQEAwIDyDATBgNVHSUEDDAKBggrBgEFBQcDATAdBgNV +HQ4EFgQUQlsiIdymULnrH8KY+N+dd5RQADMwHwYDVR0jBBgwFoAUlJEOxFG8DJaF +Eytd9bQCmzAVsgcwBQYDK2VwA0EAFXpm1Ucdoa4W1ZPE/28FRkoHeHiEfyHX0NFx +qz7fiV6ys6KnnlC+xLDX0HVLcppImdnm4qmKddCagRfE7h0zAw== +-----END CERTIFICATE----- diff --git a/tests/fixtures/ed25519/server.key 
b/tests/fixtures/ed25519/server.key new file mode 100644 index 0000000000..065e2e7e23 --- /dev/null +++ b/tests/fixtures/ed25519/server.key @@ -0,0 +1,3 @@ +-----BEGIN PRIVATE KEY----- +MC4CAQAwBQYDK2VwBCIEIJjrvSfyWU9Xdiery1u85BK0Syw5jmxIJdzo0idiIasu +-----END PRIVATE KEY----- diff --git a/tests/fixtures/web.crt b/tests/fixtures/web.crt index 4de287b1e2..570cfc58d4 100644 --- a/tests/fixtures/web.crt +++ b/tests/fixtures/web.crt @@ -1,34 +1,61 @@ -----BEGIN CERTIFICATE----- -MIIDnTCCAx2gAwIBAgIUFhZZsKj9uBgGnUrr+Cf3XFf7t6IwBQYDK2VxMCoxFjAU -BgNVBAMMDVNNUCBzZXJ2ZXIgQ0ExEDAOBgNVBAoMB1NpbXBsZVgwIBcNMjQwOTI2 -MTIyNTEyWhgPNDc2MjA4MjMxMjI1MTJaMBQxEjAQBgNVBAMMCWxvY2FsaG9zdDCC -AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALB59b8oyxP5YtXI1kemBzJU -Pt0xLN/Tmzdul283DhbNCJV+eUn4fNz+PjiRS/F2vZLb3WXInPi3bc57hw2Yu94o -7MXH5DTWkaubNq0bV0Koi17zZBSCOOq+MbPN7bUT1sOwOHadLh3IWTfkz9EufowD -ivpymNKWbeAHMXlXsBJnHfHuM05MWlP87PTHd3D7YQmmgbISgEGG4GchWBqnnCxx -gOOa09f/n+gWJFbN3hkbVZKMEpT5gu9WWsgv9BDhJzcBSw13MMz0sByxYKzhwQBJ -ikFz+16AttZ0ccoDaWwajZzK8+yfFv9T3b8kWmioHi2dw2vBgSove78liUqYCsOU -Bt5MNk3P037KgSJPdp6azsF3bMKmPssEhT9vHMPgSkiBfmBlJ7dTTRd9dh/cLKIO -AMzu4O+pEodIOJDXTARBE6VX1qoEZQuft5+ljVy4i9ySpmHnkxLocF40rKV1G0c5 -LnVNTtr5GokC9sfIXZPZw0EEpk3eAseNWccwuyRfHQfL6yjcDig2IdLvLVcm9JyA -2P5QpP15EoA3Ow9uX8HmBbSFe1F35rqcNwY0lhDXEboSA/X4xDLnu4aVhNPiUnRq -NXqVlgz5ybRAUHd8fDBwK8fT5VhvuEnCja7+8hVc33gK56vu+28ZMkN2Y4z0GNQd -iamPUZJlUcCJzNI2cz27AgMBAAGjbzBtMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgPI -MBMGA1UdJQQMMAoGCCsGAQUFBwMBMB0GA1UdDgQWBBSWiPT6Nl13/CTjaHCkHp17 -GWoyvzAfBgNVHSMEGDAWgBQcUJvR7mm26yxMQfCsWgbnwMmJVDAFBgMrZXEDcwDC -DTbvSA61ydoRA8mTHFW1EYL+xfQjo0aH56N1Aqn47DzLGQZjP/fxoW929+Jwoiz0 -UgUtUAeFjgA9wfvDv7mMm/K4wqyiZzFuWVZdQV6AUwBJK0hN5qlXpvJzMKLrj3Ap -dRELAgLJvC2e/xVc3dXSFwA= +MIIFQTCCAymgAwIBAgIUZ7vJLAGbbk9wG8fLSTClM6NneS4wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAwwLWEZUUCBXZWIgQ0EwIBcNMjYwMjEwMjMzMTQxWhgPNDc2 +NDAxMDcyMzMxNDFaMBQxEjAQBgNVBAMMCWxvY2FsaG9zdDCCAiIwDQYJKoZIhvcN 
+AQEBBQADggIPADCCAgoCggIBALB59b8oyxP5YtXI1kemBzJUPt0xLN/Tmzdul283 +DhbNCJV+eUn4fNz+PjiRS/F2vZLb3WXInPi3bc57hw2Yu94o7MXH5DTWkaubNq0b +V0Koi17zZBSCOOq+MbPN7bUT1sOwOHadLh3IWTfkz9EufowDivpymNKWbeAHMXlX +sBJnHfHuM05MWlP87PTHd3D7YQmmgbISgEGG4GchWBqnnCxxgOOa09f/n+gWJFbN +3hkbVZKMEpT5gu9WWsgv9BDhJzcBSw13MMz0sByxYKzhwQBJikFz+16AttZ0ccoD +aWwajZzK8+yfFv9T3b8kWmioHi2dw2vBgSove78liUqYCsOUBt5MNk3P037KgSJP +dp6azsF3bMKmPssEhT9vHMPgSkiBfmBlJ7dTTRd9dh/cLKIOAMzu4O+pEodIOJDX +TARBE6VX1qoEZQuft5+ljVy4i9ySpmHnkxLocF40rKV1G0c5LnVNTtr5GokC9sfI +XZPZw0EEpk3eAseNWccwuyRfHQfL6yjcDig2IdLvLVcm9JyA2P5QpP15EoA3Ow9u +X8HmBbSFe1F35rqcNwY0lhDXEboSA/X4xDLnu4aVhNPiUnRqNXqVlgz5ybRAUHd8 +fDBwK8fT5VhvuEnCja7+8hVc33gK56vu+28ZMkN2Y4z0GNQdiamPUZJlUcCJzNI2 +cz27AgMBAAGjgYYwgYMwFAYDVR0RBA0wC4IJbG9jYWxob3N0MAkGA1UdEwQCMAAw +CwYDVR0PBAQDAgPIMBMGA1UdJQQMMAoGCCsGAQUFBwMBMB0GA1UdDgQWBBSWiPT6 +Nl13/CTjaHCkHp17GWoyvzAfBgNVHSMEGDAWgBTrt7AAg953MNz2yI3mMIOGdhp4 +xTANBgkqhkiG9w0BAQsFAAOCAgEAdBNI0bmuthjyzKxR99GLm/hYPCi5GobQcv35 +sWzU/ivzkrKrjh7lZewc6Y5TyINgYYtLTnX82pJJdHDJQ8tQ2whW3eyiVF988HSO +4Upw0TyyCAPN2PoCPQ338tfSwcNC63cK7z6/8aRFm9zMY+Lu14s2pDU1ry8bY8CZ +pYDWT1qNgzBCt0geX93rU48RWO0/hdTWZksVfErDjhogtyV1DiEq6+fteRSjCNHV +qQmgKoRNdphnduR/JMDWcHpmPdCqq4ffIGOsa3qRdjMqNTCc5Jgp1M92bXgbg+kh +6K9PEcC+YKhTlJRkLxw98fxkZ16iFgKmPFfj9X4yye38lJJimK6c8/lXQcUQxKO7 +cqoIPVQ65xSxIxIprGDtF6CHZKGMkcNOycQjqEGq46qSXoNHNO47bheDlyCpmDIq +B966RIpcOIZxsSn3mFLYK8vxdNMu4MuUmyoSQlgKGVchiSjErpQxyL+Ra1SSiZdZ +uKxYQOXnCqg8VZKGmwRSCOSnwXT1bS8bc0wVe7MQpBnwWDjH7/GpfWXjvGmKfRIN +loJ7I4akQ25xMa5CGzzsy5COJrXbq8vEDgJMZq02dgfVibK7minLSeOT7+xB1+pW ++sULqQgT+zOfVtdzQ7MrowNlEkUA2Sl9loQ9yhPHy5y4e6Z0Wv9yBps07dMdf8Oq +mkhfGdY= -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- -MIIBtjCCATagAwIBAgIUe2PryrWo0xXX9vcA3WfbCzcdmgAwBQYDK2VxMCoxFjAU -BgNVBAMMDVNNUCBzZXJ2ZXIgQ0ExEDAOBgNVBAoMB1NpbXBsZVgwIBcNMjIwMTEx -MTExNjM5WhgPNDc1OTEyMDgxMTE2MzlaMCoxFjAUBgNVBAMMDVNNUCBzZXJ2ZXIg -Q0ExEDAOBgNVBAoMB1NpbXBsZVgwQzAFBgMrZXEDOgCAcvFwVicR+RLZpiEWPFNR 
-XYTbf+mFcX1NHIyPQDugFwOCgqJAW1fsjYgFhtQJSMH/lc1N7clfm4CjUzBRMB0G -A1UdDgQWBBQcUJvR7mm26yxMQfCsWgbnwMmJVDAfBgNVHSMEGDAWgBQcUJvR7mm2 -6yxMQfCsWgbnwMmJVDAPBgNVHRMBAf8EBTADAQH/MAUGAytlcQNzAAAP/hMPNxyW -fyJi+iJViodU+C/aklnvHtjh5P3AbiVCSUfY6+PEdvkC8Ov0pBAYpYi5ukSNNVXl -ABVRlipB+vOcLQStNyaZ7kXzQ2IO/0btmIidh+G6SP8I4aytYIYYcV5pEUZpG1L1 -57g8P29SDv81AA== +MIIFDzCCAvegAwIBAgIUI0uZLHpqLKV7FHEJonLx8bsild8wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAwwLWEZUUCBXZWIgQ0EwIBcNMjYwMjEwMjMzMTM2WhgPNDc2 +NDAxMDcyMzMxMzZaMBYxFDASBgNVBAMMC1hGVFAgV2ViIENBMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAycPQtKmG0tzXtJz46Jh7IWwRpg1YmvR+n8KM +sfCKKFmMYECHIQcHbKd/2aerElHmm3t13NCBn3biB1Rh1+tB8g9pNOadxSnl7Www +QTI4uIsTeRVLOcwXkGg3BbQd7DKGQPEHPsS1gE47CGB+W48kndhzm5jrXeq/z/gJ +mdiimrGWP+RP6touApyyjzDoEV6dmI5fcnf1Y0YCGQT7hEnA/OZH8YHZf+CbUnxZ +B4BUuaUko+oji5OG2+UjI4X0CYmdvlAuUZ1uDe8kwQaJIgpnJidVBcCZP93VUr0I +Upm8wJXoQdIAvfbT32LTMPcno0uyv9FmCEJY8fdteS1ByLZYNgM09IisEEIzVf8A +Mx35vxm1Q7eCr8aoiOlgaA3EmTExhLuAYa1XbGSPwahOOtdLfdkr2Vxp2OdxY1AE +Ze9b2iVvSdlwxpKRkaEI0rleEnBJkvmNP4dG2JBIv7PPu4OVfHHVCFmhFUxAjrcf +FDyskq+58Wx8vvcTNeAGsv/fFGmt5C/muhOOZnm2ig6TRaE72QHZqfBA2PDKWC3p +C+Oltm+9jkl1Ofo1IMFKf8fR3plltGez4vL4gc9o5aGoAB1f99Ig3D2Q40lOYucf +GjsXQue+3pqzj8Dz90s3T1Rr340JtFpsw5+THUeIz/qM+mZiOvWOOM77k8iiKjOD +VHRkBK0CAwEAAaNTMFEwHQYDVR0OBBYEFOu3sACD3ncw3PbIjeYwg4Z2GnjFMB8G +A1UdIwQYMBaAFOu3sACD3ncw3PbIjeYwg4Z2GnjFMA8GA1UdEwEB/wQFMAMBAf8w +DQYJKoZIhvcNAQELBQADggIBAGa2kZszMmzN55No/DFcnYchaszJ2Pn5duU/cjNf +ZTme6e4CbB60Ot4gANnQa5geOiVGWw3WMNpqwInlMAKepuElhniZV3jDuKfsdGyy +yicXzZe6QTDQc8uMqMU7eAZo/nbx+hObyh094a5KOXODiTnbAT7+udqlIcPGJkWa +Sll5hZtlo3mIK59WsTjA1RxRAmjF/BxubBl1iSnxuO2fXLiO+EWTBMFMsVWZYFRC +KK1HY4E0zVC+9qrjIPcc9nLYw1UV8EVFXcOSxvvMEsQ9mzGqhg6A65nD6TVOUSbf +t+IWQXEvP6cDxTrsmdV4kTSeGPte2ANascE9BMOXgmWS/6mpa2NWKhbGvGZK/yTP +3mHIEMxxve8KLeiFv2bQaHojIco6i85Y3a9EPGNfuzCsxXuK0lmT5A3mih3FgnUF +KpVM4ci4O2qWjdrby6ydPjdU/KAywfNPg/htxoHen8wXm4fAVwy1JD7dM9OxgyyP +8c+zThvQ8ueHrrzv68LPpMMwH5wskdSWt9+1bZyAZJN5MD05eqEJ6KoXj+oTnLTC 
+4ERV1yxtECM8zqhT2fM7+UOKVjBRQWmZj4zlowRhogENHQKEXxghrHy0aKbz2daW +usrIscN+SG9zHw3iq6Gf+hVKuLfTDPuBQDL+kIPep8mM0NI/Tayefzt7fyRn4R4W +QBe2 -----END CERTIFICATE----- diff --git a/tests/fixtures/web_ca.crt b/tests/fixtures/web_ca.crt new file mode 100644 index 0000000000..a9edf4966f --- /dev/null +++ b/tests/fixtures/web_ca.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFDzCCAvegAwIBAgIUI0uZLHpqLKV7FHEJonLx8bsild8wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAwwLWEZUUCBXZWIgQ0EwIBcNMjYwMjEwMjMzMTM2WhgPNDc2 +NDAxMDcyMzMxMzZaMBYxFDASBgNVBAMMC1hGVFAgV2ViIENBMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAycPQtKmG0tzXtJz46Jh7IWwRpg1YmvR+n8KM +sfCKKFmMYECHIQcHbKd/2aerElHmm3t13NCBn3biB1Rh1+tB8g9pNOadxSnl7Www +QTI4uIsTeRVLOcwXkGg3BbQd7DKGQPEHPsS1gE47CGB+W48kndhzm5jrXeq/z/gJ +mdiimrGWP+RP6touApyyjzDoEV6dmI5fcnf1Y0YCGQT7hEnA/OZH8YHZf+CbUnxZ +B4BUuaUko+oji5OG2+UjI4X0CYmdvlAuUZ1uDe8kwQaJIgpnJidVBcCZP93VUr0I +Upm8wJXoQdIAvfbT32LTMPcno0uyv9FmCEJY8fdteS1ByLZYNgM09IisEEIzVf8A +Mx35vxm1Q7eCr8aoiOlgaA3EmTExhLuAYa1XbGSPwahOOtdLfdkr2Vxp2OdxY1AE +Ze9b2iVvSdlwxpKRkaEI0rleEnBJkvmNP4dG2JBIv7PPu4OVfHHVCFmhFUxAjrcf +FDyskq+58Wx8vvcTNeAGsv/fFGmt5C/muhOOZnm2ig6TRaE72QHZqfBA2PDKWC3p +C+Oltm+9jkl1Ofo1IMFKf8fR3plltGez4vL4gc9o5aGoAB1f99Ig3D2Q40lOYucf +GjsXQue+3pqzj8Dz90s3T1Rr340JtFpsw5+THUeIz/qM+mZiOvWOOM77k8iiKjOD +VHRkBK0CAwEAAaNTMFEwHQYDVR0OBBYEFOu3sACD3ncw3PbIjeYwg4Z2GnjFMB8G +A1UdIwQYMBaAFOu3sACD3ncw3PbIjeYwg4Z2GnjFMA8GA1UdEwEB/wQFMAMBAf8w +DQYJKoZIhvcNAQELBQADggIBAGa2kZszMmzN55No/DFcnYchaszJ2Pn5duU/cjNf +ZTme6e4CbB60Ot4gANnQa5geOiVGWw3WMNpqwInlMAKepuElhniZV3jDuKfsdGyy +yicXzZe6QTDQc8uMqMU7eAZo/nbx+hObyh094a5KOXODiTnbAT7+udqlIcPGJkWa +Sll5hZtlo3mIK59WsTjA1RxRAmjF/BxubBl1iSnxuO2fXLiO+EWTBMFMsVWZYFRC +KK1HY4E0zVC+9qrjIPcc9nLYw1UV8EVFXcOSxvvMEsQ9mzGqhg6A65nD6TVOUSbf +t+IWQXEvP6cDxTrsmdV4kTSeGPte2ANascE9BMOXgmWS/6mpa2NWKhbGvGZK/yTP +3mHIEMxxve8KLeiFv2bQaHojIco6i85Y3a9EPGNfuzCsxXuK0lmT5A3mih3FgnUF +KpVM4ci4O2qWjdrby6ydPjdU/KAywfNPg/htxoHen8wXm4fAVwy1JD7dM9OxgyyP 
+8c+zThvQ8ueHrrzv68LPpMMwH5wskdSWt9+1bZyAZJN5MD05eqEJ6KoXj+oTnLTC +4ERV1yxtECM8zqhT2fM7+UOKVjBRQWmZj4zlowRhogENHQKEXxghrHy0aKbz2daW +usrIscN+SG9zHw3iq6Gf+hVKuLfTDPuBQDL+kIPep8mM0NI/Tayefzt7fyRn4R4W +QBe2 +-----END CERTIFICATE----- diff --git a/tests/fixtures/web_ca.key b/tests/fixtures/web_ca.key new file mode 100644 index 0000000000..4295542025 --- /dev/null +++ b/tests/fixtures/web_ca.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDJw9C0qYbS3Ne0 +nPjomHshbBGmDVia9H6fwoyx8IooWYxgQIchBwdsp3/Zp6sSUeabe3Xc0IGfduIH +VGHX60HyD2k05p3FKeXtbDBBMji4ixN5FUs5zBeQaDcFtB3sMoZA8Qc+xLWATjsI +YH5bjySd2HObmOtd6r/P+AmZ2KKasZY/5E/q2i4CnLKPMOgRXp2Yjl9yd/VjRgIZ +BPuEScD85kfxgdl/4JtSfFkHgFS5pSSj6iOLk4bb5SMjhfQJiZ2+UC5RnW4N7yTB +BokiCmcmJ1UFwJk/3dVSvQhSmbzAlehB0gC99tPfYtMw9yejS7K/0WYIQljx9215 +LUHItlg2AzT0iKwQQjNV/wAzHfm/GbVDt4KvxqiI6WBoDcSZMTGEu4BhrVdsZI/B +qE4610t92SvZXGnY53FjUARl71vaJW9J2XDGkpGRoQjSuV4ScEmS+Y0/h0bYkEi/ +s8+7g5V8cdUIWaEVTECOtx8UPKySr7nxbHy+9xM14Aay/98Uaa3kL+a6E45mebaK +DpNFoTvZAdmp8EDY8MpYLekL46W2b72OSXU5+jUgwUp/x9HemWW0Z7Pi8viBz2jl +oagAHV/30iDcPZDjSU5i5x8aOxdC577emrOPwPP3SzdPVGvfjQm0WmzDn5MdR4jP ++oz6ZmI69Y44zvuTyKIqM4NUdGQErQIDAQABAoICABULRvPo8KaCoT711mJQMRuJ +zYdvweubj69zF8ChcY+G04TghheUR5p2F/goLEgjYbWa2W64EqUCvc186B2grIam +Q9dgLFl5psEuNRQ/vDnHS7cn2OpE1rpZnE+Me0h89pLEcPiDhNjGkBKuGc/L7MpH +3rr+ZqIrj2kOGlZBCaiv2Bd+5TT+H6lnFZqow7O4FiDozJzAVUhO734TjnY8SRQ3 +oo5WeEyFrT2buZ92K6AXUOyFycJPl1PNIO6eFJeAEoNckpAxpt5uMHuvhlMWGa8Z +kB0i+vj11r+j9r/CyDSgDhL7Z9dobK3SfWuQg8Jc7V5jZUx8OxG1uGXYqhFYr3d3 +N2CMz5f6T6VoM/f9WNtcaKNfeRHfysxLU1P7g57l5jo1ktts5lNOsrg2Pt8isAGF +Yb13x1RRAKYlBzkuqUOx99g/wZvAHRzug2GBdmKpVTcFbqUQjhglxNm8mkYsb2ed +RGs0jkq9IlJI7UxRaZbWCgBCwz/C2JBRpCbcom6WHqWSwjALGVI9inqZa9/Q01O4 +ZxXwBKpjVgkItqccjFOMppbx1HcUM5qUsRsdC3kGihWhAjO58j25yD4mO5JAlU6/ +u/EOOJ+F6/Dw6lDIfrkvkrt/GXruL7TY2BoIYhjOyXO/KEba7qyEShCtXMi44aj0 +m0Ldt1HKm3vbGQpffXsjAoIBAQDy8sGdw4YQQXXpBlASAsij8J+LoUlWOV+bxORo 
+zES4VbF8OP3wLMgLj69nfnwdq1v3zggPHCdMmu4GqnD12HzalXDjT9T6ujxFawJK +ApyPe3c7iBDUSX3tCfkEekKy6GPeDmFOrxeIuBA/lADYMGIY7qQisen2O5Qt0Vb0 +/kRyqrIDsWcVH4Cjx4zFFeOQ14vAzP69K/BeZVVJkeimY9EMVu2NJIrJmtBCw0yI +7CaJa8Vu36y/u83ZC2jXxN1MhEJJCeBc2ydvyEjhuzvq2/dQbFHow6z/4AkAn9yS +B1IIMz3X911FYxRnrTlDknWfB2r3ZVwHtXvIKjAtXT+OM1oPAoIBAQDUmqsOG4Ak +lrXKPCs90YWqLnzOQPamyec3HP3Ma98Kw+O7ulQTFvjf+YB+OplFdtpaI04gk0Gm +Vi5diu+HoABil//jduUOqUj5tNifv60XnrzVpVzek8oPphYCTq7P+vmBN66qQ6Sw +JlYo6SBJZHR3FCvfIUTxQBPqZ9NL061lKt5g38YIxl8z2pK0JxrEfaxo7IXIFCm3 +/7DmtiikvQOrMLZ6r0pTnPaBf5+f8ySKU4x4kFJu3Hb9idBc1r13WMjqRPaqeS8p +TAT6icyktA0N/aAoTbNj99wRvfjHvlXx9jh3jy9Ka2Dn2cE+YtY1/bIAvFQxiRzt +8EW6sBKrbyGDAoIBAQCYsHN5WNpYOxwFAV+vgiphxqgvVIXH+DUbrEo1hzQlek4b +GaKXoT107rA55mfRKdKaUtYD0Rjt721rqRFnodEOe9/ALXtYvVWF93Qv2aZWEy3j +r2eMVEgdgygLZV+oG6Awfm8vyaGL3srvenBxby8oJkvoNlMp21YM2cXCIlAYlSle +Ys+7mdn6lT7m2xP0A1QlL3FmqUffu+Y3X8mNUaygCb4w9+d2P6NmYmImp+ysb5xd +S5zBwCHmqGITQfonzfPu/ZMSKPaHLaSIomlM+URdOkbceKaxBjgCOXaiHJG076eN +pTzskBHR+y/DRThBY6MZq42Els4eBk3TJQj9sU6HAoIBAQCShsa9wlZe4UAJUc67 +nFvzHncF7+AOs7iXU3PYH8BpOvkJuTGYtoxwURUt6lUYewGifhKqgNMOQPdToR3U +64FYckn6C0dzA1k4QFvMPd6eGNkspfuLq2/nuSASFwiEbwTm+el3j4dBoCphp8qI +yqM6LrzN27AYVYFkXIpUCF/JCfKZ8aAbDB0xL8NMRmc8ZSEeb2UEsGDQX3kciQ8Z ++us8YSZjB8zCM7vxJHRvWLQmYc6+iTlHDsszknf4hEewqZBPZZhbhYnrfGkyAyb3 +nOAidFqdbG/mxjz2PWfowlWZnYjtXdHKCJeRM5Lr3FKmg2La/vFH8qftlVt5f0Be +xwjhAoIBAQDSznM/ZO+yVMOME/L3Rphaep+vxzO2WHnlIeTFCep/fYS4PeeXvGQu +Sg3JzMk6hh93OeZ0TYj0QCoi4/1RhQ2pbtIX0+FU/FtdfXJKWz9NXTT81DZtJK+n +krVlutTEGSeDROkymcFmn56vGC6D9N/kv0tUoQ2tr4/5WLqE868Kl2Q3/AoLbIiF +lH83oNTnXicaJAfrDlAlMieRmhIYbjYBdqjdiSkxG7N3t8TyTR6VRzCM7jy2/ZN1 +pFbX1ol+Dbf9zm6f3erTn1TnPju6gBZheQoCn2w7O00NapCgd2w6pVJT0I/srMS0 +7C4nNFR/SOa1Zpf0FsRY3pYK6usQu/9T +-----END PRIVATE KEY----- diff --git a/tests/fixtures/web_ca.srl b/tests/fixtures/web_ca.srl new file mode 100644 index 0000000000..f629f9cd08 --- /dev/null +++ b/tests/fixtures/web_ca.srl @@ -0,0 +1 @@ +67BBC92C019B6E4F701BC7CB4930A533A367792E 
diff --git a/xftp-web/.gitignore b/xftp-web/.gitignore new file mode 100644 index 0000000000..507b50d80c --- /dev/null +++ b/xftp-web/.gitignore @@ -0,0 +1,4 @@ +node_modules/ +dist/ +dist-web/ +package-lock.json diff --git a/xftp-web/README.md b/xftp-web/README.md new file mode 100644 index 0000000000..9b118b336a --- /dev/null +++ b/xftp-web/README.md @@ -0,0 +1,47 @@ +# xftp-web + +Browser-compatible XFTP file transfer client in TypeScript. + +## Prerequisites + +- Haskell toolchain with `cabal` (to build `xftp-server`) +- Node.js 20+ +- Chromium system dependencies (see below) + +## Setup + +```bash +# Build the XFTP server binary (from repo root) +cabal build xftp-server + +# Install JS dependencies +cd xftp-web +npm install + +# Install Chromium for Playwright (browser tests) +npx playwright install chromium +``` + +If Chromium fails to launch due to missing system libraries, install them with: + +```bash +# Requires root +npx playwright install-deps chromium +``` + +## Running tests + +```bash +# Browser round-trip test (vitest + Playwright headless Chromium) +npm run test +``` + +The browser test automatically starts an `xftp-server` instance on port 7000 via `globalSetup`, using certs from `tests/fixtures/`. + +## Build + +```bash +npm run build +``` + +Output goes to `dist/`. 
diff --git a/xftp-web/package.json b/xftp-web/package.json new file mode 100644 index 0000000000..67610689d9 --- /dev/null +++ b/xftp-web/package.json @@ -0,0 +1,37 @@ +{ + "name": "xftp-web", + "version": "0.1.0", + "private": true, + "type": "module", + "main": "dist/index.js", + "types": "dist/index.d.ts", + "scripts": { + "postinstall": "ln -sf ../../../libsodium-sumo/dist/modules-sumo-esm/libsodium-sumo.mjs node_modules/libsodium-wrappers-sumo/dist/modules-sumo-esm/libsodium-sumo.mjs && npx playwright install chromium", + "build": "tsc", + "test": "vitest", + "dev": "npx tsx test/runSetup.ts && vite --mode development", + "build:local": "npx tsx test/runSetup.ts && vite build --mode development", + "build:prod": "vite build --mode production", + "preview": "vite preview", + "preview:local": "npm run build:local && vite preview", + "preview:prod": "vite build --mode production && vite preview", + "check:web": "tsc -p tsconfig.web.json --noEmit && tsc -p tsconfig.worker.json --noEmit", + "test:page": "playwright test test/page.spec.ts" + }, + "devDependencies": { + "@types/libsodium-wrappers-sumo": "^0.7.8", + "@types/node": "^20.0.0", + "@types/pako": "^2.0.3", + "@vitest/browser": "^3.0.0", + "@playwright/test": "^1.50.0", + "playwright": "^1.50.0", + "typescript": "^5.4.0", + "vite": "^6.0.0", + "vitest": "^3.0.0" + }, + "dependencies": { + "@noble/curves": "^1.9.7", + "libsodium-wrappers-sumo": "^0.7.13", + "pako": "^2.1.0" + } +} diff --git a/xftp-web/playwright.config.ts b/xftp-web/playwright.config.ts new file mode 100644 index 0000000000..bdfd579f8e --- /dev/null +++ b/xftp-web/playwright.config.ts @@ -0,0 +1,27 @@ +import {defineConfig} from '@playwright/test' + +export default defineConfig({ + testDir: './test', + testMatch: '**/*.spec.ts', + timeout: 60_000, + use: { + ignoreHTTPSErrors: true, + launchOptions: { + // --ignore-certificate-errors makes fetch() accept self-signed certs + args: [ + '--ignore-certificate-errors', + 
'--ignore-certificate-errors-spki-list', + '--allow-insecure-localhost', + ] + } + }, + // Note: globalSetup runs AFTER webServer plugins in playwright 1.58+, so we + // run setup from the webServer command instead + globalTeardown: './test/globalTeardown.ts', + webServer: { + // Run setup script first (starts XFTP server + proxy), then build, then preview + command: 'npx tsx test/runSetup.ts && npx vite build --mode development && npx vite preview --mode development', + url: 'http://localhost:4173', + reuseExistingServer: false + }, +}) diff --git a/xftp-web/src/agent.ts b/xftp-web/src/agent.ts new file mode 100644 index 0000000000..c8c8922341 --- /dev/null +++ b/xftp-web/src/agent.ts @@ -0,0 +1,389 @@ +// XFTP upload/download orchestration + URI encoding -- Simplex.FileTransfer.Client.Main +// +// Combines all building blocks: encryption, chunking, XFTP client commands, +// file descriptions, and DEFLATE-compressed URI encoding. + +import pako from "pako" +import {encryptFile, encodeFileHeader} from "./crypto/file.js" +import {generateEd25519KeyPair, encodePubKeyEd25519, encodePrivKeyEd25519, decodePrivKeyEd25519, ed25519KeyPairFromSeed} from "./crypto/keys.js" +import {sha512Streaming} from "./crypto/digest.js" +import {prepareChunkSizes, prepareChunkSpecs, getChunkDigest, fileSizeLen, authTagSize} from "./protocol/chunks.js" +import { + encodeFileDescription, decodeFileDescription, validateFileDescription, + base64urlEncode, base64urlDecode, + type FileDescription +} from "./protocol/description.js" +import type {FileInfo} from "./protocol/commands.js" +import { + createXFTPChunk, uploadXFTPChunk, downloadXFTPChunk, downloadXFTPChunkRaw, + deleteXFTPChunk, type XFTPClientAgent +} from "./client.js" +export {newXFTPAgent, closeXFTPAgent, type XFTPClientAgent, type TransportConfig} from "./client.js" +import {processDownloadedFile, decryptReceivedChunk} from "./download.js" +import type {XFTPServer} from "./protocol/address.js" +import {formatXFTPServer, 
parseXFTPServer} from "./protocol/address.js" +import type {FileHeader} from "./crypto/file.js" + +// -- Types + +interface SentChunk { + chunkNo: number + senderId: Uint8Array + senderKey: Uint8Array // 64B libsodium Ed25519 private key + recipientId: Uint8Array + recipientKey: Uint8Array // 64B libsodium Ed25519 private key + chunkSize: number + digest: Uint8Array // SHA-256 + server: XFTPServer +} + +export interface EncryptedFileMetadata { + digest: Uint8Array // SHA-512 of encData + key: Uint8Array // 32B SbKey + nonce: Uint8Array // 24B CbNonce + chunkSizes: number[] +} + +export interface EncryptedFileInfo extends EncryptedFileMetadata { + encData: Uint8Array +} + +export interface UploadResult { + rcvDescription: FileDescription + sndDescription: FileDescription + uri: string // base64url-encoded compressed YAML (no leading #) +} + +export interface DownloadResult { + header: FileHeader + content: Uint8Array +} + +// -- URI encoding/decoding (RFC section 4.1: DEFLATE + base64url) + +export function encodeDescriptionURI(fd: FileDescription): string { + const yaml = encodeFileDescription(fd) + const compressed = pako.deflateRaw(new TextEncoder().encode(yaml)) + return base64urlEncode(compressed) +} + +export function decodeDescriptionURI(fragment: string): FileDescription { + const compressed = base64urlDecode(fragment) + const yaml = new TextDecoder().decode(pako.inflateRaw(compressed)) + const fd = decodeFileDescription(yaml) + const err = validateFileDescription(fd) + if (err) throw new Error("decodeDescriptionURI: " + err) + return fd +} + +// -- Upload + +export function encryptFileForUpload(source: Uint8Array, fileName: string): EncryptedFileInfo { + const key = new Uint8Array(32) + const nonce = new Uint8Array(24) + crypto.getRandomValues(key) + crypto.getRandomValues(nonce) + const fileHdr = encodeFileHeader({fileName, fileExtra: null}) + const fileSize = BigInt(fileHdr.length + source.length) + const payloadSize = Number(fileSize) + fileSizeLen + 
authTagSize + const chunkSizes = prepareChunkSizes(payloadSize) + const encSize = BigInt(chunkSizes.reduce((a, b) => a + b, 0)) + const encData = encryptFile(source, fileHdr, key, nonce, fileSize, encSize) + const digest = sha512Streaming([encData]) + console.log(`[AGENT-DBG] encrypt: encData.len=${encData.length} digest=${_dbgHex(digest, 64)} chunkSizes=[${chunkSizes.join(',')}]`) + return {encData, digest, key, nonce, chunkSizes} +} + +const DEFAULT_REDIRECT_THRESHOLD = 400 + +export interface UploadOptions { + onProgress?: (uploaded: number, total: number) => void + redirectThreshold?: number + readChunk?: (offset: number, size: number) => Promise +} + +export async function uploadFile( + agent: XFTPClientAgent, + servers: XFTPServer[], + encrypted: EncryptedFileMetadata, + options?: UploadOptions +): Promise { + if (servers.length === 0) throw new Error("uploadFile: servers list is empty") + const {onProgress, redirectThreshold, readChunk: readChunkOpt} = options ?? {} + const readChunk: (offset: number, size: number) => Promise = readChunkOpt + ? readChunkOpt + : ('encData' in encrypted + ? 
(off, sz) => Promise.resolve((encrypted as EncryptedFileInfo).encData.subarray(off, off + sz)) + : () => { throw new Error("uploadFile: readChunk required when encData is absent") }) + const total = encrypted.chunkSizes.reduce((a, b) => a + b, 0) + const specs = prepareChunkSpecs(encrypted.chunkSizes) + + // Pre-assign servers and group by server for parallel upload + const chunkJobs = specs.map((spec, i) => ({ + index: i, + spec, + server: servers[Math.floor(Math.random() * servers.length)] + })) + const byServer = new Map() + for (const job of chunkJobs) { + const key = formatXFTPServer(job.server) + if (!byServer.has(key)) byServer.set(key, []) + byServer.get(key)!.push(job) + } + + // Upload groups in parallel, sequential within each group + const sentChunks: SentChunk[] = new Array(specs.length) + let uploaded = 0 + await Promise.all([...byServer.values()].map(async (jobs) => { + for (const {index, spec, server} of jobs) { + const chunkNo = index + 1 + const sndKp = generateEd25519KeyPair() + const rcvKp = generateEd25519KeyPair() + const chunkData = await readChunk(spec.chunkOffset, spec.chunkSize) + const chunkDigest = getChunkDigest(chunkData) + const fileInfo: FileInfo = { + sndKey: encodePubKeyEd25519(sndKp.publicKey), + size: spec.chunkSize, + digest: chunkDigest + } + const rcvKeysForChunk = [encodePubKeyEd25519(rcvKp.publicKey)] + const {senderId, recipientIds} = await createXFTPChunk( + agent, server, sndKp.privateKey, fileInfo, rcvKeysForChunk + ) + await uploadXFTPChunk(agent, server, sndKp.privateKey, senderId, chunkData) + sentChunks[index] = { + chunkNo, senderId, senderKey: sndKp.privateKey, + recipientId: recipientIds[0], recipientKey: rcvKp.privateKey, + chunkSize: spec.chunkSize, digest: chunkDigest, server + } + uploaded += spec.chunkSize + onProgress?.(uploaded, total) + } + })) + const rcvDescription = buildDescription("recipient", encrypted, sentChunks) + const sndDescription = buildDescription("sender", encrypted, sentChunks) + let uri = 
encodeDescriptionURI(rcvDescription) + let finalRcvDescription = rcvDescription + const threshold = redirectThreshold ?? DEFAULT_REDIRECT_THRESHOLD + if (uri.length > threshold && sentChunks.length > 1) { + finalRcvDescription = await uploadRedirectDescription(agent, servers, rcvDescription) + uri = encodeDescriptionURI(finalRcvDescription) + } + return {rcvDescription: finalRcvDescription, sndDescription, uri} +} + +function buildDescription( + party: "recipient" | "sender", + enc: EncryptedFileMetadata, + chunks: SentChunk[] +): FileDescription { + const defChunkSize = enc.chunkSizes[0] + return { + party, + size: enc.chunkSizes.reduce((a, b) => a + b, 0), + digest: enc.digest, + key: enc.key, + nonce: enc.nonce, + chunkSize: defChunkSize, + chunks: chunks.map(c => ({ + chunkNo: c.chunkNo, + chunkSize: c.chunkSize, + digest: c.digest, + replicas: [{ + server: formatXFTPServer(c.server), + replicaId: party === "recipient" ? c.recipientId : c.senderId, + replicaKey: encodePrivKeyEd25519(party === "recipient" ? 
c.recipientKey : c.senderKey) + }] + })), + redirect: null + } +} + +async function uploadRedirectDescription( + agent: XFTPClientAgent, + servers: XFTPServer[], + innerFd: FileDescription +): Promise { + const yaml = encodeFileDescription(innerFd) + const yamlBytes = new TextEncoder().encode(yaml) + const enc = encryptFileForUpload(yamlBytes, "") + const specs = prepareChunkSpecs(enc.chunkSizes) + const sentChunks: SentChunk[] = [] + for (let i = 0; i < specs.length; i++) { + const spec = specs[i] + const chunkNo = i + 1 + const server = servers[Math.floor(Math.random() * servers.length)] + const sndKp = generateEd25519KeyPair() + const rcvKp = generateEd25519KeyPair() + const chunkData = enc.encData.subarray(spec.chunkOffset, spec.chunkOffset + spec.chunkSize) + const chunkDigest = getChunkDigest(chunkData) + const fileInfo: FileInfo = { + sndKey: encodePubKeyEd25519(sndKp.publicKey), + size: spec.chunkSize, + digest: chunkDigest + } + const rcvKeysForChunk = [encodePubKeyEd25519(rcvKp.publicKey)] + const {senderId, recipientIds} = await createXFTPChunk( + agent, server, sndKp.privateKey, fileInfo, rcvKeysForChunk + ) + await uploadXFTPChunk(agent, server, sndKp.privateKey, senderId, chunkData) + sentChunks.push({ + chunkNo, senderId, senderKey: sndKp.privateKey, + recipientId: recipientIds[0], recipientKey: rcvKp.privateKey, + chunkSize: spec.chunkSize, digest: chunkDigest, server + }) + } + return { + party: "recipient", + size: enc.chunkSizes.reduce((a, b) => a + b, 0), + digest: enc.digest, + key: enc.key, + nonce: enc.nonce, + chunkSize: enc.chunkSizes[0], + chunks: sentChunks.map(c => ({ + chunkNo: c.chunkNo, + chunkSize: c.chunkSize, + digest: c.digest, + replicas: [{ + server: formatXFTPServer(c.server), + replicaId: c.recipientId, + replicaKey: encodePrivKeyEd25519(c.recipientKey) + }] + })), + redirect: {size: innerFd.size, digest: innerFd.digest} + } +} + +// -- Download + +export interface RawDownloadedChunk { + chunkNo: number + dhSecret: Uint8Array 
+ nonce: Uint8Array + body: Uint8Array + digest: Uint8Array +} + +export interface DownloadRawOptions { + onProgress?: (downloaded: number, total: number) => void + concurrency?: number +} + +export async function downloadFileRaw( + agent: XFTPClientAgent, + fd: FileDescription, + onRawChunk: (chunk: RawDownloadedChunk) => Promise, + options?: DownloadRawOptions +): Promise { + const err = validateFileDescription(fd) + if (err) throw new Error("downloadFileRaw: " + err) + const {onProgress, concurrency = 1} = options ?? {} + // Resolve redirect on main thread (redirect data is small) + if (fd.redirect !== null) { + console.log(`[AGENT-DBG] resolving redirect: outer size=${fd.size} chunks=${fd.chunks.length}`) + fd = await resolveRedirect(agent, fd) + console.log(`[AGENT-DBG] resolved: size=${fd.size} chunks=${fd.chunks.length} digest=${Array.from(fd.digest.slice(0, 16)).map(x => x.toString(16).padStart(2, '0')).join('')}…`) + } + const resolvedFd = fd + // Group chunks by server, sequential within each server, parallel across servers + let downloaded = 0 + const byServer = new Map() + for (const chunk of resolvedFd.chunks) { + const srv = chunk.replicas[0]?.server ?? 
"" + if (!byServer.has(srv)) byServer.set(srv, []) + byServer.get(srv)!.push(chunk) + } + await Promise.all([...byServer.entries()].map(async ([srv, chunks]) => { + const server = parseXFTPServer(srv) + for (const chunk of chunks) { + const replica = chunk.replicas[0] + if (!replica) throw new Error("downloadFileRaw: chunk has no replicas") + const seed = decodePrivKeyEd25519(replica.replicaKey) + const kp = ed25519KeyPairFromSeed(seed) + const raw = await downloadXFTPChunkRaw(agent, server, kp.privateKey, replica.replicaId) + console.log(`[AGENT-DBG] chunk=${chunk.chunkNo} body.len=${raw.body.length} expectedChunkSize=${chunk.chunkSize} digest=${_dbgHex(chunk.digest, 32)} body.byteOffset=${raw.body.byteOffset} body.buffer.byteLength=${raw.body.buffer.byteLength}`) + await onRawChunk({ + chunkNo: chunk.chunkNo, + dhSecret: raw.dhSecret, + nonce: raw.nonce, + body: raw.body, + digest: chunk.digest + }) + downloaded += chunk.chunkSize + onProgress?.(downloaded, resolvedFd.size) + } + })) + return resolvedFd +} + +export async function downloadFile( + agent: XFTPClientAgent, + fd: FileDescription, + onProgress?: (downloaded: number, total: number) => void +): Promise { + const chunks: Uint8Array[] = [] + const resolvedFd = await downloadFileRaw(agent, fd, async (raw) => { + chunks[raw.chunkNo - 1] = decryptReceivedChunk( + raw.dhSecret, raw.nonce, raw.body, raw.digest + ) + }, {onProgress}) + const totalSize = chunks.reduce((s, c) => s + c.length, 0) + if (totalSize !== resolvedFd.size) throw new Error("downloadFile: file size mismatch") + const digest = sha512Streaming(chunks) + if (!digestEqual(digest, resolvedFd.digest)) throw new Error("downloadFile: file digest mismatch") + return processDownloadedFile(resolvedFd, chunks) +} + +async function resolveRedirect( + agent: XFTPClientAgent, + fd: FileDescription +): Promise { + const plaintextChunks: Uint8Array[] = new Array(fd.chunks.length) + for (const chunk of fd.chunks) { + const replica = chunk.replicas[0] + if 
(!replica) throw new Error("resolveRedirect: chunk has no replicas") + const server = parseXFTPServer(replica.server) + const seed = decodePrivKeyEd25519(replica.replicaKey) + const kp = ed25519KeyPairFromSeed(seed) + const data = await downloadXFTPChunk(agent, server, kp.privateKey, replica.replicaId, chunk.digest) + plaintextChunks[chunk.chunkNo - 1] = data + } + const totalSize = plaintextChunks.reduce((s, c) => s + c.length, 0) + if (totalSize !== fd.size) throw new Error("resolveRedirect: redirect file size mismatch") + const digest = sha512Streaming(plaintextChunks) + if (!digestEqual(digest, fd.digest)) throw new Error("resolveRedirect: redirect file digest mismatch") + const {content: yamlBytes} = processDownloadedFile(fd, plaintextChunks) + const yamlStr = new TextDecoder().decode(yamlBytes) + const innerFd = decodeFileDescription(yamlStr) + const innerErr = validateFileDescription(innerFd) + if (innerErr) throw new Error("resolveRedirect: inner description invalid: " + innerErr) + if (innerFd.size !== fd.redirect!.size) throw new Error("resolveRedirect: redirect size mismatch") + if (!digestEqual(innerFd.digest, fd.redirect!.digest)) throw new Error("resolveRedirect: redirect digest mismatch") + return innerFd +} + +// -- Delete + +export async function deleteFile(agent: XFTPClientAgent, sndDescription: FileDescription): Promise { + for (const chunk of sndDescription.chunks) { + const replica = chunk.replicas[0] + if (!replica) throw new Error("deleteFile: chunk has no replicas") + const server = parseXFTPServer(replica.server) + const seed = decodePrivKeyEd25519(replica.replicaKey) + const kp = ed25519KeyPairFromSeed(seed) + await deleteXFTPChunk(agent, server, kp.privateKey, replica.replicaId) + } +} + +// -- Internal + +function _dbgHex(b: Uint8Array, n = 8): string { + return Array.from(b.slice(0, n)).map(x => x.toString(16).padStart(2, '0')).join('') +} + +function digestEqual(a: Uint8Array, b: Uint8Array): boolean { + if (a.length !== b.length) 
return false + let diff = 0 + for (let i = 0; i < a.length; i++) diff |= a[i] ^ b[i] + return diff === 0 +} diff --git a/xftp-web/src/client.ts b/xftp-web/src/client.ts new file mode 100644 index 0000000000..52fce11e83 --- /dev/null +++ b/xftp-web/src/client.ts @@ -0,0 +1,447 @@ +// XFTP HTTP/2 client -- Simplex.FileTransfer.Client +// +// Connects to XFTP server via HTTP/2, performs web handshake, +// sends authenticated commands, receives responses. +// +// Uses node:http2 in Node.js (tests), fetch() in browsers. + +import { + encodeAuthTransmission, encodeTransmission, decodeTransmission, + XFTP_BLOCK_SIZE, initialXFTPVersion, currentXFTPVersion +} from "./protocol/transmission.js" +import { + encodeClientHello, encodeClientHandshake, decodeServerHandshake, + compatibleVRange +} from "./protocol/handshake.js" +import {verifyIdentityProof} from "./crypto/identity.js" +import {generateX25519KeyPair, encodePubKeyX25519, dh} from "./crypto/keys.js" +import { + encodeFNEW, encodeFADD, encodeFPUT, encodeFGET, encodeFDEL, encodePING, + decodeResponse, type FileResponse, type FileInfo, type XFTPErrorType +} from "./protocol/commands.js" +import {decryptReceivedChunk} from "./download.js" +import type {XFTPServer} from "./protocol/address.js" +import {formatXFTPServer} from "./protocol/address.js" +import {concatBytes} from "./protocol/encoding.js" +import {blockUnpad} from "./protocol/transmission.js" + +// -- Error types + +export class XFTPRetriableError extends Error { + constructor(public readonly errorType: string) { + super(humanReadableMessage(errorType)) + this.name = "XFTPRetriableError" + } +} + +export class XFTPPermanentError extends Error { + constructor(public readonly errorType: string, message: string) { + super(message) + this.name = "XFTPPermanentError" + } +} + +export function isRetriable(e: unknown): boolean { + if (e instanceof XFTPRetriableError) return true + if (e instanceof XFTPPermanentError) return false + if (e instanceof TypeError) return 
true // fetch network error + if (e instanceof Error && e.name === "AbortError") return true // timeout + return false +} + +export function categorizeError(e: unknown): Error { + if (e instanceof XFTPRetriableError || e instanceof XFTPPermanentError) return e + if (e instanceof TypeError) return new XFTPRetriableError("NETWORK") + if (e instanceof Error && e.name === "AbortError") return new XFTPRetriableError("TIMEOUT") + return e instanceof Error ? e : new Error(String(e)) +} + +export function humanReadableMessage(errorType: string | XFTPErrorType): string { + const t = typeof errorType === "string" ? errorType : errorType.type + switch (t) { + case "SESSION": return "Session expired, reconnecting..." + case "HANDSHAKE": return "Connection interrupted, reconnecting..." + case "NETWORK": return "Network error, retrying..." + case "TIMEOUT": return "Server timeout, retrying..." + case "AUTH": return "File is invalid, expired, or has been removed" + case "NO_FILE": return "File not found — it may have expired" + case "SIZE": return "File size exceeds server limit" + case "QUOTA": return "Server storage quota exceeded" + case "BLOCKED": return "File has been blocked by server" + case "DIGEST": return "File integrity check failed" + case "INTERNAL": return "Server internal error" + case "CMD": return "Protocol error" + default: return "Server error: " + t + } +} + +// -- Types + +export interface XFTPClient { + baseUrl: string + sessionId: Uint8Array + xftpVersion: number + transport: Transport +} + +export interface TransportConfig { + timeoutMs: number // default 30000 (30s), lower for tests +} + +const DEFAULT_TRANSPORT_CONFIG: TransportConfig = {timeoutMs: 30000} + +interface Transport { + post(body: Uint8Array, headers?: Record): Promise + close(): void +} + +// -- Transport implementations + +const isNode = typeof (globalThis as any).process !== "undefined" && (globalThis as any).process.versions?.node + +// In development mode, use HTTP proxy to avoid 
self-signed cert issues in browser +// __XFTP_PROXY_PORT__ is injected by vite build (null in production) +declare const __XFTP_PROXY_PORT__: string | null + +async function createTransport(baseUrl: string, config: TransportConfig): Promise { + if (isNode) { + return createNodeTransport(baseUrl, config) + } else { + return createBrowserTransport(baseUrl, config) + } +} + +async function createNodeTransport(baseUrl: string, config: TransportConfig): Promise { + // @ts-ignore node:http2 unavailable in browser tsconfig + const http2: any = await import("node:http2") + const session = http2.connect(baseUrl, {rejectUnauthorized: false}) + return { + async post(body: Uint8Array, headers?: Record): Promise { + return new Promise((resolve, reject) => { + const req = session.request({":method": "POST", ":path": "/", ...headers}) + req.setTimeout(config.timeoutMs, () => { + req.close() + reject(Object.assign(new Error("Request timeout"), {name: "AbortError"})) + }) + const chunks: any[] = [] + req.on("data", (chunk: any) => chunks.push(chunk)) + req.on("end", () => { + const B = (globalThis as any).Buffer + resolve(new Uint8Array(B.concat(chunks))) + }) + req.on("error", reject) + req.end(body) + }) + }, + close() { + session.close() + } + } +} + +function createBrowserTransport(baseUrl: string, config: TransportConfig): Transport { + // In dev mode, route through /xftp-proxy to avoid self-signed cert rejection + // __XFTP_PROXY_PORT__ is 'proxy' in dev mode (uses relative path), null in production + const effectiveUrl = typeof __XFTP_PROXY_PORT__ !== 'undefined' && __XFTP_PROXY_PORT__ + ? 
'/xftp-proxy' + : baseUrl + return { + async post(body: Uint8Array, headers?: Record): Promise { + const controller = new AbortController() + const timer = setTimeout(() => controller.abort(), config.timeoutMs) + try { + const resp = await fetch(effectiveUrl, { + method: "POST", + headers, + body: body as any, + signal: controller.signal + }) + if (!resp.ok) { + console.error('[XFTP] fetch %s failed: %d %s', effectiveUrl, resp.status, resp.statusText) + throw new Error(`Server request failed: ${resp.status} ${resp.statusText}`) + } + return new Uint8Array(await resp.arrayBuffer()) + } finally { + clearTimeout(timer) + } + }, + close() {} + } +} + +// -- Client agent (connection pool with Promise-based lock) + +interface ServerConnection { + client: Promise // resolves to connected client; replaced on reconnect + queue: Promise // tail of sequential command chain +} + +export interface XFTPClientAgent { + connections: Map + /** @internal Injectable for testing — defaults to connectXFTP */ + _connectFn: (server: XFTPServer) => Promise +} + +export function newXFTPAgent(): XFTPClientAgent { + return {connections: new Map(), _connectFn: connectXFTP} +} + +export function getXFTPServerClient(agent: XFTPClientAgent, server: XFTPServer): Promise { + const key = formatXFTPServer(server) + let conn = agent.connections.get(key) + if (!conn) { + const p = agent._connectFn(server) + conn = {client: p, queue: Promise.resolve()} + agent.connections.set(key, conn) + p.catch(() => { + const cur = agent.connections.get(key) + if (cur && cur.client === p) agent.connections.delete(key) + }) + } + return conn.client +} + +export function reconnectClient(agent: XFTPClientAgent, server: XFTPServer): Promise { + const key = formatXFTPServer(server) + const old = agent.connections.get(key) + old?.client.then(c => c.transport.close(), () => {}) + const p = agent._connectFn(server) + const conn: ServerConnection = {client: p, queue: old?.queue ?? 
Promise.resolve()} + agent.connections.set(key, conn) + p.catch(() => { + const cur = agent.connections.get(key) + if (cur && cur.client === p) agent.connections.delete(key) + }) + return p +} + +export function removeStaleConnection( + agent: XFTPClientAgent, server: XFTPServer, failedP: Promise +): void { + const key = formatXFTPServer(server) + const conn = agent.connections.get(key) + if (conn && conn.client === failedP) { + agent.connections.delete(key) + failedP.then(c => c.transport.close(), () => {}) + } +} + +export function closeXFTPServerClient(agent: XFTPClientAgent, server: XFTPServer): void { + const key = formatXFTPServer(server) + const conn = agent.connections.get(key) + if (conn) { + agent.connections.delete(key) + conn.client.then(c => c.transport.close(), () => {}) + } +} + +export function closeXFTPAgent(agent: XFTPClientAgent): void { + for (const conn of agent.connections.values()) { + conn.client.then(c => c.transport.close(), () => {}) + } + agent.connections.clear() +} + +// -- Connect + handshake + +async function connectXFTP(server: XFTPServer, config?: Partial): Promise { + const cfg: TransportConfig = {...DEFAULT_TRANSPORT_CONFIG, ...config} + const baseUrl = "https://" + server.host + ":" + server.port + const transport = await createTransport(baseUrl, cfg) + + try { + // Step 1: send client hello with web challenge + const challenge = new Uint8Array(32) + crypto.getRandomValues(challenge) + const clientHelloBytes = encodeClientHello({webChallenge: challenge}) + const shsBody = await transport.post(clientHelloBytes, {"xftp-web-hello": "1"}) + + // Step 2: decode + verify server handshake + const hs = decodeServerHandshake(shsBody) + if (!hs.webIdentityProof) { + console.error('[XFTP] Server did not provide web identity proof') + throw new Error("Server did not provide web identity proof") + } + const idOk = verifyIdentityProof({ + certChainDer: hs.certChainDer, + signedKeyDer: hs.signedKeyDer, + sigBytes: hs.webIdentityProof, + 
challenge, + sessionId: hs.sessionId, + keyHash: server.keyHash + }) + if (!idOk) { + console.error('[XFTP] Server identity verification failed') + throw new Error("Server identity verification failed") + } + + // Step 3: version negotiation + const vr = compatibleVRange(hs.xftpVersionRange, {minVersion: initialXFTPVersion, maxVersion: currentXFTPVersion}) + if (!vr) { + console.error('[XFTP] Incompatible server version: %o', hs.xftpVersionRange) + throw new Error("Incompatible server version") + } + const xftpVersion = vr.maxVersion + + // Step 4: send client handshake + const ack = await transport.post(encodeClientHandshake({xftpVersion, keyHash: server.keyHash}), {"xftp-handshake": "1"}) + if (ack.length !== 0) { + console.error('[XFTP] Non-empty handshake ack (%d bytes)', ack.length) + throw new Error("Server handshake failed") + } + + return {baseUrl, sessionId: hs.sessionId, xftpVersion, transport} + } catch (e) { + console.error('[XFTP] Connection to %s failed:', baseUrl, e) + transport.close() + throw e + } +} + +// -- Send command (single attempt, no retry) + +async function sendXFTPCommandOnce( + client: XFTPClient, + privateKey: Uint8Array, + entityId: Uint8Array, + cmdBytes: Uint8Array, + chunkData?: Uint8Array +): Promise<{response: FileResponse, body: Uint8Array}> { + const corrId = new Uint8Array(0) + const block = encodeAuthTransmission(client.sessionId, corrId, entityId, cmdBytes, privateKey) + const reqBody = chunkData ? 
concatBytes(block, chunkData) : block + const fullResp = await client.transport.post(reqBody) + console.log(`[XFTP-DBG] sendOnce: fullResp.length=${fullResp.length} entityId=${_hex(entityId)} cmdTag=${cmdBytes[0]}`) + if (fullResp.length < XFTP_BLOCK_SIZE) { + console.error('[XFTP] Response too short: %d bytes (expected >= %d)', fullResp.length, XFTP_BLOCK_SIZE) + throw new Error("Server response too short") + } + const respBlock = fullResp.subarray(0, XFTP_BLOCK_SIZE) + const body = fullResp.subarray(XFTP_BLOCK_SIZE) + console.log(`[XFTP-DBG] sendOnce: body.length=${body.length} body.byteOffset=${body.byteOffset} body.buffer.byteLength=${body.buffer.byteLength}`) + // Detect padded error strings (HANDSHAKE, SESSION) before decodeTransmission + const raw = blockUnpad(respBlock) + if (raw.length < 20) { + const text = new TextDecoder().decode(raw) + if (/^[A-Z_]+$/.test(text)) { + throw new XFTPRetriableError(text) + } + } + const {command} = decodeTransmission(client.sessionId, respBlock) + const response = decodeResponse(command) + if (response.type === "FRErr") { + const err = response.err + if (err.type === "SESSION" || err.type === "HANDSHAKE") { + throw new XFTPRetriableError(err.type) + } + throw new XFTPPermanentError(err.type, humanReadableMessage(err)) + } + return {response, body} +} + +function _hex(b: Uint8Array, n = 8): string { + return Array.from(b.slice(0, n)).map(x => x.toString(16).padStart(2, '0')).join('') +} + +// -- Send command (with retry + reconnect) + +export async function sendXFTPCommand( + agent: XFTPClientAgent, + server: XFTPServer, + privateKey: Uint8Array, + entityId: Uint8Array, + cmdBytes: Uint8Array, + chunkData?: Uint8Array, + maxRetries: number = 3 +): Promise<{response: FileResponse, body: Uint8Array}> { + let clientP = getXFTPServerClient(agent, server) + let client = await clientP + for (let attempt = 1; attempt <= maxRetries; attempt++) { + try { + if (attempt > 1) console.log(`[XFTP-DBG] sendCmd: retry 
attempt=${attempt}/${maxRetries}`) + return await sendXFTPCommandOnce(client, privateKey, entityId, cmdBytes, chunkData) + } catch (e) { + console.log(`[XFTP-DBG] sendCmd: attempt=${attempt} failed: ${e instanceof Error ? e.message : String(e)} retriable=${isRetriable(e)}`) + if (!isRetriable(e)) { + throw categorizeError(e) + } + if (attempt === maxRetries) { + removeStaleConnection(agent, server, clientP) + throw categorizeError(e) + } + clientP = reconnectClient(agent, server) + client = await clientP + } + } + throw new Error("unreachable") +} + +// -- Command wrappers + +export async function createXFTPChunk( + agent: XFTPClientAgent, server: XFTPServer, spKey: Uint8Array, file: FileInfo, + rcvKeys: Uint8Array[], auth: Uint8Array | null = null +): Promise<{senderId: Uint8Array, recipientIds: Uint8Array[]}> { + const {response} = await sendXFTPCommand(agent, server, spKey, new Uint8Array(0), encodeFNEW(file, rcvKeys, auth)) + if (response.type !== "FRSndIds") throw new Error("unexpected response: " + response.type) + return {senderId: response.senderId, recipientIds: response.recipientIds} +} + +export async function addXFTPRecipients( + agent: XFTPClientAgent, server: XFTPServer, spKey: Uint8Array, fId: Uint8Array, rcvKeys: Uint8Array[] +): Promise<Uint8Array[]> { + const {response} = await sendXFTPCommand(agent, server, spKey, fId, encodeFADD(rcvKeys)) + if (response.type !== "FRRcvIds") throw new Error("unexpected response: " + response.type) + return response.recipientIds +} + +export async function uploadXFTPChunk( + agent: XFTPClientAgent, server: XFTPServer, spKey: Uint8Array, fId: Uint8Array, chunkData: Uint8Array +): Promise<void> { + const {response} = await sendXFTPCommand(agent, server, spKey, fId, encodeFPUT(), chunkData) + if (response.type !== "FROk") throw new Error("unexpected response: " + response.type) +} + +export interface RawChunkResponse { + dhSecret: Uint8Array + nonce: Uint8Array + body: Uint8Array +} + +export async function downloadXFTPChunkRaw( + 
agent: XFTPClientAgent, server: XFTPServer, rpKey: Uint8Array, fId: Uint8Array +): Promise<RawChunkResponse> { + const {publicKey, privateKey} = generateX25519KeyPair() + const cmd = encodeFGET(encodePubKeyX25519(publicKey)) + const {response, body} = await sendXFTPCommand(agent, server, rpKey, fId, cmd) + if (response.type !== "FRFile") throw new Error("unexpected response: " + response.type) + const dhSecret = dh(response.rcvDhKey, privateKey) + console.log(`[XFTP-DBG] dlChunkRaw: body.length=${body.length} nonce=${_hex(response.nonce, 24)} dhSecret=${_hex(dhSecret)} body[0..8]=${_hex(body)} body[-8..]=${_hex(body.slice(-8))}`) + return {dhSecret, nonce: response.nonce, body} +} + +export async function downloadXFTPChunk( + agent: XFTPClientAgent, server: XFTPServer, rpKey: Uint8Array, fId: Uint8Array, digest?: Uint8Array +): Promise<Uint8Array> { + const {dhSecret, nonce, body} = await downloadXFTPChunkRaw(agent, server, rpKey, fId) + return decryptReceivedChunk(dhSecret, nonce, body, digest ?? null) +} + +export async function deleteXFTPChunk( + agent: XFTPClientAgent, server: XFTPServer, spKey: Uint8Array, sId: Uint8Array +): Promise<void> { + const {response} = await sendXFTPCommand(agent, server, spKey, sId, encodeFDEL()) + if (response.type !== "FROk") throw new Error("unexpected response: " + response.type) +} + +export async function pingXFTP(agent: XFTPClientAgent, server: XFTPServer): Promise<void> { + const client = await getXFTPServerClient(agent, server) + const corrId = new Uint8Array(0) + const block = encodeTransmission(client.sessionId, corrId, new Uint8Array(0), encodePING()) + const fullResp = await client.transport.post(block) + if (fullResp.length < XFTP_BLOCK_SIZE) throw new Error("pingXFTP: response too short") + const {command} = decodeTransmission(client.sessionId, fullResp.subarray(0, XFTP_BLOCK_SIZE)) + const response = decodeResponse(command) + if (response.type !== "FRPong") throw new Error("unexpected response: " + response.type) +} + diff --git 
a/xftp-web/src/crypto/digest.ts b/xftp-web/src/crypto/digest.ts new file mode 100644 index 0000000000..95bf2679c4 --- /dev/null +++ b/xftp-web/src/crypto/digest.ts @@ -0,0 +1,26 @@ +// Cryptographic hash functions matching Simplex.Messaging.Crypto (sha256Hash, sha512Hash). + +import sodium from "libsodium-wrappers-sumo" + +// SHA-256 digest (32 bytes) -- Crypto.hs:1006 +export function sha256(data: Uint8Array): Uint8Array { + return sodium.crypto_hash_sha256(data) +} + +// SHA-512 digest (64 bytes) -- Crypto.hs:1011 +export function sha512(data: Uint8Array): Uint8Array { + return sodium.crypto_hash_sha512(data) +} + +// Streaming SHA-512 over multiple chunks -- avoids copying large data into WASM memory at once. +// Internally segments chunks larger than 4MB to limit peak WASM memory usage. +export function sha512Streaming(chunks: Iterable<Uint8Array>): Uint8Array { + const SEG = 4 * 1024 * 1024 + const state = sodium.crypto_hash_sha512_init() as unknown as sodium.StateAddress + for (const chunk of chunks) { + for (let off = 0; off < chunk.length; off += SEG) { + sodium.crypto_hash_sha512_update(state, chunk.subarray(off, Math.min(off + SEG, chunk.length))) + } + } + return sodium.crypto_hash_sha512_final(state) +} diff --git a/xftp-web/src/crypto/file.ts b/xftp-web/src/crypto/file.ts new file mode 100644 index 0000000000..0088a680b8 --- /dev/null +++ b/xftp-web/src/crypto/file.ts @@ -0,0 +1,94 @@ +// File-level encryption/decryption matching Simplex.FileTransfer.Crypto. +// Operates on in-memory Uint8Array (no file I/O needed for browser). 
+ +import {Decoder, concatBytes, encodeInt64, encodeString, decodeString, encodeMaybe, decodeMaybe} from "../protocol/encoding.js" +import {sbInit, sbEncryptChunk, sbDecryptTailTag, sbAuth} from "./secretbox.js" + +const AUTH_TAG_SIZE = 16n + +// -- FileHeader + +export interface FileHeader { + fileName: string + fileExtra: string | null +} + +// Encoding matches Haskell: smpEncode (fileName, fileExtra) +// = smpEncode fileName <> smpEncode fileExtra +// = encodeString(fileName) + encodeMaybe(encodeString, fileExtra) +export function encodeFileHeader(hdr: FileHeader): Uint8Array { + return concatBytes( + encodeString(hdr.fileName), + encodeMaybe(encodeString, hdr.fileExtra) + ) +} + +// Parse FileHeader from decrypted content (first 1024 bytes examined). +// Returns the parsed header and remaining bytes (file content). +export function parseFileHeader(data: Uint8Array): {header: FileHeader, rest: Uint8Array} { + const hdrLen = Math.min(1024, data.length) + const d = new Decoder(data.subarray(0, hdrLen)) + const fileName = decodeString(d) + const fileExtra = decodeMaybe(decodeString, d) + const consumed = d.offset() + return { + header: {fileName, fileExtra}, + rest: data.subarray(consumed) + } +} + +// -- Encryption (FileTransfer.Crypto:encryptFile) + +// Encrypt file content with streaming XSalsa20-Poly1305. 
+// Output format: encrypted(Int64 fileSize | fileHdr | source | '#' padding) | 16-byte auth tag +// +// source -- raw file content +// fileHdr -- pre-encoded FileHeader bytes (from encodeFileHeader) +// key -- 32-byte symmetric key +// nonce -- 24-byte nonce +// fileSize -- BigInt(fileHdr.length + source.length) +// encSize -- total output size (including 16-byte auth tag) +export function encryptFile( + source: Uint8Array, + fileHdr: Uint8Array, + key: Uint8Array, + nonce: Uint8Array, + fileSize: bigint, + encSize: bigint +): Uint8Array { + const state = sbInit(key, nonce) + const lenStr = encodeInt64(fileSize) + const padLen = Number(encSize - AUTH_TAG_SIZE - fileSize - 8n) + if (padLen < 0) throw new Error("encryptFile: encSize too small") + const hdr = sbEncryptChunk(state, concatBytes(lenStr, fileHdr)) + const encSource = sbEncryptChunk(state, source) + const padding = new Uint8Array(padLen) + padding.fill(0x23) // '#' + const encPad = sbEncryptChunk(state, padding) + const tag = sbAuth(state) + return concatBytes(hdr, encSource, encPad, tag) +} + +// -- Decryption (FileTransfer.Crypto:decryptChunks) + +// Decrypt one or more XFTP chunks into a FileHeader and file content. +// Chunks are concatenated, then decrypted as a single stream. +// +// encSize -- total encrypted size (including 16-byte auth tag) +// chunks -- downloaded XFTP chunk data (concatenated = full encrypted file) +// key -- 32-byte symmetric key +// nonce -- 24-byte nonce +export function decryptChunks( + encSize: bigint, + chunks: Uint8Array[], + key: Uint8Array, + nonce: Uint8Array +): {header: FileHeader, content: Uint8Array} { + if (chunks.length === 0) throw new Error("decryptChunks: empty chunks") + const paddedLen = encSize - AUTH_TAG_SIZE + const data = chunks.length === 1 ? 
chunks[0] : concatBytes(...chunks) + const {valid, content} = sbDecryptTailTag(key, nonce, paddedLen, data) + if (!valid) throw new Error("decryptChunks: invalid auth tag") + const {header, rest} = parseFileHeader(content) + return {header, content: rest} +} diff --git a/xftp-web/src/crypto/identity.ts b/xftp-web/src/crypto/identity.ts new file mode 100644 index 0000000000..9ba675f394 --- /dev/null +++ b/xftp-web/src/crypto/identity.ts @@ -0,0 +1,112 @@ +// Web handshake identity proof verification. +// +// Verifies server identity in the XFTP web handshake using the certificate +// chain from the protocol handshake (independent of TLS certificates). +// Ed25519 via libsodium, Ed448 via @noble/curves. + +import {Decoder, concatBytes} from "../protocol/encoding.js" +import {sha256} from "./digest.js" +import {verify, decodePubKeyEd25519, verifyEd448, decodePubKeyEd448} from "./keys.js" +import {chainIdCaCerts, extractSignedKey} from "../protocol/handshake.js" + +// -- ASN.1 DER helpers (minimal, for X.509 parsing) + +function derLen(d: Decoder): number { + const first = d.anyByte() + if (first < 0x80) return first + const n = first & 0x7f + if (n === 0 || n > 4) throw new Error("DER: unsupported length encoding") + let len = 0 + for (let i = 0; i < n; i++) len = (len << 8) | d.anyByte() + return len +} + +function derSkip(d: Decoder): void { + d.anyByte() + d.take(derLen(d)) +} + +function derReadElement(d: Decoder): Uint8Array { + const start = d.offset() + d.anyByte() + d.take(derLen(d)) + return d.buf.subarray(start, d.offset()) +} + +// -- X.509 certificate public key extraction + +// Extract SubjectPublicKeyInfo DER from a full X.509 certificate DER. +// Navigates: Certificate -> TBSCertificate -> skip version, serialNumber, +// signatureAlg, issuer, validity, subject -> SubjectPublicKeyInfo. 
+export function extractCertPublicKeyInfo(certDer: Uint8Array): Uint8Array { + const d = new Decoder(certDer) + if (d.anyByte() !== 0x30) throw new Error("X.509: expected Certificate SEQUENCE") + derLen(d) + if (d.anyByte() !== 0x30) throw new Error("X.509: expected TBSCertificate SEQUENCE") + derLen(d) + if (d.buf[d.offset()] === 0xa0) derSkip(d) // version [0] EXPLICIT (optional) + derSkip(d) // serialNumber + derSkip(d) // signature AlgorithmIdentifier + derSkip(d) // issuer + derSkip(d) // validity + derSkip(d) // subject + return derReadElement(d) // SubjectPublicKeyInfo +} + +// Detect certificate key algorithm from SPKI DER prefix. +// Ed25519 OID 1.3.101.112: byte 8 = 0x70, SPKI = 44 bytes +// Ed448 OID 1.3.101.113: byte 8 = 0x71, SPKI = 69 bytes +type CertKeyAlgorithm = 'ed25519' | 'ed448' + +function detectKeyAlgorithm(spki: Uint8Array): CertKeyAlgorithm { + if (spki.length === 44 && spki[8] === 0x70) return 'ed25519' + if (spki.length === 69 && spki[8] === 0x71) return 'ed448' + throw new Error("unsupported certificate key algorithm") +} + +// Extract raw public key from SPKI DER, auto-detecting Ed25519 or Ed448. +function extractCertRawKey(spki: Uint8Array): {key: Uint8Array, alg: CertKeyAlgorithm} { + const alg = detectKeyAlgorithm(spki) + const key = alg === 'ed25519' ? decodePubKeyEd25519(spki) : decodePubKeyEd448(spki) + return {key, alg} +} + +// Verify signature using the appropriate algorithm. +function verifySig(alg: CertKeyAlgorithm, key: Uint8Array, sig: Uint8Array, msg: Uint8Array): boolean { + return alg === 'ed25519' ? verify(key, sig, msg) : verifyEd448(key, sig, msg) +} + +// -- Identity proof verification + +export interface IdentityVerification { + certChainDer: Uint8Array[] + signedKeyDer: Uint8Array + sigBytes: Uint8Array + challenge: Uint8Array + sessionId: Uint8Array + keyHash: Uint8Array +} + +// Verify server identity proof from XFTP web handshake. +// 1. Certificate chain has valid structure (2-4 certs) +// 2. 
SHA-256(idCert) matches expected keyHash +// 3. Challenge signature valid: verify(leafKey, sigBytes, challenge || sessionId) +// 4. DH key signature valid: verify(leafKey, signedKey.signature, signedKey.objectDer) +export function verifyIdentityProof(v: IdentityVerification): boolean { + const cc = chainIdCaCerts(v.certChainDer) + if (cc.type !== 'valid') return false + const fp = sha256(cc.idCert) + if (!constantTimeEqual(fp, v.keyHash)) return false + const spki = extractCertPublicKeyInfo(cc.leafCert) + const {key, alg} = extractCertRawKey(spki) + if (!verifySig(alg, key, v.sigBytes, concatBytes(v.challenge, v.sessionId))) return false + const sk = extractSignedKey(v.signedKeyDer) + return verifySig(alg, key, sk.signature, sk.objectDer) +} + +function constantTimeEqual(a: Uint8Array, b: Uint8Array): boolean { + if (a.length !== b.length) return false + let diff = 0 + for (let i = 0; i < a.length; i++) diff |= a[i] ^ b[i] + return diff === 0 +} diff --git a/xftp-web/src/crypto/keys.ts b/xftp-web/src/crypto/keys.ts new file mode 100644 index 0000000000..21b217906f --- /dev/null +++ b/xftp-web/src/crypto/keys.ts @@ -0,0 +1,173 @@ +// Key generation, signing, DH -- Simplex.Messaging.Crypto (Ed25519/X25519/Ed448 functions). + +import sodium from "libsodium-wrappers-sumo" +await sodium.ready +import {ed448} from "@noble/curves/ed448" +import {sha256} from "./digest.js" +import {concatBytes} from "../protocol/encoding.js" + +// -- Ed25519 key generation (Crypto.hs:726 generateAuthKeyPair) + +export interface Ed25519KeyPair { + publicKey: Uint8Array // 32 bytes raw + privateKey: Uint8Array // 64 bytes (libsodium: seed || pubkey) +} + +export function generateEd25519KeyPair(): Ed25519KeyPair { + const kp = sodium.crypto_sign_keypair() + return {publicKey: kp.publicKey, privateKey: kp.privateKey} +} + +// Generate from known 32-byte seed (deterministic, for testing/interop). 
+export function ed25519KeyPairFromSeed(seed: Uint8Array): Ed25519KeyPair { + const kp = sodium.crypto_sign_seed_keypair(seed) + return {publicKey: kp.publicKey, privateKey: kp.privateKey} +} + +// -- X25519 key generation (Crypto.hs via generateKeyPair) + +export interface X25519KeyPair { + publicKey: Uint8Array // 32 bytes + privateKey: Uint8Array // 32 bytes +} + +export function generateX25519KeyPair(): X25519KeyPair { + const kp = sodium.crypto_box_keypair() + return {publicKey: kp.publicKey, privateKey: kp.privateKey} +} + +// Derive X25519 keypair from raw 32-byte private key. +export function x25519KeyPairFromPrivate(privateKey: Uint8Array): X25519KeyPair { + const publicKey = sodium.crypto_scalarmult_base(privateKey) + return {publicKey, privateKey} +} + +// -- Ed25519 signing (Crypto.hs:1175 sign') + +export function sign(privateKey: Uint8Array, msg: Uint8Array): Uint8Array { + return sodium.crypto_sign_detached(msg, privateKey) +} + +// -- Ed25519 verification (Crypto.hs:1270 verify') + +export function verify(publicKey: Uint8Array, sig: Uint8Array, msg: Uint8Array): boolean { + try { + return sodium.crypto_sign_verify_detached(sig, msg, publicKey) + } catch { + return false + } +} + +// -- X25519 Diffie-Hellman (Crypto.hs:1280 dh') + +export function dh(publicKey: Uint8Array, privateKey: Uint8Array): Uint8Array { + return sodium.crypto_scalarmult(privateKey, publicKey) +} + +// -- DER encoding for Ed25519 public keys (RFC 8410, SubjectPublicKeyInfo) +// SEQUENCE { SEQUENCE { OID 1.3.101.112 } BIT STRING { 0x00 <32 bytes> } } + +const ED25519_PUBKEY_DER_PREFIX = new Uint8Array([ + 0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x21, 0x00, +]) + +const X25519_PUBKEY_DER_PREFIX = new Uint8Array([ + 0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00, +]) + +export function encodePubKeyEd25519(rawPubKey: Uint8Array): Uint8Array { + return concatBytes(ED25519_PUBKEY_DER_PREFIX, rawPubKey) +} + +export function 
decodePubKeyEd25519(der: Uint8Array): Uint8Array { + if (der.length !== 44) throw new Error("decodePubKeyEd25519: invalid length") + for (let i = 0; i < ED25519_PUBKEY_DER_PREFIX.length; i++) { + if (der[i] !== ED25519_PUBKEY_DER_PREFIX[i]) throw new Error("decodePubKeyEd25519: invalid DER prefix") + } + return der.subarray(12) +} + +export function encodePubKeyX25519(rawPubKey: Uint8Array): Uint8Array { + return concatBytes(X25519_PUBKEY_DER_PREFIX, rawPubKey) +} + +export function decodePubKeyX25519(der: Uint8Array): Uint8Array { + if (der.length !== 44) throw new Error("decodePubKeyX25519: invalid length") + for (let i = 0; i < X25519_PUBKEY_DER_PREFIX.length; i++) { + if (der[i] !== X25519_PUBKEY_DER_PREFIX[i]) throw new Error("decodePubKeyX25519: invalid DER prefix") + } + return der.subarray(12) +} + +// -- DER encoding for Ed448 public keys (RFC 8410, SubjectPublicKeyInfo) +// SEQUENCE { SEQUENCE { OID 1.3.101.113 } BIT STRING { 0x00 <57 bytes> } } + +const ED448_PUBKEY_DER_PREFIX = new Uint8Array([ + 0x30, 0x43, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x71, 0x03, 0x3a, 0x00, +]) + +export function encodePubKeyEd448(rawPubKey: Uint8Array): Uint8Array { + return concatBytes(ED448_PUBKEY_DER_PREFIX, rawPubKey) +} + +export function decodePubKeyEd448(der: Uint8Array): Uint8Array { + if (der.length !== 69) throw new Error("decodePubKeyEd448: invalid length") + for (let i = 0; i < ED448_PUBKEY_DER_PREFIX.length; i++) { + if (der[i] !== ED448_PUBKEY_DER_PREFIX[i]) throw new Error("decodePubKeyEd448: invalid DER prefix") + } + return der.subarray(12) +} + +// -- Ed448 verification via @noble/curves (Crypto.hs:1270 verify') + +export function verifyEd448(publicKey: Uint8Array, sig: Uint8Array, msg: Uint8Array): boolean { + try { + return ed448.verify(sig, msg, publicKey) + } catch { + return false + } +} + +// -- DER encoding for private keys (PKCS8 OneAsymmetricKey, RFC 8410) +// SEQUENCE { INTEGER 0, SEQUENCE { OID }, OCTET STRING { OCTET STRING { <32 bytes> } } } + 
+const ED25519_PRIVKEY_DER_PREFIX = new Uint8Array([ + 0x30, 0x2e, 0x02, 0x01, 0x00, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x04, 0x22, 0x04, 0x20, +]) + +const X25519_PRIVKEY_DER_PREFIX = new Uint8Array([ + 0x30, 0x2e, 0x02, 0x01, 0x00, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x04, 0x22, 0x04, 0x20, +]) + +export function encodePrivKeyEd25519(privateKey: Uint8Array): Uint8Array { + // privateKey is 64 bytes (libsodium: seed || pubkey), seed is first 32 bytes + const seed = privateKey.subarray(0, 32) + return concatBytes(ED25519_PRIVKEY_DER_PREFIX, seed) +} + +export function decodePrivKeyEd25519(der: Uint8Array): Uint8Array { + if (der.length !== 48) throw new Error("decodePrivKeyEd25519: invalid length") + for (let i = 0; i < ED25519_PRIVKEY_DER_PREFIX.length; i++) { + if (der[i] !== ED25519_PRIVKEY_DER_PREFIX[i]) throw new Error("decodePrivKeyEd25519: invalid DER prefix") + } + // Returns 32-byte seed; call ed25519KeyPairFromSeed to get full keypair. + return der.subarray(16) +} + +export function encodePrivKeyX25519(privateKey: Uint8Array): Uint8Array { + return concatBytes(X25519_PRIVKEY_DER_PREFIX, privateKey) +} + +export function decodePrivKeyX25519(der: Uint8Array): Uint8Array { + if (der.length !== 48) throw new Error("decodePrivKeyX25519: invalid length") + for (let i = 0; i < X25519_PRIVKEY_DER_PREFIX.length; i++) { + if (der[i] !== X25519_PRIVKEY_DER_PREFIX[i]) throw new Error("decodePrivKeyX25519: invalid DER prefix") + } + return der.subarray(16) +} + +// -- KeyHash: SHA-256 of DER-encoded public key (Crypto.hs:981) + +export function keyHash(derPubKey: Uint8Array): Uint8Array { + return sha256(derPubKey) +} diff --git a/xftp-web/src/crypto/padding.ts b/xftp-web/src/crypto/padding.ts new file mode 100644 index 0000000000..d669c19e5c --- /dev/null +++ b/xftp-web/src/crypto/padding.ts @@ -0,0 +1,61 @@ +// Block padding matching Simplex.Messaging.Crypto (strict) and Simplex.Messaging.Crypto.Lazy. 
+// Strict: 2-byte BE length prefix + message + '#' fill. +// Lazy: 8-byte Int64 length prefix + message + '#' fill. + +import {encodeWord16, decodeWord16, encodeInt64, decodeInt64, Decoder} from "../protocol/encoding.js" + +const HASH = 0x23 // '#' + +// -- Strict pad/unPad (protocol messages) -- Crypto.hs:1077 + +export function pad(msg: Uint8Array, paddedLen: number): Uint8Array { + const len = msg.length + if (len > 65535) throw new Error("pad: message too large for Word16 length") + const fillLen = paddedLen - len - 2 + if (fillLen < 0) throw new Error("pad: message exceeds padded size") + const result = new Uint8Array(paddedLen) + const lenBytes = encodeWord16(len) + result.set(lenBytes, 0) + result.set(msg, 2) + result.fill(HASH, 2 + len) + return result +} + +export function unPad(padded: Uint8Array): Uint8Array { + if (padded.length < 2) throw new Error("unPad: input too short") + const d = new Decoder(padded) + const len = decodeWord16(d) + if (padded.length - 2 < len) throw new Error("unPad: invalid length") + return padded.subarray(2, 2 + len) +} + +// -- Lazy pad/unPad (file encryption) -- Crypto/Lazy.hs:70 + +export function padLazy(msg: Uint8Array, msgLen: bigint, padLen: bigint): Uint8Array { + const fillLen = padLen - msgLen - 8n + if (fillLen < 0n) throw new Error("padLazy: message exceeds padded size") + const totalLen = Number(padLen) + const result = new Uint8Array(totalLen) + const lenBytes = encodeInt64(msgLen) + result.set(lenBytes, 0) + result.set(msg.subarray(0, Number(msgLen)), 8) + result.fill(HASH, 8 + Number(msgLen)) + return result +} + +export function unPadLazy(padded: Uint8Array): Uint8Array { + return splitLen(padded).content +} + +// splitLen: extract 8-byte Int64 length and content -- Crypto/Lazy.hs:96 +// Does not fail if content is shorter than declared length (for chunked decryption). 
+export function splitLen(data: Uint8Array): {len: bigint; content: Uint8Array} { + if (data.length < 8) throw new Error("splitLen: input too short") + const d = new Decoder(data) + const len = decodeInt64(d) + if (len < 0n) throw new Error("splitLen: negative length") + const numLen = Number(len) + const available = data.length - 8 + const takeLen = Math.min(numLen, available) + return {len, content: data.subarray(8, 8 + takeLen)} +} diff --git a/xftp-web/src/crypto/secretbox.ts b/xftp-web/src/crypto/secretbox.ts new file mode 100644 index 0000000000..77c4184ece --- /dev/null +++ b/xftp-web/src/crypto/secretbox.ts @@ -0,0 +1,219 @@ +// Streaming XSalsa20-Poly1305 -- Simplex.Messaging.Crypto / Crypto.Lazy +// +// Libsodium-wrappers-sumo does not expose crypto_stream_xsalsa20_xor_ic, +// so the Salsa20/20 stream cipher core is implemented here. +// HSalsa20 uses libsodium's crypto_core_hsalsa20. +// Poly1305 uses libsodium's streaming crypto_onetimeauth_* API. + +import sodium, {StateAddress} from "libsodium-wrappers-sumo" +import {concatBytes} from "../protocol/encoding.js" +import {pad, unPad, padLazy, unPadLazy} from "./padding.js" + +// crypto_core_hsalsa20 exists at runtime but is missing from @types/libsodium-wrappers-sumo +const _sodium = sodium as unknown as { + crypto_core_hsalsa20(input: Uint8Array, key: Uint8Array, constant?: Uint8Array): Uint8Array +} & typeof sodium + +// -- Salsa20/20 stream cipher core + +function readU32LE(buf: Uint8Array, off: number): number { + return ((buf[off] | (buf[off + 1] << 8) | (buf[off + 2] << 16) | (buf[off + 3] << 24)) >>> 0) +} + +function writeU32LE(buf: Uint8Array, off: number, val: number): void { + buf[off] = val & 0xff + buf[off + 1] = (val >>> 8) & 0xff + buf[off + 2] = (val >>> 16) & 0xff + buf[off + 3] = (val >>> 24) & 0xff +} + +function rotl32(v: number, n: number): number { + return ((v << n) | (v >>> (32 - n))) >>> 0 +} + +const SIGMA_0 = 0x61707865 +const SIGMA_1 = 0x3320646e +const SIGMA_2 = 0x79622d32 
+const SIGMA_3 = 0x6b206574 + +function salsa20Block(key: Uint8Array, nonce8: Uint8Array, counter: number): Uint8Array { + const k0 = readU32LE(key, 0), k1 = readU32LE(key, 4) + const k2 = readU32LE(key, 8), k3 = readU32LE(key, 12) + const k4 = readU32LE(key, 16), k5 = readU32LE(key, 20) + const k6 = readU32LE(key, 24), k7 = readU32LE(key, 28) + const n0 = readU32LE(nonce8, 0), n1 = readU32LE(nonce8, 4) + + const s0 = SIGMA_0, s1 = k0, s2 = k1, s3 = k2 + const s4 = k3, s5 = SIGMA_1, s6 = n0, s7 = n1 + const s8 = counter >>> 0, s9 = 0, s10 = SIGMA_2, s11 = k4 + const s12 = k5, s13 = k6, s14 = k7, s15 = SIGMA_3 + + let x0 = s0, x1 = s1, x2 = s2, x3 = s3 + let x4 = s4, x5 = s5, x6 = s6, x7 = s7 + let x8 = s8, x9 = s9, x10 = s10, x11 = s11 + let x12 = s12, x13 = s13, x14 = s14, x15 = s15 + + for (let i = 0; i < 10; i++) { + // Column round + x4 ^= rotl32((x0 + x12) >>> 0, 7); x8 ^= rotl32((x4 + x0) >>> 0, 9) + x12 ^= rotl32((x8 + x4) >>> 0, 13); x0 ^= rotl32((x12 + x8) >>> 0, 18) + x9 ^= rotl32((x5 + x1) >>> 0, 7); x13 ^= rotl32((x9 + x5) >>> 0, 9) + x1 ^= rotl32((x13 + x9) >>> 0, 13); x5 ^= rotl32((x1 + x13) >>> 0, 18) + x14 ^= rotl32((x10 + x6) >>> 0, 7); x2 ^= rotl32((x14 + x10) >>> 0, 9) + x6 ^= rotl32((x2 + x14) >>> 0, 13); x10 ^= rotl32((x6 + x2) >>> 0, 18) + x3 ^= rotl32((x15 + x11) >>> 0, 7); x7 ^= rotl32((x3 + x15) >>> 0, 9) + x11 ^= rotl32((x7 + x3) >>> 0, 13); x15 ^= rotl32((x11 + x7) >>> 0, 18) + // Row round + x1 ^= rotl32((x0 + x3) >>> 0, 7); x2 ^= rotl32((x1 + x0) >>> 0, 9) + x3 ^= rotl32((x2 + x1) >>> 0, 13); x0 ^= rotl32((x3 + x2) >>> 0, 18) + x6 ^= rotl32((x5 + x4) >>> 0, 7); x7 ^= rotl32((x6 + x5) >>> 0, 9) + x4 ^= rotl32((x7 + x6) >>> 0, 13); x5 ^= rotl32((x4 + x7) >>> 0, 18) + x11 ^= rotl32((x10 + x9) >>> 0, 7); x8 ^= rotl32((x11 + x10) >>> 0, 9) + x9 ^= rotl32((x8 + x11) >>> 0, 13); x10 ^= rotl32((x9 + x8) >>> 0, 18) + x12 ^= rotl32((x15 + x14) >>> 0, 7); x13 ^= rotl32((x12 + x15) >>> 0, 9) + x14 ^= rotl32((x13 + x12) >>> 0, 13); x15 ^= 
rotl32((x14 + x13) >>> 0, 18) + } + + const out = new Uint8Array(64) + writeU32LE(out, 0, (x0 + s0) >>> 0); writeU32LE(out, 4, (x1 + s1) >>> 0) + writeU32LE(out, 8, (x2 + s2) >>> 0); writeU32LE(out, 12, (x3 + s3) >>> 0) + writeU32LE(out, 16, (x4 + s4) >>> 0); writeU32LE(out, 20, (x5 + s5) >>> 0) + writeU32LE(out, 24, (x6 + s6) >>> 0); writeU32LE(out, 28, (x7 + s7) >>> 0) + writeU32LE(out, 32, (x8 + s8) >>> 0); writeU32LE(out, 36, (x9 + s9) >>> 0) + writeU32LE(out, 40, (x10 + s10) >>> 0); writeU32LE(out, 44, (x11 + s11) >>> 0) + writeU32LE(out, 48, (x12 + s12) >>> 0); writeU32LE(out, 52, (x13 + s13) >>> 0) + writeU32LE(out, 56, (x14 + s14) >>> 0); writeU32LE(out, 60, (x15 + s15) >>> 0) + return out +} + +// -- Streaming state + +export interface SbState { + _subkey: Uint8Array + _nonce8: Uint8Array + _counter: number + _ksBuf: Uint8Array + _ksOff: number + _authState: StateAddress +} + +export function sbInit(key: Uint8Array, nonce: Uint8Array): SbState { + // Double HSalsa20 cascade matching Haskell cryptonite XSalsa20 (Crypto.hs:xSalsa20): + // subkey1 = HSalsa20(key, zeros16) + // subkey2 = HSalsa20(subkey1, nonce[0:16]) + // keystream = Salsa20(subkey2, nonce[16:24]) + const zeros16 = new Uint8Array(16) + const subkey1 = _sodium.crypto_core_hsalsa20(zeros16, key) + const subkey = _sodium.crypto_core_hsalsa20(nonce.subarray(0, 16), subkey1) + const nonce8 = new Uint8Array(nonce.subarray(16, 24)) + const block0 = salsa20Block(subkey, nonce8, 0) + const poly1305Key = block0.subarray(0, 32) + const ksBuf = new Uint8Array(block0.subarray(32)) + const authState = sodium.crypto_onetimeauth_init(poly1305Key) + return {_subkey: subkey, _nonce8: nonce8, _counter: 1, _ksBuf: ksBuf, _ksOff: 0, _authState: authState} +} + +export function cbInit(dhSecret: Uint8Array, nonce: Uint8Array): SbState { + return sbInit(dhSecret, nonce) +} + +export function sbEncryptChunk(state: SbState, chunk: Uint8Array): Uint8Array { + const cipher = xorKeystream(state, chunk) + 
sodium.crypto_onetimeauth_update(state._authState, cipher) + return cipher +} + +export function sbDecryptChunk(state: SbState, chunk: Uint8Array): Uint8Array { + sodium.crypto_onetimeauth_update(state._authState, chunk) + return xorKeystream(state, chunk) +} + +export function sbAuth(state: SbState): Uint8Array { + return sodium.crypto_onetimeauth_final(state._authState) +} + +// -- High-level: tail tag (tag appended) + +export function sbEncryptTailTag( + key: Uint8Array, nonce: Uint8Array, + data: Uint8Array, len: bigint, padLen: bigint +): Uint8Array { + const padded = padLazy(data, len, padLen) + const state = sbInit(key, nonce) + const cipher = sbEncryptChunk(state, padded) + const tag = sbAuth(state) + return concatBytes(cipher, tag) +} + +export function sbDecryptTailTag( + key: Uint8Array, nonce: Uint8Array, + paddedLen: bigint, data: Uint8Array +): {valid: boolean; content: Uint8Array} { + const pLen = Number(paddedLen) + const cipher = data.subarray(0, pLen) + const providedTag = data.subarray(pLen) + const state = sbInit(key, nonce) + const plaintext = sbDecryptChunk(state, cipher) + const computedTag = sbAuth(state) + const valid = providedTag.length === 16 && constantTimeEqual(providedTag, computedTag) + const content = unPadLazy(plaintext) + return {valid, content} +} + +// -- Tag-prepended secretbox (Haskell Crypto.hs:cryptoBox) + +export function cryptoBox(key: Uint8Array, nonce: Uint8Array, msg: Uint8Array): Uint8Array { + const state = sbInit(key, nonce) + const cipher = sbEncryptChunk(state, msg) + const tag = sbAuth(state) + return concatBytes(tag, cipher) +} + +export function cbEncrypt( + dhSecret: Uint8Array, nonce: Uint8Array, + msg: Uint8Array, padLen: number +): Uint8Array { + return cryptoBox(dhSecret, nonce, pad(msg, padLen)) +} + +export function cbDecrypt( + dhSecret: Uint8Array, nonce: Uint8Array, + packet: Uint8Array +): Uint8Array { + const tag = packet.subarray(0, 16) + const cipher = packet.subarray(16) + const state = 
sbInit(dhSecret, nonce) + const plaintext = sbDecryptChunk(state, cipher) + const computedTag = sbAuth(state) + if (!constantTimeEqual(tag, computedTag)) throw new Error("secretbox: authentication failed") + return unPad(plaintext) +} + +// -- Internal + +function xorKeystream(state: SbState, data: Uint8Array): Uint8Array { + const result = new Uint8Array(data.length) + let off = 0 + while (off < data.length) { + if (state._ksOff >= state._ksBuf.length) { + state._ksBuf = salsa20Block(state._subkey, state._nonce8, state._counter++) + state._ksOff = 0 + } + const available = state._ksBuf.length - state._ksOff + const needed = data.length - off + const n = Math.min(available, needed) + for (let i = 0; i < n; i++) { + result[off + i] = data[off + i] ^ state._ksBuf[state._ksOff + i] + } + state._ksOff += n + off += n + } + return result +} + +function constantTimeEqual(a: Uint8Array, b: Uint8Array): boolean { + if (a.length !== b.length) return false + let diff = 0 + for (let i = 0; i < a.length; i++) diff |= a[i] ^ b[i] + return diff === 0 +} diff --git a/xftp-web/src/download.ts b/xftp-web/src/download.ts new file mode 100644 index 0000000000..e194a3f864 --- /dev/null +++ b/xftp-web/src/download.ts @@ -0,0 +1,76 @@ +// XFTP download pipeline -- integration of protocol + crypto layers. +// +// Ties together: DH key exchange (keys), transport decryption (client), +// file-level decryption (file), chunk sizing (chunks), digest verification. +// +// Usage: +// 1. Parse FileDescription from YAML (description.ts) +// 2. For each chunk replica: +// a. generateX25519KeyPair() -> ephemeral DH keypair +// b. encodeFGET(dhPub) -> FGET command +// c. encodeAuthTransmission(...) -> padded block (send to server) +// d. decodeTransmission(responseBlock) -> raw response +// e. decodeResponse(raw) -> FRFile { rcvDhKey, nonce } +// f. processFileResponse(rcvPrivKey, rcvDhKey, nonce) -> dhSecret +// g. decryptReceivedChunk(dhSecret, nonce, encData, digest) -> plaintext +// 3. 
processDownloadedFile(fd, plaintextChunks) -> { header, content }

import {dh} from "./crypto/keys.js"
import {sha256} from "./crypto/digest.js"
import {decryptChunks, type FileHeader} from "./crypto/file.js"
import {decryptTransportChunk} from "./protocol/client.js"
import type {FileDescription} from "./protocol/description.js"

// -- Process FRFile response

// Derive transport decryption secret from FRFile response parameters.
// Uses DH(serverDhKey, recipientPrivKey) to produce shared secret.
export function processFileResponse(
  recipientPrivKey: Uint8Array, // Ephemeral X25519 private key (32 bytes)
  serverDhKey: Uint8Array, // rcvDhKey from FRFile response (32 bytes)
): Uint8Array {
  return dh(serverDhKey, recipientPrivKey)
}

// -- Decrypt a single received chunk

// Decrypt transport-encrypted chunk data and verify SHA-256 digest.
// Pass expectedDigest = null to skip digest verification.
// Returns decrypted content; throws on auth tag or digest failure.
export function decryptReceivedChunk(
  dhSecret: Uint8Array,
  cbNonce: Uint8Array,
  encData: Uint8Array,
  expectedDigest: Uint8Array | null
): Uint8Array {
  // decryptTransportChunk verifies the appended 16-byte auth tag itself,
  // so there is no need to slice the tag out here (the previous unused
  // `providedTag` copy has been removed).
  const {valid, content} = decryptTransportChunk(dhSecret, cbNonce, encData)
  if (!valid) throw new Error("transport auth tag verification failed")
  if (expectedDigest !== null) {
    const actual = sha256(content)
    if (!digestEqual(actual, expectedDigest)) {
      throw new Error("chunk digest mismatch")
    }
  }
  return content
}

// -- Full download pipeline

// Process downloaded file: concatenate transport-decrypted chunks,
// then file-level decrypt using key/nonce from file description.
// Returns parsed FileHeader and file content.
+export function processDownloadedFile( + fd: FileDescription, + plaintextChunks: Uint8Array[] +): {header: FileHeader, content: Uint8Array} { + return decryptChunks(BigInt(fd.size), plaintextChunks, fd.key, fd.nonce) +} + +// -- Internal + +function digestEqual(a: Uint8Array, b: Uint8Array): boolean { + if (a.length !== b.length) return false + let diff = 0 + for (let i = 0; i < a.length; i++) diff |= a[i] ^ b[i] + return diff === 0 +} diff --git a/xftp-web/src/protocol/address.ts b/xftp-web/src/protocol/address.ts new file mode 100644 index 0000000000..a4d4c05f61 --- /dev/null +++ b/xftp-web/src/protocol/address.ts @@ -0,0 +1,54 @@ +// XFTP server address parsing/formatting -- Simplex.Messaging.Protocol (ProtocolServer) +// +// Parses/formats server address strings of the form: +// xftp://@[,,...][:] +// +// KeyHash is base64url-encoded SHA-256 fingerprint of the identity certificate. + +import {base64urlEncode} from "./description.js" + +export interface XFTPServer { + keyHash: Uint8Array // 32-byte SHA-256 fingerprint (decoded from base64url) + host: string // primary hostname + port: string // port number (default "443") +} + +// Decode base64url (RFC 4648 section 5) to Uint8Array. +function base64urlDecode(s: string): Uint8Array { + // Convert base64url to standard base64 + let b64 = s.replace(/-/g, '+').replace(/_/g, '/') + // Add padding if needed + while (b64.length % 4 !== 0) b64 += '=' + const bin = atob(b64) + const bytes = new Uint8Array(bin.length) + for (let i = 0; i < bin.length; i++) bytes[i] = bin.charCodeAt(i) + return bytes +} + +// Parse an XFTP server address string. 
+// Format: xftp://@[,,...][:] +export function parseXFTPServer(address: string): XFTPServer { + const m = address.match(/^xftp:\/\/([A-Za-z0-9_-]+={0,2})@(.+)$/) + if (!m) throw new Error("parseXFTPServer: invalid address format") + const keyHash = base64urlDecode(m[1]) + if (keyHash.length !== 32) throw new Error("parseXFTPServer: keyHash must be 32 bytes") + const hostPart = m[2] + // Take the first host (before any comma), then split port from that + const firstHost = hostPart.split(',')[0] + const colonIdx = firstHost.lastIndexOf(':') + let host: string + let port: string + if (colonIdx > 0) { + host = firstHost.substring(0, colonIdx) + port = firstHost.substring(colonIdx + 1) + } else { + host = firstHost + port = "443" + } + return {keyHash, host, port} +} + +// Format an XFTPServer back to its URI string representation. +export function formatXFTPServer(srv: XFTPServer): string { + return "xftp://" + base64urlEncode(srv.keyHash) + "@" + srv.host + ":" + srv.port +} diff --git a/xftp-web/src/protocol/chunks.ts b/xftp-web/src/protocol/chunks.ts new file mode 100644 index 0000000000..fdbac31d76 --- /dev/null +++ b/xftp-web/src/protocol/chunks.ts @@ -0,0 +1,86 @@ +// XFTP chunk sizing -- Simplex.FileTransfer.Chunks + Client +// +// Computes chunk sizes for file uploads, chunk specifications with offsets, +// and per-chunk SHA-256 digests. 
+ +import {kb, mb} from "./description.js" +import {sha256} from "../crypto/digest.js" + +// -- Chunk size constants (Simplex.FileTransfer.Chunks) + +export const chunkSize0 = kb(64) // 65536 +export const chunkSize1 = kb(256) // 262144 +export const chunkSize2 = mb(1) // 1048576 +export const chunkSize3 = mb(4) // 4194304 + +export const serverChunkSizes = [chunkSize0, chunkSize1, chunkSize2, chunkSize3] + +// -- Size constants + +export const fileSizeLen = 8 // 64-bit file size prefix (padLazy) +export const authTagSize = 16 // Poly1305 authentication tag + +// -- Chunk sizing (Simplex.FileTransfer.Client.prepareChunkSizes) + +function size34(sz: number): number { + return Math.floor((sz * 3) / 4) +} + +export function prepareChunkSizes(payloadSize: number): number[] { + let smallSize: number, bigSize: number + if (payloadSize > size34(chunkSize3)) { + smallSize = chunkSize2; bigSize = chunkSize3 + } else if (payloadSize > size34(chunkSize2)) { + smallSize = chunkSize1; bigSize = chunkSize2 + } else { + smallSize = chunkSize0; bigSize = chunkSize1 + } + function prepareSizes(size: number): number[] { + if (size === 0) return [] + if (size >= bigSize) { + const n1 = Math.floor(size / bigSize) + const remSz = size % bigSize + return new Array(n1).fill(bigSize).concat(prepareSizes(remSz)) + } + if (size > size34(bigSize)) return [bigSize] + const n2 = Math.floor(size / smallSize) + const remSz2 = size % smallSize + return new Array(remSz2 === 0 ? n2 : n2 + 1).fill(smallSize) + } + return prepareSizes(payloadSize) +} + +// Find the smallest server chunk size that fits the payload. +// Returns null if payload exceeds the largest chunk size. +// Matches Haskell singleChunkSize. 
+export function singleChunkSize(payloadSize: number): number | null { + for (const sz of serverChunkSizes) { + if (payloadSize <= sz) return sz + } + return null +} + +// -- Chunk specs + +export interface ChunkSpec { + chunkOffset: number + chunkSize: number +} + +// Generate chunk specifications with byte offsets. +// Matches Haskell prepareChunkSpecs (without filePath). +export function prepareChunkSpecs(chunkSizes: number[]): ChunkSpec[] { + const specs: ChunkSpec[] = [] + let offset = 0 + for (const size of chunkSizes) { + specs.push({chunkOffset: offset, chunkSize: size}) + offset += size + } + return specs +} + +// -- Chunk digest + +export function getChunkDigest(chunk: Uint8Array): Uint8Array { + return sha256(chunk) +} diff --git a/xftp-web/src/protocol/client.ts b/xftp-web/src/protocol/client.ts new file mode 100644 index 0000000000..1524f4235b --- /dev/null +++ b/xftp-web/src/protocol/client.ts @@ -0,0 +1,95 @@ +// XFTP client protocol operations -- Simplex.FileTransfer.Client + Crypto +// +// CbAuthenticator-based command authentication and transport-level +// chunk encryption/decryption for XFTP downloads. + +import {concatBytes} from "./encoding.js" +import {dh} from "../crypto/keys.js" +import {sha512} from "../crypto/digest.js" +import { + cbInit, sbEncryptChunk, sbDecryptChunk, sbAuth, cryptoBox +} from "../crypto/secretbox.js" + +// -- Constants + +export const cbAuthenticatorSize = 80 // SHA512 (64) + authTag (16) + +// -- CbAuthenticator (Crypto.hs:cbAuthenticate) + +// Create crypto_box authenticator for a message. +// Encrypts sha512(msg) with NaCl crypto_box using DH(peerPubKey, ownPrivKey). +// Returns 80 bytes (16-byte tag prepended + 64-byte encrypted hash). 
+export function cbAuthenticate( + peerPubKey: Uint8Array, + ownPrivKey: Uint8Array, + nonce: Uint8Array, + msg: Uint8Array +): Uint8Array { + const dhSecret = dh(peerPubKey, ownPrivKey) + const hash = sha512(msg) + return cryptoBox(dhSecret, nonce, hash) +} + +// Verify crypto_box authenticator for a message. +// Decrypts authenticator with DH(peerPubKey, ownPrivKey), checks against sha512(msg). +export function cbVerify( + peerPubKey: Uint8Array, + ownPrivKey: Uint8Array, + nonce: Uint8Array, + authenticator: Uint8Array, + msg: Uint8Array +): boolean { + if (authenticator.length !== cbAuthenticatorSize) return false + const dhSecret = dh(peerPubKey, ownPrivKey) + const tag = authenticator.subarray(0, 16) + const cipher = authenticator.subarray(16) + const state = cbInit(dhSecret, nonce) + const plaintext = sbDecryptChunk(state, cipher) + const computedTag = sbAuth(state) + if (!constantTimeEqual(tag, computedTag)) return false + const expectedHash = sha512(msg) + return constantTimeEqual(plaintext, expectedHash) +} + +// -- Transport-level chunk encryption/decryption + +// Encrypt a chunk for transport (tag-appended format). +// Matches sendEncFile in FileTransfer.Transport: +// ciphertext streamed via sbEncryptChunk, then 16-byte auth tag appended. +export function encryptTransportChunk( + dhSecret: Uint8Array, + cbNonce: Uint8Array, + plainData: Uint8Array +): Uint8Array { + const state = cbInit(dhSecret, cbNonce) + const cipher = sbEncryptChunk(state, plainData) + const tag = sbAuth(state) + return concatBytes(cipher, tag) +} + +// Decrypt a transport-encrypted chunk (tag-appended format). +// Matches receiveEncFile / receiveSbFile in FileTransfer.Transport: +// ciphertext decrypted via sbDecryptChunk, then 16-byte auth tag verified. 
+export function decryptTransportChunk( + dhSecret: Uint8Array, + cbNonce: Uint8Array, + encData: Uint8Array +): {valid: boolean, content: Uint8Array, computedTag: Uint8Array} { + if (encData.length < 16) return {valid: false, content: new Uint8Array(0), computedTag: new Uint8Array(0)} + const cipher = encData.subarray(0, encData.length - 16) + const providedTag = encData.subarray(encData.length - 16) + const state = cbInit(dhSecret, cbNonce) + const plaintext = sbDecryptChunk(state, cipher) + const computedTag = sbAuth(state) + const valid = constantTimeEqual(providedTag, computedTag) + return {valid, content: plaintext, computedTag} +} + +// -- Internal + +function constantTimeEqual(a: Uint8Array, b: Uint8Array): boolean { + if (a.length !== b.length) return false + let diff = 0 + for (let i = 0; i < a.length; i++) diff |= a[i] ^ b[i] + return diff === 0 +} diff --git a/xftp-web/src/protocol/commands.ts b/xftp-web/src/protocol/commands.ts new file mode 100644 index 0000000000..3ca43541fc --- /dev/null +++ b/xftp-web/src/protocol/commands.ts @@ -0,0 +1,157 @@ +// Protocol commands and responses -- Simplex.FileTransfer.Protocol +// +// Commands (client -> server): FNEW, FADD, FPUT, FDEL, FGET, FACK, PING +// Responses (server -> client): SIDS, RIDS, FILE, OK, ERR, PONG + +import { + Decoder, concatBytes, + encodeBytes, decodeBytes, + encodeWord32, + encodeNonEmpty, decodeNonEmpty, + encodeMaybe +} from "./encoding.js" +import {decodePubKeyX25519} from "../crypto/keys.js" + +// -- Types + +export interface FileInfo { + sndKey: Uint8Array // DER-encoded Ed25519 public key (44 bytes) + size: number // Word32 + digest: Uint8Array // SHA-256 digest (32 bytes) +} + +export type CommandError = "UNKNOWN" | "SYNTAX" | "PROHIBITED" | "NO_AUTH" | "HAS_AUTH" | "NO_ENTITY" + +export type XFTPErrorType = + | {type: "BLOCK"} | {type: "SESSION"} | {type: "HANDSHAKE"} + | {type: "CMD", cmdErr: CommandError} + | {type: "AUTH"} + | {type: "BLOCKED", blockInfo: string} + | {type: 
"SIZE"} | {type: "QUOTA"} | {type: "DIGEST"} | {type: "CRYPTO"} + | {type: "NO_FILE"} | {type: "HAS_FILE"} | {type: "FILE_IO"} + | {type: "TIMEOUT"} | {type: "INTERNAL"} + +export type FileResponse = + | {type: "FRSndIds", senderId: Uint8Array, recipientIds: Uint8Array[]} + | {type: "FRRcvIds", recipientIds: Uint8Array[]} + | {type: "FRFile", rcvDhKey: Uint8Array, nonce: Uint8Array} + | {type: "FROk"} + | {type: "FRErr", err: XFTPErrorType} + | {type: "FRPong"} + +// -- FileInfo encoding + +// smpEncode FileInfo {sndKey, size, digest} = smpEncode (sndKey, size, digest) +export function encodeFileInfo(fi: FileInfo): Uint8Array { + return concatBytes(encodeBytes(fi.sndKey), encodeWord32(fi.size), encodeBytes(fi.digest)) +} + +// -- Command encoding (encodeProtocol) + +const SPACE = new Uint8Array([0x20]) + +function ascii(s: string): Uint8Array { + const buf = new Uint8Array(s.length) + for (let i = 0; i < s.length; i++) buf[i] = s.charCodeAt(i) + return buf +} + +export function encodeFNEW(file: FileInfo, rcvKeys: Uint8Array[], auth: Uint8Array | null): Uint8Array { + return concatBytes( + ascii("FNEW"), SPACE, + encodeFileInfo(file), + encodeNonEmpty(encodeBytes, rcvKeys), + encodeMaybe(encodeBytes, auth) + ) +} + +export function encodeFADD(rcvKeys: Uint8Array[]): Uint8Array { + return concatBytes(ascii("FADD"), SPACE, encodeNonEmpty(encodeBytes, rcvKeys)) +} + +export function encodeFPUT(): Uint8Array { return ascii("FPUT") } + +export function encodeFDEL(): Uint8Array { return ascii("FDEL") } + +export function encodeFGET(rcvDhKey: Uint8Array): Uint8Array { + return concatBytes(ascii("FGET"), SPACE, encodeBytes(rcvDhKey)) +} + +export function encodePING(): Uint8Array { return ascii("PING") } + +// -- Response decoding + +function readTag(d: Decoder): string { + const start = d.offset() + while (d.remaining() > 0) { + if (d.buf[d.offset()] === 0x20 || d.buf[d.offset()] === 0x0a) break + d.anyByte() + } + let s = "" + for (let i = start; i < d.offset(); i++) s += 
String.fromCharCode(d.buf[i]) + return s +} + +function readSpace(d: Decoder): void { + if (d.anyByte() !== 0x20) throw new Error("expected space") +} + +function decodeCommandError(s: string): CommandError { + if (s === "UNKNOWN" || s === "SYNTAX" || s === "PROHIBITED" || s === "NO_AUTH" || s === "HAS_AUTH" || s === "NO_ENTITY") return s + if (s === "NO_QUEUE") return "NO_ENTITY" + throw new Error("bad CommandError: " + s) +} + +export function decodeXFTPError(d: Decoder): XFTPErrorType { + const s = readTag(d) + switch (s) { + case "BLOCK": return {type: "BLOCK"} + case "SESSION": return {type: "SESSION"} + case "HANDSHAKE": return {type: "HANDSHAKE"} + case "CMD": { readSpace(d); return {type: "CMD", cmdErr: decodeCommandError(readTag(d))} } + case "AUTH": return {type: "AUTH"} + case "BLOCKED": { + readSpace(d) + const rest = d.takeAll() + let info = "" + for (let i = 0; i < rest.length; i++) info += String.fromCharCode(rest[i]) + return {type: "BLOCKED", blockInfo: info} + } + case "SIZE": return {type: "SIZE"} + case "QUOTA": return {type: "QUOTA"} + case "DIGEST": return {type: "DIGEST"} + case "CRYPTO": return {type: "CRYPTO"} + case "NO_FILE": return {type: "NO_FILE"} + case "HAS_FILE": return {type: "HAS_FILE"} + case "FILE_IO": return {type: "FILE_IO"} + case "TIMEOUT": return {type: "TIMEOUT"} + case "INTERNAL": return {type: "INTERNAL"} + default: throw new Error("bad XFTPErrorType: " + s) + } +} + +export function decodeResponse(data: Uint8Array): FileResponse { + const d = new Decoder(data) + const tagStr = readTag(d) + switch (tagStr) { + case "SIDS": { + readSpace(d) + const senderId = decodeBytes(d) + return {type: "FRSndIds", senderId, recipientIds: decodeNonEmpty(decodeBytes, d)} + } + case "RIDS": { + readSpace(d) + return {type: "FRRcvIds", recipientIds: decodeNonEmpty(decodeBytes, d)} + } + case "FILE": { + readSpace(d) + const rcvDhKey = decodePubKeyX25519(decodeBytes(d)) + const nonce = d.take(24) + return {type: "FRFile", rcvDhKey, nonce} 
+ } + case "OK": return {type: "FROk"} + case "ERR": { readSpace(d); return {type: "FRErr", err: decodeXFTPError(d)} } + case "PONG": return {type: "FRPong"} + default: throw new Error("unknown response: " + tagStr) + } +} diff --git a/xftp-web/src/protocol/description.ts b/xftp-web/src/protocol/description.ts new file mode 100644 index 0000000000..c58151f478 --- /dev/null +++ b/xftp-web/src/protocol/description.ts @@ -0,0 +1,363 @@ +// XFTP file description encoding/decoding -- Simplex.FileTransfer.Description +// +// Handles YAML-encoded file descriptions matching Haskell Data.Yaml output format. +// Base64url encoding matches Haskell Data.ByteString.Base64.URL.encode (with padding). + +// -- Base64url (RFC 4648 section 5) with '=' padding + +const B64URL = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" +const B64_DECODE = new Uint8Array(128) +B64_DECODE.fill(0xff) +for (let i = 0; i < 64; i++) B64_DECODE[B64URL.charCodeAt(i)] = i + +export function base64urlEncode(data: Uint8Array): string { + let result = "" + const len = data.length + let i = 0 + for (; i + 2 < len; i += 3) { + const b0 = data[i], b1 = data[i + 1], b2 = data[i + 2] + result += B64URL[b0 >>> 2] + result += B64URL[((b0 & 3) << 4) | (b1 >>> 4)] + result += B64URL[((b1 & 15) << 2) | (b2 >>> 6)] + result += B64URL[b2 & 63] + } + if (i < len) { + const b0 = data[i] + result += B64URL[b0 >>> 2] + if (i + 1 < len) { + const b1 = data[i + 1] + result += B64URL[((b0 & 3) << 4) | (b1 >>> 4)] + result += B64URL[(b1 & 15) << 2] + result += "=" + } else { + result += B64URL[(b0 & 3) << 4] + result += "==" + } + } + return result +} + +export function base64urlDecode(s: string): Uint8Array { + let end = s.length + while (end > 0 && s.charCodeAt(end - 1) === 0x3d) end-- // strip '=' + const n = end + const out = new Uint8Array((n * 3) >>> 2) + let j = 0, i = 0 + for (; i + 3 < n; i += 4) { + const a = B64_DECODE[s.charCodeAt(i)], b = B64_DECODE[s.charCodeAt(i + 1)] + const c = 
B64_DECODE[s.charCodeAt(i + 2)], d = B64_DECODE[s.charCodeAt(i + 3)] + out[j++] = (a << 2) | (b >>> 4) + out[j++] = ((b & 15) << 4) | (c >>> 2) + out[j++] = ((c & 3) << 6) | d + } + if (n - i >= 2) { + const a = B64_DECODE[s.charCodeAt(i)], b = B64_DECODE[s.charCodeAt(i + 1)] + out[j++] = (a << 2) | (b >>> 4) + if (n - i >= 3) { + const c = B64_DECODE[s.charCodeAt(i + 2)] + out[j++] = ((b & 15) << 4) | (c >>> 2) + } + } + return out +} + +// -- FileSize encoding/decoding + +export const kb = (n: number): number => n * 1024 +export const mb = (n: number): number => n * 1048576 +export const gb = (n: number): number => n * 1073741824 + +export function encodeFileSize(bytes: number): string { + const ks = Math.floor(bytes / 1024) + if (bytes % 1024 !== 0) return String(bytes) + const ms = Math.floor(ks / 1024) + if (ks % 1024 !== 0) return ks + "kb" + const gs = Math.floor(ms / 1024) + if (ms % 1024 !== 0) return ms + "mb" + return gs + "gb" +} + +export function decodeFileSize(s: string): number { + if (s.endsWith("gb")) return parseInt(s) * 1073741824 + if (s.endsWith("mb")) return parseInt(s) * 1048576 + if (s.endsWith("kb")) return parseInt(s) * 1024 + return parseInt(s) +} + +// -- Types + +export type FileParty = "recipient" | "sender" + +export interface FileDescription { + party: FileParty + size: number // total file size in bytes + digest: Uint8Array // SHA-512 file digest + key: Uint8Array // SbKey (32 bytes) + nonce: Uint8Array // CbNonce (24 bytes) + chunkSize: number // default chunk size in bytes + chunks: FileChunk[] + redirect: RedirectFileInfo | null +} + +export interface RedirectFileInfo { + size: number + digest: Uint8Array +} + +export interface FileChunk { + chunkNo: number + chunkSize: number + digest: Uint8Array + replicas: FileChunkReplica[] +} + +export interface FileChunkReplica { + server: string // XFTPServer URI (e.g. 
"xftp://abc=@example.com") + replicaId: Uint8Array + replicaKey: Uint8Array // DER-encoded private key +} + +// -- Internal: flat server replica + +interface FileServerReplica { + chunkNo: number + server: string + replicaId: Uint8Array + replicaKey: Uint8Array + digest: Uint8Array | null + chunkSize: number | null +} + +// -- Server replica colon-separated format + +function encodeServerReplica(r: FileServerReplica): string { + let s = r.chunkNo + ":" + base64urlEncode(r.replicaId) + ":" + base64urlEncode(r.replicaKey) + if (r.digest !== null) s += ":" + base64urlEncode(r.digest) + if (r.chunkSize !== null) s += ":" + encodeFileSize(r.chunkSize) + return s +} + +function decodeServerReplica(server: string, s: string): FileServerReplica { + const parts = s.split(":") + if (parts.length < 3) throw new Error("invalid server replica: " + s) + return { + chunkNo: parseInt(parts[0]), + server, + replicaId: base64urlDecode(parts[1]), + replicaKey: base64urlDecode(parts[2]), + digest: parts.length >= 4 ? base64urlDecode(parts[3]) : null, + chunkSize: parts.length >= 5 ? decodeFileSize(parts[4]) : null + } +} + +// -- Unfold chunks to flat replicas + +function unfoldChunksToReplicas(defChunkSize: number, chunks: FileChunk[]): FileServerReplica[] { + const result: FileServerReplica[] = [] + for (const c of chunks) { + c.replicas.forEach((r, idx) => { + result.push({ + chunkNo: c.chunkNo, + server: r.server, + replicaId: r.replicaId, + replicaKey: r.replicaKey, + digest: idx === 0 ? c.digest : null, + chunkSize: c.chunkSize !== defChunkSize && idx === 0 ? 
c.chunkSize : null + }) + }) + } + return result +} + +// -- Group replicas by server (for YAML encoding) + +function encodeFileReplicas( + defChunkSize: number, chunks: FileChunk[] +): {server: string, chunks: string[]}[] { + const flat = unfoldChunksToReplicas(defChunkSize, chunks) + // Sort by server URI string (matches Haskell Ord for ProtocolServer when + // all servers share the same scheme and keyHash -- true for typical use). + flat.sort((a, b) => a.server < b.server ? -1 : a.server > b.server ? 1 : 0) + const groups: {server: string, chunks: string[]}[] = [] + for (const r of flat) { + if (groups.length === 0 || groups[groups.length - 1].server !== r.server) { + groups.push({server: r.server, chunks: [encodeServerReplica(r)]}) + } else { + groups[groups.length - 1].chunks.push(encodeServerReplica(r)) + } + } + return groups +} + +// -- Fold flat replicas back into FileChunks + +function bytesEqual(a: Uint8Array, b: Uint8Array): boolean { + if (a.length !== b.length) return false + for (let i = 0; i < a.length; i++) if (a[i] !== b[i]) return false + return true +} + +function foldReplicasToChunks(defChunkSize: number, replicas: FileServerReplica[]): FileChunk[] { + const sizes = new Map() + const digests = new Map() + for (const r of replicas) { + if (r.chunkSize !== null) { + const existing = sizes.get(r.chunkNo) + if (existing !== undefined && existing !== r.chunkSize) + throw new Error("different size in chunk replicas") + sizes.set(r.chunkNo, r.chunkSize) + } + if (r.digest !== null) { + const existing = digests.get(r.chunkNo) + if (existing !== undefined && !bytesEqual(existing, r.digest)) + throw new Error("different digest in chunk replicas") + digests.set(r.chunkNo, r.digest) + } + } + const chunkMap = new Map() + for (const r of replicas) { + const existing = chunkMap.get(r.chunkNo) + if (existing) { + existing.replicas.push({server: r.server, replicaId: r.replicaId, replicaKey: r.replicaKey}) + } else { + const digest = digests.get(r.chunkNo) + if 
(!digest) throw new Error("no digest for chunk") + chunkMap.set(r.chunkNo, { + chunkNo: r.chunkNo, + chunkSize: sizes.get(r.chunkNo) ?? defChunkSize, + digest, + replicas: [{server: r.server, replicaId: r.replicaId, replicaKey: r.replicaKey}] + }) + } + } + return Array.from(chunkMap.values()).sort((a, b) => a.chunkNo - b.chunkNo) +} + +// -- YAML encoding (matching Data.Yaml key ordering) + +export function encodeFileDescription(fd: FileDescription): string { + const lines: string[] = [] + // Top-level keys in alphabetical order (matching Data.Yaml / libyaml) + lines.push("chunkSize: " + encodeFileSize(fd.chunkSize)) + lines.push("digest: " + base64urlEncode(fd.digest)) + lines.push("key: " + base64urlEncode(fd.key)) + lines.push("nonce: " + base64urlEncode(fd.nonce)) + lines.push("party: " + fd.party) + if (fd.redirect !== null) { + lines.push("redirect:") + lines.push(" digest: " + base64urlEncode(fd.redirect.digest)) + lines.push(" size: " + fd.redirect.size) + } + const groups = encodeFileReplicas(fd.chunkSize, fd.chunks) + lines.push("replicas:") + for (const g of groups) { + lines.push("- chunks:") + for (const c of g.chunks) { + lines.push(" - " + c) + } + lines.push(" server: " + g.server) + } + lines.push("size: " + encodeFileSize(fd.size)) + return lines.join("\n") + "\n" +} + +// -- YAML decoding + +export function decodeFileDescription(yaml: string): FileDescription { + const lines = yaml.split("\n") + const topLevel: Record = {} + const replicaGroups: {server: string, chunks: string[]}[] = [] + let redirect: RedirectFileInfo | null = null + let i = 0 + while (i < lines.length) { + const line = lines[i] + if (line.length === 0) { i++; continue } + if (line === "replicas:") { + i++ + while (i < lines.length && lines[i].startsWith("- ")) { + const group = {server: "", chunks: [] as string[]} + i = parseReplicaItem(lines, i, group) + replicaGroups.push(group) + } + } else if (line === "redirect:") { + i++ + let digestStr = "", sizeStr = "" + while (i < 
lines.length && lines[i].startsWith(" ")) { + const kv = lines[i].substring(2) + const ci = kv.indexOf(": ") + if (ci >= 0) { + const k = kv.substring(0, ci), v = kv.substring(ci + 2) + if (k === "digest") digestStr = v + if (k === "size") sizeStr = v + } + i++ + } + redirect = {size: parseInt(sizeStr), digest: base64urlDecode(digestStr)} + } else { + const ci = line.indexOf(": ") + if (ci >= 0) topLevel[line.substring(0, ci)] = line.substring(ci + 2) + i++ + } + } + const chunkSize = decodeFileSize(topLevel["chunkSize"]) + const serverReplicas: FileServerReplica[] = [] + for (const g of replicaGroups) { + for (const c of g.chunks) serverReplicas.push(decodeServerReplica(g.server, c)) + } + return { + party: topLevel["party"] as FileParty, + size: decodeFileSize(topLevel["size"]), + digest: base64urlDecode(topLevel["digest"]), + key: base64urlDecode(topLevel["key"]), + nonce: base64urlDecode(topLevel["nonce"]), + chunkSize, + chunks: foldReplicasToChunks(chunkSize, serverReplicas), + redirect + } +} + +function parseReplicaItem( + lines: string[], startIdx: number, group: {server: string, chunks: string[]} +): number { + let i = startIdx + const first = lines[i].substring(2) // strip "- " prefix + i = parseReplicaField(first, lines, i + 1, group) + while (i < lines.length && lines[i].startsWith(" ") && !lines[i].startsWith("- ")) { + i = parseReplicaField(lines[i].substring(2), lines, i + 1, group) + } + return i +} + +function parseReplicaField( + entry: string, lines: string[], nextIdx: number, + group: {server: string, chunks: string[]} +): number { + if (entry === "chunks:" || entry.startsWith("chunks:")) { + let i = nextIdx + while (i < lines.length && lines[i].startsWith(" - ")) { + group.chunks.push(lines[i].substring(4)) + i++ + } + return i + } + const ci = entry.indexOf(": ") + if (ci >= 0) { + const k = entry.substring(0, ci), v = entry.substring(ci + 2) + if (k === "server") group.server = v + } + return nextIdx +} + +// -- Validation + +export function 
validateFileDescription(fd: FileDescription): string | null { + for (let i = 0; i < fd.chunks.length; i++) { + if (fd.chunks[i].chunkNo !== i + 1) return "chunk numbers are not sequential" + } + let total = 0 + for (const c of fd.chunks) total += c.chunkSize + if (total !== fd.size) return "chunks total size is different than file size" + return null +} + +export const fdSeparator = "################################\n" diff --git a/xftp-web/src/protocol/encoding.ts b/xftp-web/src/protocol/encoding.ts new file mode 100644 index 0000000000..aec31f63d9 --- /dev/null +++ b/xftp-web/src/protocol/encoding.ts @@ -0,0 +1,224 @@ +// Binary encoding/decoding matching Haskell Simplex.Messaging.Encoding module. +// All multi-byte integers are big-endian (network byte order). + +// -- Decoder: sequential parser over a Uint8Array (equivalent to Attoparsec parser) + +export class Decoder { + readonly buf: Uint8Array + private readonly view: DataView + private pos: number + + constructor(buf: Uint8Array) { + this.buf = buf + this.view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength) + this.pos = 0 + } + + take(n: number): Uint8Array { + if (this.pos + n > this.buf.length) throw new Error("Decoder: unexpected end of input") + const slice = this.buf.subarray(this.pos, this.pos + n) + this.pos += n + return slice + } + + takeAll(): Uint8Array { + const slice = this.buf.subarray(this.pos) + this.pos = this.buf.length + return slice + } + + anyByte(): number { + if (this.pos >= this.buf.length) throw new Error("Decoder: unexpected end of input") + return this.buf[this.pos++] + } + + remaining(): number { + return this.buf.length - this.pos + } + + offset(): number { + return this.pos + } +} + +// -- Utility + +export function concatBytes(...arrays: Uint8Array[]): Uint8Array { + let totalLen = 0 + for (const a of arrays) totalLen += a.length + const result = new Uint8Array(totalLen) + let offset = 0 + for (const a of arrays) { + result.set(a, offset) + offset += a.length + } + 
return result +} + +// -- Word16: 2-byte big-endian (Encoding.hs:70) + +export function encodeWord16(n: number): Uint8Array { + const buf = new Uint8Array(2) + const view = new DataView(buf.buffer) + view.setUint16(0, n, false) + return buf +} + +export function decodeWord16(d: Decoder): number { + const bytes = d.take(2) + const view = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength) + return view.getUint16(0, false) +} + +// -- Word32: 4-byte big-endian (Encoding.hs:76) + +export function encodeWord32(n: number): Uint8Array { + const buf = new Uint8Array(4) + const view = new DataView(buf.buffer) + view.setUint32(0, n, false) + return buf +} + +export function decodeWord32(d: Decoder): number { + const bytes = d.take(4) + const view = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength) + return view.getUint32(0, false) +} + +// -- Int64: two Word32s, high then low (Encoding.hs:82) +// Uses BigInt because JS numbers lose precision beyond 2^53. + +export function encodeInt64(n: bigint): Uint8Array { + const high = Number((n >> 32n) & 0xFFFFFFFFn) + const low = Number(n & 0xFFFFFFFFn) + return concatBytes(encodeWord32(high), encodeWord32(low)) +} + +export function decodeInt64(d: Decoder): bigint { + const high = BigInt(decodeWord32(d)) + const low = BigInt(decodeWord32(d)) + const unsigned = (high << 32n) | low + // Convert to signed Int64: if bit 63 is set, value is negative + return unsigned >= 0x8000000000000000n ? unsigned - 0x10000000000000000n : unsigned +} + +// -- ByteString: 1-byte length prefix + bytes (Encoding.hs:100) +// Max 255 bytes. 
+ +export function encodeBytes(bs: Uint8Array): Uint8Array { + if (bs.length > 255) throw new Error("encodeBytes: length exceeds 255") + const result = new Uint8Array(1 + bs.length) + result[0] = bs.length + result.set(bs, 1) + return result +} + +export function decodeBytes(d: Decoder): Uint8Array { + const len = d.anyByte() + return d.take(len) +} + +// -- Large: 2-byte big-endian length prefix + bytes (Encoding.hs:133) +// Max 65535 bytes. + +export function encodeLarge(bs: Uint8Array): Uint8Array { + if (bs.length > 65535) throw new Error("encodeLarge: length exceeds 65535") + return concatBytes(encodeWord16(bs.length), bs) +} + +export function decodeLarge(d: Decoder): Uint8Array { + const len = decodeWord16(d) + return d.take(len) +} + +// -- Tail: raw bytes, no prefix (Encoding.hs:124) + +export function encodeTail(bs: Uint8Array): Uint8Array { + return bs +} + +export function decodeTail(d: Decoder): Uint8Array { + return d.takeAll() +} + +// -- Bool: 'T' (0x54) or 'F' (0x46) (Encoding.hs:58) + +const CHAR_T = 0x54 +const CHAR_F = 0x46 + +export function encodeBool(b: boolean): Uint8Array { + return new Uint8Array([b ? CHAR_T : CHAR_F]) +} + +export function decodeBool(d: Decoder): boolean { + const byte = d.anyByte() + if (byte === CHAR_T) return true + if (byte === CHAR_F) return false + throw new Error("decodeBool: invalid tag " + byte) +} + +// -- String/Text: encode as UTF-8 ByteString (Encoding.hs) +// Matches Haskell's Encoding Text instance: encodeUtf8/decodeUtf8. 
+ +const textEncoder = new TextEncoder() +const textDecoder = new TextDecoder() + +export function encodeString(s: string): Uint8Array { + return encodeBytes(textEncoder.encode(s)) +} + +export function decodeString(d: Decoder): string { + return textDecoder.decode(decodeBytes(d)) +} + +// -- Maybe: '0' for Nothing, '1' + encoded value for Just (Encoding.hs:114) + +const CHAR_0 = 0x30 +const CHAR_1 = 0x31 + +export function encodeMaybe(encode: (v: T) => Uint8Array, v: T | null): Uint8Array { + if (v === null) return new Uint8Array([CHAR_0]) + return concatBytes(new Uint8Array([CHAR_1]), encode(v)) +} + +export function decodeMaybe(decode: (d: Decoder) => T, d: Decoder): T | null { + const tag = d.anyByte() + if (tag === CHAR_0) return null + if (tag === CHAR_1) return decode(d) + throw new Error("decodeMaybe: invalid tag " + tag) +} + +// -- NonEmpty: 1-byte length + encoded elements (Encoding.hs:165) +// Fails on empty list (matches Haskell behavior). + +export function encodeNonEmpty(encode: (v: T) => Uint8Array, xs: T[]): Uint8Array { + if (xs.length === 0) throw new Error("encodeNonEmpty: empty list") + if (xs.length > 255) throw new Error("encodeNonEmpty: length exceeds 255") + const parts: Uint8Array[] = [new Uint8Array([xs.length])] + for (const x of xs) parts.push(encode(x)) + return concatBytes(...parts) +} + +export function decodeNonEmpty(decode: (d: Decoder) => T, d: Decoder): T[] { + const len = d.anyByte() + if (len === 0) throw new Error("decodeNonEmpty: empty list") + const result: T[] = [] + for (let i = 0; i < len; i++) result.push(decode(d)) + return result +} + +// -- List encoding (smpEncodeList / smpListP, Encoding.hs:153) + +export function encodeList(encode: (v: T) => Uint8Array, xs: T[]): Uint8Array { + if (xs.length > 255) throw new Error("encodeList: length exceeds 255") + const parts: Uint8Array[] = [new Uint8Array([xs.length])] + for (const x of xs) parts.push(encode(x)) + return concatBytes(...parts) +} + +export function 
decodeList(decode: (d: Decoder) => T, d: Decoder): T[] { + const len = d.anyByte() + const result: T[] = [] + for (let i = 0; i < len; i++) result.push(decode(d)) + return result +} diff --git a/xftp-web/src/protocol/handshake.ts b/xftp-web/src/protocol/handshake.ts new file mode 100644 index 0000000000..c00f8acefc --- /dev/null +++ b/xftp-web/src/protocol/handshake.ts @@ -0,0 +1,220 @@ +// XFTP handshake encoding/decoding -- Simplex.FileTransfer.Transport +// +// Handles XFTP client/server handshake messages and version negotiation. + +import { + Decoder, concatBytes, + encodeWord16, decodeWord16, + encodeBytes, decodeBytes, + encodeMaybe, + decodeLarge, decodeNonEmpty +} from "./encoding.js" +import {sha256} from "../crypto/digest.js" +import {decodePubKeyX25519} from "../crypto/keys.js" +import {blockPad, blockUnpad, XFTP_BLOCK_SIZE} from "./transmission.js" + +// -- Version types + +export interface VersionRange { + minVersion: number // Word16 + maxVersion: number // Word16 +} + +// Encode version range as two big-endian Word16s. +// Matches Haskell: smpEncode (VRange v1 v2) = smpEncode (v1, v2) +export function encodeVersionRange(vr: VersionRange): Uint8Array { + return concatBytes(encodeWord16(vr.minVersion), encodeWord16(vr.maxVersion)) +} + +export function decodeVersionRange(d: Decoder): VersionRange { + const minVersion = decodeWord16(d) + const maxVersion = decodeWord16(d) + if (minVersion > maxVersion) throw new Error("invalid version range: min > max") + return {minVersion, maxVersion} +} + +// Version negotiation: intersection of two version ranges, or null if incompatible. +// Matches Haskell compatibleVRange. 
+export function compatibleVRange(a: VersionRange, b: VersionRange): VersionRange | null { + const min = Math.max(a.minVersion, b.minVersion) + const max = Math.min(a.maxVersion, b.maxVersion) + if (min > max) return null + return {minVersion: min, maxVersion: max} +} + +// -- Client hello + +export interface XFTPClientHello { + webChallenge: Uint8Array | null // 32 random bytes for web handshake, or null for standard +} + +// Encode client hello (padded to XFTP_BLOCK_SIZE for web clients). +// Wire format: smpEncode (Maybe ByteString), padded when webChallenge present +export function encodeClientHello(hello: XFTPClientHello): Uint8Array { + const body = encodeMaybe(encodeBytes, hello.webChallenge) + return hello.webChallenge ? blockPad(body, XFTP_BLOCK_SIZE) : body +} + +// -- Client handshake + +export interface XFTPClientHandshake { + xftpVersion: number // Word16 -- negotiated version + keyHash: Uint8Array // SHA-256 CA certificate fingerprint (32 bytes) +} + +// Encode and pad client handshake to XFTP_BLOCK_SIZE. +// Wire format: pad(smpEncode (xftpVersion, keyHash), 16384) +export function encodeClientHandshake(ch: XFTPClientHandshake): Uint8Array { + const body = concatBytes(encodeWord16(ch.xftpVersion), encodeBytes(ch.keyHash)) + return blockPad(body, XFTP_BLOCK_SIZE) +} + +// -- Server handshake + +export interface XFTPServerHandshake { + xftpVersionRange: VersionRange + sessionId: Uint8Array + certChainDer: Uint8Array[] // raw DER certificate blobs (NonEmpty) + signedKeyDer: Uint8Array // raw DER SignedExact blob + webIdentityProof: Uint8Array | null // signature bytes, or null if absent/empty +} + +// Decode padded server handshake block. +// Wire format: unpad(block) -> (versionRange, sessionId, certChainPubKey, sigBytes) +// where certChainPubKey = (NonEmpty Large certChain, Large signedKey) +// sigBytes = ByteString (1-byte len prefix, empty for Nothing) +// Trailing bytes (Tail) are ignored for forward compatibility. 
+export function decodeServerHandshake(block: Uint8Array): XFTPServerHandshake { + const raw = blockUnpad(block) + // Detect error responses (server sends padded error string like "HANDSHAKE") + if (raw.length < 20) { + const text = String.fromCharCode(...raw) + if (/^[A-Z_]+$/.test(text)) { + console.error('[XFTP] Server handshake error: %s', text) + throw new Error("Server handshake error: " + text) + } + } + const d = new Decoder(raw) + const xftpVersionRange = decodeVersionRange(d) + const sessionId = decodeBytes(d) + // CertChainPubKey: smpEncode (encodeCertChain certChain, SignedObject signedPubKey) + const certChainDer = decodeNonEmpty(decodeLarge, d) + const signedKeyDer = decodeLarge(d) + // webIdentityProof: 1-byte length-prefixed ByteString (empty = Nothing) + let webIdentityProof: Uint8Array | null = null + if (d.remaining() > 0) { + const sigBytes = decodeBytes(d) + webIdentityProof = sigBytes.length === 0 ? null : sigBytes + } + // Remaining bytes are Tail (ignored for forward compatibility) + return {xftpVersionRange, sessionId, certChainDer, signedKeyDer, webIdentityProof} +} + +// -- Certificate utilities + +// Certificate chain decomposition matching Haskell chainIdCaCerts (Transport.Shared). 
+export type ChainCertificates = + | {type: 'empty'} + | {type: 'self'; cert: Uint8Array} + | {type: 'valid'; leafCert: Uint8Array; idCert: Uint8Array; caCert: Uint8Array} + | {type: 'long'} + +export function chainIdCaCerts(certChainDer: Uint8Array[]): ChainCertificates { + switch (certChainDer.length) { + case 0: return {type: 'empty'} + case 1: return {type: 'self', cert: certChainDer[0]} + case 2: return {type: 'valid', leafCert: certChainDer[0], idCert: certChainDer[1], caCert: certChainDer[1]} + case 3: return {type: 'valid', leafCert: certChainDer[0], idCert: certChainDer[1], caCert: certChainDer[2]} + case 4: return {type: 'valid', leafCert: certChainDer[0], idCert: certChainDer[1], caCert: certChainDer[3]} + default: return {type: 'long'} + } +} + +// SHA-256 fingerprint of the identity certificate. +// For 2-cert chains: idCert = last cert (same as CA). +// For 3+ cert chains: idCert = second cert (distinct from CA). +// Matches Haskell: getFingerprint idCert HashSHA256 +export function caFingerprint(certChainDer: Uint8Array[]): Uint8Array { + const cc = chainIdCaCerts(certChainDer) + if (cc.type !== 'valid') throw new Error("caFingerprint: need valid chain (2-4 certs)") + return sha256(cc.idCert) +} + +// -- SignedExact DER parsing + +// Parsed components of an X.509 SignedExact structure. +export interface SignedKey { + objectDer: Uint8Array // raw DER of the signed object (SubjectPublicKeyInfo) + dhKey: Uint8Array // extracted 32-byte X25519 public key + algorithm: Uint8Array // AlgorithmIdentifier DER bytes + signature: Uint8Array // raw signature bytes (Ed25519: 64, Ed448: 114) +} + +// Parse ASN.1 DER length (short and long form). 
+function derLength(d: Decoder): number { + const first = d.anyByte() + if (first < 0x80) return first + const numBytes = first & 0x7f + if (numBytes === 0 || numBytes > 4) throw new Error("DER: unsupported length encoding") + let len = 0 + for (let i = 0; i < numBytes; i++) { + len = (len << 8) | d.anyByte() + } + return len +} + +// Read a complete TLV element, returning the full DER bytes (tag + length + value). +function derElement(d: Decoder): Uint8Array { + const start = d.offset() + d.anyByte() // tag + const len = derLength(d) + d.take(len) // value + return d.buf.subarray(start, d.offset()) +} + +// Extract components from a SignedExact X.PubKey DER structure. +// ASN.1 layout: +// SEQUENCE { +// SubjectPublicKeyInfo (SEQUENCE) -- the signed object +// AlgorithmIdentifier (SEQUENCE) -- signature algorithm +// BIT STRING -- signature +// } +export function extractSignedKey(signedDer: Uint8Array): SignedKey { + const outer = new Decoder(signedDer) + const outerTag = outer.anyByte() + if (outerTag !== 0x30) throw new Error("SignedExact: expected SEQUENCE tag 0x30, got 0x" + outerTag.toString(16)) + derLength(outer) // consume total content length + + // First element: SubjectPublicKeyInfo + const objectDer = derElement(outer) + + // Second element: AlgorithmIdentifier + const algorithm = derElement(outer) + + // Third element: BIT STRING (signature) + const sigTag = outer.anyByte() + if (sigTag !== 0x03) throw new Error("SignedExact: expected BIT STRING tag 0x03, got 0x" + sigTag.toString(16)) + const sigLen = derLength(outer) + const unusedBits = outer.anyByte() + if (unusedBits !== 0) throw new Error("SignedExact: expected 0 unused bits in signature") + const signature = outer.take(sigLen - 1) + + // Extract X25519 key from the signed object. + // objectDer may be the raw SPKI (44 bytes) or a wrapper SEQUENCE + // from x509 objectToSignedExact which wraps toASN1 in Start Sequence. 
+ const dhKey = decodeX25519Key(objectDer) + + return {objectDer, dhKey, algorithm, signature} +} + +// Extract X25519 raw public key from either direct SPKI (44 bytes) +// or a wrapper SEQUENCE containing the SPKI. +function decodeX25519Key(der: Uint8Array): Uint8Array { + if (der.length === 44) return decodePubKeyX25519(der) + if (der[0] !== 0x30) throw new Error("decodeX25519Key: expected SEQUENCE") + const d = new Decoder(der) + d.anyByte() + derLength(d) + const inner = derElement(d) + return decodePubKeyX25519(inner) +} diff --git a/xftp-web/src/protocol/transmission.ts b/xftp-web/src/protocol/transmission.ts new file mode 100644 index 0000000000..1bc6ffd84a --- /dev/null +++ b/xftp-web/src/protocol/transmission.ts @@ -0,0 +1,113 @@ +// XFTP transmission framing -- Simplex.Messaging.Transport + FileTransfer.Protocol +// +// Handles block-level pad/unpad, batch encoding, and Ed25519 auth signing. + +import { + Decoder, concatBytes, + encodeBytes, decodeBytes, + encodeLarge, decodeLarge +} from "./encoding.js" +import {sign} from "../crypto/keys.js" + +// -- Constants + +export const XFTP_BLOCK_SIZE = 16384 + +// Protocol versions (FileTransfer.Transport) +export const initialXFTPVersion = 1 +export const authCmdsXFTPVersion = 2 +export const blockedFilesXFTPVersion = 3 +export const currentXFTPVersion = 3 + +// -- Block-level pad/unpad (Crypto.hs:pad/unPad, strict ByteString) + +export function blockPad(msg: Uint8Array, blockSize: number = XFTP_BLOCK_SIZE): Uint8Array { + const len = msg.length + const padLen = blockSize - len - 2 + if (padLen < 0) throw new Error("blockPad: message too large for block") + const result = new Uint8Array(blockSize) + result[0] = (len >>> 8) & 0xff + result[1] = len & 0xff + result.set(msg, 2) + result.fill(0x23, 2 + len) // '#' padding + return result +} + +export function blockUnpad(block: Uint8Array): Uint8Array { + if (block.length < 2) throw new Error("blockUnpad: too short") + const len = (block[0] << 8) | block[1] + if (2 
+ len > block.length) throw new Error("blockUnpad: invalid length") + return block.subarray(2, 2 + len) +} + +// -- Transmission encoding (client -> server) + +// Encode an authenticated XFTP command as a padded block. +// Matches xftpEncodeAuthTransmission with implySessId = False: +// sessionId is included in both signed data AND wire data. +export function encodeAuthTransmission( + sessionId: Uint8Array, + corrId: Uint8Array, + entityId: Uint8Array, + cmdBytes: Uint8Array, + privateKey: Uint8Array +): Uint8Array { + // t' = encodeTransmission_ v t = smpEncode (corrId, entityId) <> cmdBytes + const tInner = concatBytes(encodeBytes(corrId), encodeBytes(entityId), cmdBytes) + // tForAuth = smpEncode sessionId <> t' + const tForAuth = concatBytes(encodeBytes(sessionId), tInner) + const signature = sign(privateKey, tForAuth) + const authenticator = encodeBytes(signature) + // implySessId = False: tToSend = tForAuth (sessionId on wire) + const encoded = concatBytes(authenticator, tForAuth) + const batch = concatBytes(new Uint8Array([1]), encodeLarge(encoded)) + return blockPad(batch) +} + +// Encode an unsigned XFTP command (e.g. PING) as a padded block. +// Matches xftpEncodeTransmission with implySessId = False: sessionId on wire. +export function encodeTransmission( + sessionId: Uint8Array, + corrId: Uint8Array, + entityId: Uint8Array, + cmdBytes: Uint8Array +): Uint8Array { + const tInner = concatBytes(encodeBytes(sessionId), encodeBytes(corrId), encodeBytes(entityId), cmdBytes) + // No auth: tEncodeAuth False Nothing = smpEncode B.empty = \x00 + const authenticator = encodeBytes(new Uint8Array(0)) + const encoded = concatBytes(authenticator, tInner) + const batch = concatBytes(new Uint8Array([1]), encodeLarge(encoded)) + return blockPad(batch) +} + +// -- Transmission decoding (server -> client) + +export interface DecodedTransmission { + corrId: Uint8Array + entityId: Uint8Array + command: Uint8Array +} + +// Decode a server response block into raw parts. 
+// Call decodeResponse(command) from commands.ts to parse the response. +// Matches xftpDecodeTClient with implySessId = False: reads and verifies sessionId from wire. +export function decodeTransmission(sessionId: Uint8Array, block: Uint8Array): DecodedTransmission { + const raw = blockUnpad(block) + const d = new Decoder(raw) + const count = d.anyByte() + if (count !== 1) throw new Error("decodeTransmission: expected batch count 1, got " + count) + const transmission = decodeLarge(d) + const td = new Decoder(transmission) + // Skip authenticator (server responses have empty auth) + decodeBytes(td) + // implySessId = False: read sessionId from wire and verify + const sessId = decodeBytes(td) + if (sessId.length !== sessionId.length || !sessId.every((b, i) => b === sessionId[i])) { + console.error('[XFTP] Session ID mismatch in server response') + throw new Error("Session ID mismatch in server response") + } + const corrId = decodeBytes(td) + const entityId = decodeBytes(td) + const command = td.takeAll() + return {corrId, entityId, command} +} diff --git a/xftp-web/test/browser.test.ts b/xftp-web/test/browser.test.ts new file mode 100644 index 0000000000..0596c9b9a2 --- /dev/null +++ b/xftp-web/test/browser.test.ts @@ -0,0 +1,19 @@ +import {test, expect} from 'vitest' +import {encryptFileForUpload, uploadFile, downloadFile, newXFTPAgent, closeXFTPAgent} from '../src/agent.js' +import {parseXFTPServer} from '../src/protocol/address.js' + +const server = parseXFTPServer(import.meta.env.XFTP_SERVER) + +test('browser upload + download round-trip', async () => { + const agent = newXFTPAgent() + try { + const data = new Uint8Array(50000) + crypto.getRandomValues(data) + const encrypted = encryptFileForUpload(data, 'test.bin') + const {rcvDescription} = await uploadFile(agent, [server], encrypted) + const {content} = await downloadFile(agent, rcvDescription) + expect(content).toEqual(data) + } finally { + closeXFTPAgent(agent) + } +}) diff --git 
a/xftp-web/test/connection.node.test.ts b/xftp-web/test/connection.node.test.ts new file mode 100644 index 0000000000..c04b7dd5a2 --- /dev/null +++ b/xftp-web/test/connection.node.test.ts @@ -0,0 +1,164 @@ +import {test, expect, vi, beforeEach} from 'vitest' +import { + newXFTPAgent, getXFTPServerClient, reconnectClient, removeStaleConnection, + sendXFTPCommand, + XFTPRetriableError, XFTPPermanentError, + type XFTPClient, type XFTPClientAgent +} from '../src/client.js' +import {formatXFTPServer, type XFTPServer} from '../src/protocol/address.js' +import {blockPad} from '../src/protocol/transmission.js' +import {concatBytes, encodeBytes, encodeLarge} from '../src/protocol/encoding.js' + +const server: XFTPServer = { + keyHash: new Uint8Array(32), + host: "localhost", + port: "12345" +} +const key = formatXFTPServer(server) + +function makeMockClient(overrides?: Partial): XFTPClient { + return { + baseUrl: "https://localhost:12345", + sessionId: new Uint8Array(32), + xftpVersion: 3, + transport: {post: vi.fn(), close: vi.fn()}, + ...overrides + } +} + +function makeAgent(connectFn: (s: any) => Promise): XFTPClientAgent { + const agent = newXFTPAgent() + agent._connectFn = connectFn + return agent +} + +// T4: getXFTPServerClient coalesces concurrent calls +test('getXFTPServerClient coalesces concurrent calls', async () => { + let resolve_: (v: XFTPClient) => void + const promise = new Promise(r => { resolve_ = r }) + const connectFn = vi.fn(() => promise) + const agent = makeAgent(connectFn) + const p1 = getXFTPServerClient(agent, server) + const p2 = getXFTPServerClient(agent, server) + expect(p1).toBe(p2) // same promise, single connection + expect(connectFn).toHaveBeenCalledTimes(1) + const mockClient = makeMockClient() + resolve_!(mockClient) + expect(await p1).toBe(mockClient) +}) + +// T5: getXFTPServerClient auto-cleans failed connections +test('getXFTPServerClient auto-cleans failed connections', async () => { + const connectFn = vi.fn() + 
.mockImplementationOnce(() => Promise.reject(new Error("down"))) + .mockImplementationOnce(() => Promise.resolve(makeMockClient())) + const agent = makeAgent(connectFn) + const p1 = getXFTPServerClient(agent, server) + await expect(p1).rejects.toThrow("down") + // After microtask, entry is removed + await new Promise(r => setTimeout(r, 0)) + expect(agent.connections.has(key)).toBe(false) + // Next call creates fresh connection + const p2 = getXFTPServerClient(agent, server) + expect(p2).not.toBe(p1) + expect(connectFn).toHaveBeenCalledTimes(2) +}) + +// T6: removeStaleConnection respects promise identity +test('removeStaleConnection respects promise identity', () => { + const agent = newXFTPAgent() + const mockClient1 = makeMockClient() + const mockClient2 = makeMockClient() + const p1 = Promise.resolve(mockClient1) + agent.connections.set(key, {client: p1, queue: Promise.resolve()}) + // Replace with reconnect + const p2 = Promise.resolve(mockClient2) + agent.connections.set(key, {client: p2, queue: Promise.resolve()}) + // removeStaleConnection with old promise does NOT remove new entry + removeStaleConnection(agent, server, p1) + expect(agent.connections.has(key)).toBe(true) + expect(agent.connections.get(key)!.client).toBe(p2) + // removeStaleConnection with current promise removes it + removeStaleConnection(agent, server, p2) + expect(agent.connections.has(key)).toBe(false) +}) + +// T7: reconnectClient replaces promise but preserves queue +test('reconnectClient replaces promise but preserves queue', async () => { + const mockClient2 = makeMockClient() + const connectFn = vi.fn(() => Promise.resolve(mockClient2)) + const agent = makeAgent(connectFn) + const origQueue = Promise.resolve() + agent.connections.set(key, {client: Promise.resolve(makeMockClient()), queue: origQueue}) + reconnectClient(agent, server) + const conn = agent.connections.get(key)! 
+ expect(await conn.client).toBe(mockClient2) // new client + expect(conn.queue).toBe(origQueue) // queue preserved +}) + +// T8: Retry loop — retriable error triggers reconnect, permanent does not +test('retry loop: retriable triggers reconnect, permanent does not', async () => { + const sessionId = new Uint8Array(32) + const dummyKey = new Uint8Array(64) + const dummyId = new Uint8Array(0) + const pingCmd = new TextEncoder().encode("PING") + + // Case 1: Retriable then success — 2 _connectFn calls + const connectFn1 = vi.fn() + .mockImplementationOnce(() => Promise.resolve(makeMockClient({ + sessionId, + transport: { + post: vi.fn().mockRejectedValueOnce(new XFTPRetriableError("SESSION")), + close: vi.fn() + } + }))) + .mockImplementationOnce(() => Promise.resolve(makeMockClient({ + sessionId, + transport: { + post: vi.fn().mockResolvedValueOnce(buildPongResponse(sessionId)), + close: vi.fn() + } + }))) + const agent1 = makeAgent(connectFn1) + const result = await sendXFTPCommand(agent1, server, dummyKey, dummyId, pingCmd) + expect(result.response.type).toBe("FRPong") + expect(connectFn1).toHaveBeenCalledTimes(2) + + // Case 2: All 3 retries exhausted — 3 _connectFn calls + const connectFn2 = vi.fn(() => Promise.resolve(makeMockClient({ + sessionId, + transport: { + post: vi.fn().mockRejectedValue(new XFTPRetriableError("SESSION")), + close: vi.fn() + } + }))) + const agent2 = makeAgent(connectFn2) + await expect(sendXFTPCommand(agent2, server, dummyKey, dummyId, pingCmd)) + .rejects.toThrow(/expired|reconnecting/) + expect(connectFn2).toHaveBeenCalledTimes(3) + + // Case 3: Permanent error — 1 _connectFn call (no reconnect) + const connectFn3 = vi.fn(() => Promise.resolve(makeMockClient({ + sessionId, + transport: { + post: vi.fn().mockRejectedValue(new XFTPPermanentError("AUTH", "expired")), + close: vi.fn() + } + }))) + const agent3 = makeAgent(connectFn3) + await expect(sendXFTPCommand(agent3, server, dummyKey, dummyId, pingCmd)) + .rejects.toThrow(/expired/) 
+ expect(connectFn3).toHaveBeenCalledTimes(1) +}) + +// Helper: build a valid XFTP PONG response block +function buildPongResponse(sessionId: Uint8Array): Uint8Array { + const authenticator = encodeBytes(new Uint8Array(0)) + const sessBytes = encodeBytes(sessionId) + const corrId = encodeBytes(new Uint8Array(0)) + const entityId = encodeBytes(new Uint8Array(0)) + const pong = new TextEncoder().encode("PONG") + const transmission = concatBytes(authenticator, sessBytes, corrId, entityId, pong) + const batch = concatBytes(new Uint8Array([1]), encodeLarge(transmission)) + return blockPad(batch) +} diff --git a/xftp-web/test/errors.test.ts b/xftp-web/test/errors.test.ts new file mode 100644 index 0000000000..980897c83e --- /dev/null +++ b/xftp-web/test/errors.test.ts @@ -0,0 +1,34 @@ +import {test, expect} from 'vitest' +import { + XFTPRetriableError, XFTPPermanentError, + isRetriable, categorizeError, humanReadableMessage +} from '../src/client.js' + +// T1: isRetriable classifies errors correctly +test('isRetriable classifies errors correctly', () => { + // Retriable: + expect(isRetriable(new XFTPRetriableError("SESSION"))).toBe(true) + expect(isRetriable(new XFTPRetriableError("HANDSHAKE"))).toBe(true) + expect(isRetriable(new TypeError("fetch failed"))).toBe(true) + expect(isRetriable(Object.assign(new Error(), {name: "AbortError"}))).toBe(true) + // Not retriable: + expect(isRetriable(new XFTPPermanentError("AUTH", "..."))).toBe(false) + expect(isRetriable(new XFTPPermanentError("NO_FILE", "..."))).toBe(false) + expect(isRetriable(new XFTPPermanentError("INTERNAL", "..."))).toBe(false) + // Unknown errors are not retriable + expect(isRetriable(new Error("random"))).toBe(false) +}) + +// T2: categorizeError produces human-readable messages +test('categorizeError produces human-readable messages', () => { + const e = categorizeError(new XFTPPermanentError("AUTH", "File is invalid, expired, or has been removed")) + expect(e.message).toContain("expired") + // Verify 
every permanent error type maps to a non-empty human-readable message + for (const errType of ["AUTH", "NO_FILE", "SIZE", "QUOTA", "BLOCKED", "DIGEST", "INTERNAL"]) { + expect(humanReadableMessage(errType).length).toBeGreaterThan(0) + } + // Retriable errors also get human-readable messages + const re = categorizeError(new XFTPRetriableError("SESSION")) + expect(re.message).toContain("expired") +}) + diff --git a/xftp-web/test/fixtures.ts b/xftp-web/test/fixtures.ts new file mode 100644 index 0000000000..12e9158877 --- /dev/null +++ b/xftp-web/test/fixtures.ts @@ -0,0 +1,33 @@ +import {test as base} from '@playwright/test' +import {UploadPage} from './pages/upload-page' +import {DownloadPage} from './pages/download-page' + +// Extend Playwright test with page objects +export const test = base.extend<{ + uploadPage: UploadPage + downloadPage: DownloadPage +}>({ + uploadPage: async ({page}, use) => { + const uploadPage = new UploadPage(page) + await uploadPage.goto() + await use(uploadPage) + }, + downloadPage: async ({page}, use) => { + await use(new DownloadPage(page)) + }, +}) + +export {expect} from '@playwright/test' + +// Test data helpers +export function createTestContent(size: number, fill = 0x41): Buffer { + return Buffer.alloc(size, fill) +} + +export function createTextContent(text: string): Buffer { + return Buffer.from(text, 'utf-8') +} + +export function uniqueFileName(base: string, ext = 'txt'): string { + return `${base}-${Date.now()}.${ext}` +} diff --git a/xftp-web/test/globalSetup.ts b/xftp-web/test/globalSetup.ts new file mode 100644 index 0000000000..2d2b3a6e3e --- /dev/null +++ b/xftp-web/test/globalSetup.ts @@ -0,0 +1,181 @@ +import {spawn, ChildProcess} from 'child_process' +import {createHash} from 'crypto' +import {createConnection, createServer} from 'net' +import {resolve, join, dirname} from 'path' +import {fileURLToPath} from 'url' +import {readFileSync, mkdtempSync, writeFileSync, copyFileSync, existsSync, unlinkSync, openSync} from 
'fs' +import {tmpdir} from 'os' + +const __filename = fileURLToPath(import.meta.url) +const __dirname = dirname(__filename) + +const LOCK_FILE = join(tmpdir(), 'xftp-test-server.lock') +const SERVER_PID_FILE = join(tmpdir(), 'xftp-test-server.pid') +export const PORT_FILE = join(tmpdir(), 'xftp-test-server.port') + +// Find a free port by binding to port 0 +function findFreePort(): Promise { + return new Promise((resolve, reject) => { + const srv = createServer() + srv.listen(0, '127.0.0.1', () => { + const addr = srv.address() + if (addr && typeof addr === 'object') { + const port = addr.port + srv.close(() => resolve(port)) + } else { + srv.close(() => reject(new Error('Could not get port'))) + } + }) + srv.on('error', reject) + }) +} + +let server: ChildProcess | null = null +let isOwner = false + +async function setup() { + // Kill any stale server from a previous run (negative PID kills process group) + if (existsSync(SERVER_PID_FILE)) { + try { + const serverPid = parseInt(readFileSync(SERVER_PID_FILE, 'utf-8').trim(), 10) + process.kill(-serverPid, 'SIGTERM') + await new Promise(r => setTimeout(r, 500)) + } catch (_) {} + try { unlinkSync(LOCK_FILE) } catch (_) {} + try { unlinkSync(SERVER_PID_FILE) } catch (_) {} + try { unlinkSync(PORT_FILE) } catch (_) {} + } + + // Find a free port dynamically + const xftpPort = await findFreePort() + + writeFileSync(LOCK_FILE, String(process.pid)) + writeFileSync(PORT_FILE, String(xftpPort)) + isOwner = true + + const fixtures = resolve(__dirname, '../../tests/fixtures') + + // Create temp directories + const cfgDir = mkdtempSync(join(tmpdir(), 'xftp-cfg-')) + const logDir = mkdtempSync(join(tmpdir(), 'xftp-log-')) + const filesDir = mkdtempSync(join(tmpdir(), 'xftp-files-')) + + // Copy certificates to cfgDir (xftp-server expects ca.crt, server.key, server.crt there) + copyFileSync(join(fixtures, 'ca.crt'), join(cfgDir, 'ca.crt')) + copyFileSync(join(fixtures, 'server.key'), join(cfgDir, 'server.key')) + 
copyFileSync(join(fixtures, 'server.crt'), join(cfgDir, 'server.crt')) + + // Write fingerprint file (checkSavedFingerprint reads this on startup) + // Fingerprint = SHA-256 of DER-encoded certificate (not PEM) + const pem = readFileSync(join(fixtures, 'ca.crt'), 'utf-8') + const der = Buffer.from(pem.replace(/-----[^-]+-----/g, '').replace(/\s/g, ''), 'base64') + const fp = createHash('sha256').update(der).digest('base64').replace(/\+/g, '-').replace(/\//g, '_') + writeFileSync(join(cfgDir, 'fingerprint'), fp + '\n') + + // Write INI config file + const iniContent = `[STORE_LOG] +enable: off + +[TRANSPORT] +host: localhost +port: ${xftpPort} + +[FILES] +path: ${filesDir} + +[WEB] +cert: ${join(fixtures, 'web.crt')} +key: ${join(fixtures, 'web.key')} +` + writeFileSync(join(cfgDir, 'file-server.ini'), iniContent) + + // Redirect server stderr to file so logs survive after setup exits + const serverLogPath = join(tmpdir(), 'xftp-test-server.log') + const stderrFd = openSync(serverLogPath, 'w') + console.log('[runSetup] Server log:', serverLogPath) + + // Spawn via cabal run to always use freshly built code + server = spawn('cabal', ['run', 'xftp-server', '--', 'start'], { + env: { + ...process.env, + XFTP_SERVER_CFG_PATH: cfgDir, + XFTP_SERVER_LOG_PATH: logDir + }, + stdio: ['ignore', 'ignore', stderrFd], + detached: true + }) + + // Poll-connect until the server is actually listening + await waitForServerReady(server, xftpPort) + + // Store server PID for teardown + writeFileSync(SERVER_PID_FILE, String(server.pid)) + + server.unref() +} + +export async function teardown() { + // Kill the xftp-server process group if it's running + if (existsSync(SERVER_PID_FILE)) { + try { + const serverPid = parseInt(readFileSync(SERVER_PID_FILE, 'utf-8').trim(), 10) + process.kill(-serverPid, 'SIGTERM') + // Wait a bit for graceful shutdown + await new Promise(r => setTimeout(r, 500)) + } catch (_) { + // Server already dead + } + } + // Clean up files + try { 
unlinkSync(LOCK_FILE) } catch (_) {} + try { unlinkSync(SERVER_PID_FILE) } catch (_) {} + try { unlinkSync(PORT_FILE) } catch (_) {} +} + +function waitForServerReady(proc: ChildProcess, port: number): Promise { + return new Promise((resolve, reject) => { + let settled = false + const timeout = setTimeout(() => { + settled = true + reject(new Error('Server start timeout')) + }, 15000) + const settle = (fn: () => void) => { if (!settled) { settled = true; clearTimeout(timeout); fn() } } + proc.on('error', (e) => settle(() => reject(e))) + proc.on('exit', (code) => { + if (code !== 0) settle(() => reject(new Error(`Server exited with code ${code}`))) + }) + // printXFTPConfig prints "Listening on port" BEFORE bind, so poll-connect + const poll = () => { + if (settled) return + const sock = createConnection({port, host: 'localhost'}, () => { + sock.destroy() + settle(() => resolve()) + }) + sock.on('error', () => { + sock.destroy() + setTimeout(poll, 100) + }) + } + setTimeout(poll, 200) + }) +} + +function waitForPort(port: number): Promise { + return new Promise((resolve, reject) => { + const deadline = Date.now() + 15000 + const poll = () => { + if (Date.now() > deadline) return reject(new Error('Timed out waiting for server')) + const sock = createConnection({port, host: 'localhost'}, () => { + sock.destroy() + resolve() + }) + sock.on('error', () => { + sock.destroy() + setTimeout(poll, 100) + }) + } + poll() + }) +} + +export default setup diff --git a/xftp-web/test/globalTeardown.ts b/xftp-web/test/globalTeardown.ts new file mode 100644 index 0000000000..9fc3f41a9b --- /dev/null +++ b/xftp-web/test/globalTeardown.ts @@ -0,0 +1,3 @@ +import {teardown as teardownFn} from './globalSetup' + +export default teardownFn diff --git a/xftp-web/test/page.spec.ts b/xftp-web/test/page.spec.ts new file mode 100644 index 0000000000..95d0cc3760 --- /dev/null +++ b/xftp-web/test/page.spec.ts @@ -0,0 +1,378 @@ +import {test, expect, createTestContent, createTextContent} from 
'./fixtures' +import {UploadPage} from './pages/upload-page' +import {DownloadPage} from './pages/download-page' +import {readFileSync, statSync} from 'fs' +import {createHash} from 'crypto' + +// ───────────────────────────────────────────────────────────────────────────── +// Upload Flow Tests +// ───────────────────────────────────────────────────────────────────────────── + +test.describe('Upload Flow', () => { + test('upload via file picker button', async ({uploadPage}) => { + await uploadPage.expectDropZoneVisible() + + await uploadPage.selectTextFile('picker-test.txt', 'test content ' + Date.now()) + await uploadPage.waitForEncrypting() + await uploadPage.waitForUploading() + + const link = await uploadPage.waitForShareLink() + expect(link).toMatch(/^http:\/\/localhost:\d+\/?#/) + }) + + test('upload via file input (drag-drop code path)', async ({uploadPage}) => { + // Tests file handling logic - the drop handler uses the same input processing + await uploadPage.dragDropFile('dragdrop-test.txt', createTextContent('drag drop test')) + await uploadPage.expectProgressVisible() + + const link = await uploadPage.waitForShareLink() + expect(link).toContain('#') + }) + + test('upload rejects file over 100MB', async ({uploadPage}) => { + await uploadPage.selectLargeFile('large.bin', 100 * 1024 * 1024 + 1) + await uploadPage.expectError('too large') + await uploadPage.expectError('100 MB') + }) + + test('upload rejects empty file', async ({uploadPage}) => { + await uploadPage.selectFile('empty.txt', Buffer.alloc(0)) + await uploadPage.expectError('empty') + }) + + test('upload shows progress during encryption and upload', async ({uploadPage}) => { + await uploadPage.selectFile('progress-test.bin', createTestContent(500 * 1024)) + + await uploadPage.expectProgressVisible() + await uploadPage.waitForEncrypting() + await uploadPage.waitForUploading() + await uploadPage.waitForShareLink() + }) + + test('cancel button aborts upload and returns to landing', async 
({uploadPage}) => { + await uploadPage.selectFile('cancel-test.bin', createTestContent(1024 * 1024)) + await uploadPage.expectProgressVisible() + + await uploadPage.clickCancel() + + await uploadPage.expectDropZoneVisible() + await uploadPage.expectShareLinkNotVisible() + }) + + test('share link copy button works', async ({uploadPage, context}) => { + await context.grantPermissions(['clipboard-read', 'clipboard-write']) + + await uploadPage.selectTextFile('copy-test.txt', 'copy test content') + const link = await uploadPage.waitForShareLink() + + await uploadPage.clickCopy() + + // Verify clipboard (may fail in headless) + try { + const clipboardText = await uploadPage.page.evaluate(() => navigator.clipboard.readText()) + expect(clipboardText).toBe(link) + } catch { + // Clipboard API may not be available in headless mode + } + }) + + test('error state shows retry button', async ({uploadPage}) => { + await uploadPage.selectFile('error-test.txt', Buffer.alloc(0)) + await uploadPage.expectError('empty') + await uploadPage.expectRetryButtonVisible() + }) + + test('upload complete shows expiry and security note', async ({uploadPage}) => { + await uploadPage.selectTextFile('ui-test.txt', 'ui test') + await uploadPage.waitForShareLink() + + await uploadPage.expectCompleteWithExpiry() + await uploadPage.expectSecurityNote() + }) +}) + +// ───────────────────────────────────────────────────────────────────────────── +// Download Flow Tests +// ───────────────────────────────────────────────────────────────────────────── + +test.describe('Download Flow', () => { + test('download shows error for malformed hash', async ({downloadPage}) => { + await downloadPage.goto('#not-valid-base64!!!') + await downloadPage.expectInitialError(/[Ii]nvalid|corrupted/) + await downloadPage.expectDownloadButtonNotVisible() + }) + + test('download shows error for invalid structure', async ({downloadPage}) => { + await downloadPage.goto('#AAAA') + await 
downloadPage.expectInitialError(/[Ii]nvalid|corrupted/) + }) + + test('download button initiates download', async ({uploadPage, downloadPage}) => { + await uploadPage.selectTextFile('dl-btn-test.txt', 'download test content') + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + await downloadPage.expectFileReady() + + const download = await downloadPage.clickDownload() + expect(download.suggestedFilename()).toBe('dl-btn-test.txt') + }) + + test('download shows progress', async ({uploadPage, downloadPage}) => { + // Use larger file to ensure progress is observable + await uploadPage.selectFile('dl-progress.bin', createTestContent(1024 * 1024)) + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + + // Set up download listener before clicking + const downloadPromise = downloadPage.page.waitForEvent('download') + + // Click starts download - progress should be visible while downloading + await downloadPage.startDownload() + await downloadPage.expectProgressVisible() + + await downloadPromise + }) + + test('downloaded file content matches upload', async ({uploadPage, downloadPage}) => { + const content = 'verification content ' + Date.now() + const fileName = 'verify.txt' + + await uploadPage.selectTextFile(fileName, content) + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + const download = await downloadPage.clickDownload() + + expect(download.suggestedFilename()).toBe(fileName) + + const path = await download.path() + if (path) { + const downloadedContent = readFileSync(path, 'utf-8') + expect(downloadedContent).toBe(content) + } + }) + + test('download page shows file size and security note', async ({uploadPage, downloadPage}) => { + await uploadPage.selectFile('size-test.bin', createTestContent(1024)) + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + await downloadPage.expectFileSizeDisplayed() 
+ await downloadPage.expectSecurityNote() + }) +}) + +// ───────────────────────────────────────────────────────────────────────────── +// Edge Cases +// ───────────────────────────────────────────────────────────────────────────── + +test.describe('Edge Cases', () => { + test('upload and download 1-byte file', async ({uploadPage, downloadPage}) => { + await uploadPage.selectFile('tiny.bin', Buffer.from([0x42])) + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + const download = await downloadPage.clickDownload() + + expect(download.suggestedFilename()).toBe('tiny.bin') + + const path = await download.path() + if (path) { + const content = readFileSync(path) + expect(content.length).toBe(1) + expect(content[0]).toBe(0x42) + } + }) + + test('upload and download file with unicode filename', async ({uploadPage, downloadPage}) => { + const fileName = 'test-\u4e2d\u6587-\u0420\u0443\u0441\u0441\u043a\u0438\u0439.txt' + + await uploadPage.selectTextFile(fileName, 'unicode filename test') + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + const download = await downloadPage.clickDownload() + + // Browser download attribute uses encodeURIComponent for non-ASCII filenames + expect(download.suggestedFilename()).toBe(encodeURIComponent(fileName)) + }) + + test('upload and download file with spaces', async ({uploadPage, downloadPage}) => { + const fileName = 'my document (final) v2.txt' + + await uploadPage.selectTextFile(fileName, 'spaces test') + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + const download = await downloadPage.clickDownload() + + // Browser download attribute uses encodeURIComponent for the filename + expect(download.suggestedFilename()).toBe(encodeURIComponent(fileName)) + }) + + test('filename with path separators is sanitized', async ({uploadPage, downloadPage}) => { + await uploadPage.selectTextFile('../../../etc/passwd', 
'path traversal test') + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + const download = await downloadPage.clickDownload() + + expect(download.suggestedFilename()).not.toContain('/') + expect(download.suggestedFilename()).not.toContain('\\') + }) + + test('binary file with all byte values', async ({uploadPage, downloadPage}) => { + // Create buffer with all 256 byte values + const buffer = Buffer.alloc(256) + for (let i = 0; i < 256; i++) buffer[i] = i + + await uploadPage.selectFile('all-bytes.bin', buffer) + const link = await uploadPage.waitForShareLink() + + await downloadPage.gotoWithLink(link) + const download = await downloadPage.clickDownload() + + const path = await download.path() + if (path) { + const content = readFileSync(path) + expect(content.length).toBe(256) + for (let i = 0; i < 256; i++) { + expect(content[i]).toBe(i) + } + } + }) + + test('upload handles network error gracefully', async ({uploadPage}) => { + // Intercept and abort server requests after encryption starts + await uploadPage.page.route('**/*', route => { + const url = route.request().url() + // Only abort XFTP server requests (HTTPS), not the web page (HTTP) + if (url.startsWith('https://') && route.request().method() !== 'GET') { + route.abort('failed') + } else { + route.continue() + } + }) + + await uploadPage.selectTextFile('network-error.txt', 'network error test') + await uploadPage.expectError(/.+/) // Any error message + }) + + test('concurrent downloads from same link', async ({browser}) => { + const context = await browser.newContext({ignoreHTTPSErrors: true}) + const page1 = await context.newPage() + const upload = new UploadPage(page1) + + await upload.goto() + await upload.selectTextFile('concurrent.txt', 'concurrent download test') + const link = await upload.waitForShareLink() + const hash = upload.getHashFromLink(link) + + // Open two tabs and download concurrently (shared HTTP/2 connection) + const page2 = await 
context.newPage() + const page3 = await context.newPage() + const dl2 = new DownloadPage(page2) + const dl3 = new DownloadPage(page3) + + await dl2.goto(hash) + await dl3.goto(hash) + + const [download2, download3] = await Promise.all([ + dl2.clickDownload(), + dl3.clickDownload() + ]) + + expect(download2.suggestedFilename()).toBe('concurrent.txt') + expect(download3.suggestedFilename()).toBe('concurrent.txt') + + await context.close() + }) +}) + +// ───────────────────────────────────────────────────────────────────────────── +// Slow Tests (Large Files) +// ───────────────────────────────────────────────────────────────────────────── + +test.describe('Slow Tests', () => { + test('upload file at exactly 100MB', async ({uploadPage}) => { + test.slow() + await uploadPage.selectLargeFile('exactly-100mb.bin', 100 * 1024 * 1024) + + // Should succeed (not show error) + await uploadPage.expectNoError() + await uploadPage.expectProgressVisible() + + // Wait for completion (may take a while) + await uploadPage.waitForShareLink(300_000) + }) + + test('upload and download multi-chunk file with redirect', async ({uploadPage, downloadPage}) => { + test.slow() + // Use ~5MB file to get multiple chunks + await uploadPage.selectLargeFile('multi-chunk.bin', 5 * 1024 * 1024) + const link = await uploadPage.waitForShareLink(120_000) + + await downloadPage.gotoWithLink(link) + const download = await downloadPage.clickDownload() + + expect(download.suggestedFilename()).toBe('multi-chunk.bin') + + const path = await download.path() + if (path) { + const stat = statSync(path) + expect(stat.size).toBe(5 * 1024 * 1024) + } + }) + + // Disabled by default — run with: npx playwright test -g "100MB" + test.skip('upload and download 100MB random file', async ({browser}) => { + test.setTimeout(600_000) // 10 minutes + + const SIZE = 100 * 1024 * 1024 + + const context = await browser.newContext({ignoreHTTPSErrors: true}) + const page = await context.newPage() + const upload = new 
UploadPage(page) + await upload.goto() + + // Generate random content in browser and compute SHA-256 + const uploadHash = await page.evaluate(async (size: number) => { + const buffer = new ArrayBuffer(size) + const view = new Uint8Array(buffer) + for (let off = 0; off < size; off += 65536) { + crypto.getRandomValues(view.subarray(off, Math.min(off + 65536, size))) + } + const hashBuf = await crypto.subtle.digest('SHA-256', buffer) + const hash = Array.from(new Uint8Array(hashBuf)).map(b => b.toString(16).padStart(2, '0')).join('') + const input = document.getElementById('file-input') as HTMLInputElement + const file = new File([buffer], 'large-100mb.bin', {type: 'application/octet-stream'}) + const dt = new DataTransfer() + dt.items.add(file) + input.files = dt.files + input.dispatchEvent(new Event('change', {bubbles: true})) + return hash + }, SIZE) + + const link = await upload.waitForShareLink(300_000) + + const dlPage = await context.newPage() + const dl = new DownloadPage(dlPage) + await dl.gotoWithLink(link) + const download = await dl.clickDownload() + + expect(download.suggestedFilename()).toBe('large-100mb.bin') + + const path = await download.path() + expect(path).toBeTruthy() + + const stat = statSync(path!) 
+ expect(stat.size).toBe(SIZE) + + const downloadHash = createHash('sha256').update(readFileSync(path!)).digest('hex') + expect(downloadHash).toBe(uploadHash) + + await context.close() + }) +}) diff --git a/xftp-web/test/pages/download-page.ts b/xftp-web/test/pages/download-page.ts new file mode 100644 index 0000000000..af4abf9522 --- /dev/null +++ b/xftp-web/test/pages/download-page.ts @@ -0,0 +1,89 @@ +import {Page, Locator, expect, Download} from '@playwright/test' + +export class DownloadPage { + readonly page: Page + readonly readyStage: Locator + readonly downloadButton: Locator + readonly progressStage: Locator + readonly progressCanvas: Locator + readonly statusText: Locator + readonly errorStage: Locator + readonly errorMessage: Locator + readonly retryButton: Locator + readonly securityNote: Locator + + constructor(page: Page) { + this.page = page + this.readyStage = page.locator('#dl-ready') + this.downloadButton = page.locator('#dl-btn') + this.progressStage = page.locator('#dl-progress') + this.progressCanvas = page.locator('#dl-progress-container canvas') + this.statusText = page.locator('#dl-status') + this.errorStage = page.locator('#dl-error') + this.errorMessage = page.locator('#dl-error-msg') + this.retryButton = page.locator('#dl-retry-btn') + this.securityNote = page.locator('.security-note') + } + + async goto(hash: string) { + await this.page.goto(`http://localhost:4173${hash}`) + } + + async gotoWithLink(fullUrl: string) { + const hash = new URL(fullUrl).hash + await this.goto(hash) + } + + async expectFileReady() { + await expect(this.readyStage).toBeVisible() + await expect(this.downloadButton).toBeVisible() + } + + async expectFileSizeDisplayed() { + await expect(this.readyStage).toContainText(/\d+(?:\.\d+)?\s*(?:KB|MB|B)/) + } + + async clickDownload(): Promise { + const downloadPromise = this.page.waitForEvent('download') + await this.downloadButton.click() + return downloadPromise + } + + async startDownload() { + await 
this.downloadButton.click() + } + + async waitForDownloading(timeout = 30_000) { + await expect(this.statusText).toContainText('Downloading', {timeout}) + } + + async waitForDecrypting(timeout = 30_000) { + await expect(this.statusText).toContainText('Decrypting', {timeout}) + } + + async expectProgressVisible() { + await expect(this.progressStage).toBeVisible() + await expect(this.progressCanvas).toBeVisible() + } + + async expectInitialError(messagePattern: string | RegExp) { + // For malformed links - error shown in card without #dl-error stage + await expect(this.page.locator('.card .error')).toBeVisible() + await expect(this.page.locator('.card .error')).toContainText(messagePattern) + } + + async expectRuntimeError(messagePattern: string | RegExp) { + // For runtime download errors - uses #dl-error stage + await expect(this.errorStage).toBeVisible() + await expect(this.errorMessage).toContainText(messagePattern) + } + + async expectSecurityNote() { + await expect(this.securityNote).toBeVisible() + await expect(this.securityNote).toContainText('encrypted') + } + + async expectDownloadButtonNotVisible() { + await expect(this.downloadButton).not.toBeVisible() + } +} diff --git a/xftp-web/test/pages/index.ts b/xftp-web/test/pages/index.ts new file mode 100644 index 0000000000..513ce4c673 --- /dev/null +++ b/xftp-web/test/pages/index.ts @@ -0,0 +1,2 @@ +export {UploadPage} from './upload-page' +export {DownloadPage} from './download-page' diff --git a/xftp-web/test/pages/upload-page.ts b/xftp-web/test/pages/upload-page.ts new file mode 100644 index 0000000000..3eac122b66 --- /dev/null +++ b/xftp-web/test/pages/upload-page.ts @@ -0,0 +1,136 @@ +import {Page, Locator, expect} from '@playwright/test' + +export class UploadPage { + readonly page: Page + readonly dropZone: Locator + readonly fileInput: Locator + readonly progressStage: Locator + readonly progressCanvas: Locator + readonly statusText: Locator + readonly cancelButton: Locator + readonly completeStage: 
Locator + readonly shareLink: Locator + readonly copyButton: Locator + readonly errorStage: Locator + readonly errorMessage: Locator + readonly retryButton: Locator + readonly expiryNote: Locator + readonly securityNote: Locator + + constructor(page: Page) { + this.page = page + this.dropZone = page.locator('#drop-zone') + this.fileInput = page.locator('#file-input') + this.progressStage = page.locator('#upload-progress') + this.progressCanvas = page.locator('#progress-container canvas') + this.statusText = page.locator('#upload-status') + this.cancelButton = page.locator('#cancel-btn') + this.completeStage = page.locator('#upload-complete') + this.shareLink = page.locator('[data-testid="share-link"]') + this.copyButton = page.locator('#copy-btn') + this.errorStage = page.locator('#upload-error') + this.errorMessage = page.locator('#error-msg') + this.retryButton = page.locator('#retry-btn') + this.expiryNote = page.locator('.expiry') + this.securityNote = page.locator('.security-note') + } + + async goto() { + await this.page.goto('http://localhost:4173') + } + + async selectFile(name: string, content: Buffer, mimeType = 'application/octet-stream') { + await this.fileInput.setInputFiles({name, mimeType, buffer: content}) + } + + async selectTextFile(name: string, content: string) { + await this.selectFile(name, Buffer.from(content, 'utf-8'), 'text/plain') + } + + async selectLargeFile(name: string, sizeBytes: number) { + // Create large file in browser to avoid memory issues in test process + await this.page.evaluate(({name, size}) => { + const input = document.getElementById('file-input') as HTMLInputElement + const buffer = new ArrayBuffer(size) + new Uint8Array(buffer).fill(0x55) + const file = new File([buffer], name, {type: 'application/octet-stream'}) + const dt = new DataTransfer() + dt.items.add(file) + input.files = dt.files + input.dispatchEvent(new Event('change', {bubbles: true})) + }, {name, size: sizeBytes}) + } + + async dragDropFile(name: string, 
content: Buffer) { + // Note: True drag-drop simulation is complex in Playwright. The app's drop handler + // dispatches a 'change' event on the file input, so setting input files triggers + // the same code path. This tests the file handling logic, not the DnD UI events. + await this.selectFile(name, content) + } + + async waitForEncrypting(timeout = 10_000) { + await expect(this.statusText).toContainText('Encrypting', {timeout}) + } + + async waitForUploading(timeout = 30_000) { + await expect(this.statusText).toContainText('Uploading', {timeout}) + } + + async waitForShareLink(timeout = 60_000): Promise { + await expect(this.shareLink).toBeVisible({timeout}) + return await this.shareLink.inputValue() + } + + async clickCopy() { + await this.copyButton.click() + await expect(this.copyButton).toContainText('Copied!') + } + + async clickCancel() { + await this.cancelButton.click() + } + + async clickRetry() { + await this.retryButton.click() + } + + async expectError(messagePattern: string | RegExp) { + await expect(this.errorStage).toBeVisible() + await expect(this.errorMessage).toContainText(messagePattern) + } + + async expectDropZoneVisible() { + await expect(this.dropZone).toBeVisible() + } + + async expectProgressVisible() { + await expect(this.progressStage).toBeVisible() + await expect(this.progressCanvas).toBeVisible() + } + + async expectCompleteWithExpiry() { + await expect(this.completeStage).toBeVisible() + await expect(this.expiryNote).toContainText('48 hours') + } + + async expectSecurityNote() { + await expect(this.securityNote).toBeVisible() + await expect(this.securityNote).toContainText('encrypted') + } + + async expectRetryButtonVisible() { + await expect(this.retryButton).toBeVisible() + } + + async expectShareLinkNotVisible() { + await expect(this.shareLink).not.toBeVisible() + } + + async expectNoError(timeout = 5000) { + await expect(this.errorStage).not.toBeVisible({timeout}) + } + + getHashFromLink(url: string): string { + return new 
URL(url).hash + } +} diff --git a/xftp-web/test/runSetup.ts b/xftp-web/test/runSetup.ts new file mode 100644 index 0000000000..6acfad5713 --- /dev/null +++ b/xftp-web/test/runSetup.ts @@ -0,0 +1,5 @@ +// Helper script to run globalSetup synchronously before vite build +import setup from './globalSetup.js' + +await setup() +console.log('[runSetup] Setup complete') diff --git a/xftp-web/tsconfig.json b/xftp-web/tsconfig.json new file mode 100644 index 0000000000..e42e74895d --- /dev/null +++ b/xftp-web/tsconfig.json @@ -0,0 +1,19 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "ES2022", + "moduleResolution": "node", + "lib": ["ES2022"], + "outDir": "dist", + "rootDir": "src", + "declaration": true, + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "sourceMap": true + }, + "include": ["src/**/*.ts"], + "exclude": ["node_modules", "dist", "test"] +} diff --git a/xftp-web/tsconfig.web.json b/xftp-web/tsconfig.web.json new file mode 100644 index 0000000000..476d40b5e5 --- /dev/null +++ b/xftp-web/tsconfig.web.json @@ -0,0 +1,12 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "rootDir": ".", + "noEmit": true, + "types": [], + "moduleResolution": "bundler", + "lib": ["ES2022", "DOM"] + }, + "include": ["web/**/*.ts", "src/**/*.ts"], + "exclude": ["web/crypto.worker.ts"] +} diff --git a/xftp-web/tsconfig.worker.json b/xftp-web/tsconfig.worker.json new file mode 100644 index 0000000000..0335541dc9 --- /dev/null +++ b/xftp-web/tsconfig.worker.json @@ -0,0 +1,11 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "rootDir": ".", + "noEmit": true, + "types": [], + "moduleResolution": "bundler", + "lib": ["ES2022", "WebWorker"] + }, + "include": ["web/crypto.worker.ts", "src/**/*.ts"] +} diff --git a/xftp-web/vite.config.ts b/xftp-web/vite.config.ts new file mode 100644 index 0000000000..539ff18c1b --- /dev/null +++ b/xftp-web/vite.config.ts @@ 
-0,0 +1,103 @@ +import {defineConfig, type Plugin} from 'vite' +import {readFileSync} from 'fs' +import {createHash} from 'crypto' +import {resolve, join} from 'path' +import {tmpdir} from 'os' +import presets from './web/servers.json' + +const PORT_FILE = join(tmpdir(), 'xftp-test-server.port') +const FIXTURES = resolve(import.meta.dirname, '../tests/fixtures') + +const __dirname = import.meta.dirname + +function parseHost(addr: string): string { + const m = addr.match(/@(.+)$/) + if (!m) throw new Error('bad server address: ' + addr) + const host = m[1].split(',')[0] + return host.includes(':') ? host : host + ':443' +} + +function cspPlugin(servers: string[], isDev: boolean): Plugin { + const origins = servers.map(s => 'https://' + parseHost(s)).join(' ') + return { + name: 'csp-connect-src', + transformIndexHtml: { + order: 'pre', + handler(html) { + if (isDev) { + return html.replace(/]*?Content-Security-Policy[\s\S]*?>/i, '') + } + return html.replace('__CSP_CONNECT_SRC__', origins) + } + } + } +} + +// Compute fingerprint from ca.crt (SHA-256 of DER) +function getFingerprint(): string { + const pem = readFileSync(join(FIXTURES, 'ca.crt'), 'utf-8') + const der = Buffer.from(pem.replace(/-----[^-]+-----/g, '').replace(/\s/g, ''), 'base64') + return createHash('sha256').update(der).digest('base64') + .replace(/\+/g, '-').replace(/\//g, '_') +} + +// Plugin to inject __XFTP_SERVERS__ lazily (reads PORT_FILE written by test/runSetup.ts) +function xftpServersPlugin(): Plugin { + let serverAddr: string | null = null + const fp = getFingerprint() + return { + name: 'xftp-servers-define', + transform(code, _id) { + if (!code.includes('__XFTP_SERVERS__')) return null + if (!serverAddr) { + const port = readFileSync(PORT_FILE, 'utf-8').trim() + serverAddr = `xftp://${fp}@localhost:${port}` + } + return code.replace(/__XFTP_SERVERS__/g, JSON.stringify([serverAddr])) + } + } +} + +export default defineConfig(({mode}) => { + const define: Record = {} + let servers: 
string[] + const plugins: Plugin[] = [] + + if (mode === 'development') { + // In development mode, use the test server (port from globalSetup) + plugins.push(xftpServersPlugin()) + define['__XFTP_PROXY_PORT__'] = JSON.stringify(null) + // For CSP plugin, use localhost placeholder (CSP stripped in dev server anyway) + servers = ['xftp://fp@localhost:443'] + } else { + // In production mode, use the preset servers + servers = [...presets.simplex, ...presets.flux] + define['__XFTP_SERVERS__'] = JSON.stringify(servers) + define['__XFTP_PROXY_PORT__'] = JSON.stringify(null) + } + + plugins.push(cspPlugin(servers, mode === 'development')) + + const httpsConfig = mode === 'development' ? { + key: readFileSync(join(FIXTURES, 'web.key')), + cert: readFileSync(join(FIXTURES, 'web.crt')), + } : undefined + + return { + root: 'web', + build: { + outDir: resolve(__dirname, 'dist-web'), + emptyOutDir: true, + target: 'esnext', + chunkSizeWarningLimit: 1200, + rollupOptions: { + external: ['node:http2', 'url'], + }, + }, + server: httpsConfig ? 
{https: httpsConfig} : {}, + preview: {host: true, https: false}, + define, + worker: {format: 'es' as const, rollupOptions: {external: ['node:http2', 'url']}}, + plugins, + } +}) diff --git a/xftp-web/vitest.config.ts b/xftp-web/vitest.config.ts new file mode 100644 index 0000000000..8f7b71327a --- /dev/null +++ b/xftp-web/vitest.config.ts @@ -0,0 +1,42 @@ +import {defineConfig, type Plugin} from 'vitest/config' +import {readFileSync} from 'fs' +import {createHash} from 'crypto' +import {PORT_FILE} from './test/globalSetup' + +// Compute fingerprint from ca.crt (SHA-256 of DER, same as Haskell's loadFileFingerprint) +const pem = readFileSync('../tests/fixtures/ca.crt', 'utf-8') +const der = Buffer.from(pem.replace(/-----[^-]+-----/g, '').replace(/\s/g, ''), 'base64') +const fingerprint = createHash('sha256').update(der).digest('base64').replace(/\+/g, '-').replace(/\//g, '_') + +// Plugin to inject XFTP_SERVER at transform time (after globalSetup writes PORT_FILE) +function xftpServerPlugin(): Plugin { + let serverAddr: string | null = null + return { + name: 'xftp-server-define', + transform(code, id) { + if (!code.includes('import.meta.env.XFTP_SERVER')) return null + if (!serverAddr) { + const port = readFileSync(PORT_FILE, 'utf-8').trim() + serverAddr = `xftp://${fingerprint}@localhost:${port}` + } + return code.replace(/import\.meta\.env\.XFTP_SERVER/g, JSON.stringify(serverAddr)) + } + } +} + +export default defineConfig({ + esbuild: {target: 'esnext'}, + optimizeDeps: {esbuildOptions: {target: 'esnext'}}, + plugins: [xftpServerPlugin()], + test: { + include: ['test/**/*.test.ts'], + exclude: ['test/**/*.node.test.ts'], + browser: { + enabled: true, + provider: 'playwright', + instances: [{browser: 'chromium'}], + headless: true + }, + globalSetup: './test/globalSetup.ts' + } +}) diff --git a/xftp-web/vitest.node.config.ts b/xftp-web/vitest.node.config.ts new file mode 100644 index 0000000000..bb90990dd5 --- /dev/null +++ b/xftp-web/vitest.node.config.ts @@ 
-0,0 +1,9 @@ +import {defineConfig} from 'vitest/config' + +export default defineConfig({ + esbuild: {target: 'esnext'}, + test: { + include: ['test/**/*.node.test.ts'], + testTimeout: 30000 + } +}) diff --git a/xftp-web/web/crypto-backend.ts b/xftp-web/web/crypto-backend.ts new file mode 100644 index 0000000000..e1ed52b846 --- /dev/null +++ b/xftp-web/web/crypto-backend.ts @@ -0,0 +1,140 @@ +import type {FileHeader} from '../src/crypto/file.js' + +export interface CryptoBackend { + encrypt(data: Uint8Array, fileName: string, + onProgress?: (done: number, total: number) => void + ): Promise + readChunk(offset: number, size: number): Promise + decryptAndStoreChunk( + dhSecret: Uint8Array, nonce: Uint8Array, + body: Uint8Array, digest: Uint8Array, chunkNo: number + ): Promise + verifyAndDecrypt(params: {size: number, digest: Uint8Array, key: Uint8Array, nonce: Uint8Array} + ): Promise<{header: FileHeader, content: Uint8Array}> + cleanup(): Promise +} + +export interface EncryptResult { + digest: Uint8Array + key: Uint8Array + nonce: Uint8Array + chunkSizes: number[] +} + +type PendingRequest = {resolve: (value: any) => void, reject: (reason: any) => void} + +class WorkerBackend implements CryptoBackend { + private worker: Worker + private pending = new Map() + private nextId = 1 + private progressCb: ((done: number, total: number) => void) | null = null + private ready: Promise + + constructor() { + this.worker = new Worker(new URL('./crypto.worker.ts', import.meta.url), {type: 'module'}) + let rejectReady: (e: Error) => void + this.ready = new Promise((resolve, reject) => { + rejectReady = reject + this.worker.onmessage = (e) => { + if (e.data?.type === 'ready') { + this.worker.onmessage = (e) => this.handleMessage(e.data) + resolve() + } else { + reject(new Error('Worker: unexpected first message')) + } + } + }) + this.worker.onerror = (e) => { + rejectReady(new Error('Worker failed to load: ' + e.message)) + for (const p of this.pending.values()) p.reject(new 
Error('Worker error: ' + e.message)) + this.pending.clear() + } + } + + private handleMessage(msg: {id: number, type: string, [k: string]: any}) { + if (msg.type === 'progress') { + this.progressCb?.(msg.done, msg.total) + return + } + const p = this.pending.get(msg.id) + if (!p) return + this.pending.delete(msg.id) + if (msg.type === 'error') { + p.reject(new Error(msg.message)) + } else { + p.resolve(msg) + } + } + + private async send(msg: Record, transfer?: Transferable[]): Promise { + await this.ready + const id = this.nextId++ + return new Promise((resolve, reject) => { + this.pending.set(id, {resolve, reject}) + this.worker.postMessage({...msg, id}, transfer ?? []) + }) + } + + private toTransferable(data: Uint8Array): ArrayBuffer { + if (data.byteOffset !== 0 || data.byteLength !== data.buffer.byteLength) { + return data.buffer.slice(data.byteOffset, data.byteOffset + data.byteLength) as ArrayBuffer + } + return data.buffer as ArrayBuffer + } + + async encrypt(data: Uint8Array, fileName: string, + onProgress?: (done: number, total: number) => void): Promise { + this.progressCb = onProgress ?? 
null + const buf = this.toTransferable(data) + const resp = await this.send({type: 'encrypt', data: buf, fileName}, [buf]) + this.progressCb = null + return {digest: resp.digest, key: resp.key, nonce: resp.nonce, chunkSizes: resp.chunkSizes} + } + + async readChunk(offset: number, size: number): Promise { + const resp = await this.send({type: 'readChunk', offset, size}) + return new Uint8Array(resp.data) + } + + async decryptAndStoreChunk( + dhSecret: Uint8Array, nonce: Uint8Array, + body: Uint8Array, digest: Uint8Array, chunkNo: number + ): Promise { + // Copy arrays to ensure clean ArrayBuffer separation before worker transfer + // nonce/dhSecret may be subarrays sharing buffer with body + const dhSecretCopy = new Uint8Array(dhSecret) + const nonceCopy = new Uint8Array(nonce) + const digestCopy = new Uint8Array(digest) + const buf = this.toTransferable(body) + const hex = (b: Uint8Array | ArrayBuffer, n = 8) => { + const u = b instanceof ArrayBuffer ? new Uint8Array(b) : b + return Array.from(u.slice(0, n)).map(x => x.toString(16).padStart(2, '0')).join('') + } + console.log(`[BACKEND-DBG] chunk=${chunkNo} body.len=${body.length} body.byteOff=${body.byteOffset} buf.byteLen=${buf.byteLength} nonce=${hex(nonceCopy, 24)} dhSecret=${hex(dhSecretCopy)} digest=${hex(digestCopy, 32)} buf[0..8]=${hex(buf)} body[-8..]=${hex(body.slice(-8))}`) + await this.send( + {type: 'decryptAndStoreChunk', dhSecret: dhSecretCopy, nonce: nonceCopy, body: buf, chunkDigest: digestCopy, chunkNo}, + [buf] + ) + } + + async verifyAndDecrypt(params: {size: number, digest: Uint8Array, key: Uint8Array, nonce: Uint8Array} + ): Promise<{header: FileHeader, content: Uint8Array}> { + const resp = await this.send({ + type: 'verifyAndDecrypt', + size: params.size, digest: params.digest, key: params.key, nonce: params.nonce + }) + return {header: resp.header, content: new Uint8Array(resp.content)} + } + + async cleanup(): Promise { + await this.send({type: 'cleanup'}) + this.worker.terminate() + } +} 
+ +export function createCryptoBackend(): CryptoBackend { + if (typeof Worker === 'undefined') { + throw new Error('Web Workers required — update your browser') + } + return new WorkerBackend() +} diff --git a/xftp-web/web/crypto.worker.ts b/xftp-web/web/crypto.worker.ts new file mode 100644 index 0000000000..3ed8cd0160 --- /dev/null +++ b/xftp-web/web/crypto.worker.ts @@ -0,0 +1,322 @@ +import sodium from 'libsodium-wrappers-sumo' +import {encryptFile, encodeFileHeader, decryptChunks} from '../src/crypto/file.js' +import {sha512Streaming} from '../src/crypto/digest.js' +import {prepareChunkSizes, fileSizeLen, authTagSize} from '../src/protocol/chunks.js' +import {decryptReceivedChunk} from '../src/download.js' + +// ── OPFS session management ───────────────────────────────────── + +const SESSION_DIR = `session-${Date.now()}-${crypto.randomUUID()}` +let uploadReadHandle: FileSystemSyncAccessHandle | null = null +let downloadWriteHandle: FileSystemSyncAccessHandle | null = null +const chunkMeta = new Map() +let currentDownloadOffset = 0 +let sessionDir: FileSystemDirectoryHandle | null = null +let useMemory = false +const memoryChunks = new Map() + +async function getSessionDir(): Promise { + if (!sessionDir) { + const root = await navigator.storage.getDirectory() + sessionDir = await root.getDirectoryHandle(SESSION_DIR, {create: true}) + } + return sessionDir +} + +async function sweepStale() { + const root = await navigator.storage.getDirectory() + const oneHourAgo = Date.now() - 3600_000 + for await (const [name] of (root as any).entries()) { + if (!name.startsWith('session-')) continue + const parts = name.split('-') + const ts = parseInt(parts[1], 10) + if (!isNaN(ts) && ts < oneHourAgo) { + try { await root.removeEntry(name, {recursive: true}) } catch (_) {} + } + } +} + +// ── Message handlers ──────────────────────────────────────────── + +async function handleEncrypt(id: number, data: ArrayBuffer, fileName: string) { + const source = new Uint8Array(data) + 
const key = new Uint8Array(32) + const nonce = new Uint8Array(24) + crypto.getRandomValues(key) + crypto.getRandomValues(nonce) + const fileHdr = encodeFileHeader({fileName, fileExtra: null}) + const fileSize = BigInt(fileHdr.length + source.length) + const payloadSize = Number(fileSize) + fileSizeLen + authTagSize + const chunkSizes = prepareChunkSizes(payloadSize) + const encSize = BigInt(chunkSizes.reduce((a: number, b: number) => a + b, 0)) + const encData = encryptFile(source, fileHdr, key, nonce, fileSize, encSize) + + self.postMessage({id, type: 'progress', done: 50, total: 100}) + + const digest = sha512Streaming([encData]) + console.log(`[WORKER-DBG] encrypt: encData.len=${encData.length} digest=${_whex(digest, 64)} chunkSizes=[${chunkSizes.join(',')}]`) + + self.postMessage({id, type: 'progress', done: 80, total: 100}) + + // Write to OPFS + const dir = await getSessionDir() + const fileHandle = await dir.getFileHandle('upload.bin', {create: true}) + const writeHandle = await fileHandle.createSyncAccessHandle() + const written = writeHandle.write(encData) + if (written !== encData.length) throw new Error(`OPFS upload write: ${written}/${encData.length}`) + writeHandle.flush() + writeHandle.close() + + // Reopen as persistent read handle + uploadReadHandle = await fileHandle.createSyncAccessHandle() + + self.postMessage({id, type: 'progress', done: 100, total: 100}) + self.postMessage({id, type: 'encrypted', digest, key, nonce, chunkSizes}) +} + +function handleReadChunk(id: number, offset: number, size: number) { + if (!uploadReadHandle) { + self.postMessage({id, type: 'error', message: 'No upload file open'}) + return + } + const buf = new Uint8Array(size) + uploadReadHandle.read(buf, {at: offset}) + const ab = buf.buffer as ArrayBuffer + self.postMessage({id, type: 'chunk', data: ab}, [ab]) +} + +async function handleDecryptAndStore( + id: number, dhSecret: Uint8Array, nonce: Uint8Array, + body: ArrayBuffer, chunkDigest: Uint8Array, chunkNo: number +) { 
+ const bodyArr = new Uint8Array(body) + console.log(`[WORKER-DBG] store chunk=${chunkNo} body.len=${bodyArr.length} nonce=${_whex(nonce, 24)} dhSecret=${_whex(dhSecret)} digest=${_whex(chunkDigest, 32)} body[0..8]=${_whex(bodyArr)} body[-8..]=${_whex(bodyArr.slice(-8))}`) + const decrypted = decryptReceivedChunk(dhSecret, nonce, bodyArr, chunkDigest) + console.log(`[WORKER-DBG] decrypted chunk=${chunkNo} len=${decrypted.length} [0..8]=${_whex(decrypted)} [-8..]=${_whex(decrypted.slice(-8))}`) + + if (useMemory) { + memoryChunks.set(chunkNo, decrypted) + self.postMessage({id, type: 'stored'}) + return + } + + if (!downloadWriteHandle) { + const dir = await getSessionDir() + const fileHandle = await dir.getFileHandle('download.bin', {create: true}) + downloadWriteHandle = await fileHandle.createSyncAccessHandle() + } + + const offset = currentDownloadOffset + currentDownloadOffset += decrypted.length + chunkMeta.set(chunkNo, {offset, size: decrypted.length}) + const written = downloadWriteHandle.write(decrypted, {at: offset}) + console.log(`[WORKER-DBG] OPFS write chunk=${chunkNo} offset=${offset} size=${decrypted.length} written=${written}`) + + if (written !== decrypted.length) { + console.warn(`[WORKER] OPFS write failed chunk=${chunkNo}: ${written}/${decrypted.length}, falling back to in-memory storage`) + // Migrate previously written chunks from OPFS to memory + for (const [cn, meta] of chunkMeta.entries()) { + if (cn === chunkNo) continue + const buf = new Uint8Array(meta.size) + downloadWriteHandle.read(buf, {at: meta.offset}) + memoryChunks.set(cn, buf) + } + downloadWriteHandle.close() + downloadWriteHandle = null + try { + const dir = await getSessionDir() + await dir.removeEntry('download.bin') + } catch (_) {} + chunkMeta.clear() + currentDownloadOffset = 0 + memoryChunks.set(chunkNo, decrypted) + useMemory = true + self.postMessage({id, type: 'stored'}) + return + } + + downloadWriteHandle.flush() + + // Verify: read back and compare first/last 8 bytes 
+ const verifyBuf = new Uint8Array(Math.min(8, decrypted.length)) + downloadWriteHandle.read(verifyBuf, {at: offset}) + const verifyEnd = new Uint8Array(Math.min(8, decrypted.length)) + downloadWriteHandle.read(verifyEnd, {at: offset + decrypted.length - verifyEnd.length}) + console.log(`[WORKER-DBG] OPFS verify chunk=${chunkNo} readBack[0..8]=${_whex(verifyBuf)} readBack[-8..]=${_whex(verifyEnd)} expected[0..8]=${_whex(decrypted)} expected[-8..]=${_whex(decrypted.slice(-8))}`) + + self.postMessage({id, type: 'stored'}) +} + +async function handleVerifyAndDecrypt( + id: number, size: number, digest: Uint8Array, key: Uint8Array, nonce: Uint8Array +) { + console.log(`[WORKER-DBG] verify: expectedSize=${size} expectedDigest=${_whex(digest, 64)} useMemory=${useMemory} chunkMeta.size=${chunkMeta.size} memoryChunks.size=${memoryChunks.size}`) + + // Read chunks — from memory (fallback) or OPFS + const chunks: Uint8Array[] = [] + let totalSize = 0 + if (useMemory) { + const sorted = [...memoryChunks.entries()].sort((a, b) => a[0] - b[0]) + for (const [chunkNo, data] of sorted) { + console.log(`[WORKER-DBG] verify memory chunk=${chunkNo} size=${data.length}`) + chunks.push(data) + totalSize += data.length + } + } else { + // Close write handle, reopen as read + if (downloadWriteHandle) { + downloadWriteHandle.flush() + downloadWriteHandle.close() + downloadWriteHandle = null + } + const dir = await getSessionDir() + const fileHandle = await dir.getFileHandle('download.bin') + const readHandle = await fileHandle.createSyncAccessHandle() + console.log(`[WORKER-DBG] verify: OPFS file size=${readHandle.getSize()}`) + const sortedEntries = [...chunkMeta.entries()].sort((a, b) => a[0] - b[0]) + for (const [chunkNo, meta] of sortedEntries) { + const buf = new Uint8Array(meta.size) + const bytesRead = readHandle.read(buf, {at: meta.offset}) + console.log(`[WORKER-DBG] verify read chunk=${chunkNo} offset=${meta.offset} size=${meta.size} bytesRead=${bytesRead} [0..8]=${_whex(buf)} 
[-8..]=${_whex(buf.slice(-8))}`) + chunks.push(buf) + totalSize += meta.size + } + readHandle.close() + } + + if (totalSize !== size) { + self.postMessage({id, type: 'error', message: `File size mismatch: ${totalSize} !== ${size}`}) + return + } + + // Compute per-chunk SHA-512 incrementally to find divergence point + const state = sodium.crypto_hash_sha512_init() as unknown as import('libsodium-wrappers').StateAddress + for (let i = 0; i < chunks.length; i++) { + const chunk = chunks[i] + const SEG = 4 * 1024 * 1024 + for (let off = 0; off < chunk.length; off += SEG) { + sodium.crypto_hash_sha512_update(state, chunk.subarray(off, Math.min(off + SEG, chunk.length))) + } + } + const actualDigest = sodium.crypto_hash_sha512_final(state) + if (!digestEqual(actualDigest, digest)) { + console.error(`[WORKER-DBG] DIGEST MISMATCH: expected=${_whex(digest, 64)} actual=${_whex(actualDigest, 64)} chunks=${chunks.length} totalSize=${totalSize}`) + // Log per-chunk incremental hash to find divergence + const state2 = sodium.crypto_hash_sha512_init() as unknown as import('libsodium-wrappers').StateAddress + for (let i = 0; i < chunks.length; i++) { + const chunk = chunks[i] + const SEG = 4 * 1024 * 1024 + for (let off = 0; off < chunk.length; off += SEG) { + sodium.crypto_hash_sha512_update(state2, chunk.subarray(off, Math.min(off + SEG, chunk.length))) + } + // snapshot incremental hash (create temp copy of state) + const chunkDigest = sha512Streaming([chunk]) + console.error(`[WORKER-DBG] chunk[${i}] size=${chunk.length} sha512=${_whex(chunkDigest, 32)}… [0..8]=${_whex(chunk)} [-8..]=${_whex(chunk.slice(-8))}`) + } + self.postMessage({id, type: 'error', message: 'File digest mismatch'}) + return + } + console.log(`[WORKER-DBG] verify: digest OK`) + + // File-level decrypt + const result = decryptChunks(BigInt(size), chunks, key, nonce) + + // Clean up download state + if (!useMemory) { + const dir = await getSessionDir() + try { await dir.removeEntry('download.bin') } catch 
(_) {} + } + chunkMeta.clear() + memoryChunks.clear() + currentDownloadOffset = 0 + useMemory = false + + const contentBuf = result.content.buffer.slice( + result.content.byteOffset, + result.content.byteOffset + result.content.byteLength + ) + self.postMessage( + {id, type: 'decrypted', header: result.header, content: contentBuf}, + [contentBuf] + ) +} + +async function handleCleanup(id: number) { + if (uploadReadHandle) { + uploadReadHandle.close() + uploadReadHandle = null + } + if (downloadWriteHandle) { + downloadWriteHandle.close() + downloadWriteHandle = null + } + chunkMeta.clear() + memoryChunks.clear() + currentDownloadOffset = 0 + useMemory = false + try { + const root = await navigator.storage.getDirectory() + await root.removeEntry(SESSION_DIR, {recursive: true}) + } catch (_) {} + sessionDir = null + self.postMessage({id, type: 'cleaned'}) +} + +// ── Message dispatch ──────────────────────────────────────────── + +// Serialize all message processing — async onmessage would allow +// interleaved execution at await points, racing on shared OPFS handles +// when downloadFileRaw fetches chunks from multiple servers in parallel. 
+let queue: Promise = Promise.resolve() +self.onmessage = (e: MessageEvent) => { + const msg = e.data + queue = queue.then(async () => { + try { + await initPromise + switch (msg.type) { + case 'encrypt': + await handleEncrypt(msg.id, msg.data, msg.fileName) + break + case 'readChunk': + handleReadChunk(msg.id, msg.offset, msg.size) + break + case 'decryptAndStoreChunk': + await handleDecryptAndStore(msg.id, msg.dhSecret, msg.nonce, msg.body, msg.chunkDigest, msg.chunkNo) + break + case 'verifyAndDecrypt': + await handleVerifyAndDecrypt(msg.id, msg.size, msg.digest, msg.key, msg.nonce) + break + case 'cleanup': + await handleCleanup(msg.id) + break + default: + self.postMessage({id: msg.id, type: 'error', message: `Unknown message type: ${msg.type}`}) + } + } catch (err: any) { + self.postMessage({id: msg.id, type: 'error', message: err?.message ?? String(err)}) + } + }) +} + +// ── Helpers ───────────────────────────────────────────────────── + +function _whex(b: Uint8Array, n = 8): string { + return Array.from(b.slice(0, n)).map(x => x.toString(16).padStart(2, '0')).join('') +} + +function digestEqual(a: Uint8Array, b: Uint8Array): boolean { + if (a.length !== b.length) return false + let diff = 0 + for (let i = 0; i < a.length; i++) diff |= a[i] ^ b[i] + return diff === 0 +} + +// ── Init ──────────────────────────────────────────────────────── + +const initPromise = (async () => { + await sodium.ready + await sweepStale() +})() + +// Signal main thread that the worker is ready to receive messages +initPromise.then(() => self.postMessage({type: 'ready'}), () => {}) diff --git a/xftp-web/web/download.ts b/xftp-web/web/download.ts new file mode 100644 index 0000000000..25443cf35e --- /dev/null +++ b/xftp-web/web/download.ts @@ -0,0 +1,140 @@ +import {createCryptoBackend} from './crypto-backend.js' +import {createProgressRing} from './progress.js' +import { + newXFTPAgent, closeXFTPAgent, + decodeDescriptionURI, downloadFileRaw +} from '../src/agent.js' +import 
{XFTPPermanentError} from '../src/client.js' + +export function initDownload(app: HTMLElement, hash: string) { + let fd: ReturnType + try { + fd = decodeDescriptionURI(hash) + } catch (err: any) { + app.innerHTML = `
+      <div class="card">
+        <p class="error">Invalid or corrupted link.</p>
+      </div>
+    `
+    return
+  }
+
+  const size = fd.redirect ? fd.redirect.size : fd.size
+  app.innerHTML = `
+      <div class="card">
+        <h1>SimpleX File Transfer</h1>
+        <div id="dl-ready" class="stage">
+          <p>File size: ${formatSize(size)}</p>
+          <button id="dl-btn" class="btn">Download</button>
+        </div>
+        <div id="dl-progress" class="stage" hidden>
+          <div id="dl-progress-container"></div>
+          <p id="dl-status"></p>
+        </div>
+        <div id="dl-error" class="stage" hidden>
+          <p id="dl-error-msg" class="error"></p>
+          <button id="dl-retry-btn" class="btn">Retry</button>
+        </div>
+      </div>
` + + const readyStage = document.getElementById('dl-ready')! + const progressStage = document.getElementById('dl-progress')! + const errorStage = document.getElementById('dl-error')! + const progressContainer = document.getElementById('dl-progress-container')! + const statusText = document.getElementById('dl-status')! + const dlBtn = document.getElementById('dl-btn')! + const errorMsg = document.getElementById('dl-error-msg')! + const retryBtn = document.getElementById('dl-retry-btn')! + + function showStage(stage: HTMLElement) { + for (const s of [readyStage, progressStage, errorStage]) s.hidden = true + stage.hidden = false + } + + function showError(msg: string) { + errorMsg.textContent = msg + showStage(errorStage) + } + + dlBtn.addEventListener('click', startDownload) + retryBtn.addEventListener('click', startDownload) + + async function startDownload() { + showStage(progressStage) + const ring = createProgressRing() + progressContainer.innerHTML = '' + progressContainer.appendChild(ring.canvas) + statusText.textContent = 'Downloading…' + + const backend = createCryptoBackend() + const agent = newXFTPAgent() + + try { + const resolvedFd = await downloadFileRaw(agent, fd, async (raw) => { + await backend.decryptAndStoreChunk( + raw.dhSecret, raw.nonce, raw.body, raw.digest, raw.chunkNo + ) + }, { + onProgress: (downloaded, total) => { + ring.update(downloaded / total * 0.8) + } + }) + + statusText.textContent = 'Decrypting…' + ring.update(0.85) + + const {header, content} = await backend.verifyAndDecrypt({ + size: resolvedFd.size, + digest: resolvedFd.digest, + key: resolvedFd.key, + nonce: resolvedFd.nonce + }) + + ring.update(0.95) + + // Sanitize filename and trigger browser save + const fileName = sanitizeFileName(header.fileName) + const blob = new Blob([content.buffer as ArrayBuffer]) + const url = URL.createObjectURL(blob) + const a = document.createElement('a') + a.href = url + a.download = encodeURIComponent(fileName) + a.style.display = 'none' + 
document.body.appendChild(a) + a.click() + document.body.removeChild(a) + setTimeout(() => URL.revokeObjectURL(url), 1000) + + ring.update(1) + statusText.textContent = 'Download complete' + } catch (err: any) { + const msg = err?.message ?? String(err) + showError(msg) + if (err instanceof XFTPPermanentError) retryBtn.hidden = true + else retryBtn.hidden = false + } finally { + await backend.cleanup().catch(() => {}) + closeXFTPAgent(agent) + } + } +} + +function sanitizeFileName(name: string): string { + let s = name + // Strip path separators + s = s.replace(/[/\\]/g, '') + // Replace null/control characters + s = s.replace(/[\x00-\x1f\x7f]/g, '_') + // Strip Unicode bidi override characters + s = s.replace(/[\u202a-\u202e\u2066-\u2069]/g, '') + // Limit length + if (s.length > 255) s = s.slice(0, 255) + return s || 'download' +} + +function formatSize(bytes: number): string { + if (bytes < 1024) return bytes + ' B' + if (bytes < 1024 * 1024) return (bytes / 1024).toFixed(1) + ' KB' + return (bytes / (1024 * 1024)).toFixed(1) + ' MB' +} diff --git a/xftp-web/web/index.html b/xftp-web/web/index.html new file mode 100644 index 0000000000..fd02668a76 --- /dev/null +++ b/xftp-web/web/index.html @@ -0,0 +1,15 @@ + + + + + + + SimpleX File Transfer + + + +
+ + + diff --git a/xftp-web/web/main.ts b/xftp-web/web/main.ts new file mode 100644 index 0000000000..69fd8eba51 --- /dev/null +++ b/xftp-web/web/main.ts @@ -0,0 +1,30 @@ +import sodium from 'libsodium-wrappers-sumo' +import {initUpload} from './upload.js' +import {initDownload} from './download.js' + +async function main() { + await sodium.ready + initApp() + + // Handle hash changes (SPA navigation) + window.addEventListener('hashchange', initApp) +} + +function initApp() { + const app = document.getElementById('app')! + const hash = window.location.hash.slice(1) + + if (hash) { + initDownload(app, hash) + } else { + initUpload(app) + } +} + +main().catch(err => { + const app = document.getElementById('app') + if (app) { + app.innerHTML = `
+      <div class="card">
+        <p class="error">Failed to initialize: ${err.message}</p>
+      </div>
` + } + console.error(err) +}) diff --git a/xftp-web/web/progress.ts b/xftp-web/web/progress.ts new file mode 100644 index 0000000000..2fa292f27e --- /dev/null +++ b/xftp-web/web/progress.ts @@ -0,0 +1,52 @@ +const SIZE = 120 +const LINE_WIDTH = 8 +const RADIUS = (SIZE - LINE_WIDTH) / 2 +const CENTER = SIZE / 2 +const BG_COLOR = '#e0e0e0' +const FG_COLOR = '#3b82f6' + +export interface ProgressRing { + canvas: HTMLCanvasElement + update(fraction: number): void +} + +export function createProgressRing(): ProgressRing { + const canvas = document.createElement('canvas') + canvas.width = SIZE * devicePixelRatio + canvas.height = SIZE * devicePixelRatio + canvas.style.width = SIZE + 'px' + canvas.style.height = SIZE + 'px' + canvas.className = 'progress-ring' + const ctx = canvas.getContext('2d')! + ctx.scale(devicePixelRatio, devicePixelRatio) + + function draw(fraction: number) { + ctx.clearRect(0, 0, SIZE, SIZE) + // Background arc + ctx.beginPath() + ctx.arc(CENTER, CENTER, RADIUS, 0, 2 * Math.PI) + ctx.strokeStyle = BG_COLOR + ctx.lineWidth = LINE_WIDTH + ctx.lineCap = 'round' + ctx.stroke() + // Foreground arc + if (fraction > 0) { + ctx.beginPath() + ctx.arc(CENTER, CENTER, RADIUS, -Math.PI / 2, -Math.PI / 2 + 2 * Math.PI * fraction) + ctx.strokeStyle = FG_COLOR + ctx.lineWidth = LINE_WIDTH + ctx.lineCap = 'round' + ctx.stroke() + } + // Percentage text + const pct = Math.round(fraction * 100) + ctx.fillStyle = '#333' + ctx.font = '600 20px system-ui, sans-serif' + ctx.textAlign = 'center' + ctx.textBaseline = 'middle' + ctx.fillText(pct + '%', CENTER, CENTER) + } + + draw(0) + return {canvas, update: draw} +} diff --git a/xftp-web/web/servers.json b/xftp-web/web/servers.json new file mode 100644 index 0000000000..334fa57835 --- /dev/null +++ b/xftp-web/web/servers.json @@ -0,0 +1,18 @@ +{ + "simplex": [ + "xftp://da1aH3nOT-9G8lV7bWamhxpDYdJ1xmW7j3JpGaDR5Ug=@xftp1.simplex.im", + "xftp://5vog2Imy1ExJB_7zDZrkV1KDWi96jYFyy9CL6fndBVw=@xftp2.simplex.im", + 
"xftp://PYa32DdYNFWi0uZZOprWQoQpIk5qyjRJ3EF7bVpbsn8=@xftp3.simplex.im", + "xftp://k_GgQl40UZVV0Y4BX9ZTyMVqX5ZewcLW0waQIl7AYDE=@xftp4.simplex.im", + "xftp://-bIo6o8wuVc4wpZkZD3tH-rCeYaeER_0lz1ffQcSJDs=@xftp5.simplex.im", + "xftp://6nSvtY9pJn6PXWTAIMNl95E1Kk1vD7FM2TeOA64CFLg=@xftp6.simplex.im" + ], + "flux": [ + "xftp://92Sctlc09vHl_nAqF2min88zKyjdYJ9mgxRCJns5K2U=@xftp1.simplexonflux.com", + "xftp://YBXy4f5zU1CEhnbbCzVWTNVNsaETcAGmYqGNxHntiE8=@xftp2.simplexonflux.com", + "xftp://ARQO74ZSvv2OrulRF3CdgwPz_AMy27r0phtLSq5b664=@xftp3.simplexonflux.com", + "xftp://ub2jmAa9U0uQCy90O-fSUNaYCj6sdhl49Jh3VpNXP58=@xftp4.simplexonflux.com", + "xftp://Rh19D5e4Eez37DEE9hAlXDB3gZa1BdFYJTPgJWPO9OI=@xftp5.simplexonflux.com", + "xftp://0AznwoyfX8Od9T_acp1QeeKtxUi676IBIiQjXVwbdyU=@xftp6.simplexonflux.com" + ] +} diff --git a/xftp-web/web/servers.ts b/xftp-web/web/servers.ts new file mode 100644 index 0000000000..b9a67e5bfd --- /dev/null +++ b/xftp-web/web/servers.ts @@ -0,0 +1,12 @@ +import {parseXFTPServer, type XFTPServer} from '../src/protocol/address.js' + +// __XFTP_SERVERS__ is injected at build time by vite.config.ts +// In development mode: test server from globalSetup +// In production mode: preset servers from servers.json +declare const __XFTP_SERVERS__: string[] + +const serverAddresses: string[] = __XFTP_SERVERS__ + +export function getServers(): XFTPServer[] { + return serverAddresses.map(parseXFTPServer) +} diff --git a/xftp-web/web/style.css b/xftp-web/web/style.css new file mode 100644 index 0000000000..3c5654a0e9 --- /dev/null +++ b/xftp-web/web/style.css @@ -0,0 +1,103 @@ +*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; } + +body { + font-family: system-ui, -apple-system, sans-serif; + background: #f5f5f5; + color: #333; + min-height: 100vh; + display: flex; + align-items: center; + justify-content: center; +} + +#app { + width: 100%; + max-width: 480px; + padding: 16px; +} + +.card { + background: #fff; + border-radius: 12px; + padding: 32px 
24px; + box-shadow: 0 1px 3px rgba(0,0,0,.1); + text-align: center; +} + +h1 { + font-size: 1.25rem; + font-weight: 600; + margin-bottom: 24px; +} + +.stage { margin-top: 16px; } + +/* Drop zone */ +.drop-zone { + border: 2px dashed #ccc; + border-radius: 8px; + padding: 32px 16px; + transition: border-color .15s, background .15s; +} +.drop-zone.drag-over { + border-color: #3b82f6; + background: #eff6ff; +} + +/* Buttons */ +.btn { + display: inline-block; + padding: 10px 24px; + border: none; + border-radius: 6px; + background: #3b82f6; + color: #fff; + font-size: .9rem; + font-weight: 500; + cursor: pointer; + transition: background .15s; +} +.btn:hover { background: #2563eb; } +.btn-secondary { background: #6b7280; } +.btn-secondary:hover { background: #4b5563; } + +/* Hints */ +.hint { color: #999; font-size: .85rem; margin-top: 8px; } +.expiry { margin-top: 12px; } + +/* Progress */ +.progress-ring { display: block; margin: 0 auto 12px; } +#upload-status, #dl-status { font-size: .9rem; color: #666; margin-bottom: 12px; } + +/* Share link row */ +.link-row { + display: flex; + gap: 8px; + margin-top: 12px; +} +.link-row input { + flex: 1; + padding: 8px 10px; + border: 1px solid #ccc; + border-radius: 6px; + font-size: .85rem; + background: #f9fafb; +} + +/* Messages */ +.success { color: #16a34a; font-weight: 600; } +.error { color: #dc2626; font-weight: 500; margin-bottom: 12px; } + +/* Security note */ +.security-note { + margin-top: 20px; + padding: 12px; + background: #f0fdf4; + border-radius: 6px; + font-size: .8rem; + color: #555; + text-align: left; +} +.security-note p + p { margin-top: 6px; } +.security-note a { color: #3b82f6; text-decoration: none; } +.security-note a:hover { text-decoration: underline; } diff --git a/xftp-web/web/upload.ts b/xftp-web/web/upload.ts new file mode 100644 index 0000000000..0faa70c0a4 --- /dev/null +++ b/xftp-web/web/upload.ts @@ -0,0 +1,170 @@ +import {createCryptoBackend} from './crypto-backend.js' +import 
{getServers} from './servers.js' +import {createProgressRing} from './progress.js' +import { + newXFTPAgent, closeXFTPAgent, uploadFile, encodeDescriptionURI, + type EncryptedFileMetadata +} from '../src/agent.js' +import {XFTPPermanentError} from '../src/client.js' + +const MAX_SIZE = 100 * 1024 * 1024 + +export function initUpload(app: HTMLElement) { + app.innerHTML = ` +
+      <div class="card">
+        <h1>SimpleX File Transfer</h1>
+        <div id="drop-zone" class="stage drop-zone">
+          <p>Drag & drop a file here</p>
+          <p class="hint">or</p>
+          <label class="btn">
+            Choose file
+            <input id="file-input" type="file" hidden>
+          </label>
+          <p class="hint">Max 100 MB</p>
+        </div>
+        <div id="upload-progress" class="stage" hidden>
+          <div id="progress-container"></div>
+          <p id="upload-status"></p>
+          <button id="cancel-btn" class="btn btn-secondary">Cancel</button>
+        </div>
+        <div id="upload-complete" class="stage" hidden>
+          <p class="success">File uploaded</p>
+          <div class="link-row">
+            <input id="share-link" readonly>
+            <button id="copy-btn" class="btn">Copy</button>
+          </div>
+        </div>
+        <div id="upload-error" class="stage" hidden>
+          <p id="error-msg" class="error"></p>
+          <button id="retry-btn" class="btn">Retry</button>
+        </div>
+      </div>
` + + const dropZone = document.getElementById('drop-zone')! + const fileInput = document.getElementById('file-input') as HTMLInputElement + const progressStage = document.getElementById('upload-progress')! + const completeStage = document.getElementById('upload-complete')! + const errorStage = document.getElementById('upload-error')! + const progressContainer = document.getElementById('progress-container')! + const statusText = document.getElementById('upload-status')! + const cancelBtn = document.getElementById('cancel-btn')! + const shareLink = document.getElementById('share-link') as HTMLInputElement + const copyBtn = document.getElementById('copy-btn')! + const errorMsg = document.getElementById('error-msg')! + const retryBtn = document.getElementById('retry-btn')! + + let aborted = false + let pendingFile: File | null = null + + dropZone.addEventListener('dragover', e => { e.preventDefault(); dropZone.classList.add('drag-over') }) + dropZone.addEventListener('dragleave', () => dropZone.classList.remove('drag-over')) + dropZone.addEventListener('drop', e => { + e.preventDefault() + dropZone.classList.remove('drag-over') + const f = e.dataTransfer?.files[0] + if (f) startUpload(f) + }) + fileInput.addEventListener('change', () => { + if (fileInput.files?.[0]) startUpload(fileInput.files[0]) + }) + retryBtn.addEventListener('click', () => { + if (pendingFile) startUpload(pendingFile) + }) + + function showStage(stage: HTMLElement) { + for (const s of [dropZone, progressStage, completeStage, errorStage]) s.hidden = true + stage.hidden = false + } + + function showError(msg: string) { + errorMsg.textContent = msg + showStage(errorStage) + } + + async function startUpload(file: File) { + pendingFile = file + aborted = false + + if (file.size > MAX_SIZE) { + showError(`File too large (${formatSize(file.size)}). 
Maximum is 100 MB.`) + return + } + if (file.size === 0) { + showError('File is empty.') + return + } + + showStage(progressStage) + const ring = createProgressRing() + progressContainer.innerHTML = '' + progressContainer.appendChild(ring.canvas) + statusText.textContent = 'Encrypting…' + + const backend = createCryptoBackend() + const agent = newXFTPAgent() + + cancelBtn.onclick = () => { + aborted = true + backend.cleanup().catch(() => {}) + closeXFTPAgent(agent) + showStage(dropZone) + } + + try { + const fileData = new Uint8Array(await file.arrayBuffer()) + if (aborted) return + + const encrypted = await backend.encrypt(fileData, file.name, (done, total) => { + ring.update(done / total * 0.3) + }) + if (aborted) return + + statusText.textContent = 'Uploading…' + const metadata: EncryptedFileMetadata = { + digest: encrypted.digest, + key: encrypted.key, + nonce: encrypted.nonce, + chunkSizes: encrypted.chunkSizes + } + const servers = getServers() + const result = await uploadFile(agent, servers, metadata, { + readChunk: (off, sz) => backend.readChunk(off, sz), + onProgress: (uploaded, total) => { + ring.update(0.3 + (uploaded / total) * 0.7) + } + }) + if (aborted) return + + const url = window.location.origin + window.location.pathname + '#' + result.uri + shareLink.value = url + showStage(completeStage) + copyBtn.onclick = () => { + navigator.clipboard.writeText(url).then(() => { + copyBtn.textContent = 'Copied!' + setTimeout(() => { copyBtn.textContent = 'Copy' }, 2000) + }) + } + } catch (err: any) { + if (!aborted) { + const msg = err?.message ?? 
String(err) + showError(msg) + // Hide retry button for permanent errors (no point retrying) + if (err instanceof XFTPPermanentError) retryBtn.hidden = true + else retryBtn.hidden = false + } + } finally { + await backend.cleanup().catch(() => {}) + closeXFTPAgent(agent) + } + } +} + +function formatSize(bytes: number): string { + if (bytes < 1024) return bytes + ' B' + if (bytes < 1024 * 1024) return (bytes / 1024).toFixed(1) + ' KB' + return (bytes / (1024 * 1024)).toFixed(1) + ' MB' +}