diff --git a/apps/xftp-server/XFTPWeb.hs b/apps/xftp-server/XFTPWeb.hs index a3edb41f0e..a9ee55e156 100644 --- a/apps/xftp-server/XFTPWeb.hs +++ b/apps/xftp-server/XFTPWeb.hs @@ -34,7 +34,7 @@ xftpMediaContent = $(embedDir "apps/xftp-server/static/media/") xftpFilePageHtml :: ByteString xftpFilePageHtml = $(embedFile "apps/xftp-server/static/file.html") -xftpGenerateSite :: XFTPServerConfig -> Maybe ServerPublicInfo -> Maybe TransportHost -> FilePath -> IO () +xftpGenerateSite :: XFTPServerConfig s -> Maybe ServerPublicInfo -> Maybe TransportHost -> FilePath -> IO () xftpGenerateSite cfg info onionHost path = do let substs = xftpSubsts cfg info onionHost Web.generateSite embeddedContent (render (Web.indexHtml embeddedContent) substs) [] path @@ -50,10 +50,10 @@ xftpGenerateSite cfg info onionHost path = do createDirectoryIfMissing True dir forM_ content_ $ \(fp, content) -> B.writeFile (dir fp) content -xftpServerInformation :: XFTPServerConfig -> Maybe ServerPublicInfo -> Maybe TransportHost -> ByteString +xftpServerInformation :: XFTPServerConfig s -> Maybe ServerPublicInfo -> Maybe TransportHost -> ByteString xftpServerInformation cfg info onionHost = render (Web.indexHtml embeddedContent) (xftpSubsts cfg info onionHost) -xftpSubsts :: XFTPServerConfig -> Maybe ServerPublicInfo -> Maybe TransportHost -> [(ByteString, Maybe ByteString)] +xftpSubsts :: XFTPServerConfig s -> Maybe ServerPublicInfo -> Maybe TransportHost -> [(ByteString, Maybe ByteString)] xftpSubsts XFTPServerConfig {fileExpiration, logStatsInterval, allowNewFiles, newFileBasicAuth} information onionHost = [("smpConfig", Nothing), ("xftpConfig", Just "y")] <> substConfig <> serverInfoSubsts simplexmqSource information <> [("onionHost", strEncode <$> onionHost), ("iniFileName", Just "file-server.ini")] where diff --git a/plans/2026-03-25-xftp-postgres-backend-design.md b/plans/2026-03-25-xftp-postgres-backend-design.md new file mode 100644 index 0000000000..78a32a5075 --- /dev/null +++ 
b/plans/2026-03-25-xftp-postgres-backend-design.md @@ -0,0 +1,472 @@ +# XFTP Server PostgreSQL Backend + +## Overview + +Add PostgreSQL backend support to xftp-server, following the SMP server pattern. Supports bidirectional migration between STM (in-memory with StoreLog) and PostgreSQL backends. + +## Goals + +- PostgreSQL-backed file metadata storage as an alternative to STM + StoreLog +- Polymorphic server code via `FileStoreClass` typeclass with IO-based methods (following `QueueStoreClass` pattern) +- Bidirectional migration: StoreLog <-> PostgreSQL via CLI commands +- Shared `server_postgres` cabal flag (same flag enables both SMP and XFTP Postgres support) +- INI-based backend selection at runtime + +## Architecture + +### FileStoreClass Typeclass + +IO-based typeclass following the `QueueStoreClass` pattern — each method is a self-contained IO action, with the implementation responsible for its own atomicity (STM backend wraps in `atomically`, Postgres backend uses database transactions): + +```haskell +class FileStoreClass s where + type FileStoreConfig s + + -- Lifecycle + newFileStore :: FileStoreConfig s -> IO s + closeFileStore :: s -> IO () + + -- File operations + addFile :: s -> SenderId -> FileInfo -> RoundedFileTime -> ServerEntityStatus -> IO (Either XFTPErrorType ()) + setFilePath :: s -> SenderId -> FilePath -> IO (Either XFTPErrorType ()) + addRecipient :: s -> SenderId -> FileRecipient -> IO (Either XFTPErrorType ()) + getFile :: s -> SFileParty p -> XFTPFileId -> IO (Either XFTPErrorType (FileRec, C.APublicAuthKey)) + deleteFile :: s -> SenderId -> IO (Either XFTPErrorType ()) + blockFile :: s -> SenderId -> BlockingInfo -> Bool -> IO (Either XFTPErrorType ()) + deleteRecipient :: s -> RecipientId -> FileRec -> IO () + ackFile :: s -> RecipientId -> IO (Either XFTPErrorType ()) + + -- Expiration (with LIMIT for Postgres; called in a loop until empty) + expiredFiles :: s -> Int64 -> Int -> IO [(SenderId, Maybe FilePath, Word32)] + + -- 
Storage and stats (for init-time computation) + getUsedStorage :: s -> IO Int64 + getFileCount :: s -> IO Int +``` + +- STM backend: each method wraps its STM transaction in `atomically` internally. +- Postgres backend: each method runs its query via `withDB` / database connection internally. + +No polymorphic monad or `runStore` dispatcher needed — unlike `MsgStoreClass`, XFTP file operations are individually atomic and don't require grouping multiple operations into backend-dependent transactions. + +### PostgresFileStore Data Type + +```haskell +data PostgresFileStore = PostgresFileStore + { dbStore :: DBStore, + dbStoreLog :: Maybe (StoreLog 'WriteMode) + } +``` + +- `dbStore` — connection pool created via `createDBStore`, runs schema migrations on init. +- `dbStoreLog` — optional parallel log file (enabled by `db_store_log` INI setting). When present, every mutation (`addFile`, `setFilePath`, `deleteFile`, `blockFile`, `addRecipient`, `ackFile`) also writes to this log via a `withLog` wrapper. `withLog` is called AFTER the DB operation succeeds (so the log reflects committed state only). Log write failures are non-fatal (logged as warnings, do not fail the DB operation). This provides an audit trail and enables recovery via export. + +`closeFileStore` for Postgres calls `closeDBStore` (closes connection pool) then `mapM_ closeStoreLog dbStoreLog` (flushes and closes the parallel log). For STM, it closes the storeLog. Called from a `finally` block during server shutdown, matching SMP's `stopServer` → `closeMsgStore` → `closeQueueStore` pattern. 
+ +### STMFileStore Type + +After extracting from current `Store.hs`, `STMFileStore` retains the file and recipient maps but no longer owns `usedStorage` (moved to `XFTPEnv`): + +```haskell +data STMFileStore = STMFileStore + { files :: TMap SenderId FileRec, + recipients :: TMap RecipientId (SenderId, RcvPublicAuthKey) + } +``` + +`closeFileStore` for STM is a no-op (TMaps are garbage-collected; the env-level `storeLog` is closed separately by the server). + +### Error Handling + +Postgres operations follow SMP's `withDB` / `handleDuplicate` pattern: + +```haskell +withDB :: Text -> PostgresFileStore -> (DB.Connection -> IO (Either XFTPErrorType a)) -> ExceptT XFTPErrorType IO a +withDB op st action = + ExceptT $ E.try (withTransaction (dbStore st) action) >>= either logErr pure + where + logErr :: E.SomeException -> IO (Either XFTPErrorType a) + logErr e = logError ("STORE: " <> err) $> Left INTERNAL + where + err = op <> ", withDB, " <> tshow e + +handleDuplicate :: SqlError -> IO (Either XFTPErrorType a) +handleDuplicate e = case constraintViolation e of + Just (UniqueViolation _) -> pure $ Left DUPLICATE_ + _ -> E.throwIO e +``` + +- All DB operations wrapped in `withDB` — catches exceptions, logs, returns `INTERNAL`. +- Unique constraint violations caught by `handleDuplicate` and mapped to `DUPLICATE_`. +- UPDATE operations verified with `assertUpdated` — returns `AUTH` if 0 rows affected (matching SMP pattern, prevents silent failures when WHERE clause doesn't match). +- Critical sections (DB write + TVar update) wrapped in `uninterruptibleMask_` to prevent async exceptions from leaving inconsistent state between DB and TVars. 
+ +### FileRec and TVar Fields + +`FileRec` retains its `TVar` fields (matching SMP's `PostgresQueue` pattern): + +```haskell +data FileRec = FileRec + { senderId :: SenderId, + fileInfo :: FileInfo, + filePath :: TVar (Maybe FilePath), + recipientIds :: TVar (Set RecipientId), + createdAt :: RoundedFileTime, + fileStatus :: TVar ServerEntityStatus + } +``` + +- **STM backend**: TVars are the source of truth, as currently. +- **Postgres backend**: `getFile` reads from DB and creates a `FileRec` with fresh TVars populated from the DB row (matching SMP's `mkQ` pattern — `newTVarIO` per load). Mutation methods (`setFilePath`, `blockFile`, etc.) update both the DB (persistence) and the TVars (in-session consistency). The `recipientIds` TVar is initialized to `S.empty` — no subquery needed because no server code reads `recipientIds` directly; all recipient operations go through the typeclass methods (`addRecipient`, `deleteRecipient`, `ackFile`), which query the `recipients` table for Postgres. + +### usedStorage Ownership + +`usedStorage :: TVar Int64` moves from the store to `XFTPEnv`. The store typeclass does **not** manage `usedStorage` — it only provides `getUsedStorage` for init-time computation. + +- **STM init**: StoreLog replay calls `setFilePath` (which only sets the filePath TVar — the STM `setFilePath` implementation is changed to **not** update `usedStorage`). Similarly, STM `deleteFile` (Store.hs line 117) and `blockFile` (line 125) are changed to **not** update `usedStorage` — the server handles all `usedStorage` adjustments externally. After replay, `getUsedStorage` computes the sum over all file sizes (matching current `countUsedStorage` behavior). +- **Postgres init**: `getUsedStorage` executes `SELECT COALESCE(SUM(file_size), 0) FROM files`. +- **Runtime**: Server manages `usedStorage` TVar directly for reserve/commit/rollback during uploads, and adjusts after `deleteFile`/`blockFile` calls. 
+ +**Note on `getUsedStorage` semantics**: The current STM `countUsedStorage` sums all file sizes unconditionally (including files without `filePath` set, i.e., created but not yet uploaded). The Postgres `getUsedStorage` matches this: `SELECT SUM(file_size) FROM files` (no `WHERE file_path IS NOT NULL`). In practice, orphaned files (created but never uploaded) are rare and short-lived (expired within 48h), so the difference is negligible. A future improvement could filter by `file_path IS NOT NULL` in both backends to reflect actual disk usage more accurately. + +### Server.hs Refactoring + +`Server.hs` becomes polymorphic over `FileStoreClass s`. Since all typeclass methods are IO, call sites replace `atomically` with direct IO calls to the store. + +**Call sites requiring changes** (exhaustive list): + +1. **`receiveServerFile`** (line 563): `atomically $ writeTVar filePath (Just fPath)` → `setFilePath store senderId fPath`. The `reserve` logic (line 551-555) stays as direct TVar manipulation on `usedStorage` from `XFTPEnv`. + +2. **`verifyXFTPTransmission`** (line 453): `atomically $ verify =<< getFile st party fId` — the `getFile` call and subsequent `readTVar fileStatus` are in a single `atomically` block. Refactored to: `getFile st party fId` (IO), then `readTVarIO (fileStatus fr)` from the returned `FileRec` (safe for both backends — STM TVar is the source of truth, Postgres TVar is a fresh snapshot from DB). + +3. **`retryAdd`** (line 516): Signature `XFTPFileId -> STM (Either XFTPErrorType a)` → `XFTPFileId -> IO (Either XFTPErrorType a)`. The `atomically` call (line 520) replaced with `liftIO`. + +4. **`deleteOrBlockServerFile_`** (line 620): Parameter `FileStore -> STM (Either XFTPErrorType ())` → `FileStoreClass s => s -> IO (Either XFTPErrorType ())`. The `atomically` call (line 626) removed — the store method is already IO. After the store action, server adjusts `usedStorage` TVar in `XFTPEnv` based on `fileInfo.size`. + +5. 
+**`ackFileReception`** (line 605): `atomically $ deleteRecipient st rId fr` → `deleteRecipient st rId fr`.
+
+6. **Control port `CPDelete`/`CPBlock`** (lines 371, 377): `atomically $ getFile fs SFRecipient fileId` → `getFile fs SFRecipient fileId`.
+
+7. **`expireServerFiles`** (line 636): Replace per-file `expiredFilePath` iteration with batched `expiredFiles st old batchSize`, which returns `[(SenderId, Maybe FilePath, Word32)]` — the `Word32` file size is needed so the server can adjust the `usedStorage` TVar after each deletion. Called in a loop until the returned list is empty. The `itemDelay` between files applies to the deletion loop over each batch, not the query itself. STM backend ignores the batch size limit (returns all expired files from TMap scan); Postgres uses `LIMIT`.
+
+8. **`restoreServerStats`** (line 694): `FileStore {files, usedStorage} <- asks store` accesses store fields directly. Refactored to: `usedStorage` from `XFTPEnv` via `asks usedStorage`, file count via `getFileCount store`. STM: `M.size <$> readTVarIO files`. Postgres: `SELECT COUNT(*) FROM files`.
+
+### Store Config Selection
+
+GADT in `Env.hs`:
+
+```haskell
+data XFTPStoreConfig s where
+  XSCMemory :: Maybe FilePath -> XFTPStoreConfig STMFileStore
+#if defined(dbServerPostgres)
+  XSCDatabase :: PostgresFileStoreCfg -> XFTPStoreConfig PostgresFileStore
+#endif
+```
+
+`XFTPEnv` becomes polymorphic:
+
+```haskell
+data XFTPEnv s = XFTPEnv
+  { config :: XFTPServerConfig s,
+    store :: s,
+    usedStorage :: TVar Int64,
+    storeLog :: Maybe (StoreLog 'WriteMode),
+    ...
+  }
+```
+
+The `M` monad (`ReaderT (XFTPEnv s) IO`) and all functions in `Server.hs` gain `FileStoreClass s =>` constraints.
+
+**StoreLog lifecycle per backend:**
+
+- **STM mode**: `storeLog = Just sl` (current behavior — append-only log for persistence and recovery).
+- **Postgres mode**: `storeLog = Nothing` (main storeLog disabled — Postgres is the source of truth).
The optional parallel `dbStoreLog` inside `PostgresFileStore` provides audit/recovery if enabled via `db_store_log` INI setting.
+
+The existing `withFileLog` pattern in Server.hs continues to work unchanged — it maps over `Maybe (StoreLog 'WriteMode)`, which is `Nothing` in Postgres mode so the calls become no-ops.
+
+### Main.hs Store Type Dispatch
+
+The `Start` CLI command gains a `--confirm-migrations` flag (default `MCConsole` — manual prompt, matching SMP's `StartOptions`). For automated deployments, `--confirm-migrations up` auto-applies forward migrations. The import command uses `MCYesUp` (always auto-apply).
+
+Following SMP's existential dispatch pattern (`AStoreType` + `run`), `Main.hs` selects the store type from INI config and dispatches to the polymorphic server:
+
+```haskell
+runServer ini = do
+  let storeType = fromRight "memory" $ lookupValue "STORE_LOG" "store_files" ini
+  case storeType of
+    "memory" -> run $ XSCMemory (enableStoreLog $> storeLogFilePath)
+    "database" ->
+#if defined(dbServerPostgres)
+      run $ XSCDatabase PostgresFileStoreCfg {..}
+#else
+      exitError "server not compiled with Postgres support"
+#endif
+    _ -> exitError $ "Invalid store_files value: " <> storeType
+  where
+    run :: FileStoreClass s => XFTPStoreConfig s -> IO ()
+    run storeCfg = do
+      env <- newXFTPServerEnv storeCfg config
+      runReaderT (xftpServer config) env
+```
+
+**`newXFTPServerEnv` refactored signature:**
+
+```haskell
+newXFTPServerEnv :: FileStoreClass s => XFTPStoreConfig s -> XFTPServerConfig s -> IO (XFTPEnv s)
+newXFTPServerEnv storeCfg config = do
+  (store, storeLog) <- case storeCfg of
+    XSCMemory storeLogPath -> do
+      st <- newFileStore ()
+      sl <- mapM (`readWriteFileStore` st) storeLogPath
+      pure (st, sl)
+    XSCDatabase dbCfg -> do
+      st <- newFileStore dbCfg
+      pure (st, Nothing) -- main storeLog disabled for Postgres
+  usedStorage <- newTVarIO =<< getUsedStorage store
+  ...
+ pure XFTPEnv {config, store, usedStorage, storeLog, ...} +``` + +### Startup Config Validation + +Following SMP's `checkMsgStoreMode` pattern, `Main.hs` validates config before starting: + +- **`store_files=database` + StoreLog file exists** (without `db_store_log=on`): Error — "StoreLog file present but store_files is `database`. Use `xftp-server database import` to migrate, or set `db_store_log: on`." +- **`store_files=database` + schema doesn't exist**: Error — "Create schema in PostgreSQL or use `xftp-server database import`." +- **`store_files=memory` + Postgres schema exists**: Warning — "Postgres schema exists but store_files is `memory`. Data in Postgres will not be used." +- **Binary compiled without `server_postgres` + `store_files=database`**: Error — "Server not compiled with Postgres support." + +## Module Structure + +``` +src/Simplex/FileTransfer/Server/ + Store.hs -- FileStoreClass typeclass + shared types (FileRec, FileRecipient, etc.) + Store/ + STM.hs -- STMFileStore (extracted from current Store.hs) + Postgres.hs -- PostgresFileStore [CPP-guarded] + Postgres/ + Migrations.hs -- Schema migrations [CPP-guarded] + Config.hs -- PostgresFileStoreCfg [CPP-guarded] + StoreLog.hs -- Unchanged (interchange format for both backends + migration) + Env.hs -- XFTPStoreConfig GADT, polymorphic XFTPEnv + Main.hs -- Store selection, migration CLI commands + Server.hs -- Polymorphic over FileStoreClass +``` + +## PostgreSQL Schema + +Initial migration (`20260325_initial`): + +```sql +CREATE TABLE files ( + sender_id BYTEA NOT NULL PRIMARY KEY, + file_size INT4 NOT NULL, + file_digest BYTEA NOT NULL, + sender_key BYTEA NOT NULL, + file_path TEXT, + created_at INT8 NOT NULL, + status TEXT NOT NULL DEFAULT 'active' +); + +CREATE TABLE recipients ( + recipient_id BYTEA NOT NULL PRIMARY KEY, + sender_id BYTEA NOT NULL REFERENCES files ON DELETE CASCADE, + recipient_key BYTEA NOT NULL +); + +CREATE INDEX idx_recipients_sender_id ON recipients (sender_id); +CREATE 
INDEX idx_files_created_at ON files (created_at); +``` + +- `file_size` is `INT4` matching `Word32` in `FileInfo.size` +- `sender_key` and `recipient_key` stored as `BYTEA` using binary encoding via `C.encodePubKey` / `C.decodePubKey` (matching SMP's `ToField`/`FromField` instances for `APublicAuthKey` — includes algorithm type tag in the binary format) +- `file_path` nullable (set after upload completes via `setFilePath`) +- `ON DELETE CASCADE` for recipients when file is hard-deleted +- `created_at` stores rounded epoch seconds (1-hour precision, `RoundedFileTime`) +- `status` as TEXT via `StrEncoding` (`ServerEntityStatus`: `EntityActive`, `EntityBlocked info`, `EntityOff`) +- Hard deletes (no `deleted_at` column) +- No PL/pgSQL functions needed; `setFilePath` uses `WHERE file_path IS NULL` to prevent duplicate uploads (the `UPDATE` itself acquires a row-level lock) +- `used_storage` computed on startup: `SELECT COALESCE(SUM(file_size), 0) FROM files` (matches STM `countUsedStorage` — all files, see usedStorage Ownership section) + +### Migrations Module + +Following SMP's `QueueStore/Postgres/Migrations.hs` pattern: + +```haskell +module Simplex.FileTransfer.Server.Store.Postgres.Migrations + ( xftpServerMigrations, + ) +where + +import Data.List (sortOn) +import Data.Text (Text) +import Simplex.Messaging.Agent.Store.Shared +import Text.RawString.QQ (r) + +xftpSchemaMigrations :: [(String, Text, Maybe Text)] +xftpSchemaMigrations = + [ ("20260325_initial", m20260325_initial, Nothing) + ] + +xftpServerMigrations :: [Migration] +xftpServerMigrations = sortOn name $ map migration xftpSchemaMigrations + where + migration (name, up, down) = Migration {name, up, down = down} + +m20260325_initial :: Text +m20260325_initial = + [r| +CREATE TABLE files ( + sender_id BYTEA NOT NULL PRIMARY KEY, + ... +); + |] +``` + +The `Migration` type (from `Simplex.Messaging.Agent.Store.Shared`) has fields `{name :: String, up :: Text, down :: Maybe Text}`. 
Initial migration has `Nothing` for `down`. Future migrations should include `Just down_migration` for rollback support. Called via `createDBStore dbOpts xftpServerMigrations (MigrationConfig confirmMigrations Nothing)`. + +### Postgres Operations + +Key query patterns: + +- **`addFile`**: `INSERT INTO files (...) VALUES (...)`, return `DUPLICATE_` on unique violation. +- **`setFilePath`**: `UPDATE files SET file_path = ? WHERE sender_id = ? AND file_path IS NULL`, verified with `assertUpdated` (returns `AUTH` if 0 rows affected — file not found or already uploaded). The `WHERE file_path IS NULL` prevents duplicate uploads; the `UPDATE` acquires a row lock implicitly. Only persists the path; `usedStorage` managed by server. +- **`addRecipient`**: `INSERT INTO recipients (...)`, plus check for duplicates. No need for `recipientIds` TVar update — Postgres derives it from the table. +- **`getFile`** (sender): `SELECT ... FROM files WHERE sender_id = ?`, returns auth key from `sender_key` column. +- **`getFile`** (recipient): `SELECT f.*, r.recipient_key FROM recipients r JOIN files f ON ... WHERE r.recipient_id = ?`. +- **`deleteFile`**: `DELETE FROM files WHERE sender_id = ?` (recipients cascade). +- **`blockFile`**: `UPDATE files SET status = ? WHERE sender_id = ?`. When `deleted = True`, the server adjusts `usedStorage` externally (matching current STM behavior where `blockFile` only updates status and storage, not `filePath`). +- **`expiredFiles`**: `SELECT sender_id, file_path, file_size FROM files WHERE created_at + ? < ? LIMIT ?` — batched query replaces per-file iteration, includes `file_size` for `usedStorage` adjustment. Called in a loop until no rows returned. 
+ +## INI Configuration + +New keys in `[STORE_LOG]` section: + +```ini +[STORE_LOG] +enable: on +store_files: memory # memory | database +db_connection: postgresql://xftp@/xftp_server_store +db_schema: xftp_server +db_pool_size: 10 +db_store_log: off +expire_files_hours: 48 +``` + +`store_files` selects the backend (`store_files` rather than `store_queues` because XFTP stores files, not queues): +- `memory` -> `XSCMemory` (current behavior) +- `database` -> `XSCDatabase` (requires `server_postgres` build flag) + +### INI Template Generation (`xftp-server init`) + +The `iniFileContent` function in `Main.hs` must be updated to generate the new keys in the `[STORE_LOG]` section. Following SMP's `iniDbOpts` pattern with `optDisabled'` (prefixes `"# "` when value equals default), Postgres keys are generated commented out by default: + +```ini +[STORE_LOG] +enable: on + +# File storage mode: `memory` or `database` (PostgreSQL). +store_files: memory + +# Database connection settings for PostgreSQL database (`store_files: database`). +# db_connection: postgresql://xftp@/xftp_server_store +# db_schema: xftp_server +# db_pool_size: 10 + +# Write database changes to store log file +# db_store_log: off + +expire_files_hours: 48 +``` + +Reuses `iniDBOptions` from `Simplex.Messaging.Server.CLI` for runtime parsing (falls back to defaults when keys are commented out or missing). `enableDbStoreLog'` pattern (`settingIsOn "STORE_LOG" "db_store_log"`) controls `dbStoreLogPath`. + +### PostgresFileStoreCfg + +```haskell +data PostgresFileStoreCfg = PostgresFileStoreCfg + { dbOpts :: DBOpts, + dbStoreLogPath :: Maybe FilePath, + confirmMigrations :: MigrationConfirmation + } +``` + +No `deletedTTL` (hard deletes). 
+ +### Default DB Options + +```haskell +defaultXFTPDBOpts :: DBOpts +defaultXFTPDBOpts = + DBOpts + { connstr = "postgresql://xftp@/xftp_server_store", + schema = "xftp_server", + poolSize = 10, + createSchema = False + } +``` + +## Migration CLI + +Bidirectional migration via StoreLog as interchange format: + +``` +xftp-server database import [--database DB_CONN] [--schema DB_SCHEMA] [--pool-size N] +xftp-server database export [--database DB_CONN] [--schema DB_SCHEMA] [--pool-size N] +``` + +No `--table` flag needed (unlike SMP which has queues/messages/all) — XFTP has a single entity type (files + recipients, always migrated together). + +CLI options reuse `dbOptsP` parser from `Simplex.Messaging.Server.CLI`. + +### Import (StoreLog -> PostgreSQL) + +1. Confirm: prompt user with database connection details and StoreLog path +2. Read and replay StoreLog into temporary `STMFileStore` +3. Connect to PostgreSQL, run schema migrations (`createSchema = True`, `confirmMigrations = MCYesUp`) +4. Batch-insert file records into `files` table using PostgreSQL COPY protocol (matching SMP's `batchInsertQueues` pattern for performance). Progress reported every 10k files. +5. Batch-insert recipient records into `recipients` table using COPY protocol +6. Verify counts: `SELECT COUNT(*) FROM files` / `recipients` — warn if mismatch +7. Rename StoreLog to `.bak` (prevents accidental re-import, preserves original for rollback) +8. Report counts + +### Export (PostgreSQL -> StoreLog) + +1. Confirm: prompt user with database connection details and output path. Fail if output file already exists. +2. Connect to PostgreSQL +3. Open new StoreLog file for writing +4. Fold over all file records, writing per file (in this order, matching existing `writeFileStore`): `AddFile` (with `ServerEntityStatus` — this preserves `EntityBlocked` state), `AddRecipients`, then `PutFile` (if `file_path` is set) +5. 
Report counts + +Note: `AddFile` carries `ServerEntityStatus` which includes `EntityBlocked info`, so blocking state is preserved through export/import without needing separate `BlockFile` log entries. + +File data on disk is untouched by migration — only metadata moves between backends. + +## Cabal Integration + +Shared `server_postgres` flag. New Postgres modules added to existing conditional block: + +```cabal +if flag(server_postgres) + cpp-options: -DdbServerPostgres + exposed-modules: + ...existing SMP modules... + Simplex.FileTransfer.Server.Store.Postgres + Simplex.FileTransfer.Server.Store.Postgres.Migrations + Simplex.FileTransfer.Server.Store.Postgres.Config +``` + +CPP guards (`#if defined(dbServerPostgres)`) in: +- `Store.hs` — Postgres `FromField`/`ToField` instances for XFTP-specific types if needed +- `Env.hs` — `XSCDatabase` constructor +- `Main.hs` — database CLI commands, store selection for `database` mode, Postgres imports +- `Server.hs` — Postgres-specific imports if needed + +## Testing + +- **Parameterized server tests**: Existing `xftpServerTests` refactored to accept a store type parameter (following SMP's `SpecWith (ASrvTransport, AStoreType)` pattern). The same server tests run against both STM and Postgres backends — STM tests run unconditionally, Postgres tests added under `#if defined(dbServerPostgres)` with `postgressBracket` for database lifecycle (drop → create → test → drop). 
+- **Unit tests**: `PostgresFileStore` operations — add/get/delete/block/expire, duplicate detection, auth errors +- **Migration round-trip**: STM store → export to StoreLog → import to Postgres → export back → verify StoreLog equality (including blocked file status) +- **Tests location**: in `tests/` alongside existing XFTP tests, guarded by `server_postgres` CPP flag +- **Test database**: PostgreSQL on `localhost:5432`, using a dedicated `xftp_server_test` schema (dropped and recreated per test run via `postgressBracket`, following SMP's test database lifecycle pattern) +- **Test fixtures**: `testXFTPStoreDBOpts :: DBOpts` with `createSchema = True`, `confirmMigrations = MCYesUp`, in `tests/XFTPClient.hs` diff --git a/plans/2026-03-25-xftp-postgres-implementation-plan.md b/plans/2026-03-25-xftp-postgres-implementation-plan.md new file mode 100644 index 0000000000..2ae334670e --- /dev/null +++ b/plans/2026-03-25-xftp-postgres-implementation-plan.md @@ -0,0 +1,648 @@ +# XFTP PostgreSQL Backend — Implementation Plan + +> **For agentic workers:** REQUIRED: Use superpowers-extended-cc:subagent-driven-development (if subagents available) or superpowers-extended-cc:executing-plans to implement this plan. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Add PostgreSQL backend support to xftp-server as an alternative to STM + StoreLog, with bidirectional migration. + +**Architecture:** Introduce `FileStoreClass` typeclass (IO-based, following `QueueStoreClass` pattern). Extract current STM store into `Store/STM.hs`, make `Server.hs` polymorphic, then add `Store/Postgres.hs` behind `server_postgres` CPP flag. `usedStorage` moves from store to `XFTPEnv` so the server manages quota tracking externally. 
+ +**Tech Stack:** Haskell, postgresql-simple, STM, fourmolu, cabal with CPP flags + +**Design spec:** `plans/2026-03-25-xftp-postgres-backend-design.md` + +--- + +## File Structure + +**Existing files modified:** +- `src/Simplex/FileTransfer/Server/Store.hs` — rewritten: becomes typeclass + shared types +- `src/Simplex/FileTransfer/Server/Env.hs` — polymorphic `XFTPEnv s`, `XFTPStoreConfig` GADT +- `src/Simplex/FileTransfer/Server.hs` — polymorphic over `FileStoreClass s` +- `src/Simplex/FileTransfer/Server/StoreLog.hs` — update for IO store functions +- `src/Simplex/FileTransfer/Server/Main.hs` — INI config, dispatch, CLI commands +- `simplexmq.cabal` — new modules +- `tests/XFTPClient.hs` — Postgres test fixtures +- `tests/Test.hs` — Postgres test group + +**New files created:** +- `src/Simplex/FileTransfer/Server/Store/STM.hs` — `STMFileStore` (extracted from current `Store.hs`) +- `src/Simplex/FileTransfer/Server/Store/Postgres.hs` — `PostgresFileStore` [CPP-guarded] +- `src/Simplex/FileTransfer/Server/Store/Postgres/Config.hs` — `PostgresFileStoreCfg` [CPP-guarded] +- `src/Simplex/FileTransfer/Server/Store/Postgres/Migrations.hs` — schema SQL [CPP-guarded] +- `tests/CoreTests/XFTPStoreTests.hs` — Postgres store unit tests [CPP-guarded] + +--- + +## Task 1: Move `usedStorage` from `FileStore` to `XFTPEnv` + +**Files:** +- Modify: `src/Simplex/FileTransfer/Server/Store.hs` +- Modify: `src/Simplex/FileTransfer/Server/Env.hs` +- Modify: `src/Simplex/FileTransfer/Server.hs` + +- [ ] **Step 1: Remove `usedStorage` from `FileStore` in `Store.hs`** + + 1. Remove `usedStorage :: TVar Int64` field from `FileStore` record (line 47). + 2. Remove `usedStorage <- newTVarIO 0` from `newFileStore` (line 75) and drop the field from the record construction (line 76). + 3. In `setFilePath` (line 92-97): remove `modifyTVar' (usedStorage st) (+ fromIntegral (size fileInfo))` — keep only `writeTVar filePath (Just fPath)`. 
Change pattern from `\FileRec {fileInfo, filePath}` to `\FileRec {filePath}` (fileInfo is now unused — `-Wunused-matches` error). + 4. In `deleteFile` (line 112-119): remove `modifyTVar' usedStorage $ subtract (fromIntegral $ size fileInfo)`. Change outer pattern match from `FileStore {files, recipients, usedStorage}` to `FileStore {files, recipients}`. Change inner pattern from `Just FileRec {fileInfo, recipientIds}` to `Just FileRec {recipientIds}` (`fileInfo` is now unused — `-Wunused-matches` error). + 5. In `blockFile` (line 122-127): remove `when deleted $ modifyTVar' usedStorage $ subtract (fromIntegral $ size fileInfo)`. Change pattern match from `st@FileStore {usedStorage}` to `st`. The `deleted` parameter and `fileInfo` in the inner pattern become unused — prefix with `_` or remove from pattern to avoid `-Wunused-matches`. + +- [ ] **Step 2: Add `usedStorage` to `XFTPEnv` in `Env.hs`** + + 1. Add `usedStorage :: TVar Int64` field to `XFTPEnv` record (between `store` and `storeLog`, line 93). + 2. In `newXFTPServerEnv` (line 112-126): replace lines 117-118: + ``` + used <- countUsedStorage <$> readTVarIO (files store) + atomically $ writeTVar (usedStorage store) used + ``` + with: + ``` + usedStorage <- newTVarIO =<< countUsedStorage <$> readTVarIO (files store) + ``` + 3. Add `usedStorage` to the `pure XFTPEnv {..}` construction. + +- [ ] **Step 3: Update all `usedStorage` access sites in `Server.hs`** + + 1. Line 552: `us <- asks $ usedStorage . store` → `us <- asks usedStorage`. + 2. Line 569: `us <- asks $ usedStorage . store` → `us <- asks usedStorage`. + 3. Line 639: `usedStart <- readTVarIO $ usedStorage st` → `usedStart <- readTVarIO =<< asks usedStorage`. + 4. Line 647: `usedEnd <- readTVarIO $ usedStorage st` → `usedEnd <- readTVarIO =<< asks usedStorage`. + 5. Line 694: `FileStore {files, usedStorage} <- asks store` → split into `FileStore {files} <- asks store` and `usedStorage <- asks usedStorage`. + 6. 
In `deleteOrBlockServerFile_` (line 620): after `void $ atomically $ storeAction st`, add usedStorage adjustment — `us <- asks usedStorage` then `atomically $ modifyTVar' us $ subtract (fromIntegral $ size fileInfo)` when file had a path (check `path` from `readTVarIO filePath` earlier in the function). + +- [ ] **Step 4: Build and verify** + + Run: `cabal build` + +- [ ] **Step 5: Run existing tests** + + Run: `cabal test --test-show-details=streaming --test-option=--match="/XFTP/"` + +- [ ] **Step 6: Format and commit** + + ```bash + fourmolu -i src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs + git add src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs + git commit -m "refactor(xftp): move usedStorage from FileStore to XFTPEnv" + ``` + +--- + +## Task 2: Add `getUsedStorage`, `getFileCount`, `expiredFiles` functions + +**Files:** +- Modify: `src/Simplex/FileTransfer/Server/Store.hs` +- Modify: `src/Simplex/FileTransfer/Server/Env.hs` +- Modify: `src/Simplex/FileTransfer/Server.hs` + +- [ ] **Step 1: Add three new functions to `Store.hs`** + + 1. Add to exports: `getUsedStorage`, `getFileCount`, `expiredFiles`. + 2. Remove `expiredFilePath` from exports AND delete the function definition (dead code → `-Wunused-binds` error). Also remove `($>>=)` from import `Simplex.Messaging.Util (ifM, ($>>=))` → `Simplex.Messaging.Util (ifM)` — `$>>=` was only used by `expiredFilePath`. + 3. Add import: `qualified Data.Map.Strict as M` (needed for `M.foldl'` in `getUsedStorage` and `M.toList` in `expiredFiles`). + 4. 
Implement: + ```haskell + getUsedStorage :: FileStore -> IO Int64 + getUsedStorage FileStore {files} = + M.foldl' (\acc FileRec {fileInfo = FileInfo {size}} -> acc + fromIntegral size) 0 <$> readTVarIO files + + getFileCount :: FileStore -> IO Int + getFileCount FileStore {files} = M.size <$> readTVarIO files + + expiredFiles :: FileStore -> Int64 -> Int -> IO [(SenderId, Maybe FilePath, Word32)] + expiredFiles FileStore {files} old _limit = do + fs <- readTVarIO files + fmap catMaybes . forM (M.toList fs) $ \(sId, FileRec {fileInfo = FileInfo {size}, filePath, createdAt = RoundedSystemTime createdAt}) -> + if createdAt + fileTimePrecision < old + then do + path <- readTVarIO filePath + pure $ Just (sId, path, size) + else pure Nothing + ``` + 5. Add imports: `Data.Maybe (catMaybes)`, `Data.Word (Word32)` (note: `qualified Data.Map.Strict as M` already added in item 3). + +- [ ] **Step 2: Replace `countUsedStorage` in `Env.hs`** + + 1. Replace `countUsedStorage <$> readTVarIO (files store)` with `getUsedStorage store` in `newXFTPServerEnv`. + 2. Remove `countUsedStorage` function definition and its export. + 3. Remove `qualified Data.Map.Strict as M` import if no longer used. + +- [ ] **Step 3: Update `restoreServerStats` in `Server.hs` to use `getFileCount`** + + In `restoreServerStats` (line 694-696): replace `FileStore {files} <- asks store` and `_filesCount <- M.size <$> readTVarIO files` with `st <- asks store` and `_filesCount <- liftIO $ getFileCount st` (eliminates the `FileStore` pattern match — `files` binding no longer needed). + +- [ ] **Step 4: Replace `expireServerFiles` iteration in `Server.hs`** + + 1. Replace the body of `expireServerFiles` (lines 636-660). Remove `files' <- readTVarIO (files st)` and the `forM_ (M.keys files')` loop. + 2. New body: call `expiredFiles st old 10000` in a loop. 
For each `(sId, filePath_, fileSize)` in returned list: apply `itemDelay`, remove disk file if present, call `atomically $ deleteFile st sId`, subtract `fileSize` from the `usedStorage` TVar (via `modifyTVar'`), increment `filesExpired` stat. Loop until `expiredFiles` returns `[]`. + 3. Remove `Data.Map.Strict` import from Server.hs if no longer needed (was used for `M.size` and `M.keys` — now replaced by `getFileCount` and `expiredFiles`). + +- [ ] **Step 5: Build and verify** + + Run: `cabal build` + +- [ ] **Step 6: Run existing tests** + + Run: `cabal test --test-show-details=streaming --test-option=--match="/XFTP/"` + +- [ ] **Step 7: Format and commit** + + ```bash + fourmolu -i src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs + git add src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs + git commit -m "refactor(xftp): add getUsedStorage, getFileCount, expiredFiles store functions" + ``` + +--- + +## Task 3: Change `Store.hs` functions from STM to IO + +**Files:** +- Modify: `src/Simplex/FileTransfer/Server/Store.hs` +- Modify: `src/Simplex/FileTransfer/Server.hs` +- Modify: `src/Simplex/FileTransfer/Server/StoreLog.hs` + +- [ ] **Step 1: Change all Store.hs function signatures from STM to IO** + + For each of: `addFile`, `setFilePath`, `addRecipient`, `getFile`, `deleteFile`, `blockFile`, `deleteRecipient`, `ackFile`: + 1. Change return type from `STM (Either XFTPErrorType ...)` to `IO (Either XFTPErrorType ...)` (or `STM ()` to `IO ()` for `deleteRecipient`). + 2. Wrap the function body in `atomically $ do ...`. + 3. Keep `withFile` and `newFileRec` as internal STM helpers (called inside the `atomically` blocks). + +- [ ] **Step 2: Update Server.hs call sites — remove `atomically` wrappers** + + 1.
Line 563 (`receiveServerFile`): change `atomically $ writeTVar filePath (Just fPath)` → add `st <- asks store` then `void $ liftIO $ setFilePath st senderId fPath` (design call site #1 — `store` is not in scope in `receiveServerFile`'s `receive` helper, so bind via `asks`; `void` avoids `-Wunused-do-bind` warning on the `Either` result). + 2. Line 453 (`verifyXFTPTransmission`): split `atomically $ verify =<< getFile st party fId` into: `liftIO (getFile st party fId)` (IO→M lift), then pattern match on result, use `readTVarIO (fileStatus fr)` instead of `readTVar`. + 3. Lines 371, 377 (control port `CPDelete`/`CPBlock`): change `ExceptT $ atomically $ getFile fs SFRecipient fileId` → `ExceptT $ liftIO $ getFile fs SFRecipient fileId` (inside `unliftIO u $ do` block which runs in M monad — `liftIO` required to lift IO into M). + 4. Line 508 (`addFile` in `createFile`): the `ExceptT $ addFile st sId file ts EntityActive` — `addFile` is now IO, `ExceptT` wraps IO directly. Remove any `atomically`. + 5. Line 514 (`addRecipient`): same — `ExceptT . addRecipient st sId` works directly in IO. + 6. Line 516 (`retryAdd`): change parameter type from `(XFTPFileId -> STM (Either XFTPErrorType a))` to `(XFTPFileId -> IO (Either XFTPErrorType a))`. Line 520: change `atomically (add fId)` to `liftIO (add fId)`. + 7. Line 605 (`ackFileReception`): change `atomically $ deleteRecipient st rId fr` to `liftIO $ deleteRecipient st rId fr`. + 8. Line 620 (`deleteOrBlockServerFile_`): change third parameter type from `(FileStore -> STM (Either XFTPErrorType ()))` to `(FileStore -> IO (Either XFTPErrorType ()))`. Line 626: change `void $ atomically $ storeAction st` to `void $ liftIO $ storeAction st`. + 9. `expireServerFiles` `delete` helper: change `atomically $ deleteFile st sId` to `liftIO $ deleteFile st sId` (deleteFile is now IO; `liftIO` required because the helper runs in M monad, not IO). 
+ +- [ ] **Step 3: Update `StoreLog.hs` — remove `atomically` from replay** + + In `readFileStore` (line 93), function `addToStore`: + 1. Change `atomically (addToStore lr)` to `addToStore lr` — store functions are now IO. + 2. The `addToStore` body calls `addFile`, `setFilePath`, `deleteFile`, `blockFile`, `ackFile` — all IO now, no `atomically` needed. + 3. For `AddRecipients`: `runExceptT $ mapM_ (ExceptT . addRecipient st sId) rcps` — `addRecipient` returns `IO (Either ...)`, so `ExceptT . addRecipient st sId` works directly. + +- [ ] **Step 4: Build and verify** + + Run: `cabal build` + +- [ ] **Step 5: Run existing tests** + + Run: `cabal test --test-show-details=streaming --test-option=--match="/XFTP/"` + +- [ ] **Step 6: Format and commit** + + ```bash + fourmolu -i src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server.hs src/Simplex/FileTransfer/Server/StoreLog.hs + git add src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server.hs src/Simplex/FileTransfer/Server/StoreLog.hs + git commit -m "refactor(xftp): change file store operations from STM to IO" + ``` + +--- + +## Task 4: Extract `FileStoreClass` typeclass, move STM impl to `Store/STM.hs` + +**Files:** +- Rewrite: `src/Simplex/FileTransfer/Server/Store.hs` +- Create: `src/Simplex/FileTransfer/Server/Store/STM.hs` +- Modify: `src/Simplex/FileTransfer/Server/StoreLog.hs` +- Modify: `src/Simplex/FileTransfer/Server/Env.hs` +- Modify: `src/Simplex/FileTransfer/Server.hs` +- Modify: `simplexmq.cabal` + +- [ ] **Step 1: Create `Store/STM.hs` — move all implementation code** + + 1. Create directory `src/Simplex/FileTransfer/Server/Store/`. + 2. Create `src/Simplex/FileTransfer/Server/Store/STM.hs`. + 3. Move from `Store.hs`: `FileStore` data type (rename to `STMFileStore`), all function implementations, internal helpers (`withFile`, `newFileRec`), all STM-specific imports. + 4. Rename all `FileStore` references to `STMFileStore` in the new file. + 5. 
Module declaration: `module Simplex.FileTransfer.Server.Store.STM` exporting only `STMFileStore (..)` — do NOT export standalone functions (`addFile`, `setFilePath`, etc.) to avoid name collisions with the typeclass methods from `Store.hs`. + +- [ ] **Step 2: Rewrite `Store.hs` as the typeclass module** + + 1. Add `{-# LANGUAGE TypeFamilies #-}` pragma to `Store.hs` (required for `type FileStoreConfig s` associated type). + 2. Keep in `Store.hs`: `FileRec (..)`, `FileRecipient (..)`, `RoundedFileTime`, `fileTimePrecision` definitions and their `StrEncoding` instance. + 3. Add `FileStoreClass` typeclass: + ```haskell + class FileStoreClass s where + type FileStoreConfig s + + -- Lifecycle + newFileStore :: FileStoreConfig s -> IO s + closeFileStore :: s -> IO () + + -- File operations + addFile :: s -> SenderId -> FileInfo -> RoundedFileTime -> ServerEntityStatus -> IO (Either XFTPErrorType ()) + setFilePath :: s -> SenderId -> FilePath -> IO (Either XFTPErrorType ()) + addRecipient :: s -> SenderId -> FileRecipient -> IO (Either XFTPErrorType ()) + getFile :: s -> SFileParty p -> XFTPFileId -> IO (Either XFTPErrorType (FileRec, C.APublicAuthKey)) + deleteFile :: s -> SenderId -> IO (Either XFTPErrorType ()) + blockFile :: s -> SenderId -> BlockingInfo -> Bool -> IO (Either XFTPErrorType ()) + deleteRecipient :: s -> RecipientId -> FileRec -> IO () + ackFile :: s -> RecipientId -> IO (Either XFTPErrorType ()) + + -- Expiration + expiredFiles :: s -> Int64 -> Int -> IO [(SenderId, Maybe FilePath, Word32)] + + -- Stats + getUsedStorage :: s -> IO Int64 + getFileCount :: s -> IO Int + ``` + 4. Do NOT re-export from `Store/STM.hs` — this would create a circular module dependency (Store.hs imports Store/STM.hs, Store/STM.hs imports Store.hs). Consumers must import `Store.STM` directly where they need `STMFileStore`. + 5. Remove all STM-specific imports that are no longer needed. + +- [ ] **Step 3: Add `FileStoreClass` instance in `Store/STM.hs`** + + 1. 
Import `FileStoreClass` from `Simplex.FileTransfer.Server.Store`. + 2. Inline all implementations directly in the instance body (do NOT delegate to standalone functions — the standalone names collide with typeclass method names, causing ambiguous occurrences for importers): + ```haskell + instance FileStoreClass STMFileStore where + type FileStoreConfig STMFileStore = () + newFileStore () = do + files <- TM.emptyIO + recipients <- TM.emptyIO + pure STMFileStore {files, recipients} + closeFileStore _ = pure () + addFile st sId fileInfo createdAt status = atomically $ ... + setFilePath st sId fPath = atomically $ ... + -- ... (each method's body is the existing function body, inlined) + ``` + 3. Remove the standalone top-level function definitions — they are now instance methods. Keep only `withFile` and `newFileRec` as internal helpers used by the instance methods. + +- [ ] **Step 4: Update importers** + + 1. `Env.hs`: add `import Simplex.FileTransfer.Server.Store.STM (STMFileStore (..))`. Change `FileStore` → `STMFileStore` in `XFTPEnv` type and `newXFTPServerEnv`. Change `store <- newFileStore` to `store <- newFileStore ()` (typeclass method now takes `FileStoreConfig STMFileStore` which is `()`). Keep `import Simplex.FileTransfer.Server.Store` for `FileRec`, `FileRecipient`, `FileStoreClass`, etc. + 2. `Server.hs`: add `import Simplex.FileTransfer.Server.Store.STM`. Change `FileStore` → `STMFileStore` in any explicit type annotations. Import `FileStoreClass` from `Simplex.FileTransfer.Server.Store`. + 3. `StoreLog.hs`: add `import Simplex.FileTransfer.Server.Store.STM` to access concrete `STMFileStore` type and store functions used during log replay. Change `FileStore` → `STMFileStore` in `readWriteFileStore` and `writeFileStore` parameter types. + +- [ ] **Step 5: Update cabal file** + + Add `Simplex.FileTransfer.Server.Store.STM` to `exposed-modules` in the `!flag(client_library)` section, alongside existing XFTP server modules. 
+ +- [ ] **Step 6: Build and verify** + + Run: `cabal build` + +- [ ] **Step 7: Run existing tests** + + Run: `cabal test --test-show-details=streaming --test-option=--match="/XFTP/"` + +- [ ] **Step 8: Format and commit** + + ```bash + fourmolu -i src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server/Store/STM.hs src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs src/Simplex/FileTransfer/Server/StoreLog.hs + git add src/Simplex/FileTransfer/Server/Store.hs src/Simplex/FileTransfer/Server/Store/STM.hs src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs src/Simplex/FileTransfer/Server/StoreLog.hs simplexmq.cabal + git commit -m "refactor(xftp): extract FileStoreClass typeclass, move STM impl to Store.STM" + ``` + +--- + +## Task 5: Make `XFTPEnv` and `Server.hs` polymorphic over `FileStoreClass` + +**Files:** +- Modify: `src/Simplex/FileTransfer/Server/Env.hs` +- Modify: `src/Simplex/FileTransfer/Server.hs` +- Modify: `src/Simplex/FileTransfer/Server/Main.hs` +- Modify: `tests/XFTPClient.hs` (if it calls `runXFTPServerBlocking` directly) + +- [ ] **Step 1: Make `XFTPEnv` polymorphic in `Env.hs`** + + 1. Add `XFTPStoreConfig` GADT: `data XFTPStoreConfig s where XSCMemory :: Maybe FilePath -> XFTPStoreConfig STMFileStore`. + 2. Change `data XFTPEnv` to `data XFTPEnv s` — field `store :: FileStore` becomes `store :: s`. + 3. Change `newXFTPServerEnv :: XFTPServerConfig -> IO XFTPEnv` to `newXFTPServerEnv :: FileStoreClass s => XFTPStoreConfig s -> XFTPServerConfig -> IO (XFTPEnv s)`. + 4. Pattern match on `XSCMemory storeLogPath` in `newXFTPServerEnv` body. Create store via `newFileStore ()`, storeLog via `mapM (`readWriteFileStore` st) storeLogPath`. + +- [ ] **Step 2: Make `Server.hs` polymorphic** + + 1. Change `type M a = ReaderT XFTPEnv IO a` to `type M s a = ReaderT (XFTPEnv s) IO a`. + 2. Add `FileStoreClass s =>` constraint to all functions using `M s a`. 
Use `forall s.` in signatures of functions that have `where`-block bindings with `M s` type annotations — `ScopedTypeVariables` requires explicit `forall` to bring `s` into scope for inner type signatures (matching SMP's `smpServer :: forall s. MsgStoreClass s => ...` pattern). Full list: `xftpServer`, `processRequest`, `verifyXFTPTransmission`, `processXFTPRequest` and all its `where`-bound functions (`createFile`, `addRecipients`, `receiveServerFile`, `sendServerFile`, `deleteServerFile`, `ackFileReception`, `retryAdd`, `addFileRetry`, `addRecipientRetry`), `deleteServerFile_`, `blockServerFile`, `deleteOrBlockServerFile_`, `expireServerFiles`, `randomId`, `getFileId`, `withFileLog`, `incFileStat`, `saveServerStats`, `restoreServerStats`, `randomDelay` (inside `#ifdef slow_servers` CPP block). Also update `encodeXftp` (line 236) and `runCPClient` (line 339) which use explicit `ReaderT XFTPEnv IO` instead of the `M` alias — change to `ReaderT (XFTPEnv s) IO`. + 3. Change `runXFTPServerBlocking` and `runXFTPServer` to take `XFTPStoreConfig s` parameter. + 4. Add `closeFileStore store` call to the server shutdown path (in the `finally` block or `stopServer` equivalent — after saving stats, before logging "Server stopped"). This ensures Postgres connection pool and `dbStoreLog` are properly closed. For STM this is a no-op. + +- [ ] **Step 3: Update `Main.hs` dispatch** + + 1. In `runServer`: construct `XSCMemory (enableStoreLog $> storeLogFilePath)`. + 2. Add dispatch function that calls the updated `runXFTPServer` (which creates `started` internally): + ```haskell + run :: FileStoreClass s => XFTPStoreConfig s -> IO () + run storeCfg = runXFTPServer storeCfg serverConfig + ``` + 3. Call `run` with the `XSCMemory` config. + +- [ ] **Step 4: Update test helper if needed** + + If `tests/XFTPClient.hs` calls `runXFTPServerBlocking` directly, update the call to pass an `XSCMemory` config. Check the `withXFTPServer` / `serverBracket` helper. 
+ +- [ ] **Step 5: Build and verify** + + Run: `cabal build && cabal build test:simplexmq-test` + +- [ ] **Step 6: Run existing tests** + + Run: `cabal test --test-show-details=streaming --test-option=--match="/XFTP/"` + +- [ ] **Step 7: Format and commit** + + ```bash + fourmolu -i src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs src/Simplex/FileTransfer/Server/Main.hs + git add src/Simplex/FileTransfer/Server/Env.hs src/Simplex/FileTransfer/Server.hs src/Simplex/FileTransfer/Server/Main.hs tests/XFTPClient.hs simplexmq.cabal + git commit -m "refactor(xftp): make XFTPEnv and server polymorphic over FileStoreClass" + ``` + +--- + +## Task 6: Add Postgres config, migrations, and store skeleton + +**Files:** +- Create: `src/Simplex/FileTransfer/Server/Store/Postgres/Config.hs` +- Create: `src/Simplex/FileTransfer/Server/Store/Postgres/Migrations.hs` +- Create: `src/Simplex/FileTransfer/Server/Store/Postgres.hs` +- Modify: `src/Simplex/FileTransfer/Server/Env.hs` +- Modify: `simplexmq.cabal` + +- [ ] **Step 1: Create `Store/Postgres/Config.hs`** + + ```haskell + module Simplex.FileTransfer.Server.Store.Postgres.Config + ( PostgresFileStoreCfg (..), + defaultXFTPDBOpts, + ) + where + + import Simplex.Messaging.Agent.Store.Postgres.Options (DBOpts (..)) + import Simplex.Messaging.Agent.Store.Shared (MigrationConfirmation) + + data PostgresFileStoreCfg = PostgresFileStoreCfg + { dbOpts :: DBOpts, + dbStoreLogPath :: Maybe FilePath, + confirmMigrations :: MigrationConfirmation + } + + defaultXFTPDBOpts :: DBOpts + defaultXFTPDBOpts = + DBOpts + { connstr = "postgresql://xftp@/xftp_server_store", + schema = "xftp_server", + poolSize = 10, + createSchema = False + } + ``` + +- [ ] **Step 2: Create `Store/Postgres/Migrations.hs`** + + Full migration module with `xftpServerMigrations :: [Migration]` and `m20260325_initial` containing CREATE TABLE SQL for `files` and `recipients` tables plus indexes. 
Follow SMP's `QueueStore/Postgres/Migrations.hs` pattern exactly: tuple list → `sortOn name . map migration`. + +- [ ] **Step 3: Create `Store/Postgres.hs` with stub instance** + + 1. Define `PostgresFileStore` with `dbStore :: DBStore` and `dbStoreLog :: Maybe (StoreLog 'WriteMode)`. + 2. `instance FileStoreClass PostgresFileStore` with `error "not implemented"` for all methods except `newFileStore` (calls `createDBStore` + opens `dbStoreLog`) and `closeFileStore` (closes both). `type FileStoreConfig PostgresFileStore = PostgresFileStoreCfg`. + 3. Add `withDB`, `handleDuplicate`, `assertUpdated`, `withLog` helpers. + +- [ ] **Step 4: Add `XSCDatabase` GADT constructor in `Env.hs` (CPP-guarded)** + + ```haskell + #if defined(dbServerPostgres) + import Simplex.FileTransfer.Server.Store.Postgres (PostgresFileStore) + import Simplex.FileTransfer.Server.Store.Postgres.Config (PostgresFileStoreCfg) + #endif + + data XFTPStoreConfig s where + XSCMemory :: Maybe FilePath -> XFTPStoreConfig STMFileStore + #if defined(dbServerPostgres) + XSCDatabase :: PostgresFileStoreCfg -> XFTPStoreConfig PostgresFileStore + #endif + ``` + +- [ ] **Step 5: Update cabal** + + Add to existing `if flag(server_postgres)` block: + ``` + Simplex.FileTransfer.Server.Store.Postgres + Simplex.FileTransfer.Server.Store.Postgres.Config + Simplex.FileTransfer.Server.Store.Postgres.Migrations + ``` + +- [ ] **Step 6: Build both ways** + + Run: `cabal build && cabal build -fserver_postgres` + +- [ ] **Step 7: Format and commit** + + ```bash + fourmolu -i src/Simplex/FileTransfer/Server/Store/Postgres.hs src/Simplex/FileTransfer/Server/Store/Postgres/Config.hs src/Simplex/FileTransfer/Server/Env.hs + git add src/Simplex/FileTransfer/Server/Store/Postgres.hs src/Simplex/FileTransfer/Server/Store/Postgres/Config.hs src/Simplex/FileTransfer/Server/Store/Postgres/Migrations.hs src/Simplex/FileTransfer/Server/Env.hs simplexmq.cabal + git commit -m "feat(xftp): add PostgreSQL store skeleton with schema 
migration" + ``` + +--- + +## Task 7: Implement `PostgresFileStore` operations + +**Files:** +- Modify: `src/Simplex/FileTransfer/Server/Store/Postgres.hs` + +- [ ] **Step 1: Implement `addFile`** + + `INSERT INTO files (sender_id, file_size, file_digest, sender_key, file_path, created_at, status) VALUES (?,?,?,?,NULL,?,?)`. Catch unique violation with `handleDuplicate` → `DUPLICATE_`. Call `withLog "addFile"` after. + +- [ ] **Step 2: Implement `getFile`** + + For `SFSender`: `SELECT ... FROM files WHERE sender_id = ?`. Construct `FileRec` with `newTVarIO` per TVar field. `recipientIds = S.empty`. + For `SFRecipient`: `SELECT f.*, r.recipient_key FROM recipients r JOIN files f ON r.sender_id = f.sender_id WHERE r.recipient_id = ?`. + +- [ ] **Step 3: Implement `setFilePath`** + + `UPDATE files SET file_path = ? WHERE sender_id = ? AND file_path IS NULL`. Use `assertUpdated`. Call `withLog "setFilePath"`. + +- [ ] **Step 4: Implement `addRecipient`** + + `INSERT INTO recipients (recipient_id, sender_id, recipient_key) VALUES (?,?,?)`. `handleDuplicate` → `DUPLICATE_`. Call `withLog "addRecipient"`. + +- [ ] **Step 5: Implement `deleteFile`, `blockFile`** + + `deleteFile`: `DELETE FROM files WHERE sender_id = ?` (CASCADE). `withLog "deleteFile"`. + `blockFile`: `UPDATE files SET status = ? WHERE sender_id = ?`. `assertUpdated`. `withLog "blockFile"`. + +- [ ] **Step 6: Implement `deleteRecipient`, `ackFile`** + + `deleteRecipient`: `DELETE FROM recipients WHERE recipient_id = ?`. `withLog "deleteRecipient"`. + `ackFile`: same + return `Left AUTH` if 0 rows. + +- [ ] **Step 7: Implement `expiredFiles`, `getUsedStorage`, `getFileCount`** + + `expiredFiles`: `SELECT sender_id, file_path, file_size FROM files WHERE created_at + ? < ? LIMIT ?`. + `getUsedStorage`: `SELECT COALESCE(SUM(file_size), 0) FROM files`. + `getFileCount`: `SELECT COUNT(*) FROM files`. 
+ +- [ ] **Step 8: Add `ToField`/`FromField` instances** + + For `RoundedFileTime` (Int64 wrapper), `ServerEntityStatus` (Text via StrEncoding), `C.APublicAuthKey` (Binary via `encodePubKey`/`decodePubKey`). Check SMP's `QueueStore/Postgres.hs` for existing instances to import. + +- [ ] **Step 9: Wrap mutation operations in `uninterruptibleMask_`** + + Operations that combine a DB write with a TVar update (e.g., `getFile` constructs `FileRec` with `newTVarIO`) must be wrapped in `E.uninterruptibleMask_` to prevent async exceptions from leaving inconsistent state. Follow SMP's `addQueue_`, `deleteStoreQueue` pattern. + +- [ ] **Step 10: Build** + + Run: `cabal build -fserver_postgres` + +- [ ] **Step 11: Format and commit** + + ```bash + fourmolu -i src/Simplex/FileTransfer/Server/Store/Postgres.hs + git add src/Simplex/FileTransfer/Server/Store/Postgres.hs + git commit -m "feat(xftp): implement PostgresFileStore operations" + ``` + +--- + +## Task 8: Add INI config, Main.hs dispatch, startup validation + +**Files:** +- Modify: `src/Simplex/FileTransfer/Server/Main.hs` +- Modify: `src/Simplex/FileTransfer/Server/Env.hs` + +- [ ] **Step 1: Update `iniFileContent` in `Main.hs`** + + Add to `[STORE_LOG]` section: `store_files: memory`, commented-out `db_connection`, `db_schema`, `db_pool_size`, `db_store_log` keys. Follow SMP's `optDisabled'` pattern for commented defaults. + +- [ ] **Step 2: Add `StartOptions` and `--confirm-migrations` flag** + + ```haskell + data StartOptions = StartOptions + { confirmMigrations :: MigrationConfirmation + } + ``` + Add to `Start` command parser with default `MCConsole`. Thread through to `runServer`. + +- [ ] **Step 3: Add store_files INI parsing and CPP-guarded Postgres dispatch** + + In `runServer`: read `store_files` from INI (`fromRight "memory" $ lookupValue "STORE_LOG" "store_files" ini`). 
Add `"database"` branch (CPP-guarded) that constructs `PostgresFileStoreCfg` using `iniDBOptions ini defaultXFTPDBOpts` and `enableDbStoreLog'` pattern. Non-postgres build: `exitError`. + +- [ ] **Step 4: Add `XSCDatabase` branch in `newXFTPServerEnv` (`Env.hs`)** + + CPP-guarded pattern match on `XSCDatabase dbCfg`: `newFileStore dbCfg`, `storeLog = Nothing`. + +- [ ] **Step 5: Add startup config validation** + + Add `checkFileStoreMode` (CPP-guarded) before `run`: validate conflicting storeLog file + database mode, missing schema, etc. per design doc. + +- [ ] **Step 6: Build both ways** + + Run: `cabal build && cabal build -fserver_postgres` + +- [ ] **Step 7: Format and commit** + + ```bash + fourmolu -i src/Simplex/FileTransfer/Server/Main.hs src/Simplex/FileTransfer/Server/Env.hs + git add src/Simplex/FileTransfer/Server/Main.hs src/Simplex/FileTransfer/Server/Env.hs + git commit -m "feat(xftp): add PostgreSQL INI config, store dispatch, startup validation" + ``` + +--- + +## Task 9: Add database import/export CLI commands + +**Files:** +- Modify: `src/Simplex/FileTransfer/Server/Main.hs` + +- [ ] **Step 1: Add `Database` CLI command (CPP-guarded)** + + Add `Database StoreCmd DBOpts` constructor to `CliCommand`. Add `database` subcommand parser with `import`/`export` subcommands + `dbOptsP defaultXFTPDBOpts`. + +- [ ] **Step 2: Implement `importFileStoreToDatabase`** + + 1. `confirmOrExit` with database details. + 2. Create temporary `STMFileStore`, replay StoreLog via `readWriteFileStore`. + 3. Create `PostgresFileStore` with `createSchema = True`, `confirmMigrations = MCYesUp`. + 4. Batch-insert files using PostgreSQL COPY protocol. Progress every 10k. + 5. Batch-insert recipients using COPY protocol. + 6. Verify counts: `SELECT COUNT(*)` — warn on mismatch. + 7. Rename StoreLog to `.bak`. + 8. Report counts. + +- [ ] **Step 3: Implement `exportDatabaseToStoreLog`** + + 1. `confirmOrExit`. Fail if output file exists. + 2. 
Create `PostgresFileStore` from config. + 3. Open StoreLog for writing. + 4. Fold over file records: write `AddFile` (with status), `AddRecipients`, `PutFile` per file. + 5. Close StoreLog, report counts. + +- [ ] **Step 4: Build** + + Run: `cabal build -fserver_postgres` + +- [ ] **Step 5: Format and commit** + + ```bash + fourmolu -i src/Simplex/FileTransfer/Server/Main.hs + git add src/Simplex/FileTransfer/Server/Main.hs + git commit -m "feat(xftp): add database import/export CLI commands" + ``` + +--- + +## Task 10: Add Postgres tests + +**Files:** +- Modify: `tests/XFTPClient.hs` +- Modify: `tests/Test.hs` +- Create: `tests/CoreTests/XFTPStoreTests.hs` + +- [ ] **Step 1: Add test fixtures in `tests/XFTPClient.hs`** + + ```haskell + testXFTPStoreDBOpts :: DBOpts + testXFTPStoreDBOpts = + DBOpts + { connstr = "postgresql://test_xftp_server_user@/test_xftp_server_db", + schema = "xftp_server_test", + poolSize = 10, + createSchema = True + } + ``` + Add `testXFTPDBConnectInfo :: ConnectInfo` matching the connection string. + +- [ ] **Step 2: Add Postgres server test group in `tests/Test.hs`** + + CPP-guarded block that runs existing `xftpServerTests` with Postgres store config, wrapped in `postgressBracket testXFTPDBConnectInfo`. Parameterize `withXFTPServer` to accept store config if needed. + +- [ ] **Step 3: Create `tests/CoreTests/XFTPStoreTests.hs` — unit tests** + + Test `PostgresFileStore` operations directly: + - `addFile` + `getFile SFSender` round-trip. + - `addFile` duplicate → `DUPLICATE_`. + - `getFile` nonexistent → `AUTH`. + - `setFilePath` + verify `WHERE file_path IS NULL` guard. + - `addRecipient` + `getFile SFRecipient` round-trip. + - `deleteFile` cascades recipients. + - `blockFile` + verify status. + - `expiredFiles` batch semantics. + - `getUsedStorage`, `getFileCount` correctness. 
+ +- [ ] **Step 4: Add migration round-trip test** + + Create `STMFileStore` with test data (files + recipients + blocked status) → export to StoreLog → import to Postgres → export back → compare StoreLog files byte-for-byte. + +- [ ] **Step 5: Build and run tests** + + ```bash + cabal build -fserver_postgres test:simplexmq-test + cabal test --test-show-details=streaming --test-option=--match="/XFTP/" -fserver_postgres + ``` + +- [ ] **Step 6: Format and commit** + + ```bash + fourmolu -i tests/CoreTests/XFTPStoreTests.hs tests/XFTPClient.hs + git add tests/CoreTests/XFTPStoreTests.hs tests/XFTPClient.hs tests/Test.hs + git commit -m "test(xftp): add PostgreSQL backend tests" + ``` diff --git a/simplexmq.cabal b/simplexmq.cabal index 3ad23df095..ce2b67f9eb 100644 --- a/simplexmq.cabal +++ b/simplexmq.cabal @@ -281,6 +281,9 @@ library Simplex.Messaging.Notifications.Server.Store.Postgres Simplex.Messaging.Notifications.Server.Store.Types Simplex.Messaging.Notifications.Server.StoreLog + Simplex.FileTransfer.Server.Store.Postgres + Simplex.FileTransfer.Server.Store.Postgres.Config + Simplex.FileTransfer.Server.Store.Postgres.Migrations Simplex.Messaging.Server.MsgStore.Postgres Simplex.Messaging.Server.QueueStore.Postgres Simplex.Messaging.Server.QueueStore.Postgres.Migrations @@ -523,6 +526,7 @@ test-suite simplexmq-test if flag(server_postgres) other-modules: AgentTests.NotificationTests + CoreTests.XFTPStoreTests NtfClient NtfServerTests PostgresSchemaDump diff --git a/src/Simplex/FileTransfer/Server.hs b/src/Simplex/FileTransfer/Server.hs index 6e0a9735a6..91f917f767 100644 --- a/src/Simplex/FileTransfer/Server.hs +++ b/src/Simplex/FileTransfer/Server.hs @@ -31,7 +31,6 @@ import qualified Data.ByteString.Char8 as B import Data.Int (Int64) import Data.List.NonEmpty (NonEmpty) import qualified Data.List.NonEmpty as L -import qualified Data.Map.Strict as M import Data.Maybe (fromMaybe, isJust) import qualified Data.Text as T import qualified Data.Text.IO as T @@ 
-88,7 +87,7 @@ import UnliftIO.Concurrent (threadDelay) import UnliftIO.Directory (canonicalizePath, doesFileExist, removeFile, renameFile) import qualified UnliftIO.Exception as E -type M a = ReaderT XFTPEnv IO a +type M s a = ReaderT (XFTPEnv s) IO a data XFTPTransportRequest = XFTPTransportRequest { thParams :: THandleParamsXFTP 'TServer, @@ -112,19 +111,19 @@ corsPreflightHeaders = ("Access-Control-Max-Age", "86400") ] -runXFTPServer :: XFTPServerConfig -> IO () +runXFTPServer :: FileStoreClass s => XFTPServerConfig s -> IO () runXFTPServer cfg = do started <- newEmptyTMVarIO runXFTPServerBlocking started cfg -runXFTPServerBlocking :: TMVar Bool -> XFTPServerConfig -> IO () +runXFTPServerBlocking :: FileStoreClass s => TMVar Bool -> XFTPServerConfig s -> IO () runXFTPServerBlocking started cfg = newXFTPServerEnv cfg >>= runReaderT (xftpServer cfg started) data Handshake = HandshakeSent C.PrivateKeyX25519 | HandshakeAccepted (THandleParams XFTPVersion 'TServer) -xftpServer :: XFTPServerConfig -> TMVar Bool -> M () +xftpServer :: forall s. FileStoreClass s => XFTPServerConfig s -> TMVar Bool -> M s () xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpiration, fileExpiration, xftpServerVRange} started = do mapM_ (expireServerFiles Nothing) fileExpiration restoreServerStats @@ -137,7 +136,7 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira ) `finally` stopServer where - runServer :: M () + runServer :: M s () runServer = do srvCreds@(chain, pk) <- asks tlsServerCreds httpCreds_ <- asks httpServerCreds @@ -168,7 +167,7 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira Nothing -> pure () Just thParams -> processRequest req0 {thParams} | otherwise -> liftIO . 
sendResponse $ H.responseNoBody N.ok200 (corsHeaders addCORS') - xftpServerHandshakeV1 :: X.CertificateChain -> C.APrivateSignKey -> TMap SessionId Handshake -> XFTPTransportRequest -> M (Maybe (THandleParams XFTPVersion 'TServer)) + xftpServerHandshakeV1 :: X.CertificateChain -> C.APrivateSignKey -> TMap SessionId Handshake -> XFTPTransportRequest -> M s (Maybe (THandleParams XFTPVersion 'TServer)) xftpServerHandshakeV1 chain serverSignKey sessions XFTPTransportRequest {thParams = thParams0@THandleParams {sessionId}, request, reqBody = HTTP2Body {bodyHead}, sendResponse, sniUsed, addCORS} = do s <- atomically $ TM.lookup sessionId sessions r <- runExceptT $ case s of @@ -227,39 +226,40 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira liftIO . sendResponse $ H.responseNoBody N.ok200 (corsHeaders addCORS) pure Nothing Nothing -> throwE HANDSHAKE - sendError :: XFTPErrorType -> M (Maybe (THandleParams XFTPVersion 'TServer)) + sendError :: XFTPErrorType -> M s (Maybe (THandleParams XFTPVersion 'TServer)) sendError err = do runExceptT (encodeXftp err) >>= \case Right bs -> liftIO . 
sendResponse $ H.responseBuilder N.ok200 (corsHeaders addCORS) bs Left _ -> logError $ "Error encoding handshake error: " <> tshow err pure Nothing - encodeXftp :: Encoding a => a -> ExceptT XFTPErrorType (ReaderT XFTPEnv IO) Builder + encodeXftp :: Encoding a => a -> ExceptT XFTPErrorType (ReaderT (XFTPEnv s) IO) Builder encodeXftp a = byteString <$> liftHS (C.pad (smpEncode a) xftpBlockSize) liftHS = liftEitherWith (const HANDSHAKE) - stopServer :: M () + stopServer :: M s () stopServer = do - withFileLog closeStoreLog + st <- asks store + liftIO $ closeFileStore st saveServerStats logNote "Server stopped" - expireFilesThread_ :: XFTPServerConfig -> [M ()] + expireFilesThread_ :: XFTPServerConfig s -> [M s ()] expireFilesThread_ XFTPServerConfig {fileExpiration = Just fileExp} = [expireFiles fileExp] expireFilesThread_ _ = [] - expireFiles :: ExpirationConfig -> M () + expireFiles :: ExpirationConfig -> M s () expireFiles expCfg = do let interval = checkInterval expCfg * 1000000 forever $ do liftIO $ threadDelay' interval expireServerFiles (Just 100000) expCfg - serverStatsThread_ :: XFTPServerConfig -> [M ()] + serverStatsThread_ :: XFTPServerConfig s -> [M s ()] serverStatsThread_ XFTPServerConfig {logStatsInterval = Just interval, logStatsStartTime, serverStatsLogFile} = [logServerStats logStatsStartTime interval serverStatsLogFile] serverStatsThread_ _ = [] - logServerStats :: Int64 -> Int64 -> FilePath -> M () + logServerStats :: Int64 -> Int64 -> FilePath -> M s () logServerStats startAt logInterval statsFilePath = do initialDelay <- (startAt -) . fromIntegral . (`div` 1000000_000000) . diffTimeToPicoseconds . 
utctDayTime <$> liftIO getCurrentTime liftIO $ putStrLn $ "server stats log enabled: " <> statsFilePath @@ -300,12 +300,12 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira ] liftIO $ threadDelay' interval - prometheusMetricsThread_ :: XFTPServerConfig -> [M ()] + prometheusMetricsThread_ :: XFTPServerConfig s -> [M s ()] prometheusMetricsThread_ XFTPServerConfig {prometheusInterval = Just interval, prometheusMetricsFile} = [savePrometheusMetrics interval prometheusMetricsFile] prometheusMetricsThread_ _ = [] - savePrometheusMetrics :: Int -> FilePath -> M () + savePrometheusMetrics :: Int -> FilePath -> M s () savePrometheusMetrics saveInterval metricsFile = do labelMyThread "savePrometheusMetrics" liftIO $ putStrLn $ "Prometheus metrics saved every " <> show saveInterval <> " seconds to " <> metricsFile @@ -324,11 +324,11 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira let fd = periodStatDataCounts $ _filesDownloaded d pure FileServerMetrics {statsData = d, filesDownloadedPeriods = fd, rtsOptions} - controlPortThread_ :: XFTPServerConfig -> [M ()] + controlPortThread_ :: XFTPServerConfig s -> [M s ()] controlPortThread_ XFTPServerConfig {controlPort = Just port} = [runCPServer port] controlPortThread_ _ = [] - runCPServer :: ServiceName -> M () + runCPServer :: ServiceName -> M s () runCPServer port = do cpStarted <- newEmptyTMVarIO u <- askUnliftIO @@ -336,7 +336,7 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira labelMyThread "control port server" runLocalTCPServer cpStarted port $ runCPClient u where - runCPClient :: UnliftIO (ReaderT XFTPEnv IO) -> Socket -> IO () + runCPClient :: UnliftIO (ReaderT (XFTPEnv s) IO) -> Socket -> IO () runCPClient u sock = do labelMyThread "control port client" h <- socketToHandle sock ReadWriteMode @@ -368,13 +368,13 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira CPDelete fileId -> 
withUserRole $ unliftIO u $ do fs <- asks store r <- runExceptT $ do - (fr, _) <- ExceptT $ atomically $ getFile fs SFRecipient fileId + (fr, _) <- ExceptT $ liftIO $ getFile fs SFRecipient fileId ExceptT $ deleteServerFile_ fr liftIO . hPutStrLn h $ either (\e -> "error: " <> show e) (\() -> "ok") r CPBlock fileId info -> withUserRole $ unliftIO u $ do fs <- asks store r <- runExceptT $ do - (fr, _) <- ExceptT $ atomically $ getFile fs SFRecipient fileId + (fr, _) <- ExceptT $ liftIO $ getFile fs SFRecipient fileId ExceptT $ blockServerFile fr info liftIO . hPutStrLn h $ either (\e -> "error: " <> show e) (\() -> "ok") r CPHelp -> hPutStrLn h "commands: stats-rts, delete, help, quit" @@ -395,7 +395,7 @@ data ServerFile = ServerFile sbState :: LC.SbState } -processRequest :: XFTPTransportRequest -> M () +processRequest :: FileStoreClass s => XFTPTransportRequest -> M s () processRequest XFTPTransportRequest {thParams, reqBody = body@HTTP2Body {bodyHead}, sendResponse, addCORS} | B.length bodyHead /= xftpBlockSize = sendXFTPResponse ("", NoEntity, FRErr BLOCK) Nothing | otherwise = @@ -430,7 +430,7 @@ processRequest XFTPTransportRequest {thParams, reqBody = body@HTTP2Body {bodyHea done #ifdef slow_servers -randomDelay :: M () +randomDelay :: M s () randomDelay = do d <- asks $ responseDelay . config when (d > 0) $ do @@ -440,20 +440,20 @@ randomDelay = do data VerificationResult = VRVerified XFTPRequest | VRFailed XFTPErrorType -verifyXFTPTransmission :: Maybe (THandleAuth 'TServer) -> SignedTransmission FileCmd -> M VerificationResult +verifyXFTPTransmission :: forall s. 
FileStoreClass s => Maybe (THandleAuth 'TServer) -> SignedTransmission FileCmd -> M s VerificationResult verifyXFTPTransmission thAuth (tAuth, authorized, (corrId, fId, cmd)) = case cmd of FileCmd SFSender (FNEW file rcps auth') -> pure $ XFTPReqNew file rcps auth' `verifyWith` sndKey file FileCmd SFRecipient PING -> pure $ VRVerified XFTPReqPing FileCmd party _ -> verifyCmd party where - verifyCmd :: SFileParty p -> M VerificationResult + verifyCmd :: SFileParty p -> M s VerificationResult verifyCmd party = do st <- asks store - atomically $ verify =<< getFile st party fId + liftIO $ verify =<< getFile st party fId where verify = \case - Right (fr, k) -> result <$> readTVar (fileStatus fr) + Right (fr, k) -> result <$> readTVarIO (fileStatus fr) where result = \case EntityActive -> XFTPReqCmd fId fr cmd `verifyWith` k @@ -464,7 +464,7 @@ verifyXFTPTransmission thAuth (tAuth, authorized, (corrId, fId, cmd)) = -- TODO verify with DH authorization req `verifyWith` k = if verifyCmdAuthorization thAuth tAuth authorized corrId k then VRVerified req else VRFailed AUTH -processXFTPRequest :: HTTP2Body -> XFTPRequest -> M (FileResponse, Maybe ServerFile) +processXFTPRequest :: forall s. 
FileStoreClass s => HTTP2Body -> XFTPRequest -> M s (FileResponse, Maybe ServerFile) processXFTPRequest HTTP2Body {bodyPart} = \case XFTPReqNew file rks auth -> noFile =<< ifM allowNew (createFile file rks) (pure $ FRErr AUTH) where @@ -483,7 +483,7 @@ processXFTPRequest HTTP2Body {bodyPart} = \case XFTPReqPing -> noFile FRPong where noFile resp = pure (resp, Nothing) - createFile :: FileInfo -> NonEmpty RcvPublicAuthKey -> M FileResponse + createFile :: FileInfo -> NonEmpty RcvPublicAuthKey -> M s FileResponse createFile file rks = do st <- asks store r <- runExceptT $ do @@ -502,25 +502,25 @@ processXFTPRequest HTTP2Body {bodyPart} = \case let rIds = L.map (\(FileRecipient rId _) -> rId) rcps pure $ FRSndIds sId rIds pure $ either FRErr id r - addFileRetry :: FileStore -> FileInfo -> Int -> RoundedFileTime -> M (Either XFTPErrorType XFTPFileId) + addFileRetry :: s -> FileInfo -> Int -> RoundedFileTime -> M s (Either XFTPErrorType XFTPFileId) addFileRetry st file n ts = retryAdd n $ \sId -> runExceptT $ do ExceptT $ addFile st sId file ts EntityActive pure sId - addRecipientRetry :: FileStore -> Int -> XFTPFileId -> RcvPublicAuthKey -> M (Either XFTPErrorType FileRecipient) + addRecipientRetry :: s -> Int -> XFTPFileId -> RcvPublicAuthKey -> M s (Either XFTPErrorType FileRecipient) addRecipientRetry st n sId rpk = retryAdd n $ \rId -> runExceptT $ do let rcp = FileRecipient rId rpk ExceptT $ addRecipient st sId rcp pure rcp - retryAdd :: Int -> (XFTPFileId -> STM (Either XFTPErrorType a)) -> M (Either XFTPErrorType a) + retryAdd :: Int -> (XFTPFileId -> IO (Either XFTPErrorType a)) -> M s (Either XFTPErrorType a) retryAdd 0 _ = pure $ Left INTERNAL retryAdd n add = do fId <- getFileId - atomically (add fId) >>= \case + liftIO (add fId) >>= \case Left DUPLICATE_ -> retryAdd (n - 1) add r -> pure r - addRecipients :: XFTPFileId -> NonEmpty RcvPublicAuthKey -> M FileResponse + addRecipients :: XFTPFileId -> NonEmpty RcvPublicAuthKey -> M s FileResponse addRecipients 
sId rks = do st <- asks store r <- runExceptT $ do @@ -531,7 +531,7 @@ processXFTPRequest HTTP2Body {bodyPart} = \case let rIds = L.map (\(FileRecipient rId _) -> rId) rcps pure $ FRRcvIds rIds pure $ either FRErr id r - receiveServerFile :: FileRec -> M FileResponse + receiveServerFile :: FileRec -> M s FileResponse receiveServerFile FileRec {senderId, fileInfo = FileInfo {size, digest}, filePath} = case bodyPart of Nothing -> pure $ FRErr SIZE -- TODO validate body size from request before downloading, once it's populated @@ -549,7 +549,7 @@ processXFTPRequest HTTP2Body {bodyPart} = \case | bs == 0 || bs > s -> pure $ FRErr SIZE | otherwise -> drain (s - bs) reserve = do - us <- asks $ usedStorage . store + us <- asks usedStorage quota <- asks $ fromMaybe maxBound . fileSizeQuota . config atomically . stateTVar us $ \used -> let used' = used + fromIntegral size in if used' <= quota then (True, used') else (False, used) @@ -559,21 +559,28 @@ processXFTPRequest HTTP2Body {bodyPart} = \case receiveChunk (XFTPRcvChunkSpec fPath size digest) >>= \case Right () -> do stats <- asks serverStats - withFileLog $ \sl -> logPutFile sl senderId fPath - atomically $ writeTVar filePath (Just fPath) - incFileStat filesUploaded - incFileStat filesCount - liftIO $ atomicModifyIORef'_ (filesSize stats) (+ fromIntegral size) - pure FROk + st <- asks store + liftIO (setFilePath st senderId fPath) >>= \case + Right () -> do + withFileLog $ \sl -> logPutFile sl senderId fPath + incFileStat filesUploaded + incFileStat filesCount + liftIO $ atomicModifyIORef'_ (filesSize stats) (+ fromIntegral size) + pure FROk + Left _e -> do + us <- asks usedStorage + atomically $ modifyTVar' us $ subtract (fromIntegral size) + liftIO $ whenM (doesFileExist fPath) (removeFile fPath) `catch` logFileError + pure $ FRErr AUTH Left e -> do - us <- asks $ usedStorage . 
store + us <- asks usedStorage atomically $ modifyTVar' us $ subtract (fromIntegral size) liftIO $ whenM (doesFileExist fPath) (removeFile fPath) `catch` logFileError pure $ FRErr e receiveChunk spec = do t <- asks $ fileTimeout . config liftIO $ fromMaybe (Left TIMEOUT) <$> timeout t (runExceptT $ receiveFile getBody spec) - sendServerFile :: FileRec -> RcvPublicDhKey -> M (FileResponse, Maybe ServerFile) + sendServerFile :: FileRec -> RcvPublicDhKey -> M s (FileResponse, Maybe ServerFile) sendServerFile FileRec {senderId, filePath, fileInfo = FileInfo {size}} rDhKey = do readTVarIO filePath >>= \case Just path -> ifM (doesFileExist path) sendFile (pure (FRErr AUTH, Nothing)) @@ -592,38 +599,41 @@ processXFTPRequest HTTP2Body {bodyPart} = \case _ -> pure (FRErr INTERNAL, Nothing) _ -> pure (FRErr NO_FILE, Nothing) - deleteServerFile :: FileRec -> M FileResponse + deleteServerFile :: FileRec -> M s FileResponse deleteServerFile fr = either FRErr (\() -> FROk) <$> deleteServerFile_ fr logFileError :: SomeException -> IO () logFileError e = logError $ "Error deleting file: " <> tshow e - ackFileReception :: RecipientId -> FileRec -> M FileResponse + ackFileReception :: RecipientId -> FileRec -> M s FileResponse ackFileReception rId fr = do withFileLog (`logAckFile` rId) st <- asks store - atomically $ deleteRecipient st rId fr + liftIO $ deleteRecipient st rId fr incFileStat fileDownloadAcks pure FROk -deleteServerFile_ :: FileRec -> M (Either XFTPErrorType ()) +deleteServerFile_ :: FileStoreClass s => FileRec -> M s (Either XFTPErrorType ()) deleteServerFile_ fr@FileRec {senderId} = do withFileLog (`logDeleteFile` senderId) deleteOrBlockServerFile_ fr filesDeleted (`deleteFile` senderId) -- this also deletes the file from storage, but doesn't include it in delete statistics -blockServerFile :: FileRec -> BlockingInfo -> M (Either XFTPErrorType ()) +blockServerFile :: FileStoreClass s => FileRec -> BlockingInfo -> M s (Either XFTPErrorType ()) blockServerFile 
fr@FileRec {senderId} info = do withFileLog $ \sl -> logBlockFile sl senderId info deleteOrBlockServerFile_ fr filesBlocked $ \st -> blockFile st senderId info True -deleteOrBlockServerFile_ :: FileRec -> (FileServerStats -> IORef Int) -> (FileStore -> STM (Either XFTPErrorType ())) -> M (Either XFTPErrorType ()) +deleteOrBlockServerFile_ :: FileStoreClass s => FileRec -> (FileServerStats -> IORef Int) -> (s -> IO (Either XFTPErrorType ())) -> M s (Either XFTPErrorType ()) deleteOrBlockServerFile_ FileRec {filePath, fileInfo} stat storeAction = runExceptT $ do path <- readTVarIO filePath stats <- asks serverStats ExceptT $ first (\(_ :: SomeException) -> FILE_IO) <$> try (forM_ path $ \p -> whenM (doesFileExist p) (removeFile p >> deletedStats stats)) st <- asks store - void $ atomically $ storeAction st + ExceptT $ liftIO $ storeAction st + forM_ path $ \_ -> do + us <- asks usedStorage + atomically $ modifyTVar' us $ subtract (fromIntegral $ size fileInfo) lift $ incFileStat stat where deletedStats stats = do @@ -633,47 +643,50 @@ deleteOrBlockServerFile_ FileRec {filePath, fileInfo} stat storeAction = runExce getFileTime :: IO RoundedFileTime getFileTime = getRoundedSystemTime -expireServerFiles :: Maybe Int -> ExpirationConfig -> M () +expireServerFiles :: FileStoreClass s => Maybe Int -> ExpirationConfig -> M s () expireServerFiles itemDelay expCfg = do st <- asks store - usedStart <- readTVarIO $ usedStorage st + us <- asks usedStorage + usedStart <- readTVarIO us old <- liftIO $ expireBeforeEpoch expCfg - files' <- readTVarIO (files st) - logNote $ "Expiration check: " <> tshow (M.size files') <> " files" - forM_ (M.keys files') $ \sId -> do - mapM_ threadDelay itemDelay - atomically (expiredFilePath st sId old) - >>= mapM_ (maybeRemove $ delete st sId) - usedEnd <- readTVarIO $ usedStorage st + filesCount <- liftIO $ getFileCount st + logNote $ "Expiration check: " <> tshow filesCount <> " files" + expireLoop st us old + usedEnd <- readTVarIO us logNote $ 
"Used " <> mbs usedStart <> " -> " <> mbs usedEnd <> ", " <> mbs (usedStart - usedEnd) <> " reclaimed." where mbs bs = tshow (bs `div` 1048576) <> "mb" - maybeRemove del = maybe del (remove del) - remove del filePath = - ifM - (doesFileExist filePath) - ((removeFile filePath >> del) `catch` \(e :: SomeException) -> logError $ "failed to remove expired file " <> tshow filePath <> ": " <> tshow e) - del - delete st sId = do - withFileLog (`logDeleteFile` sId) - void . atomically $ deleteFile st sId -- will not update usedStorage if sId isn't in store - incFileStat filesExpired - -randomId :: Int -> M ByteString + expireLoop st us old = do + expired <- liftIO $ expiredFiles st old 10000 + forM_ expired $ \(sId, filePath_, fileSize) -> do + mapM_ threadDelay itemDelay + forM_ filePath_ $ \fp -> + whenM (doesFileExist fp) $ + removeFile fp `catch` \(e :: SomeException) -> logError $ "failed to remove expired file " <> tshow fp <> ": " <> tshow e + withFileLog (`logDeleteFile` sId) + liftIO (deleteFile st sId) >>= \case + Right () -> do + forM_ filePath_ $ \_ -> + atomically $ modifyTVar' us $ subtract (fromIntegral fileSize) + incFileStat filesExpired + Left _ -> pure () + unless (null expired) $ expireLoop st us old + +randomId :: Int -> M s ByteString randomId n = atomically . C.randomBytes n =<< asks random -getFileId :: M XFTPFileId +getFileId :: M s XFTPFileId getFileId = fmap EntityId . randomId =<< asks (fileIdSize . config) -withFileLog :: (StoreLog 'WriteMode -> IO a) -> M () +withFileLog :: (StoreLog 'WriteMode -> IO a) -> M s () withFileLog action = liftIO . mapM_ action =<< asks storeLog -incFileStat :: (FileServerStats -> IORef Int) -> M () +incFileStat :: (FileServerStats -> IORef Int) -> M s () incFileStat statSel = do stats <- asks serverStats liftIO $ atomicModifyIORef'_ (statSel stats) (+ 1) -saveServerStats :: M () +saveServerStats :: M s () saveServerStats = asks (serverStatsBackupFile . config) >>= mapM_ (\f -> asks serverStats >>= liftIO . 
getFileServerStatsData >>= liftIO . saveStats f) @@ -683,7 +696,7 @@ saveServerStats = B.writeFile f $ strEncode stats logNote "server stats saved" -restoreServerStats :: M () +restoreServerStats :: FileStoreClass s => M s () restoreServerStats = asks (serverStatsBackupFile . config) >>= mapM_ restoreStats where restoreStats f = whenM (doesFileExist f) $ do @@ -691,9 +704,9 @@ restoreServerStats = asks (serverStatsBackupFile . config) >>= mapM_ restoreStat liftIO (strDecode <$> B.readFile f) >>= \case Right d@FileServerStatsData {_filesCount = statsFilesCount, _filesSize = statsFilesSize} -> do s <- asks serverStats - FileStore {files, usedStorage} <- asks store - _filesCount <- M.size <$> readTVarIO files - _filesSize <- readTVarIO usedStorage + st <- asks store + _filesCount <- liftIO $ getFileCount st + _filesSize <- readTVarIO =<< asks usedStorage liftIO $ setFileServerStats s d {_filesCount, _filesSize} renameFile f $ f <> ".bak" logNote "server stats restored" diff --git a/src/Simplex/FileTransfer/Server/Env.hs b/src/Simplex/FileTransfer/Server/Env.hs index d4c58df66c..5dd7ec56cb 100644 --- a/src/Simplex/FileTransfer/Server/Env.hs +++ b/src/Simplex/FileTransfer/Server/Env.hs @@ -1,7 +1,9 @@ +{-# LANGUAGE CPP #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DuplicateRecordFields #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE KindSignatures #-} +{-# LANGUAGE RankNTypes #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE OverloadedStrings #-} @@ -9,13 +11,17 @@ module Simplex.FileTransfer.Server.Env ( XFTPServerConfig (..), + XFTPStoreConfig (..), XFTPEnv (..), XFTPRequest (..), defaultInactiveClientExpiration, defFileExpirationHours, defaultFileExpiration, newXFTPServerEnv, - countUsedStorage, + runWithStoreConfig, + checkFileStoreMode, + importToDatabase, + exportFromDatabase, ) where import Control.Logger.Simple @@ -23,7 +29,6 @@ import Control.Monad import Crypto.Random import Data.Int (Int64) import Data.List.NonEmpty (NonEmpty) -import 
qualified Data.Map.Strict as M import Data.Time.Clock (getCurrentTime) import Data.Word (Word32) import Data.X509.Validation (Fingerprint (..)) @@ -31,7 +36,18 @@ import Network.Socket import qualified Network.TLS as T import Simplex.FileTransfer.Protocol (FileCmd, FileInfo (..), XFTPFileId) import Simplex.FileTransfer.Server.Stats +import Data.Ini (Ini) import Simplex.FileTransfer.Server.Store +import Simplex.Messaging.Agent.Store.Shared (MigrationConfirmation) +#if defined(dbServerPostgres) +import Data.Functor (($>)) +import Data.Maybe (isNothing) +import Simplex.FileTransfer.Server.Store.Postgres (PostgresFileStore, importFileStore, exportFileStore) +import Simplex.FileTransfer.Server.Store.Postgres.Config (PostgresFileStoreCfg (..), defaultXFTPDBOpts) +import Simplex.Messaging.Server.CLI (iniDBOptions, settingIsOn) +import System.Directory (doesFileExist) +import System.Exit (exitFailure) +#endif import Simplex.FileTransfer.Server.StoreLog import Simplex.FileTransfer.Transport (VersionRangeXFTP) import qualified Simplex.Messaging.Crypto as C @@ -42,10 +58,11 @@ import Simplex.Messaging.Util (tshow) import System.IO (IOMode (..)) import UnliftIO.STM -data XFTPServerConfig = XFTPServerConfig +data XFTPServerConfig s = XFTPServerConfig { xftpPort :: ServiceName, controlPort :: Maybe ServiceName, fileIdSize :: Int, + serverStoreCfg :: XFTPStoreConfig s, storeLogFile :: Maybe FilePath, filesPath :: FilePath, -- | server storage quota @@ -88,9 +105,16 @@ defaultInactiveClientExpiration = checkInterval = 3600 -- seconds, 1 hours } -data XFTPEnv = XFTPEnv - { config :: XFTPServerConfig, - store :: FileStore, +data XFTPStoreConfig s where + XSCMemory :: Maybe FilePath -> XFTPStoreConfig STMFileStore +#if defined(dbServerPostgres) + XSCDatabase :: PostgresFileStoreCfg -> XFTPStoreConfig PostgresFileStore +#endif + +data XFTPEnv s = XFTPEnv + { config :: XFTPServerConfig s, + store :: s, + usedStorage :: TVar Int64, storeLog :: Maybe (StoreLog 'WriteMode), random :: TVar 
ChaChaDRG, serverIdentity :: C.KeyHash, @@ -109,13 +133,22 @@ defaultFileExpiration = checkInterval = 2 * 3600 -- seconds, 2 hours } -newXFTPServerEnv :: XFTPServerConfig -> IO XFTPEnv -newXFTPServerEnv config@XFTPServerConfig {storeLogFile, fileSizeQuota, xftpCredentials, httpCredentials} = do +newXFTPServerEnv :: FileStoreClass s => XFTPServerConfig s -> IO (XFTPEnv s) +newXFTPServerEnv config@XFTPServerConfig {serverStoreCfg, fileSizeQuota, xftpCredentials, httpCredentials} = do random <- C.newRandom - store <- newFileStore - storeLog <- mapM (`readWriteFileStore` store) storeLogFile - used <- countUsedStorage <$> readTVarIO (files store) - atomically $ writeTVar (usedStorage store) used + (store, storeLog) <- case serverStoreCfg of + XSCMemory storeLogPath -> do + st <- newFileStore () + sl <- mapM (`readWriteFileStore` st) storeLogPath + atomically $ writeTVar (stmStoreLog st) sl + pure (st, sl) +#if defined(dbServerPostgres) + XSCDatabase dbCfg -> do + st <- newFileStore dbCfg + pure (st, Nothing) +#endif + used <- getUsedStorage store + usedStorage <- newTVarIO used forM_ fileSizeQuota $ \quota -> do logNote $ "Total / available storage: " <> tshow quota <> " / " <> tshow (quota - used) when (quota < used) $ logWarn "WARNING: storage quota is less than used storage, no files can be uploaded!" 
@@ -123,12 +156,68 @@ newXFTPServerEnv config@XFTPServerConfig {storeLogFile, fileSizeQuota, xftpCrede httpServerCreds <- mapM loadServerCredential httpCredentials Fingerprint fp <- loadFingerprint xftpCredentials serverStats <- newFileServerStats =<< getCurrentTime - pure XFTPEnv {config, store, storeLog, random, tlsServerCreds, httpServerCreds, serverIdentity = C.KeyHash fp, serverStats} - -countUsedStorage :: M.Map k FileRec -> Int64 -countUsedStorage = M.foldl' (\acc FileRec {fileInfo = FileInfo {size}} -> acc + fromIntegral size) 0 + pure XFTPEnv {config, store, usedStorage, storeLog, random, tlsServerCreds, httpServerCreds, serverIdentity = C.KeyHash fp, serverStats} data XFTPRequest = XFTPReqNew FileInfo (NonEmpty RcvPublicAuthKey) (Maybe BasicAuth) | XFTPReqCmd XFTPFileId FileRec FileCmd | XFTPReqPing + +-- | Select and run the store config based on INI settings. +-- CPP guards for Postgres are handled here so Main.hs stays CPP-free. +runWithStoreConfig :: + Ini -> + String -> + Maybe FilePath -> + FilePath -> + MigrationConfirmation -> + (forall s. FileStoreClass s => XFTPStoreConfig s -> IO ()) -> + IO () +runWithStoreConfig _ini storeType storeLogFile_ _storeLogFilePath _confirmMigrations run = case storeType of + "memory" -> run $ XSCMemory storeLogFile_ +#if defined(dbServerPostgres) + "database" -> run $ XSCDatabase dbCfg + where + enableDbStoreLog' = settingIsOn "STORE_LOG" "db_store_log" _ini + dbStoreLogPath = enableDbStoreLog' $> _storeLogFilePath + dbCfg = PostgresFileStoreCfg {dbOpts = iniDBOptions _ini defaultXFTPDBOpts, dbStoreLogPath, confirmMigrations = _confirmMigrations} +#else + "database" -> error "Error: server binary is compiled without support for PostgreSQL database.\nPlease re-compile with `cabal build -fserver_postgres`." +#endif + _ -> error $ "Invalid store_files value: " <> storeType + +-- | Validate startup config when store_files=database. 
+checkFileStoreMode :: Ini -> String -> FilePath -> IO () +#if defined(dbServerPostgres) +checkFileStoreMode ini storeType storeLogFilePath = case storeType of + "database" -> do + storeLogExists <- doesFileExist storeLogFilePath + let dbStoreLogOn = settingIsOn "STORE_LOG" "db_store_log" ini + when (storeLogExists && isNothing dbStoreLogOn) $ do + putStrLn $ "Error: store log file " <> storeLogFilePath <> " exists but store_files is `database`." + putStrLn "Use `file-server database import` to migrate, or set `db_store_log: on`." + exitFailure + _ -> pure () +#else +checkFileStoreMode _ _ _ = pure () +#endif + +-- | Import StoreLog to PostgreSQL database. +importToDatabase :: FilePath -> Ini -> MigrationConfirmation -> IO () +#if defined(dbServerPostgres) +importToDatabase storeLogFilePath ini _confirmMigrations = do + let dbCfg = PostgresFileStoreCfg {dbOpts = iniDBOptions ini defaultXFTPDBOpts, dbStoreLogPath = Nothing, confirmMigrations = _confirmMigrations} + importFileStore storeLogFilePath dbCfg +#else +importToDatabase _ _ _ = error "Error: server binary is compiled without support for PostgreSQL database.\nPlease re-compile with `cabal build -fserver_postgres`." +#endif + +-- | Export PostgreSQL database to StoreLog. +exportFromDatabase :: FilePath -> Ini -> MigrationConfirmation -> IO () +#if defined(dbServerPostgres) +exportFromDatabase storeLogFilePath ini _confirmMigrations = do + let dbCfg = PostgresFileStoreCfg {dbOpts = iniDBOptions ini defaultXFTPDBOpts, dbStoreLogPath = Nothing, confirmMigrations = _confirmMigrations} + exportFileStore storeLogFilePath dbCfg +#else +exportFromDatabase _ _ _ = error "Error: server binary is compiled without support for PostgreSQL database.\nPlease re-compile with `cabal build -fserver_postgres`." 
+#endif diff --git a/src/Simplex/FileTransfer/Server/Main.hs b/src/Simplex/FileTransfer/Server/Main.hs index 101fe945bb..9b31dcce55 100644 --- a/src/Simplex/FileTransfer/Server/Main.hs +++ b/src/Simplex/FileTransfer/Server/Main.hs @@ -5,6 +5,7 @@ {-# LANGUAGE OverloadedLists #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE RankNTypes #-} {-# LANGUAGE TypeApplications #-} module Simplex.FileTransfer.Server.Main @@ -12,7 +13,7 @@ module Simplex.FileTransfer.Server.Main xftpServerCLI_, ) where -import Control.Monad (when) +import Control.Monad (unless, when) import Data.Either (fromRight) import Data.Functor (($>)) import Data.Ini (lookupValue, readIniFile) @@ -28,11 +29,12 @@ import Options.Applicative import Simplex.FileTransfer.Chunks import Simplex.FileTransfer.Description (FileSize (..)) import Simplex.FileTransfer.Server (runXFTPServer) -import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..), defFileExpirationHours, defaultFileExpiration, defaultInactiveClientExpiration) +import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..), XFTPStoreConfig, defFileExpirationHours, defaultFileExpiration, defaultInactiveClientExpiration, runWithStoreConfig, checkFileStoreMode, importToDatabase, exportFromDatabase) import Simplex.FileTransfer.Transport (alpnSupportedXFTPhandshakes, supportedFileServerVRange) import qualified Simplex.Messaging.Crypto as C import Simplex.Messaging.Encoding.String import Simplex.Messaging.Protocol (ProtoServerWithAuth (..), pattern XFTPServer) +import Simplex.Messaging.Agent.Store.Shared (MigrationConfirmation (..)) import Simplex.Messaging.Server.CLI import Simplex.Messaging.Server.Expiration import Simplex.Messaging.Server.Information (ServerPublicInfo (..)) @@ -51,7 +53,7 @@ xftpServerCLI :: FilePath -> FilePath -> IO () xftpServerCLI = xftpServerCLI_ (\_ _ _ _ -> pure ()) (\_ -> pure ()) xftpServerCLI_ :: - (XFTPServerConfig -> Maybe ServerPublicInfo -> Maybe TransportHost -> FilePath -> IO ()) 
-> + (forall s. XFTPServerConfig s -> Maybe ServerPublicInfo -> Maybe TransportHost -> FilePath -> IO ()) -> (EmbeddedWebParams -> IO ()) -> FilePath -> FilePath -> @@ -66,9 +68,13 @@ xftpServerCLI_ generateSite serveStaticFiles cfgPath logPath = do doesFileExist iniFile >>= \case True -> genOnline cfgPath certOpts _ -> exitError $ "Error: server is not initialized (" <> iniFile <> " does not exist).\nRun `" <> executableName <> " init`." - Start -> + Start opts -> doesFileExist iniFile >>= \case - True -> readIniFile iniFile >>= either exitError runServer + True -> readIniFile iniFile >>= either exitError (runServer opts) + _ -> exitError $ "Error: server is not initialized (" <> iniFile <> " does not exist).\nRun `" <> executableName <> " init`." + Database cmd -> + doesFileExist iniFile >>= \case + True -> readIniFile iniFile >>= either exitError (runDatabaseCmd cmd) _ -> exitError $ "Error: server is not initialized (" <> iniFile <> " does not exist).\nRun `" <> executableName <> " init`." Delete -> do confirmOrExit @@ -84,6 +90,21 @@ xftpServerCLI_ generateSite serveStaticFiles cfgPath logPath = do executableName = "file-server" storeLogFilePath = combine logPath "file-server-store.log" defaultStaticPath = combine logPath "www" + runDatabaseCmd cmd ini = case cmd of + SCImport -> do + storeLogExists <- doesFileExist storeLogFilePath + unless storeLogExists $ exitError $ "Error: store log file " <> storeLogFilePath <> " does not exist." + confirmOrExit + ("Import store log " <> storeLogFilePath <> " to PostgreSQL database?") + "Import cancelled." + importToDatabase storeLogFilePath ini MCYesUp + SCExport -> do + storeLogExists <- doesFileExist storeLogFilePath + when storeLogExists $ exitError $ "Error: store log file " <> storeLogFilePath <> " already exists." + confirmOrExit + ("Export PostgreSQL database to store log " <> storeLogFilePath <> "?") + "Export cancelled." 
+ exportFromDatabase storeLogFilePath ini MCConsole initializeServer InitOptions {enableStoreLog, signAlgorithm, ip, fqdn, filesPath, fileSizeQuota, webStaticPath = webStaticPath_} = do clearDirIfExists cfgPath clearDirIfExists logPath @@ -126,6 +147,14 @@ xftpServerCLI_ generateSite serveStaticFiles cfgPath logPath = do \# and restoring it when the server is started.\n\ \# Log is compacted on start (deleted objects are removed).\n" <> ("enable: " <> onOff enableStoreLog <> "\n\n") + <> "# File storage mode: `memory` or `database` (PostgreSQL).\n\ + \store_files: memory\n\n\ + \# Database connection settings for PostgreSQL database (`store_files: database`).\n\ + \# db_connection: postgresql://xftp@/xftp_server_store\n\ + \# db_schema: xftp_server\n\ + \# db_pool_size: 10\n\n\ + \# Write database changes to store log file\n\ + \# db_store_log: off\n\n" <> "# Expire files after the specified number of hours.\n" <> ("expire_files_hours: " <> tshow defFileExpirationHours <> "\n\n") <> "log_stats: off\n\ @@ -173,7 +202,7 @@ xftpServerCLI_ generateSite serveStaticFiles cfgPath logPath = do \# TLS credentials for HTTPS web server on the same port as XFTP.\n\ \# cert: " <> T.pack (cfgPath `combine` "web.crt") <> "\n\ \# key: " <> T.pack (cfgPath `combine` "web.key") <> "\n" - runServer ini = do + runServer StartOptions {confirmMigrations} ini = do hSetBuffering stdout LineBuffering hSetBuffering stderr LineBuffering fp <- checkSavedFingerprint cfgPath defaultX509Config @@ -183,18 +212,22 @@ xftpServerCLI_ generateSite serveStaticFiles cfgPath logPath = do printServiceInfo serverVersion srv let information = serverPublicInfo ini printSourceCode (sourceCode <$> information) - printXFTPConfig serverConfig - case webStaticPath' of - Just path -> do - let onionHost = - either (const Nothing) (find isOnion) $ - strDecode @(L.NonEmpty TransportHost) . encodeUtf8 =<< lookupValue "TRANSPORT" "host" ini - webHttpPort = eitherToMaybe (lookupValue "WEB" "http" ini) >>= readMaybe . 
T.unpack - generateSite serverConfig information onionHost path - when (isJust webHttpPort || isJust webHttpsParams') $ - serveStaticFiles EmbeddedWebParams {webStaticPath = path, webHttpPort, webHttpsParams = webHttpsParams'} - Nothing -> pure () - runXFTPServer serverConfig + let storeType = fromRight "memory" $ T.unpack <$> lookupValue "STORE_LOG" "store_files" ini + checkFileStoreMode ini storeType storeLogFilePath + runWithStoreConfig ini storeType (enableStoreLog $> storeLogFilePath) storeLogFilePath confirmMigrations $ \storeCfg -> do + let cfg = serverConfig storeCfg + printXFTPConfig cfg + case webStaticPath' of + Just path -> do + let onionHost = + either (const Nothing) (find isOnion) $ + strDecode @(L.NonEmpty TransportHost) . encodeUtf8 =<< lookupValue "TRANSPORT" "host" ini + webHttpPort = eitherToMaybe (lookupValue "WEB" "http" ini) >>= readMaybe . T.unpack + generateSite cfg information onionHost path + when (isJust webHttpPort || isJust webHttpsParams') $ + serveStaticFiles EmbeddedWebParams {webStaticPath = path, webHttpPort, webHttpsParams = webHttpsParams'} + Nothing -> pure () + runXFTPServer cfg where isOnion = \case THOnionHost _ -> True; _ -> False enableStoreLog = settingIsOn "STORE_LOG" "enable" ini @@ -236,11 +269,13 @@ xftpServerCLI_ generateSite serveStaticFiles cfgPath logPath = do webStaticPath' = eitherToMaybe $ T.unpack <$> lookupValue "WEB" "static_path" ini - serverConfig = + serverConfig :: XFTPStoreConfig s -> XFTPServerConfig s + serverConfig serverStoreCfg = XFTPServerConfig { xftpPort = T.unpack $ strictIni "TRANSPORT" "port" ini, controlPort = either (const Nothing) (Just . 
T.unpack) $ lookupValue "TRANSPORT" "control_port" ini, fileIdSize = 16, + serverStoreCfg, storeLogFile = enableStoreLog $> storeLogFilePath, filesPath = T.unpack $ strictIni "FILES" "path" ini, fileSizeQuota = either error unFileSize <$> strDecodeIni "FILES" "storage_quota" ini, @@ -289,9 +324,16 @@ xftpServerCLI_ generateSite serveStaticFiles cfgPath logPath = do data CliCommand = Init InitOptions | OnlineCert CertOptions - | Start + | Start StartOptions + | Database StoreCmd | Delete +data StoreCmd = SCImport | SCExport + +newtype StartOptions = StartOptions + { confirmMigrations :: MigrationConfirmation + } + data InitOptions = InitOptions { enableStoreLog :: Bool, signAlgorithm :: SignAlgorithm, @@ -308,7 +350,8 @@ cliCommandP cfgPath logPath iniFile = hsubparser ( command "init" (info (Init <$> initP) (progDesc $ "Initialize server - creates " <> cfgPath <> " and " <> logPath <> " directories and configuration files")) <> command "cert" (info (OnlineCert <$> certOptionsP) (progDesc $ "Generate new online TLS server credentials (configuration: " <> iniFile <> ")")) - <> command "start" (info (pure Start) (progDesc $ "Start server (configuration: " <> iniFile <> ")")) + <> command "start" (info (Start <$> startOptsP) (progDesc $ "Start server (configuration: " <> iniFile <> ")")) + <> command "database" (info (Database <$> storeCmdP) (progDesc "Import/export file store to/from PostgreSQL database")) <> command "delete" (info (pure Delete) (progDesc "Delete configuration and log files")) ) where @@ -375,3 +418,26 @@ cliCommandP cfgPath logPath iniFile = <> metavar "PATH" ) pure InitOptions {enableStoreLog, signAlgorithm, ip, fqdn, filesPath, fileSizeQuota, webStaticPath} + startOptsP :: Parser StartOptions + startOptsP = do + confirmMigrations <- + option + parseConfirmMigrations + ( long "confirm-migrations" + <> metavar "CONFIRM_MIGRATIONS" + <> help "Confirm PostgreSQL database migration: up, down (default is manual confirmation)" + <> value MCConsole + ) + 
pure StartOptions {confirmMigrations} + where + parseConfirmMigrations :: ReadM MigrationConfirmation + parseConfirmMigrations = eitherReader $ \case + "up" -> Right MCYesUp + "down" -> Right MCYesUpDown + _ -> Left "invalid migration confirmation, pass 'up' or 'down'" + storeCmdP :: Parser StoreCmd + storeCmdP = + hsubparser + ( command "import" (info (pure SCImport) (progDesc "Import store log file into PostgreSQL database")) + <> command "export" (info (pure SCExport) (progDesc "Export PostgreSQL database to store log file")) + ) diff --git a/src/Simplex/FileTransfer/Server/Store.hs b/src/Simplex/FileTransfer/Server/Store.hs index eec481a21d..4641e24f97 100644 --- a/src/Simplex/FileTransfer/Server/Store.hs +++ b/src/Simplex/FileTransfer/Server/Store.hs @@ -4,48 +4,39 @@ {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE TupleSections #-} +{-# LANGUAGE TypeFamilies #-} module Simplex.FileTransfer.Server.Store - ( FileStore (..), + ( FileStoreClass (..), FileRec (..), FileRecipient (..), + STMFileStore (..), RoundedFileTime, - newFileStore, - addFile, - setFilePath, - addRecipient, - deleteFile, - blockFile, - deleteRecipient, - expiredFilePath, - getFile, - ackFile, fileTimePrecision, ) where import Control.Concurrent.STM -import Control.Monad +import Control.Monad (forM) import qualified Data.Attoparsec.ByteString.Char8 as A import Data.Int (Int64) +import qualified Data.Map.Strict as M +import Data.Maybe (catMaybes) import Data.Set (Set) import qualified Data.Set as S +import Data.Word (Word32) import Simplex.FileTransfer.Protocol (FileInfo (..), SFileParty (..), XFTPFileId) import Simplex.FileTransfer.Transport (XFTPErrorType (..)) import qualified Simplex.Messaging.Crypto as C import Simplex.Messaging.Encoding.String import Simplex.Messaging.Protocol (BlockingInfo, RcvPublicAuthKey, RecipientId, SenderId) import Simplex.Messaging.Server.QueueStore (ServerEntityStatus (..)) +import Simplex.Messaging.Server.StoreLog (StoreLog, 
closeStoreLog) +import System.IO (IOMode (..)) import Simplex.Messaging.SystemTime import Simplex.Messaging.TMap (TMap) import qualified Simplex.Messaging.TMap as TM -import Simplex.Messaging.Util (ifM, ($>>=)) - -data FileStore = FileStore - { files :: TMap SenderId FileRec, - recipients :: TMap RecipientId (SenderId, RcvPublicAuthKey), - usedStorage :: TVar Int64 - } +import Simplex.Messaging.Util (ifM) data FileRec = FileRec { senderId :: SenderId, @@ -59,105 +50,134 @@ data FileRec = FileRec type RoundedFileTime = RoundedSystemTime 3600 fileTimePrecision :: Int64 -fileTimePrecision = 3600 -- truncate creation time to 1 hour +fileTimePrecision = 3600 -data FileRecipient = FileRecipient RecipientId RcvPublicAuthKey +data FileRecipient = FileRecipient RecipientId C.APublicAuthKey deriving (Show) instance StrEncoding FileRecipient where strEncode (FileRecipient rId rKey) = strEncode rId <> ":" <> strEncode rKey strP = FileRecipient <$> strP <* A.char ':' <*> strP -newFileStore :: IO FileStore -newFileStore = do - files <- TM.emptyIO - recipients <- TM.emptyIO - usedStorage <- newTVarIO 0 - pure FileStore {files, recipients, usedStorage} +class FileStoreClass s where + type FileStoreConfig s + newFileStore :: FileStoreConfig s -> IO s + closeFileStore :: s -> IO () + addFile :: s -> SenderId -> FileInfo -> RoundedFileTime -> ServerEntityStatus -> IO (Either XFTPErrorType ()) + setFilePath :: s -> SenderId -> FilePath -> IO (Either XFTPErrorType ()) + addRecipient :: s -> SenderId -> FileRecipient -> IO (Either XFTPErrorType ()) + deleteFile :: s -> SenderId -> IO (Either XFTPErrorType ()) + blockFile :: s -> SenderId -> BlockingInfo -> Bool -> IO (Either XFTPErrorType ()) + deleteRecipient :: s -> RecipientId -> FileRec -> IO () + getFile :: s -> SFileParty p -> XFTPFileId -> IO (Either XFTPErrorType (FileRec, C.APublicAuthKey)) + ackFile :: s -> RecipientId -> IO (Either XFTPErrorType ()) + expiredFiles :: s -> Int64 -> Int -> IO [(SenderId, Maybe FilePath, 
Word32)] + getUsedStorage :: s -> IO Int64 + getFileCount :: s -> IO Int + +-- STM in-memory store + +data STMFileStore = STMFileStore + { files :: TMap SenderId FileRec, + recipients :: TMap RecipientId (SenderId, RcvPublicAuthKey), + stmStoreLog :: TVar (Maybe (StoreLog 'WriteMode)) + } -addFile :: FileStore -> SenderId -> FileInfo -> RoundedFileTime -> ServerEntityStatus -> STM (Either XFTPErrorType ()) -addFile FileStore {files} sId fileInfo createdAt status = - ifM (TM.member sId files) (pure $ Left DUPLICATE_) $ do - f <- newFileRec sId fileInfo createdAt status - TM.insert sId f files - pure $ Right () +instance FileStoreClass STMFileStore where + type FileStoreConfig STMFileStore = () -newFileRec :: SenderId -> FileInfo -> RoundedFileTime -> ServerEntityStatus -> STM FileRec -newFileRec senderId fileInfo createdAt status = do - recipientIds <- newTVar S.empty - filePath <- newTVar Nothing - fileStatus <- newTVar status - pure FileRec {senderId, fileInfo, filePath, recipientIds, createdAt, fileStatus} + newFileStore () = do + files <- TM.emptyIO + recipients <- TM.emptyIO + stmStoreLog <- newTVarIO Nothing + pure STMFileStore {files, recipients, stmStoreLog} + + closeFileStore STMFileStore {stmStoreLog} = readTVarIO stmStoreLog >>= mapM_ closeStoreLog -setFilePath :: FileStore -> SenderId -> FilePath -> STM (Either XFTPErrorType ()) -setFilePath st sId fPath = - withFile st sId $ \FileRec {fileInfo, filePath} -> do - writeTVar filePath (Just fPath) - modifyTVar' (usedStorage st) (+ fromIntegral (size fileInfo)) - pure $ Right () - -addRecipient :: FileStore -> SenderId -> FileRecipient -> STM (Either XFTPErrorType ()) -addRecipient st@FileStore {recipients} senderId (FileRecipient rId rKey) = - withFile st senderId $ \FileRec {recipientIds} -> do - rIds <- readTVar recipientIds - mem <- TM.member rId recipients - if rId `S.member` rIds || mem - then pure $ Left DUPLICATE_ - else do - writeTVar recipientIds $! 
S.insert rId rIds - TM.insert rId (senderId, rKey) recipients + addFile STMFileStore {files} sId fileInfo createdAt status = atomically $ + ifM (TM.member sId files) (pure $ Left DUPLICATE_) $ do + f <- newFileRec sId fileInfo createdAt status + TM.insert sId f files + pure $ Right () + + setFilePath st sId fPath = atomically $ + withFile st sId $ \FileRec {filePath, fileStatus} -> do + readTVar filePath >>= \case + Just _ -> pure $ Left AUTH + Nothing -> + readTVar fileStatus >>= \case + EntityActive -> do + writeTVar filePath (Just fPath) + pure $ Right () + _ -> pure $ Left AUTH + + addRecipient st@STMFileStore {recipients} senderId (FileRecipient rId rKey) = atomically $ + withFile st senderId $ \FileRec {recipientIds} -> do + rIds <- readTVar recipientIds + mem <- TM.member rId recipients + if rId `S.member` rIds || mem + then pure $ Left DUPLICATE_ + else do + writeTVar recipientIds $! S.insert rId rIds + TM.insert rId (senderId, rKey) recipients + pure $ Right () + + deleteFile STMFileStore {files, recipients} senderId = atomically $ do + TM.lookupDelete senderId files >>= \case + Just FileRec {recipientIds} -> do + readTVar recipientIds >>= mapM_ (`TM.delete` recipients) pure $ Right () + _ -> pure $ Left AUTH --- this function must be called after the file is deleted from the file system -deleteFile :: FileStore -> SenderId -> STM (Either XFTPErrorType ()) -deleteFile FileStore {files, recipients, usedStorage} senderId = do - TM.lookupDelete senderId files >>= \case - Just FileRec {fileInfo, recipientIds} -> do - readTVar recipientIds >>= mapM_ (`TM.delete` recipients) - modifyTVar' usedStorage $ subtract (fromIntegral $ size fileInfo) + blockFile st senderId info _deleted = atomically $ + withFile st senderId $ \FileRec {fileStatus} -> do + writeTVar fileStatus $! 
EntityBlocked info pure $ Right () - _ -> pure $ Left AUTH --- this function must be called after the file is deleted from the file system -blockFile :: FileStore -> SenderId -> BlockingInfo -> Bool -> STM (Either XFTPErrorType ()) -blockFile st@FileStore {usedStorage} senderId info deleted = - withFile st senderId $ \FileRec {fileInfo, fileStatus} -> do - when deleted $ modifyTVar' usedStorage $ subtract (fromIntegral $ size fileInfo) - writeTVar fileStatus $! EntityBlocked info - pure $ Right () - -deleteRecipient :: FileStore -> RecipientId -> FileRec -> STM () -deleteRecipient FileStore {recipients} rId FileRec {recipientIds} = do - TM.delete rId recipients - modifyTVar' recipientIds $ S.delete rId - -getFile :: FileStore -> SFileParty p -> XFTPFileId -> STM (Either XFTPErrorType (FileRec, C.APublicAuthKey)) -getFile st party fId = case party of - SFSender -> withFile st fId $ pure . Right . (\f -> (f, sndKey $ fileInfo f)) - SFRecipient -> - TM.lookup fId (recipients st) >>= \case - Just (sId, rKey) -> withFile st sId $ pure . Right . (,rKey) + deleteRecipient STMFileStore {recipients} rId FileRec {recipientIds} = atomically $ do + TM.delete rId recipients + modifyTVar' recipientIds $ S.delete rId + + getFile st party fId = atomically $ case party of + SFSender -> withFile st fId $ pure . Right . (\f -> (f, sndKey $ fileInfo f)) + SFRecipient -> + TM.lookup fId (recipients st) >>= \case + Just (sId, rKey) -> withFile st sId $ pure . Right . 
(,rKey) + _ -> pure $ Left AUTH + + ackFile st@STMFileStore {recipients} recipientId = atomically $ do + TM.lookupDelete recipientId recipients >>= \case + Just (sId, _) -> + withFile st sId $ \FileRec {recipientIds} -> do + modifyTVar' recipientIds $ S.delete recipientId + pure $ Right () _ -> pure $ Left AUTH -expiredFilePath :: FileStore -> XFTPFileId -> Int64 -> STM (Maybe (Maybe FilePath)) -expiredFilePath FileStore {files} sId old = - TM.lookup sId files - $>>= \FileRec {filePath, createdAt = RoundedSystemTime createdAt} -> + expiredFiles STMFileStore {files} old _limit = do + fs <- readTVarIO files + fmap catMaybes . forM (M.toList fs) $ \(sId, FileRec {fileInfo = FileInfo {size}, filePath, createdAt = RoundedSystemTime createdAt}) -> if createdAt + fileTimePrecision < old - then Just <$> readTVar filePath + then do + path <- readTVarIO filePath + pure $ Just (sId, path, size) else pure Nothing -ackFile :: FileStore -> RecipientId -> STM (Either XFTPErrorType ()) -ackFile st@FileStore {recipients} recipientId = do - TM.lookupDelete recipientId recipients >>= \case - Just (sId, _) -> - withFile st sId $ \FileRec {recipientIds} -> do - modifyTVar' recipientIds $ S.delete recipientId - pure $ Right () - _ -> pure $ Left AUTH + getUsedStorage STMFileStore {files} = + M.foldl' (\acc FileRec {fileInfo = FileInfo {size}} -> acc + fromIntegral size) 0 <$> readTVarIO files + + getFileCount STMFileStore {files} = M.size <$> readTVarIO files + +-- Internal STM helpers + +newFileRec :: SenderId -> FileInfo -> RoundedFileTime -> ServerEntityStatus -> STM FileRec +newFileRec senderId fileInfo createdAt status = do + recipientIds <- newTVar S.empty + filePath <- newTVar Nothing + fileStatus <- newTVar status + pure FileRec {senderId, fileInfo, filePath, recipientIds, createdAt, fileStatus} -withFile :: FileStore -> SenderId -> (FileRec -> STM (Either XFTPErrorType a)) -> STM (Either XFTPErrorType a) -withFile FileStore {files} sId a = +withFile :: STMFileStore -> SenderId 
-> (FileRec -> STM (Either XFTPErrorType a)) -> STM (Either XFTPErrorType a) +withFile STMFileStore {files} sId a = TM.lookup sId files >>= \case Just f -> a f _ -> pure $ Left AUTH diff --git a/src/Simplex/FileTransfer/Server/Store/Postgres.hs b/src/Simplex/FileTransfer/Server/Store/Postgres.hs new file mode 100644 index 0000000000..bb24ba1116 --- /dev/null +++ b/src/Simplex/FileTransfer/Server/Store/Postgres.hs @@ -0,0 +1,357 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeFamilies #-} + +module Simplex.FileTransfer.Server.Store.Postgres + ( PostgresFileStore (..), + importFileStore, + exportFileStore, + ) +where + +import qualified Control.Exception as E +import Control.Logger.Simple +import Control.Monad +import Control.Monad.Except +import Control.Monad.IO.Class +import Control.Monad.Trans.Except (throwE) +import Data.ByteString (ByteString) +import qualified Data.ByteString.Char8 as B +import Data.ByteString.Builder (Builder) +import qualified Data.ByteString.Builder as BB +import qualified Data.ByteString.Lazy as LB +import Data.Functor (($>)) +import Data.Int (Int32, Int64) +import Data.List (intersperse) +import qualified Data.List.NonEmpty as L +import qualified Data.Map.Strict as M +import qualified Data.Set as S +import Data.Text (Text) +import Data.Word (Word32) +import Database.PostgreSQL.Simple (Binary (..), Only (..), SqlError, (:.) 
(..)) +import qualified Database.PostgreSQL.Simple as DB +import qualified Database.PostgreSQL.Simple.Copy as DB +import Database.PostgreSQL.Simple.Errors (ConstraintViolation (..), constraintViolation) +import Database.PostgreSQL.Simple.ToField (Action (..), ToField (..)) +import GHC.IO (catchAny) +import Simplex.FileTransfer.Protocol (FileInfo (..), SFileParty (..)) +import Simplex.FileTransfer.Server.Store +import Simplex.FileTransfer.Server.Store.Postgres.Config +import Simplex.FileTransfer.Server.Store.Postgres.Migrations (xftpServerMigrations) +import Simplex.FileTransfer.Server.StoreLog +import Simplex.FileTransfer.Transport (XFTPErrorType (..)) +import Simplex.Messaging.Agent.Store.Postgres (closeDBStore, createDBStore) +import Simplex.Messaging.Agent.Store.Postgres.Common (DBStore, withTransaction) +import Simplex.Messaging.Agent.Store.Postgres.Options (DBOpts (..)) +import Simplex.Messaging.Agent.Store.Shared (MigrationConfig (..), MigrationConfirmation (..)) +import qualified Simplex.Messaging.Crypto as C +import Simplex.Messaging.Protocol (RcvPublicAuthKey, RecipientId, SenderId) +import Simplex.Messaging.Transport (EntityId (..)) +import Simplex.Messaging.Server.QueueStore (ServerEntityStatus (..)) +import Simplex.Messaging.Server.QueueStore.Postgres () +import Simplex.Messaging.Server.StoreLog (openWriteStoreLog) +import Simplex.Messaging.Util (firstRow, tshow) +import System.Directory (renameFile) +import System.Exit (exitFailure) +import System.IO (IOMode (..), hFlush, stdout) +import UnliftIO.STM + +data PostgresFileStore = PostgresFileStore + { dbStore :: DBStore, + dbStoreLog :: Maybe (StoreLog 'WriteMode) + } + +instance FileStoreClass PostgresFileStore where + type FileStoreConfig PostgresFileStore = PostgresFileStoreCfg + + newFileStore PostgresFileStoreCfg {dbOpts, dbStoreLogPath, confirmMigrations} = do + dbStore <- either err pure =<< createDBStore dbOpts xftpServerMigrations (MigrationConfig confirmMigrations Nothing) + dbStoreLog <- mapM 
(openWriteStoreLog True) dbStoreLogPath + pure PostgresFileStore {dbStore, dbStoreLog} + where + err e = do + logError $ "STORE: newFileStore, error opening PostgreSQL database, " <> tshow e + exitFailure + + closeFileStore PostgresFileStore {dbStore, dbStoreLog} = do + closeDBStore dbStore + mapM_ closeStoreLog dbStoreLog + + addFile st sId fileInfo@FileInfo {sndKey, size, digest} createdAt status = + E.uninterruptibleMask_ $ runExceptT $ do + void $ withDB "addFile" st $ \db -> + E.try + ( DB.execute + db + "INSERT INTO files (sender_id, file_size, file_digest, sender_key, created_at, status) VALUES (?,?,?,?,?,?)" + (sId, (fromIntegral size :: Int32), Binary digest, Binary (C.encodePubKey sndKey), createdAt, status) + ) + >>= either handleDuplicate (pure . Right) + withLog "addFile" st $ \s -> logAddFile s sId fileInfo createdAt status + + setFilePath st sId fPath = E.uninterruptibleMask_ $ runExceptT $ do + assertUpdated $ withDB' "setFilePath" st $ \db -> + DB.execute db "UPDATE files SET file_path = ? WHERE sender_id = ? AND file_path IS NULL AND status = 'active'" (fPath, sId) + withLog "setFilePath" st $ \s -> logPutFile s sId fPath + + addRecipient st senderId (FileRecipient rId rKey) = E.uninterruptibleMask_ $ runExceptT $ do + void $ withDB "addRecipient" st $ \db -> + E.try + ( DB.execute + db + "INSERT INTO recipients (recipient_id, sender_id, recipient_key) VALUES (?,?,?)" + (rId, senderId, Binary (C.encodePubKey rKey)) + ) + >>= either handleDuplicate (pure . Right) + withLog "addRecipient" st $ \s -> logAddRecipients s senderId (pure $ FileRecipient rId rKey) + + deleteFile st sId = E.uninterruptibleMask_ $ runExceptT $ do + assertUpdated $ withDB' "deleteFile" st $ \db -> + DB.execute db "DELETE FROM files WHERE sender_id = ?" 
(Only sId) + withLog "deleteFile" st $ \s -> logDeleteFile s sId + + blockFile st sId info _deleted = E.uninterruptibleMask_ $ runExceptT $ do + assertUpdated $ withDB' "blockFile" st $ \db -> + DB.execute db "UPDATE files SET status = ? WHERE sender_id = ?" (EntityBlocked info, sId) + withLog "blockFile" st $ \s -> logBlockFile s sId info + + deleteRecipient st rId _fr = + void $ runExceptT $ withDB' "deleteRecipient" st $ \db -> + DB.execute db "DELETE FROM recipients WHERE recipient_id = ?" (Only rId) + + getFile st party fId = runExceptT $ case party of + SFSender -> do + row <- loadFileRow "SELECT sender_id, file_size, file_digest, sender_key, file_path, created_at, status FROM files WHERE sender_id = ?" + fr <- ExceptT $ rowToFileRec row + pure (fr, sndKey (fileInfo fr)) + SFRecipient -> do + row :. Only rcpKeyBs <- + loadFileRow + "SELECT f.sender_id, f.file_size, f.file_digest, f.sender_key, f.file_path, f.created_at, f.status, r.recipient_key FROM files f JOIN recipients r ON r.sender_id = f.sender_id WHERE r.recipient_id = ?" + fr <- ExceptT $ rowToFileRec row + rcpKey <- either (const $ throwE INTERNAL) pure $ C.decodePubKey rcpKeyBs + pure (fr, rcpKey) + where + loadFileRow :: DB.FromRow r => DB.Query -> ExceptT XFTPErrorType IO r + loadFileRow q = + withDB "getFile" st $ \db -> + firstRow id AUTH $ DB.query db q (Only fId) + + ackFile st rId = E.uninterruptibleMask_ $ runExceptT $ do + assertUpdated $ withDB' "ackFile" st $ \db -> + DB.execute db "DELETE FROM recipients WHERE recipient_id = ?" (Only rId) + withLog "ackFile" st $ \s -> logAckFile s rId + + expiredFiles st old limit = + fmap toResult $ withTransaction (dbStore st) $ \db -> + DB.query + db + "SELECT sender_id, file_path, file_size FROM files WHERE created_at + ? < ? ORDER BY created_at LIMIT ?" 
+ (fileTimePrecision, old, limit) + where + toResult :: [(SenderId, Maybe FilePath, Int32)] -> [(SenderId, Maybe FilePath, Word32)] + toResult = map (\(sId, path, size) -> (sId, path, fromIntegral size)) + + getUsedStorage st = + withTransaction (dbStore st) $ \db -> do + [Only total] <- DB.query_ db "SELECT COALESCE(SUM(file_size::INT8), 0)::INT8 FROM files" + pure total + + getFileCount st = + withTransaction (dbStore st) $ \db -> do + [Only count] <- DB.query_ db "SELECT COUNT(*) FROM files" + pure (fromIntegral (count :: Int64)) + +-- Internal helpers + +mkFileRec :: SenderId -> FileInfo -> Maybe FilePath -> RoundedFileTime -> ServerEntityStatus -> IO FileRec +mkFileRec senderId fileInfo path createdAt status = do + filePath <- newTVarIO path + recipientIds <- newTVarIO S.empty + fileStatus <- newTVarIO status + pure FileRec {senderId, fileInfo, filePath, recipientIds, createdAt, fileStatus} + +type FileRecRow = (SenderId, Int32, ByteString, ByteString, Maybe FilePath, RoundedFileTime, ServerEntityStatus) + +rowToFileRec :: FileRecRow -> IO (Either XFTPErrorType FileRec) +rowToFileRec (sId, size, digest, sndKeyBs, path, createdAt, status) = + case C.decodePubKey sndKeyBs of + Right sndKey -> do + let fileInfo = FileInfo {sndKey, size = fromIntegral size, digest} + Right <$> mkFileRec sId fileInfo path createdAt status + Left _ -> pure $ Left INTERNAL + +-- DB helpers + +withDB :: forall a. Text -> PostgresFileStore -> (DB.Connection -> IO (Either XFTPErrorType a)) -> ExceptT XFTPErrorType IO a +withDB op st action = + ExceptT $ E.try (withTransaction (dbStore st) action) >>= either logErr pure + where + logErr :: E.SomeException -> IO (Either XFTPErrorType a) + logErr e = logError ("STORE: " <> err) $> Left INTERNAL + where + err = op <> ", withDB, " <> tshow e + +withDB' :: Text -> PostgresFileStore -> (DB.Connection -> IO a) -> ExceptT XFTPErrorType IO a +withDB' op st action = withDB op st $ fmap Right . 
action + +assertUpdated :: ExceptT XFTPErrorType IO Int64 -> ExceptT XFTPErrorType IO () +assertUpdated = (>>= \n -> when (n == 0) (throwE AUTH)) + +handleDuplicate :: SqlError -> IO (Either XFTPErrorType a) +handleDuplicate e = case constraintViolation e of + Just (UniqueViolation _) -> pure $ Left DUPLICATE_ + Just (ForeignKeyViolation _ _) -> pure $ Left AUTH + _ -> E.throwIO e + +withLog :: MonadIO m => Text -> PostgresFileStore -> (StoreLog 'WriteMode -> IO ()) -> m () +withLog op PostgresFileStore {dbStoreLog} action = + forM_ dbStoreLog $ \sl -> liftIO $ action sl `catchAny` \e -> + logWarn $ "STORE: " <> op <> ", withLog, " <> tshow e + +-- Import: StoreLog -> PostgreSQL + +importFileStore :: FilePath -> PostgresFileStoreCfg -> IO () +importFileStore storeLogFilePath dbCfg = do + putStrLn $ "Reading store log: " <> storeLogFilePath + stmStore <- newFileStore () :: IO STMFileStore + sl <- readWriteFileStore storeLogFilePath stmStore + closeStoreLog sl + allFiles <- readTVarIO (files stmStore) + allRcps <- readTVarIO (recipients stmStore) + let fileCount = M.size allFiles + rcpCount = M.size allRcps + putStrLn $ "Loaded " <> show fileCount <> " files, " <> show rcpCount <> " recipients." + let dbCfg' = dbCfg {dbOpts = (dbOpts dbCfg) {createSchema = True}, confirmMigrations = MCYesUp} + pgStore <- newFileStore dbCfg' :: IO PostgresFileStore + existingCount <- getFileCount pgStore + when (existingCount > 0) $ do + putStrLn $ "WARNING: database already contains " <> show existingCount <> " files. Import will fail on duplicate keys." + putStrLn "Drop the existing schema first or use a fresh database." + exitFailure + putStrLn "Importing files..." 
+ fCnt <- withTransaction (dbStore pgStore) $ \db -> do + DB.copy_ + db + "COPY files (sender_id, file_size, file_digest, sender_key, file_path, created_at, status) FROM STDIN WITH (FORMAT csv)" + iforM_ (M.toList allFiles) $ \i (sId, fr) -> do + DB.putCopyData db =<< fileRecToCSV sId fr + when (i > 0 && i `mod` 10000 == 0) $ putStr (" " <> show i <> " files\r") >> hFlush stdout + DB.putCopyEnd db + [Only cnt] <- DB.query_ db "SELECT COUNT(*) FROM files" + pure (cnt :: Int64) + putStrLn $ "Imported " <> show fCnt <> " files." + putStrLn "Importing recipients..." + rCnt <- withTransaction (dbStore pgStore) $ \db -> do + DB.copy_ + db + "COPY recipients (recipient_id, sender_id, recipient_key) FROM STDIN WITH (FORMAT csv)" + iforM_ (M.toList allRcps) $ \i (rId, (sId, rKey)) -> do + DB.putCopyData db $ recipientToCSV rId sId rKey + when (i > 0 && i `mod` 10000 == 0) $ putStr (" " <> show i <> " recipients\r") >> hFlush stdout + DB.putCopyEnd db + [Only cnt] <- DB.query_ db "SELECT COUNT(*) FROM recipients" + pure (cnt :: Int64) + putStrLn $ "Imported " <> show rCnt <> " recipients." + when (fromIntegral fileCount /= fCnt) $ + putStrLn $ "WARNING: expected " <> show fileCount <> " files, got " <> show fCnt + when (fromIntegral rcpCount /= rCnt) $ + putStrLn $ "WARNING: expected " <> show rcpCount <> " recipients, got " <> show rCnt + closeFileStore pgStore + renameFile storeLogFilePath (storeLogFilePath <> ".bak") + putStrLn $ "Store log renamed to " <> storeLogFilePath <> ".bak" + +-- Export: PostgreSQL -> StoreLog + +exportFileStore :: FilePath -> PostgresFileStoreCfg -> IO () +exportFileStore storeLogFilePath dbCfg = do + pgStore <- newFileStore dbCfg :: IO PostgresFileStore + sl <- openWriteStoreLog False storeLogFilePath + putStrLn "Exporting files..." 
+ -- Load all recipients into a map for lookup + rcpMap <- withTransaction (dbStore pgStore) $ \db -> + DB.fold_ + db + "SELECT recipient_id, sender_id, recipient_key FROM recipients ORDER BY sender_id" + M.empty + (\acc (rId, sId, rKeyBs :: ByteString) -> + case C.decodePubKey rKeyBs of + Right rKey -> pure $! M.insertWith (++) sId [FileRecipient rId rKey] acc + Left _ -> putStrLn ("WARNING: invalid recipient key for " <> show rId) $> acc) + -- Fold over files, writing StoreLog records + (!fCnt, !rCnt) <- withTransaction (dbStore pgStore) $ \db -> + DB.fold_ + db + "SELECT sender_id, file_size, file_digest, sender_key, file_path, created_at, status FROM files ORDER BY created_at" + (0 :: Int, 0 :: Int) + ( \(!fc, !rc) (sId, size :: Int32, digest :: ByteString, sndKeyBs :: ByteString, path :: Maybe String, createdAt, status) -> + case C.decodePubKey sndKeyBs of + Right sndKey -> do + let fileInfo = FileInfo {sndKey, size = fromIntegral size, digest} + logAddFile sl sId fileInfo createdAt status + let rcps = M.findWithDefault [] sId rcpMap + rc' = rc + length rcps + forM_ (L.nonEmpty rcps) $ logAddRecipients sl sId + forM_ path $ logPutFile sl sId + pure (fc + 1, rc') + Left _ -> do + putStrLn $ "WARNING: invalid sender key for " <> show sId + pure (fc, rc) + ) + closeStoreLog sl + closeFileStore pgStore + putStrLn $ "Exported " <> show fCnt <> " files, " <> show rCnt <> " recipients to " <> storeLogFilePath + +-- CSV helpers for COPY protocol + +iforM_ :: Monad m => [a] -> (Int -> a -> m ()) -> m () +iforM_ xs f = zipWithM_ f [0 ..] 
xs + +fileRecToCSV :: SenderId -> FileRec -> IO ByteString +fileRecToCSV sId FileRec {fileInfo = FileInfo {sndKey, size, digest}, filePath, createdAt, fileStatus} = do + path <- readTVarIO filePath + status <- readTVarIO fileStatus + pure $ LB.toStrict $ BB.toLazyByteString $ mconcat (BB.char7 ',' `intersperse` fields path status) <> BB.char7 '\n' + where + fields path status = + [ renderField (toField (Binary (unEntityId sId))), + renderField (toField (fromIntegral size :: Int32)), + renderField (toField (Binary digest)), + renderField (toField (Binary (C.encodePubKey sndKey))), + nullable (toField <$> path), + renderField (toField createdAt), + quotedField (toField status) + ] + +recipientToCSV :: RecipientId -> SenderId -> RcvPublicAuthKey -> ByteString +recipientToCSV rId sId rKey = + LB.toStrict $ BB.toLazyByteString $ mconcat (BB.char7 ',' `intersperse` fields) <> BB.char7 '\n' + where + fields = + [ renderField (toField (Binary (unEntityId rId))), + renderField (toField (Binary (unEntityId sId))), + renderField (toField (Binary (C.encodePubKey rKey))) + ] + +renderField :: Action -> Builder +renderField = \case + Plain bld -> bld + Escape s -> BB.byteString s + EscapeByteA s -> BB.string7 "\\x" <> BB.byteStringHex s + EscapeIdentifier s -> BB.byteString s + Many as -> mconcat (map renderField as) + +nullable :: Maybe Action -> Builder +nullable = maybe mempty renderField + +quotedField :: Action -> Builder +quotedField a = BB.char7 '"' <> escapeQuotes (renderField a) <> BB.char7 '"' + where + escapeQuotes bld = + let bs = LB.toStrict $ BB.toLazyByteString bld + in BB.byteString $ B.concatMap (\c -> if c == '"' then "\"\"" else B.singleton c) bs diff --git a/src/Simplex/FileTransfer/Server/Store/Postgres/Config.hs b/src/Simplex/FileTransfer/Server/Store/Postgres/Config.hs new file mode 100644 index 0000000000..a0dd5d7bad --- /dev/null +++ b/src/Simplex/FileTransfer/Server/Store/Postgres/Config.hs @@ -0,0 +1,25 @@ +{-# LANGUAGE OverloadedStrings #-} + +module 
Simplex.FileTransfer.Server.Store.Postgres.Config + ( PostgresFileStoreCfg (..), + defaultXFTPDBOpts, + ) +where + +import Simplex.Messaging.Agent.Store.Postgres.Options (DBOpts (..)) +import Simplex.Messaging.Agent.Store.Shared (MigrationConfirmation) + +data PostgresFileStoreCfg = PostgresFileStoreCfg + { dbOpts :: DBOpts, + dbStoreLogPath :: Maybe FilePath, + confirmMigrations :: MigrationConfirmation + } + +defaultXFTPDBOpts :: DBOpts +defaultXFTPDBOpts = + DBOpts + { connstr = "postgresql://xftp@/xftp_server_store", + schema = "xftp_server", + poolSize = 10, + createSchema = False + } diff --git a/src/Simplex/FileTransfer/Server/Store/Postgres/Migrations.hs b/src/Simplex/FileTransfer/Server/Store/Postgres/Migrations.hs new file mode 100644 index 0000000000..15e1178dea --- /dev/null +++ b/src/Simplex/FileTransfer/Server/Store/Postgres/Migrations.hs @@ -0,0 +1,47 @@ +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE QuasiQuotes #-} + +module Simplex.FileTransfer.Server.Store.Postgres.Migrations + ( xftpServerMigrations, + ) +where + +import Data.List (sortOn) +import Data.Text (Text) +import Simplex.Messaging.Agent.Store.Shared +import Text.RawString.QQ (r) + +xftpSchemaMigrations :: [(String, Text, Maybe Text)] +xftpSchemaMigrations = + [ ("20260325_initial", m20260325_initial, Nothing) + ] + +-- | The list of migrations in ascending order by date +xftpServerMigrations :: [Migration] +xftpServerMigrations = sortOn name $ map migration xftpSchemaMigrations + where + migration (name, up, down) = Migration {name, up, down = down} + +m20260325_initial :: Text +m20260325_initial = + [r| +CREATE TABLE files ( + sender_id BYTEA NOT NULL PRIMARY KEY, + file_size INT4 NOT NULL CHECK (file_size > 0), + file_digest BYTEA NOT NULL, + sender_key BYTEA NOT NULL, + file_path TEXT, + created_at INT8 NOT NULL, + status TEXT NOT NULL DEFAULT 'active' +); + +CREATE TABLE recipients ( + recipient_id BYTEA NOT NULL PRIMARY KEY, + sender_id BYTEA NOT 
NULL REFERENCES files ON DELETE CASCADE, + recipient_key BYTEA NOT NULL +); + +CREATE INDEX idx_recipients_sender_id ON recipients (sender_id); +CREATE INDEX idx_files_created_at ON files (created_at); +|] diff --git a/src/Simplex/FileTransfer/Server/StoreLog.hs b/src/Simplex/FileTransfer/Server/StoreLog.hs index c82beda29b..3947de9979 100644 --- a/src/Simplex/FileTransfer/Server/StoreLog.hs +++ b/src/Simplex/FileTransfer/Server/StoreLog.hs @@ -10,6 +10,7 @@ module Simplex.FileTransfer.Server.StoreLog FileStoreLogRecord (..), closeStoreLog, readWriteFileStore, + writeFileStore, logAddFile, logPutFile, logAddRecipients, @@ -87,16 +88,16 @@ logBlockFile s fId = logFileStoreRecord s . BlockFile fId logAckFile :: StoreLog 'WriteMode -> RecipientId -> IO () logAckFile s = logFileStoreRecord s . AckFile -readWriteFileStore :: FilePath -> FileStore -> IO (StoreLog 'WriteMode) +readWriteFileStore :: FilePath -> STMFileStore -> IO (StoreLog 'WriteMode) readWriteFileStore = readWriteStoreLog readFileStore writeFileStore -readFileStore :: FilePath -> FileStore -> IO () +readFileStore :: FilePath -> STMFileStore -> IO () readFileStore f st = mapM_ (addFileLogRecord . LB.toStrict) . LB.lines =<< LB.readFile f where addFileLogRecord s = case strDecode s of Left e -> B.putStrLn $ "Log parsing error (" <> B.pack e <> "): " <> B.take 100 s Right lr -> - atomically (addToStore lr) >>= \case + addToStore lr >>= \case Left e -> B.putStrLn $ "Log processing error (" <> bshow e <> "): " <> B.take 100 s _ -> pure () addToStore = \case @@ -108,8 +109,8 @@ readFileStore f st = mapM_ (addFileLogRecord . LB.toStrict) . LB.lines =<< LB.re AckFile rId -> ackFile st rId addRecipients sId rcps = mapM_ (ExceptT . 
addRecipient st sId) rcps -writeFileStore :: StoreLog 'WriteMode -> FileStore -> IO () -writeFileStore s FileStore {files, recipients} = do +writeFileStore :: StoreLog 'WriteMode -> STMFileStore -> IO () +writeFileStore s STMFileStore {files, recipients} = do allRcps <- readTVarIO recipients readTVarIO files >>= mapM_ (logFile allRcps) where diff --git a/tests/CoreTests/XFTPStoreTests.hs b/tests/CoreTests/XFTPStoreTests.hs new file mode 100644 index 0000000000..85e951ed66 --- /dev/null +++ b/tests/CoreTests/XFTPStoreTests.hs @@ -0,0 +1,274 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE OverloadedStrings #-} + +module CoreTests.XFTPStoreTests (xftpStoreTests, xftpMigrationTests) where + +import Control.Monad +import Data.Word (Word32) +import Simplex.FileTransfer.Protocol (FileInfo (..), SFileParty (..)) +import Simplex.FileTransfer.Server.Store +import Simplex.FileTransfer.Server.Store.Postgres (PostgresFileStore, importFileStore, exportFileStore) +import Simplex.FileTransfer.Server.StoreLog (closeStoreLog, readWriteFileStore, writeFileStore) +import Simplex.FileTransfer.Transport (XFTPErrorType (..)) +import qualified Simplex.Messaging.Crypto as C +import Simplex.Messaging.Protocol (BlockingInfo (..), BlockingReason (..), EntityId (..)) +import Simplex.Messaging.Server.QueueStore (ServerEntityStatus (..)) +import Simplex.Messaging.Server.StoreLog (openWriteStoreLog) +import Simplex.Messaging.SystemTime (RoundedSystemTime (..)) +import System.Directory (doesFileExist, removeFile) +import Test.Hspec hiding (fit, it) +import UnliftIO.STM +import Util +import XFTPClient (testXFTPPostgresCfg) + +xftpStoreTests :: Spec +xftpStoreTests = describe "PostgresFileStore operations" $ do + it "should add and get file by sender" testAddGetFileSender + it "should add and get file by recipient" testAddGetFileRecipient + it "should reject duplicate file" testDuplicateFile + it "should return AUTH for nonexistent file" testGetNonexistent + it "should 
set file path with IS NULL guard" testSetFilePath + it "should reject duplicate recipient" testDuplicateRecipient + it "should delete file and cascade recipients" testDeleteFileCascade + it "should block file and update status" testBlockFile + it "should ack file reception" testAckFile + it "should return expired files with limit" testExpiredFiles + it "should compute used storage and file count" testStorageAndCount + +xftpMigrationTests :: Spec +xftpMigrationTests = describe "XFTP migration round-trip" $ do + it "should export to StoreLog and import back to Postgres preserving data" testMigrationRoundTrip + +-- Test helpers + +withPgStore :: (PostgresFileStore -> IO ()) -> IO () +withPgStore test = do + st <- newFileStore testXFTPPostgresCfg :: IO PostgresFileStore + test st + closeFileStore st + +testSenderId :: EntityId +testSenderId = EntityId "sender001_______" + +testRecipientId :: EntityId +testRecipientId = EntityId "recipient001____" + +testFileInfo :: C.APublicAuthKey -> FileInfo +testFileInfo sndKey = + FileInfo + { sndKey, + size = 128000 :: Word32, + digest = "test_digest_bytes_here___" + } + +testCreatedAt :: RoundedFileTime +testCreatedAt = RoundedSystemTime 1000000 + +-- Tests + +testAddGetFileSender :: Expectation +testAddGetFileSender = withPgStore $ \st -> do + g <- C.newRandom + (sk, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + let fileInfo = testFileInfo sk + addFile st testSenderId fileInfo testCreatedAt EntityActive `shouldReturn` Right () + result <- getFile st SFSender testSenderId + case result of + Right (FileRec {senderId, fileInfo = fi, createdAt}, key) -> do + senderId `shouldBe` testSenderId + sndKey fi `shouldBe` sk + size fi `shouldBe` 128000 + createdAt `shouldBe` testCreatedAt + key `shouldBe` sk + Left e -> expectationFailure $ "getFile failed: " <> show e + +testAddGetFileRecipient :: Expectation +testAddGetFileRecipient = withPgStore $ \st -> do + g <- C.newRandom + (sndKey, _) <- atomically $ C.generateAuthKeyPair 
C.SEd25519 g + (rcpKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + let fileInfo = testFileInfo sndKey + addFile st testSenderId fileInfo testCreatedAt EntityActive `shouldReturn` Right () + addRecipient st testSenderId (FileRecipient testRecipientId rcpKey) `shouldReturn` Right () + result <- getFile st SFRecipient testRecipientId + case result of + Right (FileRec {senderId}, key) -> do + senderId `shouldBe` testSenderId + key `shouldBe` rcpKey + Left e -> expectationFailure $ "getFile failed: " <> show e + +testDuplicateFile :: Expectation +testDuplicateFile = withPgStore $ \st -> do + g <- C.newRandom + (sndKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + let fileInfo = testFileInfo sndKey + addFile st testSenderId fileInfo testCreatedAt EntityActive `shouldReturn` Right () + addFile st testSenderId fileInfo testCreatedAt EntityActive `shouldReturn` Left DUPLICATE_ + +testGetNonexistent :: Expectation +testGetNonexistent = withPgStore $ \st -> do + getFile st SFSender testSenderId >>= (`shouldBe` Left AUTH) . fmap (const ()) + getFile st SFRecipient testRecipientId >>= (`shouldBe` Left AUTH) . 
fmap (const ()) + +testSetFilePath :: Expectation +testSetFilePath = withPgStore $ \st -> do + g <- C.newRandom + (sndKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + let fileInfo = testFileInfo sndKey + addFile st testSenderId fileInfo testCreatedAt EntityActive `shouldReturn` Right () + setFilePath st testSenderId "/tmp/test_file" `shouldReturn` Right () + -- Second setFilePath should fail (file_path IS NULL guard) + setFilePath st testSenderId "/tmp/other_file" `shouldReturn` Left AUTH + -- Verify path was set + result <- getFile st SFSender testSenderId + case result of + Right (FileRec {filePath}, _) -> readTVarIO filePath `shouldReturn` Just "/tmp/test_file" + Left e -> expectationFailure $ "getFile failed: " <> show e + +testDuplicateRecipient :: Expectation +testDuplicateRecipient = withPgStore $ \st -> do + g <- C.newRandom + (sndKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + (rcpKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + let fileInfo = testFileInfo sndKey + addFile st testSenderId fileInfo testCreatedAt EntityActive `shouldReturn` Right () + addRecipient st testSenderId (FileRecipient testRecipientId rcpKey) `shouldReturn` Right () + addRecipient st testSenderId (FileRecipient testRecipientId rcpKey) `shouldReturn` Left DUPLICATE_ + +testDeleteFileCascade :: Expectation +testDeleteFileCascade = withPgStore $ \st -> do + g <- C.newRandom + (sndKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + (rcpKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + let fileInfo = testFileInfo sndKey + addFile st testSenderId fileInfo testCreatedAt EntityActive `shouldReturn` Right () + addRecipient st testSenderId (FileRecipient testRecipientId rcpKey) `shouldReturn` Right () + deleteFile st testSenderId `shouldReturn` Right () + -- File and recipient should both be gone + getFile st SFSender testSenderId >>= (`shouldBe` Left AUTH) . 
fmap (const ()) + getFile st SFRecipient testRecipientId >>= (`shouldBe` Left AUTH) . fmap (const ()) + +testBlockFile :: Expectation +testBlockFile = withPgStore $ \st -> do + g <- C.newRandom + (sndKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + let fileInfo = testFileInfo sndKey + addFile st testSenderId fileInfo testCreatedAt EntityActive `shouldReturn` Right () + let blockInfo = BlockingInfo {reason = BRContent, notice = Nothing} + blockFile st testSenderId blockInfo False `shouldReturn` Right () + result <- getFile st SFSender testSenderId + case result of + Right (FileRec {fileStatus}, _) -> readTVarIO fileStatus `shouldReturn` EntityBlocked blockInfo + Left e -> expectationFailure $ "getFile failed: " <> show e + +testAckFile :: Expectation +testAckFile = withPgStore $ \st -> do + g <- C.newRandom + (sndKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + (rcpKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + let fileInfo = testFileInfo sndKey + addFile st testSenderId fileInfo testCreatedAt EntityActive `shouldReturn` Right () + addRecipient st testSenderId (FileRecipient testRecipientId rcpKey) `shouldReturn` Right () + ackFile st testRecipientId `shouldReturn` Right () + -- Recipient gone, but file still exists + getFile st SFRecipient testRecipientId >>= (`shouldBe` Left AUTH) . 
fmap (const ()) + result <- getFile st SFSender testSenderId + case result of + Right _ -> pure () + Left e -> expectationFailure $ "getFile failed: " <> show e + +testExpiredFiles :: Expectation +testExpiredFiles = withPgStore $ \st -> do + g <- C.newRandom + (sndKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + let fileInfo = testFileInfo sndKey + oldTime = RoundedSystemTime 100000 + newTime = RoundedSystemTime 999999999 + -- Add old and new files + addFile st (EntityId "old_file________") fileInfo oldTime EntityActive `shouldReturn` Right () + void $ setFilePath st (EntityId "old_file________") "/tmp/old" + addFile st (EntityId "new_file________") fileInfo newTime EntityActive `shouldReturn` Right () + -- Query expired with cutoff that only catches old file + expired <- expiredFiles st 500000 100 + length expired `shouldBe` 1 + case expired of + [(sId, path, sz)] -> do + sId `shouldBe` EntityId "old_file________" + path `shouldBe` Just "/tmp/old" + sz `shouldBe` 128000 + _ -> expectationFailure "expected 1 expired file" + +testStorageAndCount :: Expectation +testStorageAndCount = withPgStore $ \st -> do + g <- C.newRandom + (sndKey, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + getUsedStorage st `shouldReturn` 0 + getFileCount st `shouldReturn` 0 + let fileInfo = testFileInfo sndKey + addFile st (EntityId "file_a__________") fileInfo testCreatedAt EntityActive `shouldReturn` Right () + addFile st (EntityId "file_b__________") fileInfo testCreatedAt EntityActive `shouldReturn` Right () + getFileCount st `shouldReturn` 2 + used <- getUsedStorage st + used `shouldBe` 256000 -- 128000 * 2 + +-- Migration round-trip test + +testMigrationRoundTrip :: Expectation +testMigrationRoundTrip = do + let storeLogPath = "tests/tmp/xftp-migration-test.log" + storeLogPath2 = "tests/tmp/xftp-migration-test2.log" + -- 1. 
Create STM store with test data + stmStore <- newFileStore () :: IO STMFileStore + g <- C.newRandom + (sndKey1, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + (rcpKey1, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + (sndKey2, _) <- atomically $ C.generateAuthKeyPair C.SEd25519 g + let fileInfo1 = testFileInfo sndKey1 + fileInfo2 = FileInfo {sndKey = sndKey2, size = 64000, digest = "other_digest____________"} + sId1 = EntityId "migration_file_1" + sId2 = EntityId "migration_file_2" + rId1 = EntityId "migration_rcp_1_" + addFile stmStore sId1 fileInfo1 testCreatedAt EntityActive `shouldReturn` Right () + void $ setFilePath stmStore sId1 "/tmp/file1" + addRecipient stmStore sId1 (FileRecipient rId1 rcpKey1) `shouldReturn` Right () + let testBlockInfo = BlockingInfo {reason = BRSpam, notice = Nothing} + addFile stmStore sId2 fileInfo2 testCreatedAt (EntityBlocked testBlockInfo) `shouldReturn` Right () + -- 2. Write to StoreLog + sl <- openWriteStoreLog False storeLogPath + writeFileStore sl stmStore + closeStoreLog sl + -- 3. Import StoreLog to Postgres + importFileStore storeLogPath testXFTPPostgresCfg + -- StoreLog should be renamed to .bak + doesFileExist storeLogPath `shouldReturn` False + doesFileExist (storeLogPath <> ".bak") `shouldReturn` True + -- 4. Export from Postgres back to StoreLog + exportFileStore storeLogPath2 testXFTPPostgresCfg + -- 5. 
Read exported StoreLog into a new STM store and verify + stmStore2 <- newFileStore () :: IO STMFileStore + sl2 <- readWriteFileStore storeLogPath2 stmStore2 + closeStoreLog sl2 + -- Verify file 1 + result1 <- getFile stmStore2 SFSender sId1 + case result1 of + Right (FileRec {fileInfo = fi, filePath, fileStatus}, _) -> do + size fi `shouldBe` 128000 + readTVarIO filePath `shouldReturn` Just "/tmp/file1" + readTVarIO fileStatus `shouldReturn` EntityActive + Left e -> expectationFailure $ "getFile sId1 failed: " <> show e + -- Verify recipient + result1r <- getFile stmStore2 SFRecipient rId1 + case result1r of + Right (_, key) -> key `shouldBe` rcpKey1 + Left e -> expectationFailure $ "getFile rId1 failed: " <> show e + -- Verify file 2 (blocked) + result2 <- getFile stmStore2 SFSender sId2 + case result2 of + Right (FileRec {fileInfo = fi, fileStatus}, _) -> do + size fi `shouldBe` 64000 + readTVarIO fileStatus `shouldReturn` EntityBlocked (BlockingInfo {reason = BRSpam, notice = Nothing}) + Left e -> expectationFailure $ "getFile sId2 failed: " <> show e + -- Cleanup + removeFile (storeLogPath <> ".bak") + removeFile storeLogPath2 diff --git a/tests/Test.hs b/tests/Test.hs index 63f97d8070..45e00287b6 100644 --- a/tests/Test.hs +++ b/tests/Test.hs @@ -33,7 +33,8 @@ import System.Environment (setEnv) import Test.Hspec hiding (fit, it) import Util import XFTPAgent -import XFTPCLI +import XFTPCLI (xftpCLIFileTests) +import XFTPClient (xftpMemoryServer, xftpMemoryServer2) import XFTPServerTests (xftpServerTests) import WebTests (webTests) import XFTPWebTests (xftpWebTests) @@ -46,12 +47,14 @@ import AgentTests.SchemaDump (schemaDumpTest) #endif #if defined(dbServerPostgres) +import CoreTests.XFTPStoreTests (xftpStoreTests, xftpMigrationTests) import NtfServerTests (ntfServerTests) import NtfClient (ntfTestServerDBConnectInfo, ntfTestStoreDBOpts) import PostgresSchemaDump (postgresSchemaDumpTest) import SMPClient (testServerDBConnectInfo, testStoreDBOpts) import 
Simplex.Messaging.Notifications.Server.Store.Migrations (ntfServerMigrations) import Simplex.Messaging.Server.QueueStore.Postgres.Migrations (serverMigrations) +import XFTPClient (testXFTPDBConnectInfo, xftpPostgresServer, xftpPostgresServer2) #endif #if defined(dbPostgres) || defined(dbServerPostgres) @@ -148,10 +151,21 @@ main = do describe "SMP proxy, jornal message store" $ before (pure $ ASType SQSMemory SMSJournal) smpProxyTests describe "XFTP" $ do - describe "XFTP server" xftpServerTests + describe "XFTP server" $ + before (pure xftpMemoryServer) xftpServerTests describe "XFTP file description" fileDescriptionTests - describe "XFTP CLI" xftpCLITests + describe "XFTP CLI (memory)" $ + before (pure (xftpMemoryServer, xftpMemoryServer2)) xftpCLIFileTests describe "XFTP agent" xftpAgentTests +#if defined(dbServerPostgres) + around_ (postgressBracket testXFTPDBConnectInfo) $ do + describe "XFTP Postgres store operations" xftpStoreTests + describe "XFTP migration round-trip" xftpMigrationTests + describe "XFTP server (PostgreSQL)" $ + before (pure xftpPostgresServer) xftpServerTests + describe "XFTP CLI (PostgreSQL)" $ + before (pure (xftpPostgresServer, xftpPostgresServer2)) xftpCLIFileTests +#endif #if defined(dbPostgres) describe "XFTP Web Client" $ xftpWebTests (dropAllSchemasExceptSystem testDBConnectInfo) #else diff --git a/tests/XFTPAgent.hs b/tests/XFTPAgent.hs index a83ec08a62..71d0f0b091 100644 --- a/tests/XFTPAgent.hs +++ b/tests/XFTPAgent.hs @@ -27,6 +27,7 @@ import Simplex.FileTransfer.Client (XFTPClientConfig (..)) import Simplex.FileTransfer.Description (FileChunk (..), FileDescription (..), FileDescriptionURI (..), ValidFileDescription, fileDescriptionURI, kb, mb, qrSizeLimit, pattern ValidFileDescription) import Simplex.FileTransfer.Protocol (FileParty (..)) import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..)) +import Simplex.FileTransfer.Server.Store (STMFileStore) import Simplex.FileTransfer.Transport (XFTPErrorType (AUTH)) import 
Simplex.FileTransfer.Types (RcvFileId, SndFileId) import Simplex.Messaging.Agent (AgentClient, testProtocolServer, xftpDeleteRcvFile, xftpDeleteSndFileInternal, xftpDeleteSndFileRemote, xftpReceiveFile, xftpSendDescription, xftpSendFile, xftpStartWorkers) @@ -93,6 +94,13 @@ xftpAgentTests = it "should fail without password" $ testXFTPServerTest auth (srv Nothing) `shouldReturn` authErr it "should fail with incorrect password" $ testXFTPServerTest auth (srv $ Just "wrong") `shouldReturn` authErr +testXFTPServerTest :: HasCallStack => Maybe BasicAuth -> XFTPServerWithAuth -> IO (Maybe ProtocolTestFailure) +testXFTPServerTest newFileBasicAuth srv = + withXFTPServerCfg testXFTPServerConfig {newFileBasicAuth, xftpPort = xftpTestPort2} $ \_ -> + -- initially passed server is not running + withAgent 1 agentCfg initAgentServers testDB $ \a -> + testProtocolServer a NRMInteractive 1 srv + rfProgress :: forall m. (HasCallStack, MonadIO m, MonadFail m) => AgentClient -> Int64 -> m () rfProgress c expected = loop 0 where @@ -272,7 +280,7 @@ testXFTPAgentSendReceiveMatrix = do newClient = agentCfg oldServer = withXFTPServerCfgNoALPN newServer = withXFTPServerCfg - run :: HasCallStack => (HasCallStack => XFTPServerConfig -> (ThreadId -> IO ()) -> IO ()) -> AgentConfig -> AgentConfig -> IO () + run :: HasCallStack => (HasCallStack => XFTPServerConfig STMFileStore -> (ThreadId -> IO ()) -> IO ()) -> AgentConfig -> AgentConfig -> IO () run withServer sender receiver = withServer testXFTPServerConfig $ \_t -> do filePath <- createRandomFile_ (kb 319 :: Integer) "testfile" @@ -670,9 +678,8 @@ testXFTPAgentRequestAdditionalRecipientIDs = withXFTPServer $ do void $ testReceive rcp (rfds !! 299) filePath void $ testReceive rcp (rfds !! 
499) filePath -testXFTPServerTest :: HasCallStack => Maybe BasicAuth -> XFTPServerWithAuth -> IO (Maybe ProtocolTestFailure) -testXFTPServerTest newFileBasicAuth srv = - withXFTPServerCfg testXFTPServerConfig {newFileBasicAuth, xftpPort = xftpTestPort2} $ \_ -> - -- initially passed server is not running - withAgent 1 agentCfg initAgentServers testDB $ \a -> - testProtocolServer a NRMInteractive 1 srv +testXFTPServerTest_ :: HasCallStack => XFTPServerWithAuth -> IO (Maybe ProtocolTestFailure) +testXFTPServerTest_ srv = + -- initially passed server is not running + withAgent 1 agentCfg initAgentServers testDB $ \a -> + testProtocolServer a NRMInteractive 1 srv diff --git a/tests/XFTPCLI.hs b/tests/XFTPCLI.hs index d6c97d73ce..70da884eb4 100644 --- a/tests/XFTPCLI.hs +++ b/tests/XFTPCLI.hs @@ -1,4 +1,4 @@ -module XFTPCLI where +module XFTPCLI (xftpCLIFileTests, xftpCLI, senderFiles, recipientFiles, testBracket) where import Control.Exception (bracket_) import qualified Data.ByteString as LB @@ -11,14 +11,17 @@ import System.FilePath (()) import System.IO.Silently (capture_) import Test.Hspec hiding (fit, it) import Util -import XFTPClient (testXFTPServerStr, testXFTPServerStr2, withXFTPServer, withXFTPServer2, xftpServerFiles, xftpServerFiles2) +import XFTPClient (XFTPTestServer (..), testXFTPServerStr, testXFTPServerStr2, xftpServerFiles, xftpServerFiles2) -xftpCLITests :: Spec -xftpCLITests = around_ testBracket . 
describe "XFTP CLI" $ do - it "should send and receive file" testXFTPCLISendReceive - it "should send and receive file with 2 servers" testXFTPCLISendReceive2servers - it "should delete file from 2 servers" testXFTPCLIDelete - it "prepareChunkSizes should use 2 chunk sizes" testPrepareChunkSizes +xftpCLIFileTests :: SpecWith (XFTPTestServer, XFTPTestServer) +xftpCLIFileTests = around_ testBracket $ do + it "should send and receive file" $ \(XFTPTestServer withSrv, _) -> + withSrv id testXFTPCLISendReceive_ + it "should send and receive file with 2 servers" $ \(XFTPTestServer withSrv1, XFTPTestServer withSrv2) -> + withSrv1 id $ withSrv2 id testXFTPCLISendReceive2servers_ + it "should delete file from 2 servers" $ \(XFTPTestServer withSrv1, XFTPTestServer withSrv2) -> + withSrv1 id $ withSrv2 id testXFTPCLIDelete_ + it "prepareChunkSizes should use 2 chunk sizes" $ \(_, _) -> testPrepareChunkSizes testBracket :: IO () -> IO () testBracket = @@ -37,8 +40,8 @@ recipientFiles = "tests/tmp/xftp-recipient-files" xftpCLI :: [String] -> IO [String] xftpCLI params = lines <$> capture_ (withArgs params xftpClientCLI) -testXFTPCLISendReceive :: IO () -testXFTPCLISendReceive = withXFTPServer $ do +testXFTPCLISendReceive_ :: IO () +testXFTPCLISendReceive_ = do let filePath = senderFiles "testfile" xftpCLI ["rand", filePath, "17mb"] `shouldReturn` ["File created: " <> filePath] file <- LB.readFile filePath @@ -73,8 +76,8 @@ testXFTPCLISendReceive = withXFTPServer $ do recvResult `shouldBe` ["File description " <> fd <> " is deleted."] LB.readFile (recipientFiles fileName) `shouldReturn` file -testXFTPCLISendReceive2servers :: IO () -testXFTPCLISendReceive2servers = withXFTPServer . 
withXFTPServer2 $ do +testXFTPCLISendReceive2servers_ :: IO () +testXFTPCLISendReceive2servers_ = do let filePath = senderFiles "testfile" xftpCLI ["rand", filePath, "17mb"] `shouldReturn` ["File created: " <> filePath] file <- LB.readFile filePath @@ -111,8 +114,8 @@ testXFTPCLISendReceive2servers = withXFTPServer . withXFTPServer2 $ do recvResult `shouldBe` ["File description " <> fd <> " is deleted."] LB.readFile (recipientFiles fileName) `shouldReturn` file -testXFTPCLIDelete :: IO () -testXFTPCLIDelete = withXFTPServer . withXFTPServer2 $ do +testXFTPCLIDelete_ :: IO () +testXFTPCLIDelete_ = do let filePath = senderFiles "testfile" xftpCLI ["rand", filePath, "17mb"] `shouldReturn` ["File created: " <> filePath] file <- LB.readFile filePath diff --git a/tests/XFTPClient.hs b/tests/XFTPClient.hs index 85a1d21b84..ba07ae0502 100644 --- a/tests/XFTPClient.hs +++ b/tests/XFTPClient.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE CPP #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE OverloadedStrings #-} @@ -7,6 +8,9 @@ module XFTPClient where import Control.Concurrent (ThreadId, threadDelay) +import Control.Exception (SomeException, catch) +import System.Directory (removeFile) +import Control.Monad (void) import Data.String (fromString) import Data.Time.Clock (getCurrentTime) import Network.Socket (ServiceName) @@ -14,55 +18,112 @@ import SMPClient (serverBracket) import Simplex.FileTransfer.Client import Simplex.FileTransfer.Description import Simplex.FileTransfer.Server (runXFTPServerBlocking) -import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..), defaultFileExpiration, defaultInactiveClientExpiration) +import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..), XFTPStoreConfig (..), defaultFileExpiration, defaultInactiveClientExpiration) +import Simplex.FileTransfer.Server.Store (FileStoreClass, STMFileStore) import Simplex.FileTransfer.Transport (alpnSupportedXFTPhandshakes, supportedFileServerVRange) import Simplex.Messaging.Protocol 
(XFTPServer) import Simplex.Messaging.Transport.HTTP2 (httpALPN) import Simplex.Messaging.Transport.Server import Test.Hspec hiding (fit, it) +#if defined(dbServerPostgres) +import qualified Database.PostgreSQL.Simple as PSQL +import Database.PostgreSQL.Simple (ConnectInfo (..), defaultConnectInfo) +import Simplex.FileTransfer.Server.Store.Postgres.Config (PostgresFileStoreCfg (..), defaultXFTPDBOpts) +import Simplex.Messaging.Agent.Store.Postgres.Options (DBOpts (..)) +import Simplex.Messaging.Agent.Store.Shared (MigrationConfirmation (..)) +#endif + +-- Parameterized server bracket + +newtype XFTPTestServer = XFTPTestServer + { runServer :: forall a. (forall s. XFTPServerConfig s -> XFTPServerConfig s) -> IO a -> IO a + } + +-- Store-log-dependent agent tests need the bracket + a way to clear server state +type XFTPTestServerClear = (XFTPTestServer, IO ()) + +xftpMemoryServer :: XFTPTestServer +xftpMemoryServer = XFTPTestServer $ \cfgF test -> withXFTPServerCfg (cfgF testXFTPServerConfig) $ \_ -> test + +xftpMemoryServerWithLog :: XFTPTestServer +xftpMemoryServerWithLog = XFTPTestServer $ \cfgF test -> + withXFTPServerCfg (cfgF testXFTPServerConfig {serverStoreCfg = XSCMemory (Just testXFTPLogFile), storeLogFile = Just testXFTPLogFile, serverStatsBackupFile = Just testXFTPStatsBackupFile}) $ \_ -> test + +xftpMemoryServerClear :: XFTPTestServerClear +xftpMemoryServerClear = (xftpMemoryServerWithLog, removeFile testXFTPLogFile `catch` \(_ :: SomeException) -> pure ()) + +xftpMemoryServer2 :: XFTPTestServer +xftpMemoryServer2 = XFTPTestServer $ \cfgF test -> withXFTPServerCfg (cfgF testXFTPServerConfig2) $ \_ -> test + +#if defined(dbServerPostgres) +testXFTPDBConnectInfo :: ConnectInfo +testXFTPDBConnectInfo = + defaultConnectInfo + { connectUser = "test_xftp_server_user", + connectDatabase = "test_xftp_server_db" + } + +testXFTPPostgresCfg :: PostgresFileStoreCfg +testXFTPPostgresCfg = + PostgresFileStoreCfg + { dbOpts = defaultXFTPDBOpts + { connstr = 
"postgresql://test_xftp_server_user@/test_xftp_server_db", + schema = "xftp_server_test", + poolSize = 10, + createSchema = True + }, + dbStoreLogPath = Nothing, + confirmMigrations = MCYesUp + } + +xftpPostgresServer :: XFTPTestServer +xftpPostgresServer = XFTPTestServer $ \cfgF test -> withXFTPServerCfg (cfgF testXFTPServerConfig {serverStoreCfg = XSCDatabase testXFTPPostgresCfg}) $ \_ -> test -xftpTest :: HasCallStack => (HasCallStack => XFTPClient -> IO ()) -> Expectation -xftpTest test = runXFTPTest test `shouldReturn` () +xftpPostgresServer2 :: XFTPTestServer +xftpPostgresServer2 = XFTPTestServer $ \cfgF test -> withXFTPServerCfg (cfgF testXFTPServerConfig2 {serverStoreCfg = XSCDatabase testXFTPPostgresCfg}) $ \_ -> test -xftpTestN :: HasCallStack => Int -> (HasCallStack => [XFTPClient] -> IO ()) -> Expectation -xftpTestN n test = runXFTPTestN n test `shouldReturn` () +xftpPostgresServerClear :: XFTPTestServerClear +xftpPostgresServerClear = (xftpPostgresServer, clearXFTPPostgresStore) -xftpTest2 :: HasCallStack => (HasCallStack => XFTPClient -> XFTPClient -> IO ()) -> Expectation +clearXFTPPostgresStore :: IO () +clearXFTPPostgresStore = do + let DBOpts {connstr} = dbOpts testXFTPPostgresCfg + conn <- PSQL.connectPostgreSQL connstr + void $ PSQL.execute_ conn "SET search_path TO xftp_server_test,public" + void $ PSQL.execute_ conn "DELETE FROM files" + PSQL.close conn +#endif + +xftpTest :: HasCallStack => (HasCallStack => XFTPClient -> IO ()) -> XFTPTestServer -> Expectation +xftpTest test (XFTPTestServer withSrv) = withSrv id (testXFTPClient test) `shouldReturn` () + +xftpTestN :: HasCallStack => Int -> (HasCallStack => [XFTPClient] -> IO ()) -> XFTPTestServer -> Expectation +xftpTestN nClients test (XFTPTestServer withSrv) = withSrv id (run nClients []) `shouldReturn` () + where + run :: Int -> [XFTPClient] -> IO () + run 0 hs = test hs + run n hs = testXFTPClient $ \h -> run (n - 1) (h : hs) + +xftpTest2 :: HasCallStack => (HasCallStack => XFTPClient -> 
XFTPClient -> IO ()) -> XFTPTestServer -> Expectation xftpTest2 test = xftpTestN 2 _test where _test [h1, h2] = test h1 h2 _test _ = error "expected 2 handles" -xftpTest4 :: HasCallStack => (HasCallStack => XFTPClient -> XFTPClient -> XFTPClient -> XFTPClient -> IO ()) -> Expectation +xftpTest4 :: HasCallStack => (HasCallStack => XFTPClient -> XFTPClient -> XFTPClient -> XFTPClient -> IO ()) -> XFTPTestServer -> Expectation xftpTest4 test = xftpTestN 4 _test where _test [h1, h2, h3, h4] = test h1 h2 h3 h4 _test _ = error "expected 4 handles" -runXFTPTest :: HasCallStack => (HasCallStack => XFTPClient -> IO a) -> IO a -runXFTPTest test = withXFTPServer $ testXFTPClient test - -runXFTPTestN :: forall a. HasCallStack => Int -> (HasCallStack => [XFTPClient] -> IO a) -> IO a -runXFTPTestN nClients test = withXFTPServer $ run nClients [] - where - run :: Int -> [XFTPClient] -> IO a - run 0 hs = test hs - run n hs = testXFTPClient $ \h -> run (n - 1) (h : hs) - -withXFTPServerStoreLogOn :: HasCallStack => (HasCallStack => ThreadId -> IO a) -> IO a -withXFTPServerStoreLogOn = withXFTPServerCfg testXFTPServerConfig {storeLogFile = Just testXFTPLogFile, serverStatsBackupFile = Just testXFTPStatsBackupFile} - -withXFTPServerCfgNoALPN :: HasCallStack => XFTPServerConfig -> (HasCallStack => ThreadId -> IO a) -> IO a -withXFTPServerCfgNoALPN cfg = withXFTPServerCfg cfg {transportConfig = (transportConfig cfg) {serverALPN = Nothing}} - -withXFTPServerCfg :: HasCallStack => XFTPServerConfig -> (HasCallStack => ThreadId -> IO a) -> IO a +withXFTPServerCfg :: (HasCallStack, FileStoreClass s) => XFTPServerConfig s -> (HasCallStack => ThreadId -> IO a) -> IO a withXFTPServerCfg cfg = serverBracket (\started -> runXFTPServerBlocking started cfg) (threadDelay 10000) -withXFTPServerThreadOn :: HasCallStack => (HasCallStack => ThreadId -> IO a) -> IO a -withXFTPServerThreadOn = withXFTPServerCfg testXFTPServerConfig +withXFTPServerCfgNoALPN :: (HasCallStack, FileStoreClass s) => 
XFTPServerConfig s -> (HasCallStack => ThreadId -> IO a) -> IO a +withXFTPServerCfgNoALPN cfg = withXFTPServerCfg cfg {transportConfig = (transportConfig cfg) {serverALPN = Nothing}} withXFTPServer :: HasCallStack => IO a -> IO a withXFTPServer = withXFTPServerCfg testXFTPServerConfig . const @@ -70,6 +131,14 @@ withXFTPServer = withXFTPServerCfg testXFTPServerConfig . const withXFTPServer2 :: HasCallStack => IO a -> IO a withXFTPServer2 = withXFTPServerCfg testXFTPServerConfig {xftpPort = xftpTestPort2, filesPath = xftpServerFiles2} . const +withXFTPServerStoreLogOn :: HasCallStack => (HasCallStack => ThreadId -> IO a) -> IO a +withXFTPServerStoreLogOn = withXFTPServerCfg testXFTPServerConfig {serverStoreCfg = XSCMemory (Just testXFTPLogFile), storeLogFile = Just testXFTPLogFile, serverStatsBackupFile = Just testXFTPStatsBackupFile} + +withXFTPServerThreadOn :: HasCallStack => (HasCallStack => ThreadId -> IO a) -> IO a +withXFTPServerThreadOn = withXFTPServerCfg testXFTPServerConfig + +-- Constants + xftpTestPort :: ServiceName xftpTestPort = "8000" @@ -103,12 +172,13 @@ testXFTPStatsBackupFile = "tests/tmp/xftp-server-stats.log" xftpTestPrometheusMetricsFile :: FilePath xftpTestPrometheusMetricsFile = "tests/tmp/xftp-server-metrics.txt" -testXFTPServerConfig :: XFTPServerConfig +testXFTPServerConfig :: XFTPServerConfig STMFileStore testXFTPServerConfig = XFTPServerConfig { xftpPort = xftpTestPort, controlPort = Nothing, fileIdSize = 16, + serverStoreCfg = XSCMemory Nothing, storeLogFile = Nothing, filesPath = xftpServerFiles, fileSizeQuota = Nothing, @@ -139,6 +209,9 @@ testXFTPServerConfig = webStaticPath = Nothing } +testXFTPServerConfig2 :: XFTPServerConfig STMFileStore +testXFTPServerConfig2 = testXFTPServerConfig {xftpPort = xftpTestPort2, filesPath = xftpServerFiles2} + testXFTPClientConfig :: XFTPClientConfig testXFTPClientConfig = defaultXFTPClientConfig @@ -152,7 +225,7 @@ testXFTPClientWith cfg client = do Right c -> client c Left e -> error $ show e 
-testXFTPServerConfigSNI :: XFTPServerConfig +testXFTPServerConfigSNI :: XFTPServerConfig STMFileStore testXFTPServerConfigSNI = testXFTPServerConfig { httpCredentials = @@ -171,7 +244,7 @@ testXFTPServerConfigSNI = withXFTPServerSNI :: HasCallStack => (HasCallStack => ThreadId -> IO a) -> IO a withXFTPServerSNI = withXFTPServerCfg testXFTPServerConfigSNI -testXFTPServerConfigEd25519SNI :: XFTPServerConfig +testXFTPServerConfigEd25519SNI :: XFTPServerConfig STMFileStore testXFTPServerConfigEd25519SNI = testXFTPServerConfig { xftpCredentials = diff --git a/tests/XFTPServerTests.hs b/tests/XFTPServerTests.hs index 0af3d7ecaa..10ac0d36c6 100644 --- a/tests/XFTPServerTests.hs +++ b/tests/XFTPServerTests.hs @@ -6,7 +6,7 @@ {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE ScopedTypeVariables #-} -module XFTPServerTests where +module XFTPServerTests (xftpServerTests) where import AgentTests.FunctionalAPITests (runRight_) import Control.Concurrent (threadDelay) @@ -52,7 +52,7 @@ import UnliftIO.STM import Util import XFTPClient -xftpServerTests :: Spec +xftpServerTests :: SpecWith XFTPTestServer xftpServerTests = before_ (createDirectoryIfMissing False xftpServerFiles) . 
after_ (removeDirectoryRecursive xftpServerFiles) $ do describe "XFTP file chunk delivery" $ do @@ -76,7 +76,7 @@ xftpServerTests = it "allowed with correct basic auth" $ testFileBasicAuth True (Just "pwd") (Just "pwd") True it "allowed with auth on server without auth" $ testFileBasicAuth True Nothing (Just "any") True it "should not change content for uploaded and committed files" testFileSkipCommitted - describe "XFTP SNI and CORS" $ do + describe "XFTP SNI and CORS" $ beforeWith (const (pure ())) $ do it "should select web certificate when SNI is used" testSNICertSelection it "should select XFTP certificate when SNI is not used" testNoSNICertSelection it "should add CORS headers when SNI is used" testCORSHeaders @@ -103,10 +103,10 @@ createTestChunk fp = do readChunk :: XFTPFileId -> IO ByteString readChunk sId = B.readFile (xftpServerFiles B.unpack (B64.encode $ unEntityId sId)) -testFileChunkDelivery :: Expectation +testFileChunkDelivery :: XFTPTestServer -> Expectation testFileChunkDelivery = xftpTest $ \c -> runRight_ $ runTestFileChunkDelivery c c -testFileChunkDelivery2 :: Expectation +testFileChunkDelivery2 :: XFTPTestServer -> Expectation testFileChunkDelivery2 = xftpTest2 $ \s r -> runRight_ $ runTestFileChunkDelivery s r runTestFileChunkDelivery :: XFTPClient -> XFTPClient -> ExceptT XFTPClientError IO () @@ -129,7 +129,7 @@ runTestFileChunkDelivery s r = do downloadXFTPChunk g r rpKey rId $ XFTPRcvChunkSpec "tests/tmp/received_chunk1" chSize digest liftIO $ B.readFile "tests/tmp/received_chunk1" `shouldReturn` bytes -testFileChunkDeliveryAddRecipients :: Expectation +testFileChunkDeliveryAddRecipients :: XFTPTestServer -> Expectation testFileChunkDeliveryAddRecipients = xftpTest4 $ \s r1 r2 r3 -> runRight_ $ do g <- liftIO C.newRandom (sndKey, spKey) <- atomically $ C.generateAuthKeyPair C.SEd25519 g @@ -150,10 +150,10 @@ testFileChunkDeliveryAddRecipients = xftpTest4 $ \s r1 r2 r3 -> runRight_ $ do testReceiveChunk r2 rpKey2 rId2 
"tests/tmp/received_chunk2" testReceiveChunk r3 rpKey3 rId3 "tests/tmp/received_chunk3" -testFileChunkDelete :: Expectation +testFileChunkDelete :: XFTPTestServer -> Expectation testFileChunkDelete = xftpTest $ \c -> runRight_ $ runTestFileChunkDelete c c -testFileChunkDelete2 :: Expectation +testFileChunkDelete2 :: XFTPTestServer -> Expectation testFileChunkDelete2 = xftpTest2 $ \s r -> runRight_ $ runTestFileChunkDelete s r runTestFileChunkDelete :: XFTPClient -> XFTPClient -> ExceptT XFTPClientError IO () @@ -179,10 +179,10 @@ runTestFileChunkDelete s r = do deleteXFTPChunk s spKey sId `catchError` (liftIO . (`shouldBe` PCEProtocolError AUTH)) -testFileChunkAck :: Expectation +testFileChunkAck :: XFTPTestServer -> Expectation testFileChunkAck = xftpTest $ \c -> runRight_ $ runTestFileChunkAck c c -testFileChunkAck2 :: Expectation +testFileChunkAck2 :: XFTPTestServer -> Expectation testFileChunkAck2 = xftpTest2 $ \s r -> runRight_ $ runTestFileChunkAck s r runTestFileChunkAck :: XFTPClient -> XFTPClient -> ExceptT XFTPClientError IO () @@ -206,7 +206,7 @@ runTestFileChunkAck s r = do ackXFTPChunk r rpKey rId `catchError` (liftIO . (`shouldBe` PCEProtocolError AUTH)) -testWrongChunkSize :: Expectation +testWrongChunkSize :: XFTPTestServer -> Expectation testWrongChunkSize = xftpTest $ \c -> do g <- C.newRandom (sndKey, spKey) <- atomically $ C.generateAuthKeyPair C.SEd25519 g @@ -218,8 +218,8 @@ testWrongChunkSize = xftpTest $ \c -> do void (createXFTPChunk c spKey file [rcvKey] Nothing) `catchError` (liftIO . 
(`shouldBe` PCEProtocolError SIZE)) -testFileChunkExpiration :: Expectation -testFileChunkExpiration = withXFTPServerCfg testXFTPServerConfig {fileExpiration} $ +testFileChunkExpiration :: XFTPTestServer -> Expectation +testFileChunkExpiration _ = withXFTPServerCfg testXFTPServerConfig {fileExpiration} $ \_ -> testXFTPClient $ \c -> runRight_ $ do g <- liftIO C.newRandom (sndKey, spKey) <- atomically $ C.generateAuthKeyPair C.SEd25519 g @@ -242,8 +242,8 @@ testFileChunkExpiration = withXFTPServerCfg testXFTPServerConfig {fileExpiration where fileExpiration = Just ExpirationConfig {ttl = 1, checkInterval = 1} -testInactiveClientExpiration :: Expectation -testInactiveClientExpiration = withXFTPServerCfg testXFTPServerConfig {inactiveClientExpiration} $ \_ -> runRight_ $ do +testInactiveClientExpiration :: XFTPTestServer -> Expectation +testInactiveClientExpiration _ = withXFTPServerCfg testXFTPServerConfig {inactiveClientExpiration} $ \_ -> runRight_ $ do disconnected <- newEmptyTMVarIO ts <- liftIO getCurrentTime c <- ExceptT $ getXFTPClient (1, testXFTPServer, Nothing) testXFTPClientConfig [] ts (\_ -> atomically $ putTMVar disconnected ()) @@ -258,8 +258,8 @@ testInactiveClientExpiration = withXFTPServerCfg testXFTPServerConfig {inactiveC where inactiveClientExpiration = Just ExpirationConfig {ttl = 1, checkInterval = 1} -testFileStorageQuota :: Expectation -testFileStorageQuota = withXFTPServerCfg testXFTPServerConfig {fileSizeQuota = Just $ chSize * 2} $ +testFileStorageQuota :: XFTPTestServer -> Expectation +testFileStorageQuota _ = withXFTPServerCfg testXFTPServerConfig {fileSizeQuota = Just $ chSize * 2} $ \_ -> testXFTPClient $ \c -> runRight_ $ do g <- liftIO C.newRandom (sndKey, spKey) <- atomically $ C.generateAuthKeyPair C.SEd25519 g @@ -286,8 +286,8 @@ testFileStorageQuota = withXFTPServerCfg testXFTPServerConfig {fileSizeQuota = J uploadXFTPChunk c spKey sId3 chunkSpec download rId3 -testFileLog :: Expectation -testFileLog = do +testFileLog :: 
XFTPTestServer -> Expectation +testFileLog _ = do g <- C.newRandom bytes <- liftIO $ createTestChunk testChunkPath (sndKey, spKey) <- atomically $ C.generateAuthKeyPair C.SEd25519 g @@ -378,8 +378,8 @@ testFileLog = do downloadXFTPChunk g c rpKey rId $ XFTPRcvChunkSpec "tests/tmp/received_chunk1" chSize digest liftIO $ B.readFile "tests/tmp/received_chunk1" `shouldReturn` bytes -testFileBasicAuth :: Bool -> Maybe BasicAuth -> Maybe BasicAuth -> Bool -> IO () -testFileBasicAuth allowNewFiles newFileBasicAuth clntAuth success = +testFileBasicAuth :: Bool -> Maybe BasicAuth -> Maybe BasicAuth -> Bool -> XFTPTestServer -> IO () +testFileBasicAuth allowNewFiles newFileBasicAuth clntAuth success _ = withXFTPServerCfg testXFTPServerConfig {allowNewFiles, newFileBasicAuth} $ \_ -> testXFTPClient $ \c -> do g <- C.newRandom @@ -400,8 +400,8 @@ testFileBasicAuth allowNewFiles newFileBasicAuth clntAuth success = void (createXFTPChunk c spKey file [rcvKey] clntAuth) `catchError` (liftIO . (`shouldBe` PCEProtocolError AUTH)) -testFileSkipCommitted :: IO () -testFileSkipCommitted = +testFileSkipCommitted :: XFTPTestServer -> IO () +testFileSkipCommitted _ = withXFTPServerCfg testXFTPServerConfig $ \_ -> testXFTPClient $ \c -> do g <- C.newRandom @@ -598,3 +598,4 @@ testStaleWebSession = decoded <- either (error . 
show) pure $ C.unPad respBody decoded `shouldBe` smpEncode SESSION + diff --git a/tests/XFTPWebTests.hs b/tests/XFTPWebTests.hs index d948235244..c9a98eef1c 100644 --- a/tests/XFTPWebTests.hs +++ b/tests/XFTPWebTests.hs @@ -45,6 +45,7 @@ import System.Process (CreateProcess (..), StdStream (..), createProcess, proc, import Test.Hspec hiding (fit, it) import Util import Simplex.FileTransfer.Server.Env (XFTPServerConfig) +import Simplex.FileTransfer.Server.Store (STMFileStore) import XFTPClient (testXFTPServerConfigEd25519SNI, testXFTPServerConfigSNI, withXFTPServerCfg, xftpTestPort) import AgentTests.FunctionalAPITests (rfGet, runRight, runRight_, sfGet, withAgent) import Simplex.Messaging.Agent (AgentClient, xftpReceiveFile, xftpSendFile, xftpStartWorkers) @@ -2854,7 +2855,7 @@ tsIntegrationTests dbCleanup = describe "integration" $ it "cross-language: Haskell upload, TS download" $ haskellUploadTsDownloadTest testXFTPServerConfigSNI -webHandshakeTest :: XFTPServerConfig -> FilePath -> Expectation +webHandshakeTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation webHandshakeTest cfg caFile = do withXFTPServerCfg cfg $ \_ -> do Fingerprint fp <- loadFileFingerprint caFile @@ -2895,7 +2896,7 @@ webHandshakeTest cfg caFile = do <> jsOut "new Uint8Array([idOk ? 1 : 0, ack.length === 0 ? 
1 : 0])" result `shouldBe` B.pack [1, 1] -pingTest :: XFTPServerConfig -> FilePath -> Expectation +pingTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation pingTest cfg caFile = do withXFTPServerCfg cfg $ \_ -> do Fingerprint fp <- loadFileFingerprint caFile @@ -2917,7 +2918,7 @@ pingTest cfg caFile = do <> jsOut "new Uint8Array([1])" result `shouldBe` B.pack [1] -fullRoundTripTest :: XFTPServerConfig -> FilePath -> Expectation +fullRoundTripTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation fullRoundTripTest cfg caFile = do createDirectoryIfMissing False "tests/tmp/xftp-server-files" withXFTPServerCfg cfg $ \_ -> do @@ -2998,7 +2999,7 @@ agentURIRoundTripTest = do <> jsOut "new Uint8Array([match])" result `shouldBe` B.pack [1] -agentUploadDownloadTest :: XFTPServerConfig -> FilePath -> Expectation +agentUploadDownloadTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation agentUploadDownloadTest cfg caFile = do createDirectoryIfMissing False "tests/tmp/xftp-server-files" withXFTPServerCfg cfg $ \_ -> do @@ -3031,7 +3032,7 @@ agentUploadDownloadTest cfg caFile = do <> jsOut "new Uint8Array([nameMatch, sizeMatch, dataMatch])" result `shouldBe` B.pack [1, 1, 1] -agentDeleteTest :: XFTPServerConfig -> FilePath -> Expectation +agentDeleteTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation agentDeleteTest cfg caFile = do createDirectoryIfMissing False "tests/tmp/xftp-server-files" withXFTPServerCfg cfg $ \_ -> do @@ -3063,7 +3064,7 @@ agentDeleteTest cfg caFile = do <> jsOut "new Uint8Array([deleted])" result `shouldBe` B.pack [1] -agentRedirectTest :: XFTPServerConfig -> FilePath -> Expectation +agentRedirectTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation agentRedirectTest cfg caFile = do createDirectoryIfMissing False "tests/tmp/xftp-server-files" withXFTPServerCfg cfg $ \_ -> do @@ -3097,7 +3098,7 @@ agentRedirectTest cfg caFile = do <> jsOut "new Uint8Array([hasRedirect, nameMatch, sizeMatch, 
dataMatch])" result `shouldBe` B.pack [1, 1, 1, 1] -tsUploadHaskellDownloadTest :: XFTPServerConfig -> FilePath -> Expectation +tsUploadHaskellDownloadTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation tsUploadHaskellDownloadTest cfg caFile = do createDirectoryIfMissing False "tests/tmp/xftp-server-files" createDirectoryIfMissing False recipientFiles @@ -3132,7 +3133,7 @@ tsUploadHaskellDownloadTest cfg caFile = do downloadedData <- B.readFile outPath downloadedData `shouldBe` originalData -tsUploadRedirectHaskellDownloadTest :: XFTPServerConfig -> FilePath -> Expectation +tsUploadRedirectHaskellDownloadTest :: XFTPServerConfig STMFileStore -> FilePath -> Expectation tsUploadRedirectHaskellDownloadTest cfg caFile = do createDirectoryIfMissing False "tests/tmp/xftp-server-files" createDirectoryIfMissing False recipientFiles @@ -3167,7 +3168,7 @@ tsUploadRedirectHaskellDownloadTest cfg caFile = do downloadedData <- B.readFile outPath downloadedData `shouldBe` originalData -haskellUploadTsDownloadTest :: XFTPServerConfig -> Expectation +haskellUploadTsDownloadTest :: XFTPServerConfig STMFileStore -> Expectation haskellUploadTsDownloadTest cfg = do createDirectoryIfMissing False "tests/tmp/xftp-server-files" createDirectoryIfMissing False senderFiles diff --git a/tests/manual/README.md b/tests/manual/README.md new file mode 100644 index 0000000000..74ccc4d51d --- /dev/null +++ b/tests/manual/README.md @@ -0,0 +1,170 @@ +# XFTP Server Manual Test Suite + +Automated integration tests for the XFTP server covering memory and PostgreSQL backends, migration, persistence, blocking, and edge cases. + +- `xftp-test.py` — automated test script (143 checks) +- `xftp-server-testing.md` — manual step-by-step guide covering the same scenarios + +## Prerequisites + +- Linux (tested) +- Python 3 +- Haskell toolchain (`cabal`, `ghc`) +- PostgreSQL 16+ (`postgresql-16` package or equivalent) + +## Setup + +### 1. 
Build the XFTP binaries + +```bash +cabal build -fserver_postgres exe:xftp-server exe:xftp +``` + +### 2. Set up a local PostgreSQL instance + +The test script connects to PostgreSQL via `PGHOST` (Unix socket path). Set up a local instance that you own (no root required): + +```bash +# Pick a data directory and socket directory +export PGDATA=/tmp/pgdata +export PGHOST=/tmp/pgsocket + +# Clean up any previous instance +rm -rf $PGDATA $PGHOST +mkdir -p $PGDATA $PGHOST + +# Initialize the cluster +/usr/lib/postgresql/16/bin/initdb -D $PGDATA --auth=trust --no-locale --encoding=UTF8 + +# Configure to listen on our socket directory and localhost TCP +echo "unix_socket_directories = '$PGHOST'" >> $PGDATA/postgresql.conf +echo "listen_addresses = '127.0.0.1'" >> $PGDATA/postgresql.conf + +# Start the server +/usr/lib/postgresql/16/bin/pg_ctl -D $PGDATA -l /tmp/pg.log start + +# Verify it's running +pg_isready -h $PGHOST +# Expected: /tmp/pgsocket:5432 - accepting connections +``` + +### 3. Create the required PostgreSQL roles + +The test script expects two roles to exist: + +- `postgres` — admin role used by the test bracket to create/drop databases +- `xftp` — test user for the XFTP server database + +```bash +# Create the postgres admin role (if initdb created the cluster as your user) +psql -h $PGHOST -d postgres -c "CREATE USER postgres WITH SUPERUSER;" + +# Create the xftp test user +psql -h $PGHOST -U postgres -d postgres -c "CREATE USER xftp WITH SUPERUSER;" +``` + +Verify both roles exist: + +```bash +psql -h $PGHOST -U postgres -d postgres -c "\du" +``` + +## Run the test suite + +```bash +PGHOST=/tmp/pgsocket python3 tests/manual/xftp-test.py +``` + +Expected output (abbreviated): + +``` +XFTP server: /project/git/simplexmq-4/dist-newstyle/.../xftp-server +XFTP client: /project/git/simplexmq-4/dist-newstyle/.../xftp +Test dir: /project/git/simplexmq-4/xftp-test +PGHOST: /tmp/pgsocket + +=== 1.
Basic send/receive (memory) === + [PASS] 1.1 rcv1.xftp created + ... +=== 12. Recipient cascade and storage accounting === + ... + [PASS] 12.2e DB files after delete (0) + +========================================== +Results: 143 passed, 0 failed +========================================== +``` + +Total runtime: ~3 minutes. Exit code 0 on success, 1 on any failure. + +## What the suite tests + +| # | Section | Checks | Scope | +|---|---------|--------|-------| +| 1 | Basic memory | 9 | Send/recv/delete on STM backend | +| 2 | Basic PostgreSQL | 7 | Send/recv/delete on PG backend, DB row verification | +| 3 | Migration memory → PG | 12 | Send on memory, partial recv, import, recv remaining | +| 4 | Migration PG → memory | 5 | Export, switch to memory, delete exported files | +| 4b | Send PG, recv memory | 7 | Reverse direction — send on PG, export, recv on memory | +| 5 | Restart persistence | 6 | memory+log / memory no log / PostgreSQL | +| 6 | Config edge cases | 15 | store log conflicts, missing schema, dual-write, import/export guards | +| 7 | File blocking | 13 | Control port block, block state survives migration both directions | +| 8 | Migration edge cases | 23 | Acked recipients preserved, deleted files absent, 20MB multi-chunk, double round-trip | +| 9 | Auth & access control | 9 | allowNewFiles, basic auth (none/wrong/correct/server-no-auth), quota | +| 10 | Control port ops | 8 | No auth, wrong auth, stats, delete, invalid block | +| 11 | Blocked sender delete | 3 | Sender can't delete blocked file | +| 12 | Cascade & storage | 8 | Recipient cascade, disk/DB accounting | + +## Troubleshooting + +### Server binary not found + +``` +Binary not found: .../xftp-server +Run: cabal build -fserver_postgres exe:xftp-server +``` + +Run the cabal build command from step 1. + +### Cannot connect to PostgreSQL + +``` +Cannot connect to PostgreSQL as postgres. Is it running? +``` + +Check: +1. `pg_isready -h $PGHOST` returns "accepting connections" +2. 
`PGHOST` environment variable is exported in the shell running the test +3. The `postgres` role exists: `psql -h $PGHOST -U postgres -d postgres -c "SELECT 1;"` + +### PostgreSQL user 'xftp' does not exist + +``` +PostgreSQL user 'xftp' does not exist. +Run: psql -U postgres -c "CREATE USER xftp WITH SUPERUSER;" +``` + +Run the create-user command from step 3. + +### Port 7921 or 15230 already in use + +The test uses port 7921 for XFTP and 15230 for the control port. If these are occupied, stop whatever is using them or edit `PORT` / `CONTROL_PORT` constants at the top of `xftp-test.py`. + +### Server fails to start mid-test + +Check `xftp-test/server.log` in the project directory for the server's stdout/stderr. The test framework prints the last 5 lines of the log on startup failure. + +## Stopping the test PostgreSQL instance + +```bash +/usr/lib/postgresql/16/bin/pg_ctl -D /tmp/pgdata stop +``` + +## Cleanup + +The test script cleans up its own test directory (`./xftp-test/`) and drops the test database (`xftp_server_store`) on completion. To also remove the PostgreSQL instance: + +```bash +/usr/lib/postgresql/16/bin/pg_ctl -D /tmp/pgdata stop +rm -rf /tmp/pgdata /tmp/pgsocket /tmp/pg.log +``` diff --git a/tests/manual/xftp-server-testing.md b/tests/manual/xftp-server-testing.md new file mode 100644 index 0000000000..6d72a0cf87 --- /dev/null +++ b/tests/manual/xftp-server-testing.md @@ -0,0 +1,1437 @@ +# XFTP Server Manual Testing Guide + +Manual testing of the XFTP server with memory (STM) and PostgreSQL backends, including migration between them, blocking, auth, quota, control port, and edge cases. + +All paths are self-contained under `./xftp-test/`. The automated version of this guide is `xftp-test.py` (143 checks). This guide mirrors the script 1:1. + +## Prerequisites + +See `tests/manual/README.md` for PostgreSQL setup. 
After setup, in the shell running this guide: + +```bash +cabal build -fserver_postgres exe:xftp-server exe:xftp + +export XFTP_SERVER=$(cabal list-bin exe:xftp-server) +export XFTP=$(cabal list-bin exe:xftp) +export TEST_DIR=$(pwd)/xftp-test +export XFTP_SERVER_CFG_PATH=$TEST_DIR/etc +export XFTP_SERVER_LOG_PATH=$TEST_DIR/var +export PGHOST=/tmp/pgsocket +``` + +PostgreSQL roles `postgres` and `xftp` (both SUPERUSER) must exist — see the README. + +Helper functions for editing the INI config: + +```bash +ini_set() { sed -i "s|^${1}:.*|${1}: ${2}|" $XFTP_SERVER_CFG_PATH/file-server.ini; } +ini_uncomment() { sed -i "s|^# ${1}:|${1}:|" $XFTP_SERVER_CFG_PATH/file-server.ini; } +ini_comment() { sed -i "s|^${1}:|# ${1}:|" $XFTP_SERVER_CFG_PATH/file-server.ini; } + +enable_control_port() { + sed -i 's/^# control_port: 5226/control_port: 15230/' $XFTP_SERVER_CFG_PATH/file-server.ini + sed -i 's/^# control_port_admin_password:.*/control_port_admin_password: testadmin/' $XFTP_SERVER_CFG_PATH/file-server.ini +} + +# Extract recipient IDs from a file description (chunk format: "- N:rcvId:privKey:digest") +get_recipient_ids() { + grep '^ *- [0-9]' "$1" | cut -d: -f2 +} + +# Send a command to the control port as admin and print the response +control_cmd() { + python3 -c " +import socket, time +s = socket.create_connection(('127.0.0.1', 15230), timeout=5) +s.settimeout(2) +# Drain welcome +time.sleep(0.3) +s.recv(4096) +s.sendall(b'auth testadmin\n') +time.sleep(0.3); s.recv(4096) +s.sendall(b'$1\n') +time.sleep(0.3) +print(s.recv(4096).decode().strip()) +s.sendall(b'quit\n'); s.close() +" +} +``` + +## Important notes + +- **`-y` on `recv`** auto-confirms, ACKs chunks on the server, and deletes the descriptor file. +- **`-y` on `del`** auto-confirms and deletes the sender descriptor. +- **`database import` and `database export`** prompt for confirmation. Answer uppercase **`Y`**. +- Server defaults to port 443 (requires root). All tests use port 7921. 
+- **`init` does not create the store log file.** It is created on first `server start` with `enable: on`. +- **`--confirm-migrations up`** auto-confirms PG schema migrations. +- With `store_files: database`, the PG schema must already exist — create manually or use `database import` which creates it automatically. + +## 1. Basic send/receive (memory backend) + +### 1.1 Initialize and start server + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 + +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$! +sleep 2 +``` + +### 1.2 Send a file with 2 recipients + +```bash +dd if=/dev/urandom of=$TEST_DIR/testfile.bin bs=1M count=5 2>/dev/null + +$XFTP send $TEST_DIR/testfile.bin $TEST_DIR/descriptions -s "$SRV" -n 2 -v + +ls $TEST_DIR/descriptions/testfile.bin.xftp/ +# Expected: rcv1.xftp rcv2.xftp snd.xftp.private +``` + +### 1.3 Receive the file (recipient 1) + +```bash +$XFTP recv $TEST_DIR/descriptions/testfile.bin.xftp/rcv1.xftp $TEST_DIR/received -y -v +diff $TEST_DIR/testfile.bin $TEST_DIR/received/testfile.bin +# Expected: no output + +ls $TEST_DIR/descriptions/testfile.bin.xftp/rcv1.xftp 2>&1 +# Expected: No such file or directory (deleted by -y) +``` + +### 1.4 Receive the file (recipient 2) + +```bash +rm -f $TEST_DIR/received/testfile.bin + +$XFTP recv $TEST_DIR/descriptions/testfile.bin.xftp/rcv2.xftp $TEST_DIR/received -y -v +diff $TEST_DIR/testfile.bin $TEST_DIR/received/testfile.bin +``` + +### 1.5 Delete the file from server + +```bash +$XFTP del $TEST_DIR/descriptions/testfile.bin.xftp/snd.xftp.private -y -v + +ls $TEST_DIR/descriptions/testfile.bin.xftp/snd.xftp.private 2>&1 +# Expected: No such file or directory + +ls $TEST_DIR/files/ | wc -l +# Expected: 0 +``` + +### 1.6 Stop server + +```bash +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +## 2. 
Basic send/receive (PostgreSQL backend) + +### 2.1 Initialize fresh server for PostgreSQL + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size + +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +psql -U xftp -d xftp_server_store -c "CREATE SCHEMA IF NOT EXISTS xftp_server;" +``` + +### 2.2 Start server with PostgreSQL + +```bash +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$! +sleep 2 +``` + +### 2.3 Send, receive, verify + +```bash +dd if=/dev/urandom of=$TEST_DIR/testfile.bin bs=1M count=5 2>/dev/null + +$XFTP send $TEST_DIR/testfile.bin $TEST_DIR/descriptions -s "$SRV" -n 2 -v +$XFTP recv $TEST_DIR/descriptions/testfile.bin.xftp/rcv1.xftp $TEST_DIR/received -y -v +diff $TEST_DIR/testfile.bin $TEST_DIR/received/testfile.bin +``` + +### 2.4 Verify data is in PostgreSQL + +```bash +psql -U xftp -d xftp_server_store \ + -c "SET search_path TO xftp_server; SELECT count(*) AS files FROM files;" +# Expected: > 0 + +psql -U xftp -d xftp_server_store \ + -c "SET search_path TO xftp_server; SELECT count(*) AS recipients FROM recipients;" +# Expected: > 0 +``` + +### 2.5 Delete and verify all cleaned up + +```bash +$XFTP del $TEST_DIR/descriptions/testfile.bin.xftp/snd.xftp.private -y -v + +psql -U xftp -d xftp_server_store -c "SET search_path TO xftp_server; SELECT count(*) FROM files;" +# Expected: 0 +psql -U xftp -d xftp_server_store -c "SET search_path TO xftp_server; SELECT count(*) FROM recipients;" +# Expected: 0 + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +## 3. 
Migration: memory to PostgreSQL + +### 3.1 Start with memory backend, send files + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/fileA.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/fileA.bin $TEST_DIR/descriptions -s "$SRV" -n 2 + +dd if=/dev/urandom of=$TEST_DIR/fileB.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/fileB.bin $TEST_DIR/descriptions -s "$SRV" -n 2 + +# Partially receive fileB (only rcv1) +$XFTP recv $TEST_DIR/descriptions/fileB.bin.xftp/rcv1.xftp $TEST_DIR/received -y +diff $TEST_DIR/fileB.bin $TEST_DIR/received/fileB.bin +``` + +### 3.2 Stop server and migrate to PostgreSQL + +```bash +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; sleep 1 + +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size + +echo Y | $XFTP_SERVER database import +# Expected: "Loaded N files, M recipients" / "Imported N files" / "Imported M recipients" +# "Store log renamed to ...file-server-store.log.bak" + +ls $XFTP_SERVER_LOG_PATH/file-server-store.log.bak # should exist +ls $XFTP_SERVER_LOG_PATH/file-server-store.log 2>&1 # should NOT exist + +psql -U xftp -d xftp_server_store <<'SQL' +SET search_path TO xftp_server; +SELECT count(*) AS file_count FROM files; +SELECT count(*) AS recipient_count FROM recipients; +SQL +``` + +### 3.3 Start server with PostgreSQL and receive remaining files + +```bash +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$!; sleep 2 + +$XFTP recv $TEST_DIR/descriptions/fileA.bin.xftp/rcv1.xftp $TEST_DIR/received -y +diff $TEST_DIR/fileA.bin $TEST_DIR/received/fileA.bin 
+ +$XFTP recv $TEST_DIR/descriptions/fileA.bin.xftp/rcv2.xftp $TEST_DIR/received -y + +rm -f $TEST_DIR/received/fileB.bin +$XFTP recv $TEST_DIR/descriptions/fileB.bin.xftp/rcv2.xftp $TEST_DIR/received -y +diff $TEST_DIR/fileB.bin $TEST_DIR/received/fileB.bin + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +## 4. Migration: PostgreSQL back to memory + +Continues from section 3 state. + +### 4.1 Export from PostgreSQL + +```bash +echo Y | $XFTP_SERVER database export +ls $XFTP_SERVER_LOG_PATH/file-server-store.log # should exist +head -5 $XFTP_SERVER_LOG_PATH/file-server-store.log +# Should contain FNEW, FADD, FPUT entries +``` + +### 4.2 Switch back to memory and start + +```bash +ini_set store_files memory +ini_comment db_connection +ini_comment db_schema +ini_comment db_pool_size + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 +``` + +### 4.3 Verify deletes work on round-trip data + +```bash +$XFTP del $TEST_DIR/descriptions/fileA.bin.xftp/snd.xftp.private -y +$XFTP del $TEST_DIR/descriptions/fileB.bin.xftp/snd.xftp.private -y + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +## 4b. 
Send on PostgreSQL, export, receive on memory + +### 4b.1 Start PG server and send a file + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +psql -U xftp -d xftp_server_store -c "CREATE SCHEMA IF NOT EXISTS xftp_server;" + +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/pgfileA.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/pgfileA.bin $TEST_DIR/descriptions -s "$SRV" -n 2 + +# Receive rcv1 on PG +$XFTP recv $TEST_DIR/descriptions/pgfileA.bin.xftp/rcv1.xftp $TEST_DIR/received -y +diff $TEST_DIR/pgfileA.bin $TEST_DIR/received/pgfileA.bin + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; sleep 1 +``` + +### 4b.2 Export and switch to memory + +```bash +echo Y | $XFTP_SERVER database export + +ini_set store_files memory +ini_comment db_connection +ini_comment db_schema +ini_comment db_pool_size + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 +``` + +### 4b.3 Receive remaining file on memory backend + +```bash +rm -f $TEST_DIR/received/pgfileA.bin +$XFTP recv $TEST_DIR/descriptions/pgfileA.bin.xftp/rcv2.xftp $TEST_DIR/received -y +diff $TEST_DIR/pgfileA.bin $TEST_DIR/received/pgfileA.bin + +$XFTP del $TEST_DIR/descriptions/pgfileA.bin.xftp/snd.xftp.private -y + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +## 5. 
Server restart persistence + +### 5.1 Memory backend with store log + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/persist.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/persist.bin $TEST_DIR/descriptions -s "$SRV" -n 1 + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; sleep 1 + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +$XFTP recv $TEST_DIR/descriptions/persist.bin.xftp/rcv1.xftp $TEST_DIR/received -y +diff $TEST_DIR/persist.bin $TEST_DIR/received/persist.bin + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 5.2 Memory backend WITHOUT store log + +```bash +rm -rf $TEST_DIR/descriptions/* $TEST_DIR/received/* +ini_set enable off + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/persist2.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/persist2.bin $TEST_DIR/descriptions -s "$SRV" -n 1 + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; sleep 1 + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +$XFTP recv $TEST_DIR/descriptions/persist2.bin.xftp/rcv1.xftp $TEST_DIR/received -y 2>&1 +# Expected: CLIError "PCEProtocolError AUTH" + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 5.3 PostgreSQL backend + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +psql -U xftp -d xftp_server_store -c "CREATE SCHEMA 
IF NOT EXISTS xftp_server;" + +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/persist.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/persist.bin $TEST_DIR/descriptions -s "$SRV" -n 1 + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; sleep 1 + +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$!; sleep 2 + +$XFTP recv $TEST_DIR/descriptions/persist.bin.xftp/rcv1.xftp $TEST_DIR/received -y +diff $TEST_DIR/persist.bin $TEST_DIR/received/persist.bin + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +## 6. Edge cases + +### 6.1 Receive after server-side delete + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/deltest.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/deltest.bin $TEST_DIR/descriptions -s "$SRV" -n 2 + +$XFTP del $TEST_DIR/descriptions/deltest.bin.xftp/snd.xftp.private -y + +$XFTP recv $TEST_DIR/descriptions/deltest.bin.xftp/rcv2.xftp $TEST_DIR/received -y 2>&1 +# Expected: AUTH error + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 6.2 Multiple recipients and partial acknowledgment + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/multi.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/multi.bin $TEST_DIR/descriptions -s "$SRV" -n 3 + +$XFTP recv $TEST_DIR/descriptions/multi.bin.xftp/rcv1.xftp $TEST_DIR/received -y +diff $TEST_DIR/multi.bin $TEST_DIR/received/multi.bin + +rm -f $TEST_DIR/received/multi.bin +$XFTP recv 
$TEST_DIR/descriptions/multi.bin.xftp/rcv2.xftp $TEST_DIR/received -y +diff $TEST_DIR/multi.bin $TEST_DIR/received/multi.bin + +rm -f $TEST_DIR/received/multi.bin +$XFTP recv $TEST_DIR/descriptions/multi.bin.xftp/rcv3.xftp $TEST_DIR/received -y +diff $TEST_DIR/multi.bin $TEST_DIR/received/multi.bin + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 6.3 Switching to database mode with existing store log (should fail) + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +# Run memory mode to create a store log +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 +dd if=/dev/urandom of=$TEST_DIR/dummy.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/dummy.bin $TEST_DIR/descriptions -s "$SRV" -n 1 +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; sleep 1 + +ls $XFTP_SERVER_LOG_PATH/file-server-store.log # should exist + +# Switch to DB mode without importing +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size +psql -U xftp -d xftp_server_store -c "CREATE SCHEMA IF NOT EXISTS xftp_server;" + +$XFTP_SERVER start --confirm-migrations up 2>&1 +# Expected error: +# Error: store log file .../file-server-store.log exists but store_files is `database`. +# Use `file-server database import` to migrate, or set `db_store_log: on`. 
+``` + +### 6.4 Database mode without schema (should fail) + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size + +# Do NOT create the schema +$XFTP_SERVER start --confirm-migrations up 2>&1 +# Expected error: +# connectPostgresStore, schema xftp_server does not exist, exiting. +``` + +### 6.5 Dual-write mode: database + db_store_log: on + +Verifies that writes in dual-write mode land in BOTH the DB and the store log. + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size +ini_uncomment db_store_log +ini_set db_store_log on +psql -U xftp -d xftp_server_store -c "CREATE SCHEMA IF NOT EXISTS xftp_server;" +rm -f $XFTP_SERVER_LOG_PATH/file-server-store.log + +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$!; sleep 2 + +# Send a new file in dual-write mode +dd if=/dev/urandom of=$TEST_DIR/dual.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/dual.bin $TEST_DIR/descriptions -s "$SRV" -n 1 + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; sleep 1 + +# Both the store log AND the DB must have entries +ls -la $XFTP_SERVER_LOG_PATH/file-server-store.log # size > 0 +psql -U xftp -d xftp_server_store -c "SET search_path TO xftp_server; SELECT count(*) FROM files;" +# Expected: > 0 + +# Now 
switch to memory-only and verify the file is accessible (proves store log has valid data) +ini_set store_files memory +ini_comment db_connection +ini_comment db_schema +ini_comment db_pool_size +ini_comment db_store_log + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +$XFTP recv $TEST_DIR/descriptions/dual.bin.xftp/rcv1.xftp $TEST_DIR/received -y +diff $TEST_DIR/dual.bin $TEST_DIR/received/dual.bin +echo "Dual-write mode verified: same file accessible from DB and from store log" + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 6.6 Import to non-empty database (should fail) + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +psql -U xftp -d xftp_server_store -c "CREATE SCHEMA IF NOT EXISTS xftp_server;" +rm -f $XFTP_SERVER_LOG_PATH/file-server-store.log + +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/dummy.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/dummy.bin $TEST_DIR/descriptions -s "$SRV" -n 1 +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null + +# Export produces a real valid store log, then re-import into non-empty DB +echo Y | $XFTP_SERVER database export +echo Y | $XFTP_SERVER database import 2>&1 +# Expected: import fails because DB is not empty +``` + +### 6.7 Import without store log file (should fail) + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size 
+
+rm -f $XFTP_SERVER_LOG_PATH/file-server-store.log
+
+echo Y | $XFTP_SERVER database import 2>&1
+# Expected: Error: store log file ... does not exist.
+# NOTE(review): assumes the missing-store-log check runs before any DB
+# connection is attempted — this section never creates the database. If
+# import connects to the DB first, create the database as in 6.6 before running.
+```
+
+### 6.8 Export when store log already exists (should fail)
+
+```bash
+rm -rf $TEST_DIR
+mkdir -p $TEST_DIR/{files,descriptions,received}
+psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;"
+psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;"
+
+$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1
+ini_set port 7921
+ini_set store_files database
+ini_uncomment db_connection
+ini_uncomment db_schema
+ini_uncomment db_pool_size
+FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint)
+SRV="xftp://$FP@127.0.0.1:7921"
+
+psql -U xftp -d xftp_server_store -c "CREATE SCHEMA IF NOT EXISTS xftp_server;"
+rm -f $XFTP_SERVER_LOG_PATH/file-server-store.log
+
+$XFTP_SERVER start --confirm-migrations up &
+SERVER_PID=$!; sleep 2
+
+dd if=/dev/urandom of=$TEST_DIR/exp.bin bs=1M count=1 2>/dev/null
+$XFTP send $TEST_DIR/exp.bin $TEST_DIR/descriptions -s "$SRV" -n 1
+kill $SERVER_PID; wait $SERVER_PID 2>/dev/null
+
+echo "existing" > $XFTP_SERVER_LOG_PATH/file-server-store.log
+echo Y | $XFTP_SERVER database export 2>&1
+# Expected: Error: store log file ... already exists.
+```
+
+## 7. 
File blocking via control port + +### 7.1 Block a file, verify receive fails + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +enable_control_port +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/blockme.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/blockme.bin $TEST_DIR/descriptions -s "$SRV" -n 2 + +# Extract the first recipient ID from the descriptor +RCV_ID=$(get_recipient_ids $TEST_DIR/descriptions/blockme.bin.xftp/rcv1.xftp | head -1) +echo "Blocking recipient ID: $RCV_ID" + +control_cmd "block $RCV_ID reason=spam" +# Expected: ok + +$XFTP recv $TEST_DIR/descriptions/blockme.bin.xftp/rcv1.xftp $TEST_DIR/received -y 2>&1 +# Expected: CLIError "PCEProtocolError (BLOCKED {blockInfo = BlockingInfo {reason = BRSpam, ...}})" + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 7.2 Blocked file survives memory -> PG migration + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +enable_control_port +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/blockmigrate.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/blockmigrate.bin $TEST_DIR/descriptions -s "$SRV" -n 2 + +RCV_ID=$(get_recipient_ids $TEST_DIR/descriptions/blockmigrate.bin.xftp/rcv1.xftp | head -1) +control_cmd "block $RCV_ID reason=content" + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; sleep 1 + +# Migrate to PG +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment 
db_pool_size +echo Y | $XFTP_SERVER database import + +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$!; sleep 2 + +$XFTP recv $TEST_DIR/descriptions/blockmigrate.bin.xftp/rcv1.xftp $TEST_DIR/received -y 2>&1 +# Expected: BLOCKED error + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 7.3 Blocked file survives PG -> memory export + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size +enable_control_port +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +psql -U xftp -d xftp_server_store -c "CREATE SCHEMA IF NOT EXISTS xftp_server;" +rm -f $XFTP_SERVER_LOG_PATH/file-server-store.log + +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/blockpg.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/blockpg.bin $TEST_DIR/descriptions -s "$SRV" -n 2 + +RCV_ID=$(get_recipient_ids $TEST_DIR/descriptions/blockpg.bin.xftp/rcv1.xftp | head -1) +control_cmd "block $RCV_ID reason=spam" + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; sleep 1 + +echo Y | $XFTP_SERVER database export +ini_set store_files memory +ini_comment db_connection +ini_comment db_schema +ini_comment db_pool_size + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +$XFTP recv $TEST_DIR/descriptions/blockpg.bin.xftp/rcv1.xftp $TEST_DIR/received -y 2>&1 +# Expected: BLOCKED error + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +## 8. 
Migration edge cases + +### 8.1 Acked recipient preserved after memory -> PG migration + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/acktest.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/acktest.bin $TEST_DIR/descriptions -s "$SRV" -n 2 + +# BACKUP rcv1 descriptor before recv (recv -y deletes it) +cp $TEST_DIR/descriptions/acktest.bin.xftp/rcv1.xftp $TEST_DIR/rcv1_backup.xftp + +# Recv rcv1 (acks it server-side, deletes descriptor) +$XFTP recv $TEST_DIR/descriptions/acktest.bin.xftp/rcv1.xftp $TEST_DIR/received -y + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; sleep 1 + +# Migrate to PG +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size +echo Y | $XFTP_SERVER database import + +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$!; sleep 2 + +# Acked rcv1 MUST fail (recipient removed by ack, preserved through migration) +$XFTP recv $TEST_DIR/rcv1_backup.xftp $TEST_DIR/received -y 2>&1 +# Expected: AUTH error + +# Unacked rcv2 MUST work +rm -f $TEST_DIR/received/acktest.bin +$XFTP recv $TEST_DIR/descriptions/acktest.bin.xftp/rcv2.xftp $TEST_DIR/received -y +diff $TEST_DIR/acktest.bin $TEST_DIR/received/acktest.bin + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 8.2 Acked recipient preserved after PG -> memory export + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb 
--ip 127.0.0.1 +ini_set port 7921 +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +psql -U xftp -d xftp_server_store -c "CREATE SCHEMA IF NOT EXISTS xftp_server;" +rm -f $XFTP_SERVER_LOG_PATH/file-server-store.log + +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/ackpg.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/ackpg.bin $TEST_DIR/descriptions -s "$SRV" -n 2 + +cp $TEST_DIR/descriptions/ackpg.bin.xftp/rcv1.xftp $TEST_DIR/rcv1_backup.xftp +$XFTP recv $TEST_DIR/descriptions/ackpg.bin.xftp/rcv1.xftp $TEST_DIR/received -y + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; sleep 1 + +echo Y | $XFTP_SERVER database export + +ini_set store_files memory +ini_comment db_connection +ini_comment db_schema +ini_comment db_pool_size + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +$XFTP recv $TEST_DIR/rcv1_backup.xftp $TEST_DIR/received -y 2>&1 +# Expected: AUTH error + +rm -f $TEST_DIR/received/ackpg.bin +$XFTP recv $TEST_DIR/descriptions/ackpg.bin.xftp/rcv2.xftp $TEST_DIR/received -y +diff $TEST_DIR/ackpg.bin $TEST_DIR/received/ackpg.bin + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 8.3 Deleted file absent after migration (positive + negative control) + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +# File to be deleted (use n=2, backup rcv2 before delete) +dd if=/dev/urandom of=$TEST_DIR/delmigrate.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/delmigrate.bin 
$TEST_DIR/descriptions -s "$SRV" -n 2 +cp $TEST_DIR/descriptions/delmigrate.bin.xftp/rcv2.xftp $TEST_DIR/rcv2_del_backup.xftp + +$XFTP recv $TEST_DIR/descriptions/delmigrate.bin.xftp/rcv1.xftp $TEST_DIR/received -y +$XFTP del $TEST_DIR/descriptions/delmigrate.bin.xftp/snd.xftp.private -y + +# Positive control: a file that is NOT deleted +dd if=/dev/urandom of=$TEST_DIR/keepmigrate.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/keepmigrate.bin $TEST_DIR/descriptions -s "$SRV" -n 1 + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; sleep 1 + +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size +echo Y | $XFTP_SERVER database import + +psql -U xftp -d xftp_server_store -c "SET search_path TO xftp_server; SELECT count(*) FROM files;" +# Expected: > 0 (kept file imported) + +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$!; sleep 2 + +# Positive: kept file MUST be receivable +$XFTP recv $TEST_DIR/descriptions/keepmigrate.bin.xftp/rcv1.xftp $TEST_DIR/received -y +diff $TEST_DIR/keepmigrate.bin $TEST_DIR/received/keepmigrate.bin + +# Negative: deleted file's rcv2 MUST return AUTH +$XFTP recv $TEST_DIR/rcv2_del_backup.xftp $TEST_DIR/received -y 2>&1 +# Expected: AUTH error + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 8.4 Large multi-chunk file integrity after migration + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/largefile.bin bs=1M count=20 2>/dev/null +$XFTP send $TEST_DIR/largefile.bin $TEST_DIR/descriptions -s "$SRV" -n 1 + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; 
sleep 1 + +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size +echo Y | $XFTP_SERVER database import + +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$!; sleep 2 + +$XFTP recv $TEST_DIR/descriptions/largefile.bin.xftp/rcv1.xftp $TEST_DIR/received -y +diff $TEST_DIR/largefile.bin $TEST_DIR/received/largefile.bin +echo "20MB multi-chunk integrity preserved" + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 8.5 Double round-trip: memory -> PG -> memory -> PG + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 +dd if=/dev/urandom of=$TEST_DIR/roundtrip.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/roundtrip.bin $TEST_DIR/descriptions -s "$SRV" -n 1 +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; sleep 1 + +# Round 1: memory -> PG +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size +echo Y | $XFTP_SERVER database import +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$!; sleep 2 +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; sleep 1 + +# Round 1: PG -> memory +echo Y | $XFTP_SERVER database export +ini_set store_files memory +ini_comment db_connection +ini_comment db_schema +ini_comment db_pool_size +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null; sleep 1 + +# Round 2: memory -> PG +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema 
+ini_uncomment db_pool_size +echo Y | $XFTP_SERVER database import + +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$!; sleep 2 + +$XFTP recv $TEST_DIR/descriptions/roundtrip.bin.xftp/rcv1.xftp $TEST_DIR/received -y +diff $TEST_DIR/roundtrip.bin $TEST_DIR/received/roundtrip.bin +echo "File intact after memory->PG->memory->PG double round-trip" + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +## 9. Auth and access control + +### 9.1 allowNewFiles=false rejects upload + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +ini_set new_files off +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/reject.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/reject.bin $TEST_DIR/descriptions -s "$SRV" -n 1 2>&1 +# Expected: upload fails + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 9.2 Basic auth: no password → fails with AUTH + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +sed -i 's/^# create_password:.*$/create_password: secret123/' $XFTP_SERVER_CFG_PATH/file-server.ini +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/authtest.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/authtest.bin $TEST_DIR/descriptions -s "$SRV" -n 1 2>&1 +# Expected: AUTH error + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 9.3 Basic auth: wrong password → PCEProtocolError AUTH + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +sed -i 's/^# create_password:.*$/create_password: secret123/' 
$XFTP_SERVER_CFG_PATH/file-server.ini +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +WRONG_SRV="xftp://$FP:wrongpass@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/authtest.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/authtest.bin $TEST_DIR/descriptions -s "$WRONG_SRV" -n 1 2>&1 +# Expected: "PCEProtocolError AUTH" in output +ls $TEST_DIR/descriptions/authtest.bin.xftp/rcv1.xftp 2>&1 +# Expected: No such file or directory (no descriptor created) + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 9.4 Basic auth: correct password → succeeds + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +sed -i 's/^# create_password:.*$/create_password: secret123/' $XFTP_SERVER_CFG_PATH/file-server.ini +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +CORRECT_SRV="xftp://$FP:secret123@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/authok.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/authok.bin $TEST_DIR/descriptions -s "$CORRECT_SRV" -n 1 + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 9.5 Server without auth, client sends auth → succeeds + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +AUTH_SRV="xftp://$FP:anypass@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/noauth.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/noauth.bin $TEST_DIR/descriptions -s "$AUTH_SRV" -n 1 + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 9.6 Storage quota boundary + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +$XFTP_SERVER init -p $TEST_DIR/files -q 3mb --ip 127.0.0.1 +ini_set port 7921 +FP=$(cat 
$XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/quota1.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/quota1.bin $TEST_DIR/descriptions -s "$SRV" -n 1 + +dd if=/dev/urandom of=$TEST_DIR/quota2.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/quota2.bin $TEST_DIR/descriptions -s "$SRV" -n 1 + +dd if=/dev/urandom of=$TEST_DIR/quota3.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/quota3.bin $TEST_DIR/descriptions -s "$SRV" -n 1 2>&1 +# Expected: QUOTA error + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +### 9.7 File expiration + +File expiration is not testable in a fast manual test because `createdAt` uses hour-level precision (`fileTimePrecision = 3600s`) and the check interval is hardcoded at 2 hours. It is tested in the Haskell test suite (`testFileChunkExpiration` with a 1-second TTL). + +## 10. Control port operations + +### 10.1 Command without auth → AUTH + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +enable_control_port +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/ctrldel.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/ctrldel.bin $TEST_DIR/descriptions -s "$SRV" -n 1 +RCV_ID=$(get_recipient_ids $TEST_DIR/descriptions/ctrldel.bin.xftp/rcv1.xftp | head -1) + +# No auth +python3 -c " +import socket, time +s = socket.create_connection(('127.0.0.1', 15230), timeout=5) +s.settimeout(2) +time.sleep(0.3); s.recv(4096) +s.sendall(b'delete $RCV_ID\n') +time.sleep(0.3) +print(s.recv(4096).decode().strip()) +s.sendall(b'quit\n'); s.close() +" +# Expected: AUTH +``` + +### 10.2 Wrong password → CPRNone, commands return AUTH + +```bash +python3 -c " +import socket, time +s = socket.create_connection(('127.0.0.1', 
15230), timeout=5)
+s.settimeout(2)
+time.sleep(0.3); s.recv(4096)
+s.sendall(b'auth wrongpassword\n')
+time.sleep(0.3)
+print('auth:', s.recv(4096).decode().strip())
+# Expected: Current role is CPRNone
+s.sendall(b'delete $RCV_ID\n')
+time.sleep(0.3)
+print('delete:', s.recv(4096).decode().strip())
+# Expected: AUTH
+s.sendall(b'quit\n'); s.close()
+"
+```
+
+Sections 10.2–10.5 continue the shell session started in 10.1: the server is still running, and `$RCV_ID` still holds the recipient ID extracted there.
+
+### 10.3 stats-rts responds
+
+```bash
+control_cmd "stats-rts"
+# Expected: either GHC RTS stats or "unsupported operation (GHC.Stats.getRTSStats: ...)"
+```
+
+### 10.4 Delete command removes file
+
+```bash
+control_cmd "delete $RCV_ID"
+# Expected: ok
+
+$XFTP recv $TEST_DIR/descriptions/ctrldel.bin.xftp/rcv1.xftp $TEST_DIR/received -y 2>&1
+# Expected: AUTH error
+```
+
+### 10.5 Invalid block reason → error:
+
+```bash
+dd if=/dev/urandom of=$TEST_DIR/badblock.bin bs=1M count=1 2>/dev/null
+$XFTP send $TEST_DIR/badblock.bin $TEST_DIR/descriptions -s "$SRV" -n 1
+RCV_ID2=$(get_recipient_ids $TEST_DIR/descriptions/badblock.bin.xftp/rcv1.xftp | head -1)
+
+control_cmd "block $RCV_ID2 reason=invalid_reason"
+# Expected: error:...
+
+kill $SERVER_PID; wait $SERVER_PID 2>/dev/null
+```
+
+## 11. 
Blocked file: sender cannot delete + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +enable_control_port +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +$XFTP_SERVER start & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/blockdel.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/blockdel.bin $TEST_DIR/descriptions -s "$SRV" -n 1 + +RCV_ID=$(get_recipient_ids $TEST_DIR/descriptions/blockdel.bin.xftp/rcv1.xftp | head -1) +control_cmd "block $RCV_ID reason=spam" + +# Sender delete should fail with BLOCKED +$XFTP del $TEST_DIR/descriptions/blockdel.bin.xftp/snd.xftp.private -y 2>&1 +# Expected: BLOCKED error + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +## 12. Recipient cascade and storage accounting + +### 12.1 Recipient cascade delete (PG) + +```bash +rm -rf $TEST_DIR +mkdir -p $TEST_DIR/{files,descriptions,received} +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +psql -U postgres -c "CREATE DATABASE xftp_server_store OWNER xftp;" + +$XFTP_SERVER init -p $TEST_DIR/files -q 10gb --ip 127.0.0.1 +ini_set port 7921 +ini_set store_files database +ini_uncomment db_connection +ini_uncomment db_schema +ini_uncomment db_pool_size +FP=$(cat $XFTP_SERVER_CFG_PATH/fingerprint) +SRV="xftp://$FP@127.0.0.1:7921" + +psql -U xftp -d xftp_server_store -c "CREATE SCHEMA IF NOT EXISTS xftp_server;" +rm -f $XFTP_SERVER_LOG_PATH/file-server-store.log + +$XFTP_SERVER start --confirm-migrations up & +SERVER_PID=$!; sleep 2 + +dd if=/dev/urandom of=$TEST_DIR/cascade.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/cascade.bin $TEST_DIR/descriptions -s "$SRV" -n 3 + +psql -U xftp -d xftp_server_store -c "SET search_path TO xftp_server; SELECT count(*) FROM files;" +# Expected: > 0 +psql -U xftp -d xftp_server_store -c "SET search_path TO xftp_server; SELECT count(*) FROM recipients;" +# Expected: > 0 + 
+$XFTP del $TEST_DIR/descriptions/cascade.bin.xftp/snd.xftp.private -y + +psql -U xftp -d xftp_server_store -c "SET search_path TO xftp_server; SELECT count(*) FROM files;" +# Expected: 0 +psql -U xftp -d xftp_server_store -c "SET search_path TO xftp_server; SELECT count(*) FROM recipients;" +# Expected: 0 (cascade delete) +``` + +### 12.2 Storage accounting + +```bash +dd if=/dev/urandom of=$TEST_DIR/stor1.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/stor1.bin $TEST_DIR/descriptions -s "$SRV" -n 1 + +dd if=/dev/urandom of=$TEST_DIR/stor2.bin bs=1M count=1 2>/dev/null +$XFTP send $TEST_DIR/stor2.bin $TEST_DIR/descriptions -s "$SRV" -n 1 + +ls $TEST_DIR/files/ | wc -l +# Expected: > 0 + +$XFTP del $TEST_DIR/descriptions/stor1.bin.xftp/snd.xftp.private -y +$XFTP del $TEST_DIR/descriptions/stor2.bin.xftp/snd.xftp.private -y + +ls $TEST_DIR/files/ | wc -l +# Expected: 0 + +psql -U xftp -d xftp_server_store -c "SET search_path TO xftp_server; SELECT count(*) FROM files;" +# Expected: 0 + +kill $SERVER_PID; wait $SERVER_PID 2>/dev/null +``` + +## Cleanup + +```bash +kill $SERVER_PID 2>/dev/null; wait $SERVER_PID 2>/dev/null +rm -rf $TEST_DIR +psql -U postgres -c "DROP DATABASE IF EXISTS xftp_server_store;" +``` + +## Summary of expected results + +| # | Scenario | Expected | +|---|----------|----------| +| 1 | Send/receive on memory | OK | +| 2 | Send/receive on PostgreSQL | OK (DB rows match) | +| 3 | Memory → PG, receive remaining | OK | +| 4 | PG → memory, delete round-trip | OK | +| 4b | Send PG, export, receive on memory | OK | +| 5.1 | Restart persistence (memory + log) | OK | +| 5.2 | Restart persistence (memory, no log) | AUTH error | +| 5.3 | Restart persistence (PostgreSQL) | OK | +| 6.1 | Receive after server delete | AUTH error | +| 6.2 | Multiple recipients (n=3) | All work | +| 6.3 | DB mode + existing store log | Server refuses | +| 6.4 | DB mode + no schema | Server fails | +| 6.5 | Dual-write (db_store_log: on) | Both DB and log have data | +| 6.6 
| Import to non-empty DB | Error | +| 6.7 | Import without store log | Error | +| 6.8 | Export when store log exists | Error | +| 7.1 | Block file, receive fails | BLOCKED (not AUTH) | +| 7.2 | Block survives memory → PG | BLOCKED | +| 7.3 | Block survives PG → memory | BLOCKED | +| 8.1 | Acked rcv1 fails, rcv2 works (memory → PG) | AUTH + OK | +| 8.2 | Acked rcv1 fails, rcv2 works (PG → memory) | AUTH + OK | +| 8.3 | Deleted file absent, kept file present | rcv2_del=AUTH, kept=OK | +| 8.4 | Large 20MB multi-chunk migration | Integrity preserved | +| 8.5 | Double round-trip memory↔PG | Intact | +| 9.1 | new_files=off | Upload rejected | +| 9.2 | Basic auth, no password | AUTH | +| 9.3 | Basic auth, wrong password | PCEProtocolError AUTH | +| 9.4 | Basic auth, correct password | OK | +| 9.5 | No server auth, client sends auth | OK | +| 9.6 | Quota boundary | 3rd file QUOTA error | +| 10.1 | Control port, no auth | AUTH | +| 10.2 | Control port, wrong password | CPRNone → AUTH | +| 10.3 | stats-rts | Responds | +| 10.4 | Control port delete | ok → recv AUTH | +| 10.5 | Invalid block reason | error: | +| 11 | Blocked file, sender delete | BLOCKED | +| 12.1 | Recipient cascade delete (PG) | files=0, recipients=0 | +| 12.2 | Storage accounting | disk=0, DB=0 | diff --git a/tests/manual/xftp-test.py b/tests/manual/xftp-test.py new file mode 100644 index 0000000000..b20b4f3b2e --- /dev/null +++ b/tests/manual/xftp-test.py @@ -0,0 +1,1443 @@ +#!/usr/bin/env python3 +""" +Automated XFTP server test suite. +Tests memory and PostgreSQL backends, migration, persistence, and edge cases. 
+ +Prerequisites: + cabal build -fserver_postgres exe:xftp-server exe:xftp + PostgreSQL running (set PGHOST if non-default socket) + User 'xftp' with SUPERUSER must exist: + psql -U postgres -c "CREATE USER xftp WITH SUPERUSER;" + +Usage: + python3 tests/manual/xftp-test.py + PGHOST=/tmp/pgsocket python3 tests/manual/xftp-test.py +""" + +import os +import re +import shutil +import signal +import socket +import subprocess +import sys +import time +import traceback +from pathlib import Path + + +# --- Configuration --- + +PORT = "7921" +DB_NAME = "xftp_server_store" +DB_USER = "xftp" +DB_SCHEMA = "xftp_server" +PG_ADMIN_USER = "postgres" + + +# --- State --- + +PASS = 0 +FAIL = 0 +server_proc = None + + +# --- Helpers --- + +def run(cmd, *, check=True, input=None, timeout=30): + """Run a command, return CompletedProcess.""" + r = subprocess.run( + cmd, shell=isinstance(cmd, str), + capture_output=True, text=True, + input=input, timeout=timeout, + ) + if check and r.returncode != 0: + raise subprocess.CalledProcessError(r.returncode, cmd, r.stdout, r.stderr) + return r + + +def cabal_bin(name): + r = run(f"cabal list-bin exe:{name}") + p = r.stdout.strip() + if not os.path.isfile(p): + sys.exit(f"Binary not found: {p}\nRun: cabal build -fserver_postgres exe:{name}") + return p + + +def psql(sql, *, user=PG_ADMIN_USER, db="postgres", check=True): + return run(["psql", "-U", user, "-d", db, "-t", "-A", "-c", sql], check=check) + + +def db_count(table): + r = psql(f"SET search_path TO {DB_SCHEMA}; SELECT count(*) FROM {table};", + user=DB_USER, db=DB_NAME, check=False) + if r.returncode != 0: + return -1 + # psql -t -A output includes "SET" line from SET search_path, take the last line + lines = [l.strip() for l in r.stdout.strip().split("\n") if l.strip() and l.strip() != "SET"] + return int(lines[-1]) if lines else -1 + + +def pass_(desc): + global PASS + PASS += 1 + print(f" [PASS] {desc}") + + +def fail_(desc): + global FAIL + FAIL += 1 + print(f" [FAIL] {desc}") + + 
def check(desc, condition):
    """Record one test result: delegates to pass_/fail_ (defined earlier in this file)."""
    if condition:
        pass_(desc)
    else:
        fail_(desc)



# --- INI helpers ---
# All helpers edit file-server.ini in place using anchored regex substitutions;
# each asserts the expected line was actually present so a renamed INI key
# fails the run loudly instead of silently testing the wrong configuration.

def ini_set(key, value):
    """Set `key: value` on an existing (uncommented) line of file-server.ini."""
    ini = ini_path()
    txt = ini.read_text()
    new_txt, n = re.subn(rf"^{re.escape(key)}:.*$", f"{key}: {value}", txt, flags=re.MULTILINE)
    assert n > 0, f"ini_set: key '{key}' not found in {ini}"
    ini.write_text(new_txt)


def ini_uncomment(key):
    """Activate a commented-out `# key:` line (keeps its existing value)."""
    ini = ini_path()
    txt = ini.read_text()
    new_txt, n = re.subn(rf"^# {re.escape(key)}:", f"{key}:", txt, flags=re.MULTILINE)
    assert n > 0, f"ini_uncomment: commented key '# {key}' not found in {ini}"
    ini.write_text(new_txt)


def ini_comment(key):
    """Deactivate an active `key:` line by prefixing it with `# `."""
    ini = ini_path()
    txt = ini.read_text()
    new_txt, n = re.subn(rf"^{re.escape(key)}:", f"# {key}:", txt, flags=re.MULTILINE)
    assert n > 0, f"ini_comment: key '{key}' not found in {ini}"
    ini.write_text(new_txt)


def ini_path():
    """Path to the server INI inside the config dir given by XFTP_SERVER_CFG_PATH."""
    return Path(os.environ["XFTP_SERVER_CFG_PATH"]) / "file-server.ini"


# --- Server management ---

def init_server(quota="10gb"):
    """Run `xftp-server init` with the given storage quota and return the
    xftp:// address (fingerprint@host:port, no basic-auth password)."""
    run([XFTP_SERVER, "init", "-p", str(test_dir / "files"), "-q", quota, "--ip", "127.0.0.1"])
    ini_set("port", PORT)
    fp = (Path(os.environ["XFTP_SERVER_CFG_PATH"]) / "fingerprint").read_text().strip()
    return f"xftp://{fp}@127.0.0.1:{PORT}"


# File handle for the currently running server's log; kept module-global so
# stop_server() can close it after the process exits.
_server_log_fh = None

def start_server(*extra_args):
    """Start the server (stopping any previous instance first), give it 2s to
    come up, and return True if it is still running.  On early exit, prints the
    exit code and the last 5 log lines and returns False."""
    global server_proc, _server_log_fh
    stop_server()
    log_path = test_dir / "server.log"
    _server_log_fh = open(log_path, "w")
    server_proc = subprocess.Popen(
        [XFTP_SERVER, "start"] + list(extra_args),
        stdout=_server_log_fh,
        stderr=subprocess.STDOUT,
    )
    time.sleep(2)
    if server_proc.poll() is not None:
        _server_log_fh.close()
        _server_log_fh = None
        log = log_path.read_text()
        print(f" [ERROR] Server exited with code {server_proc.returncode}")
        for line in log.strip().split("\n")[-5:]:
            print(f" {line}")
        return False
    return True


def stop_server():
    """SIGTERM the running server (SIGKILL after 5s), close its log handle,
    then pause briefly so ports/files are released before the next start.
    NOTE(review): if the process already exited on its own, server_proc is left
    set (only reset inside the `is None` branch) — harmless today because every
    caller re-checks poll(), but worth confirming intentional."""
    global server_proc, _server_log_fh
    if server_proc and server_proc.poll() is None:
        server_proc.send_signal(signal.SIGTERM)
        try:
            server_proc.wait(timeout=5)
        except subprocess.TimeoutExpired:
            server_proc.kill()
            server_proc.wait()
        server_proc = None
    if _server_log_fh:
        _server_log_fh.close()
        _server_log_fh = None
    time.sleep(0.5)


def clean_test_dir():
    """Stop the server and recreate an empty test tree (files/descriptions/received)."""
    stop_server()
    if test_dir.exists():
        shutil.rmtree(test_dir)
    (test_dir / "files").mkdir(parents=True)
    (test_dir / "descriptions").mkdir()
    (test_dir / "received").mkdir()


def clean_db():
    """Drop and recreate the test database (schema is created separately)."""
    psql(f"DROP DATABASE IF EXISTS {DB_NAME};")
    psql(f"CREATE DATABASE {DB_NAME} OWNER {DB_USER};")


def enable_db_mode():
    """Switch the INI to the PostgreSQL backend and activate the db_* settings."""
    ini_set("store_files", "database")
    ini_uncomment("db_connection")
    ini_uncomment("db_schema")
    ini_uncomment("db_pool_size")


def disable_db_mode():
    """Switch the INI back to the in-memory (STM + store log) backend."""
    ini_set("store_files", "memory")
    ini_comment("db_connection")
    ini_comment("db_schema")
    ini_comment("db_pool_size")


# --- File operations ---

def make_file(name, size_mb=1):
    """Create a file of `size_mb` MiB of random bytes in the test dir; returns its path."""
    path = test_dir / name
    with open(path, "wb") as f:
        f.write(os.urandom(size_mb * 1024 * 1024))
    return path


def desc_dir(name):
    """Directory where the CLI writes rcvN.xftp / snd.xftp.private descriptors for `name`."""
    return test_dir / "descriptions" / f"{name}.xftp"


def send_file(src, n=1):
    """Upload `src` for `n` recipients via the xftp CLI; returns CompletedProcess (no raise)."""
    return run([XFTP, "send", str(src), str(test_dir / "descriptions"),
                "-s", srv, "-n", str(n)], check=False, timeout=60)


def recv_file(desc_path):
    """Download via a recipient descriptor; -y deletes the descriptor on success."""
    return run([XFTP, "recv", str(desc_path), str(test_dir / "received"), "-y"],
               check=False, timeout=60)


def del_file(desc_path):
    """Delete the file server-side via the sender descriptor (snd.xftp.private)."""
    return run([XFTP, "del", str(desc_path), "-y"], check=False, timeout=30)


def files_match(a, b):
    """Compare two files byte-for-byte; returns False if either file is missing."""
    a, b = Path(a), Path(b)
    if not a.exists() or not b.exists():
        return False
    return a.read_bytes() == b.read_bytes()


def db_import():
    """Run `xftp-server database import` (StoreLog -> PostgreSQL), auto-confirming."""
    return run([XFTP_SERVER, "database", "import"], input="Y\n", check=False, timeout=30)


def db_export():
    """Run `xftp-server database export` (PostgreSQL -> StoreLog), auto-confirming."""
    return run([XFTP_SERVER, "database", "export"], input="Y\n", check=False, timeout=30)


def create_schema():
    """Create the configured schema (DB_SCHEMA) so the server can start on a fresh DB."""
    psql(f"CREATE SCHEMA IF NOT EXISTS {DB_SCHEMA};", user=DB_USER, db=DB_NAME)


CONTROL_PORT = "15230"
CONTROL_PASSWORD = "testadmin"


def enable_control_port():
    """Activate the control port and its admin password in the INI (both lines
    ship commented out; assert they were found so an INI rename fails loudly)."""
    ini = ini_path()
    txt = ini.read_text()
    txt, n1 = re.subn(r"^# control_port:.*$", f"control_port: {CONTROL_PORT}", txt, flags=re.MULTILINE)
    txt, n2 = re.subn(r"^# control_port_admin_password:.*$",
                      f"control_port_admin_password: {CONTROL_PASSWORD}", txt, flags=re.MULTILINE)
    assert n1 > 0, "enable_control_port: '# control_port' not found in INI"
    assert n2 > 0, "enable_control_port: '# control_port_admin_password' not found in INI"
    ini.write_text(txt)


def control_recv(s):
    """Receive all available data from control port (drains buffer).

    Uses a short settle sleep plus a 0.5s recv timeout as the end-of-response
    signal — the control protocol is line-based with no length framing."""
    time.sleep(0.3)
    chunks = []
    s.settimeout(0.5)
    while True:
        try:
            data = s.recv(4096)
            if not data:
                break
            chunks.append(data)
        except socket.timeout:
            break
    return b"".join(chunks).decode().strip()


def control_send_recv(s, cmd):
    """Send a command and receive the response line."""
    s.sendall(f"{cmd}\n".encode())
    return control_recv(s)


def control_connect():
    """Connect to control port, drain welcome banner, return socket."""
    s = socket.create_connection(("127.0.0.1", int(CONTROL_PORT)), timeout=5)
    try:
        control_recv(s)  # drain welcome banner (2 lines)
    except Exception:
        s.close()
        raise
    return s


def control_cmd(cmd, *, auth=True):
    """Send a command to the server control port, return the response. 
Basic send/receive (memory) ===")
    clean_test_dir()
    srv = init_server()
    assert start_server()

    # Upload one 5MB file for two recipients.
    src = make_file("testfile.bin", 5)
    send_file(src, n=2)

    dd = desc_dir("testfile.bin")
    check("1.1 rcv1.xftp created", (dd / "rcv1.xftp").exists())
    check("1.2 rcv2.xftp created", (dd / "rcv2.xftp").exists())
    check("1.3 snd.xftp.private created", (dd / "snd.xftp.private").exists())

    recv_file(dd / "rcv1.xftp")
    check("1.4 rcv1 file matches", files_match(src, test_dir / "received/testfile.bin"))
    check("1.5 rcv1.xftp deleted by -y", not (dd / "rcv1.xftp").exists())

    # Second recipient downloads independently (clear the first download first).
    (test_dir / "received/testfile.bin").unlink(missing_ok=True)
    recv_file(dd / "rcv2.xftp")
    check("1.6 rcv2 file matches", files_match(src, test_dir / "received/testfile.bin"))
    check("1.7 rcv2.xftp deleted by -y", not (dd / "rcv2.xftp").exists())

    # Sender-side delete must remove the server's chunk files on disk.
    del_file(dd / "snd.xftp.private")
    check("1.8 snd.xftp.private deleted by -y", not (dd / "snd.xftp.private").exists())
    fc = len(list((test_dir / "files").iterdir()))
    check(f"1.9 server files cleaned ({fc})", fc == 0)

    stop_server()


def test_2_basic_postgres():
    """Same send/recv/delete flow as test_1, but on the PostgreSQL backend,
    additionally verifying row counts in the files/recipients tables."""
    global srv
    print("\n=== 2.
Basic send/receive (PostgreSQL) ===")
    clean_test_dir()
    clean_db()
    srv = init_server()
    enable_db_mode()
    # Remove store log so database mode starts cleanly
    store_log = Path(os.environ["XFTP_SERVER_LOG_PATH"]) / "file-server-store.log"
    store_log.unlink(missing_ok=True)
    # Create schema so server can connect
    create_schema()
    ok = start_server("--confirm-migrations", "up")
    check("2.1 server started", ok)
    if not ok:
        return

    src = make_file("testfile.bin", 5)
    send_file(src, n=2)
    dd = desc_dir("testfile.bin")
    check("2.2 send succeeded", (dd / "rcv1.xftp").exists())

    recv_file(dd / "rcv1.xftp")
    check("2.3 recv matches", files_match(src, test_dir / "received/testfile.bin"))

    # db_count returns -1 on query failure (helper defined earlier) — TODO confirm.
    fc = db_count("files")
    rc = db_count("recipients")
    check(f"2.4 files in database ({fc})", fc > 0 and fc != -1)
    check(f"2.5 recipients in database ({rc})", rc > 0 and rc != -1)

    # Sender delete must cascade: both tables end up empty.
    del_file(dd / "snd.xftp.private")
    fc_after = db_count("files")
    rc_after = db_count("recipients")
    check(f"2.6 all files deleted ({fc_after})", fc_after == 0)
    check(f"2.7 all recipients deleted ({rc_after})", rc_after == 0)

    stop_server()


def test_3_migration_memory_to_pg():
    """Populate the memory backend (incl. a partially-received file), import the
    store log into PostgreSQL, and verify all remaining descriptors still work."""
    global srv
    print("\n=== 3.
Migration: memory -> PostgreSQL ===")
    clean_test_dir()
    clean_db()
    srv = init_server()
    assert start_server()

    srcA = make_file("fileA.bin")
    send_file(srcA, n=2)
    check("3.1 fileA sent", (desc_dir("fileA.bin") / "rcv1.xftp").exists())

    srcB = make_file("fileB.bin")
    send_file(srcB, n=2)
    check("3.2 fileB sent", (desc_dir("fileB.bin") / "rcv1.xftp").exists())

    # Partially receive fileB
    recv_file(desc_dir("fileB.bin") / "rcv1.xftp")
    check("3.3 fileB rcv1 received", files_match(srcB, test_dir / "received/fileB.bin"))

    stop_server()

    # Switch to database and import
    enable_db_mode()
    r = db_import()
    check("3.4 import succeeded", r.returncode == 0)

    # Import renames the store log to .bak so a later memory-mode start
    # cannot accidentally replay it.
    log_bak = Path(os.environ["XFTP_SERVER_LOG_PATH"]) / "file-server-store.log.bak"
    log_file = Path(os.environ["XFTP_SERVER_LOG_PATH"]) / "file-server-store.log"
    check("3.5 store log renamed to .bak", log_bak.exists())
    check("3.6 store log removed", not log_file.exists())

    fc = db_count("files")
    rc = db_count("recipients")
    check(f"3.7 files imported ({fc})", fc > 0 and fc != -1)
    check(f"3.8 recipients imported ({rc})", rc > 0 and rc != -1)

    # Start PG server, receive remaining
    ok = start_server("--confirm-migrations", "up")
    check("3.9 PG server started", ok)
    if not ok:
        return

    recv_file(desc_dir("fileA.bin") / "rcv1.xftp")
    check("3.10 fileA rcv1 after migration", files_match(srcA, test_dir / "received/fileA.bin"))

    recv_file(desc_dir("fileA.bin") / "rcv2.xftp")
    # rcv2 downloads to fileA_1.bin (fileA.bin already exists from rcv1)
    rcv2_path = test_dir / "received"
    rcv2_files = [f for f in rcv2_path.iterdir() if f.name.startswith("fileA") and f.name != "fileA.bin"]
    check("3.11 fileA rcv2 after migration", len(rcv2_files) == 1 and files_match(srcA, rcv2_files[0]))

    (test_dir / "received/fileB.bin").unlink(missing_ok=True)
    recv_file(desc_dir("fileB.bin") / "rcv2.xftp")
    check("3.12 fileB rcv2 after migration", files_match(srcB, test_dir / 
"received/fileB.bin"))

    stop_server()


def test_4_migration_pg_to_memory():
    """Export the PostgreSQL state back to a store log and delete via memory mode.
    NOTE: intentionally performs no setup — it continues from the state test_3
    left behind (populated PG database, no store log)."""
    global srv
    print("\n=== 4. Migration: PostgreSQL -> memory ===")

    r = db_export()
    check("4.1 export succeeded", r.returncode == 0)

    log_file = Path(os.environ["XFTP_SERVER_LOG_PATH"]) / "file-server-store.log"
    check("4.2 store log created", log_file.exists())

    disable_db_mode()
    ok = start_server()
    check("4.3 memory server started", ok)
    if not ok:
        return

    # Sender descriptors from the original memory run must still authenticate
    # after the PG -> memory round-trip.
    r = del_file(desc_dir("fileA.bin") / "snd.xftp.private")
    check("4.4 fileA delete on memory round-trip", r.returncode == 0)

    r = del_file(desc_dir("fileB.bin") / "snd.xftp.private")
    check("4.5 fileB delete on memory round-trip", r.returncode == 0)

    stop_server()


def test_4b_send_pg_receive_memory():
    """Send on PostgreSQL, export, receive on memory."""
    global srv
    print("\n=== 4b. Send on PG, export, receive on memory ===")
    clean_test_dir()
    clean_db()
    srv = init_server()
    enable_db_mode()
    store_log = Path(os.environ["XFTP_SERVER_LOG_PATH"]) / "file-server-store.log"
    store_log.unlink(missing_ok=True)
    create_schema()
    ok = start_server("--confirm-migrations", "up")
    check("4b.1 PG server started", ok)
    if not ok:
        return

    srcA = make_file("pgfileA.bin")
    send_file(srcA, n=2)
    check("4b.2 pgfileA sent", (desc_dir("pgfileA.bin") / "rcv1.xftp").exists())

    # Partially receive rcv1 on PG
    recv_file(desc_dir("pgfileA.bin") / "rcv1.xftp")
    check("4b.3 pgfileA rcv1 on PG", files_match(srcA, test_dir / "received/pgfileA.bin"))

    stop_server()

    # Export to store log
    r = db_export()
    check("4b.4 export succeeded", r.returncode == 0)

    # Switch to memory
    disable_db_mode()
    ok = start_server()
    check("4b.5 memory server started", ok)
    if not ok:
        return

    # rcv2 should work on memory backend
    (test_dir / "received/pgfileA.bin").unlink(missing_ok=True)
    recv_file(desc_dir("pgfileA.bin") / "rcv2.xftp")
    check("4b.6 pgfileA rcv2 on memory after export", 
files_match(srcA, test_dir / "received/pgfileA.bin")) + + del_file(desc_dir("pgfileA.bin") / "snd.xftp.private") + check("4b.7 delete on memory", not (desc_dir("pgfileA.bin") / "snd.xftp.private").exists()) + + stop_server() + + +def test_5_restart_persistence(): + global srv + print("\n=== 5. Restart persistence ===") + + # 5.1 Memory with store log + print(" --- 5.1 memory + store log ---") + clean_test_dir() + srv = init_server() + assert start_server() + + src = make_file("persist.bin") + send_file(src) + stop_server() + assert start_server() + + recv_file(desc_dir("persist.bin") / "rcv1.xftp") + check("5.1 recv after restart (memory+log)", files_match(src, test_dir / "received/persist.bin")) + stop_server() + + # 5.2 Memory without store log + print(" --- 5.2 memory, no log ---") + for f in (test_dir / "descriptions").iterdir(): + shutil.rmtree(f) if f.is_dir() else f.unlink() + for f in (test_dir / "received").iterdir(): + f.unlink() + ini_set("enable", "off") + assert start_server() + + src2 = make_file("persist2.bin") + send_file(src2) + stop_server() + assert start_server() + + r = recv_file(desc_dir("persist2.bin") / "rcv1.xftp") + check("5.2a recv after restart (no log) fails", r.returncode != 0) + check("5.2b error is AUTH", "AUTH" in (r.stdout + r.stderr)) + stop_server() + + # 5.3 PostgreSQL + print(" --- 5.3 PostgreSQL ---") + clean_test_dir() + clean_db() + srv = init_server() + enable_db_mode() + store_log = Path(os.environ["XFTP_SERVER_LOG_PATH"]) / "file-server-store.log" + store_log.unlink(missing_ok=True) + create_schema() + ok = start_server("--confirm-migrations", "up") + check("5.3a PG server started", ok) + if not ok: + return + + src = make_file("persist.bin") + send_file(src) + stop_server() + ok = start_server("--confirm-migrations", "up") + check("5.3b PG server restarted", ok) + if not ok: + return + + recv_file(desc_dir("persist.bin") / "rcv1.xftp") + check("5.3 recv after restart (PostgreSQL)", files_match(src, test_dir / 
"received/persist.bin")) + stop_server() + + +def test_6_edge_cases(): + global srv + print("\n=== 6. Edge cases ===") + + # 6.1 Receive after server-side delete + print(" --- 6.1 receive after delete ---") + clean_test_dir() + srv = init_server() + assert start_server() + + src = make_file("deltest.bin") + send_file(src, n=2) + del_file(desc_dir("deltest.bin") / "snd.xftp.private") + r = recv_file(desc_dir("deltest.bin") / "rcv2.xftp") + check("6.1a recv after server delete fails", r.returncode != 0) + check("6.1b error is AUTH", "AUTH" in (r.stdout + r.stderr)) + stop_server() + + # 6.2 Multiple recipients, partial ack + print(" --- 6.2 multiple recipients ---") + clean_test_dir() + srv = init_server() + assert start_server() + + src = make_file("multi.bin") + send_file(src, n=3) + + recv_file(desc_dir("multi.bin") / "rcv1.xftp") + check("6.2a rcv1 received", files_match(src, test_dir / "received/multi.bin")) + + (test_dir / "received/multi.bin").unlink(missing_ok=True) + recv_file(desc_dir("multi.bin") / "rcv2.xftp") + check("6.2b rcv2 still works", files_match(src, test_dir / "received/multi.bin")) + + (test_dir / "received/multi.bin").unlink(missing_ok=True) + recv_file(desc_dir("multi.bin") / "rcv3.xftp") + check("6.2c rcv3 still works", files_match(src, test_dir / "received/multi.bin")) + stop_server() + + # 6.3 Database mode with existing store log should fail + # Simulates: ran server in memory mode (creating store log), then switched to database + print(" --- 6.3 database mode + existing store log ---") + clean_test_dir() + clean_db() + srv = init_server() + # Start in memory mode to create the store log file + assert start_server() + make_file("dummy63.bin") + send_file(test_dir / "dummy63.bin") + stop_server() + # Verify store log was created + store_log = Path(os.environ["XFTP_SERVER_LOG_PATH"]) / "file-server-store.log" + assert store_log.exists(), "store log should exist after memory-mode run" + # Switch to database mode without importing + 
enable_db_mode() + create_schema() + log_file = test_dir / "server-63.log" + with open(log_file, "w") as fh: + p = subprocess.Popen( + [XFTP_SERVER, "start", "--confirm-migrations", "up"], + stdout=fh, stderr=subprocess.STDOUT, + ) + time.sleep(5) + exited = p.poll() is not None + if not exited: + p.kill() + p.wait() + log_text = log_file.read_text() + check("6.3a server exited", exited) + check("6.3b error message correct", + "store log file" in log_text and "exists but store_files is" in log_text) + + # 6.4 Database mode, no store log, schema doesn't exist (should fail) + print(" --- 6.4 database mode + no schema ---") + clean_test_dir() + clean_db() + srv = init_server() + enable_db_mode() + # No schema, no store log — server should fail with "schema does not exist" + store_log = Path(os.environ["XFTP_SERVER_LOG_PATH"]) / "file-server-store.log" + store_log.unlink(missing_ok=True) + ok = start_server("--confirm-migrations", "up") + check("6.4a start fails without schema", not ok) + log_text = (test_dir / "server.log").read_text() if (test_dir / "server.log").exists() else "" + check("6.4b error mentions schema", "schema" in log_text and "does not exist" in log_text) + stop_server() + + # 6.5 Dual-write mode: database + db_store_log: on + # Verifies that new writes in dual-write mode land in BOTH the DB and the store log, + # so switching to memory-only (using the store log) preserves files sent in dual-write. 
+ print(" --- 6.5 database + store log + db_store_log: on ---") + clean_test_dir() + clean_db() + srv = init_server() + enable_db_mode() + ini_uncomment("db_store_log") + ini_set("db_store_log", "on") + create_schema() + # Remove store log so import isn't needed for initial start + store_log = Path(os.environ["XFTP_SERVER_LOG_PATH"]) / "file-server-store.log" + store_log.unlink(missing_ok=True) + ok = start_server("--confirm-migrations", "up") + check("6.5a start in dual-write mode", ok) + if not ok: + stop_server() + else: + # Send a NEW file in dual-write mode + src = make_file("dual.bin") + send_file(src, n=1) + dd = desc_dir("dual.bin") + check("6.5b send in dual-write mode", (dd / "rcv1.xftp").exists()) + + stop_server() + + # Verify store log was written (dual-write) + check("6.5c store log has entries", + store_log.exists() and store_log.stat().st_size > 0) + + # Verify DB has the file too + fc = db_count("files") + check(f"6.5d file in DB ({fc})", fc > 0 and fc != -1) + + # Now switch to memory-only using the store log — proves the log has valid data + disable_db_mode() + ini_comment("db_store_log") + ok = start_server() + check("6.5e memory server from dual-write log", ok) + if ok: + recv_file(dd / "rcv1.xftp") + check("6.5f recv on memory from dual-write log", + files_match(src, test_dir / "received/dual.bin")) + stop_server() + + # 6.6 Import to non-empty database should fail + # Use db_export to produce a real store log, then try to re-import without clearing DB. 
+ print(" --- 6.6 import to non-empty DB ---") + clean_test_dir() + clean_db() + srv = init_server() + enable_db_mode() + store_log = Path(os.environ["XFTP_SERVER_LOG_PATH"]) / "file-server-store.log" + store_log.unlink(missing_ok=True) + create_schema() + ok = start_server("--confirm-migrations", "up") + if ok: + make_file("dummy.bin") + send_file(test_dir / "dummy.bin") + stop_server() + # Export produces a real, valid store log + r = db_export() + check("6.6a export for re-import test", r.returncode == 0) + # Now try to import the valid log back into the non-empty DB + r = db_import() + check("6.6b import to non-empty DB fails", r.returncode != 0) + else: + fail_("6.6 could not start server for setup") + + # 6.7 Import with no store log file (should fail) + print(" --- 6.7 import without store log ---") + clean_test_dir() + clean_db() + srv = init_server() + enable_db_mode() + store_log = Path(os.environ["XFTP_SERVER_LOG_PATH"]) / "file-server-store.log" + store_log.unlink(missing_ok=True) + r = db_import() + check("6.7 import without store log fails", r.returncode != 0) + + # 6.8 Export when store log already exists (should fail) + print(" --- 6.8 export with existing store log ---") + clean_test_dir() + clean_db() + srv = init_server() + enable_db_mode() + create_schema() + store_log = Path(os.environ["XFTP_SERVER_LOG_PATH"]) / "file-server-store.log" + store_log.unlink(missing_ok=True) + ok = start_server("--confirm-migrations", "up") + if ok: + make_file("exp.bin") + send_file(test_dir / "exp.bin") + stop_server() + # Create a log file to block export + store_log.write_text("existing\n") + r = db_export() + check("6.8 export with existing store log fails", r.returncode != 0) + else: + fail_("6.8 could not start server for setup") + + +def test_7_blocking(): + """Block files via control port, verify blocked state survives migration.""" + global srv + print("\n=== 7. 
File blocking via control port ===") + + # 7.1 Block a file and verify receive fails with BLOCKED + print(" --- 7.1 block file, receive fails ---") + clean_test_dir() + srv = init_server() + enable_control_port() + assert start_server() + + src = make_file("blockme.bin") + send_file(src, n=2) + dd = desc_dir("blockme.bin") + + # Get recipient IDs from the file description + rcv_ids = get_recipient_ids(dd / "rcv1.xftp") + check("7.1a got recipient IDs", len(rcv_ids) > 0) + + # Block using the first chunk's recipient ID + resp = control_cmd(f"block {rcv_ids[0]} reason=spam") + check("7.1b block command OK", resp == "ok") + + # Receive should fail with BLOCKED + r = recv_file(dd / "rcv1.xftp") + output = r.stdout + r.stderr + check("7.1c receive blocked file fails", r.returncode != 0) + check("7.1d error is BLOCKED (not AUTH)", "BLOCKED" in output and "AUTH" not in output) + + stop_server() + + # 7.2 Blocked file survives migration memory -> PG + print(" --- 7.2 blocked file survives memory->PG migration ---") + clean_test_dir() + clean_db() + srv = init_server() + enable_control_port() + assert start_server() + + src = make_file("blockmigrate.bin") + send_file(src, n=2) + dd = desc_dir("blockmigrate.bin") + + rcv_ids = get_recipient_ids(dd / "rcv1.xftp") + resp = control_cmd(f"block {rcv_ids[0]} reason=content") + check("7.2a block before migration", resp == "ok") + + stop_server() + + # Import to PG + enable_db_mode() + r = db_import() + check("7.2b import succeeded", r.returncode == 0) + + ok = start_server("--confirm-migrations", "up") + check("7.2c PG server started", ok) + if ok: + r = recv_file(dd / "rcv1.xftp") + check("7.2d recv fails after migration", r.returncode != 0) + check("7.2e error is BLOCKED", "BLOCKED" in (r.stdout + r.stderr)) + stop_server() + + # 7.3 Blocked file survives migration PG -> memory + print(" --- 7.3 blocked file survives PG->memory export ---") + clean_test_dir() + clean_db() + srv = init_server() + enable_control_port() + 
enable_db_mode() + store_log = Path(os.environ["XFTP_SERVER_LOG_PATH"]) / "file-server-store.log" + store_log.unlink(missing_ok=True) + create_schema() + ok = start_server("--confirm-migrations", "up") + check("7.3a PG server started", ok) + if not ok: + return + + src = make_file("blockpg.bin") + send_file(src, n=2) + dd = desc_dir("blockpg.bin") + + rcv_ids = get_recipient_ids(dd / "rcv1.xftp") + resp = control_cmd(f"block {rcv_ids[0]} reason=spam") + check("7.3b block on PG", resp == "ok") + + stop_server() + + # Export to memory + r = db_export() + check("7.3c export succeeded", r.returncode == 0) + + disable_db_mode() + ok = start_server() + check("7.3d memory server started", ok) + if ok: + r = recv_file(dd / "rcv1.xftp") + check("7.3e recv fails after PG->memory", r.returncode != 0) + check("7.3f error is BLOCKED", "BLOCKED" in (r.stdout + r.stderr)) + stop_server() + + +def test_8_migration_edge_cases(): + """Edge cases in migration: acked recipients, deleted files, large files, double round-trip.""" + global srv + print("\n=== 8. 
Migration edge cases ===") + + # 8.1 Acked recipient fails after memory->PG migration + print(" --- 8.1 acked recipient fails after memory->PG ---") + clean_test_dir() + clean_db() + srv = init_server() + assert start_server() + + src = make_file("acktest.bin") + send_file(src, n=2) + dd = desc_dir("acktest.bin") + + # Copy rcv1 descriptor before recv (recv -y deletes it) + rcv1_backup = test_dir / "rcv1_acktest.xftp" + shutil.copy2(dd / "rcv1.xftp", rcv1_backup) + + # Receive rcv1 (acknowledges it on server, deletes descriptor) + recv_file(dd / "rcv1.xftp") + check("8.1a rcv1 received", files_match(src, test_dir / "received/acktest.bin")) + + stop_server() + + # Migrate to PG + enable_db_mode() + r = db_import() + check("8.1b import succeeded", r.returncode == 0) + + ok = start_server("--confirm-migrations", "up") + check("8.1c PG server started", ok) + if ok: + # Acked rcv1 should fail — recipient was removed by ack before migration + r = recv_file(rcv1_backup) + check("8.1d acked rcv1 fails after migration", r.returncode != 0) + check("8.1e error is AUTH", "AUTH" in (r.stdout + r.stderr)) + + # Unacked rcv2 should still work + (test_dir / "received/acktest.bin").unlink(missing_ok=True) + recv_file(dd / "rcv2.xftp") + check("8.1f rcv2 works after migration", files_match(src, test_dir / "received/acktest.bin")) + stop_server() + + # 8.2 Acked recipient fails after PG->memory export + print(" --- 8.2 acked recipient fails after PG->memory ---") + clean_test_dir() + clean_db() + srv = init_server() + enable_db_mode() + store_log = Path(os.environ["XFTP_SERVER_LOG_PATH"]) / "file-server-store.log" + store_log.unlink(missing_ok=True) + create_schema() + ok = start_server("--confirm-migrations", "up") + check("8.2a PG server started", ok) + if not ok: + return + + src = make_file("ackpg.bin") + send_file(src, n=2) + dd = desc_dir("ackpg.bin") + + # Copy rcv1 descriptor before recv + rcv1_backup = test_dir / "rcv1_ackpg.xftp" + shutil.copy2(dd / "rcv1.xftp", 
rcv1_backup) + + recv_file(dd / "rcv1.xftp") + check("8.2b rcv1 received on PG", files_match(src, test_dir / "received/ackpg.bin")) + + stop_server() + + r = db_export() + check("8.2c export succeeded", r.returncode == 0) + + disable_db_mode() + ok = start_server() + check("8.2d memory server started", ok) + if ok: + # Acked rcv1 should fail + r = recv_file(rcv1_backup) + check("8.2e acked rcv1 fails after export", r.returncode != 0) + check("8.2f error is AUTH", "AUTH" in (r.stdout + r.stderr)) + + # Unacked rcv2 should work + (test_dir / "received/ackpg.bin").unlink(missing_ok=True) + recv_file(dd / "rcv2.xftp") + check("8.2g rcv2 works on memory after export", files_match(src, test_dir / "received/ackpg.bin")) + stop_server() + + # 8.3 Deleted file absent after migration + print(" --- 8.3 deleted file absent after migration ---") + clean_test_dir() + clean_db() + srv = init_server() + assert start_server() + + # Send a file that will be deleted before migration. + # Use n=2 so we have a rcv descriptor to test post-migration (rcv1 will be + # acked by the recv below; backup rcv2 before delete so we can try to recv + # it after migration — should return AUTH because the file was deleted). + srcDel = make_file("delmigrate.bin") + send_file(srcDel, n=2) + ddDel = desc_dir("delmigrate.bin") + # Backup rcv2 descriptor BEFORE delete (del doesn't touch rcv descriptors) + rcv2_del_backup = test_dir / "rcv2_delmigrate.xftp" + shutil.copy2(ddDel / "rcv2.xftp", rcv2_del_backup) + recv_file(ddDel / "rcv1.xftp") + del_file(ddDel / "snd.xftp.private") + + # Send a positive control file that will NOT be deleted + srcKeep = make_file("keepmigrate.bin") + send_file(srcKeep, n=1) + check("8.3a keep file sent", (desc_dir("keepmigrate.bin") / "rcv1.xftp").exists()) + + stop_server() + + enable_db_mode() + r = db_import() + check("8.3b import succeeded", r.returncode == 0) + + # The kept file must be imported — proves import actually ran. 
+ fc = db_count("files") + check(f"8.3c files imported ({fc})", fc > 0 and fc != -1) + + ok = start_server("--confirm-migrations", "up") + check("8.3d PG server started", ok) + if ok: + # Positive control: kept file is receivable after migration + recv_file(desc_dir("keepmigrate.bin") / "rcv1.xftp") + check("8.3e kept file receivable after migration", + files_match(srcKeep, test_dir / "received/keepmigrate.bin")) + + # Negative control: deleted file's rcv2 must return AUTH after migration + r = recv_file(rcv2_del_backup) + check("8.3f deleted file rcv2 fails after migration", r.returncode != 0) + check("8.3g error is AUTH (deleted file absent)", + "AUTH" in (r.stdout + r.stderr)) + stop_server() + + # 8.4 Large multi-chunk file integrity after migration + print(" --- 8.4 large file (multi-chunk) migration ---") + clean_test_dir() + clean_db() + srv = init_server() + assert start_server() + + src = make_file("largefile.bin", size_mb=20) + send_file(src, n=1) + dd = desc_dir("largefile.bin") + check("8.4a large file sent", (dd / "rcv1.xftp").exists()) + + stop_server() + + enable_db_mode() + r = db_import() + check("8.4b import succeeded", r.returncode == 0) + + ok = start_server("--confirm-migrations", "up") + check("8.4c PG server started", ok) + if ok: + recv_file(dd / "rcv1.xftp") + check("8.4d large file integrity after migration", + files_match(src, test_dir / "received/largefile.bin")) + stop_server() + + # 8.5 Double round-trip: memory -> PG -> memory, then receive + print(" --- 8.5 double round-trip migration ---") + clean_test_dir() + clean_db() + srv = init_server() + assert start_server() + + src = make_file("roundtrip.bin") + send_file(src, n=1) + dd = desc_dir("roundtrip.bin") + + stop_server() + + # memory -> PG + enable_db_mode() + r = db_import() + check("8.5a first import (memory->PG)", r.returncode == 0) + + ok = start_server("--confirm-migrations", "up") + check("8.5b PG server started", ok) + stop_server() + + # PG -> memory + r = db_export() + 
check("8.5c first export (PG->memory)", r.returncode == 0) + + disable_db_mode() + ok = start_server() + check("8.5d memory server started (round 1)", ok) + stop_server() + + # memory -> PG again + clean_db() + enable_db_mode() + r = db_import() + check("8.5e second import (memory->PG)", r.returncode == 0) + + ok = start_server("--confirm-migrations", "up") + check("8.5f PG server started (round 2)", ok) + if ok: + recv_file(dd / "rcv1.xftp") + check("8.5g file intact after double round-trip", + files_match(src, test_dir / "received/roundtrip.bin")) + stop_server() + + +def test_9_auth_and_access_control(): + """Basic auth, allowNewFiles, storage quota, file expiration.""" + global srv + print("\n=== 9. Auth and access control ===") + + # 9.1 AllowNewFiles=false rejects upload + print(" --- 9.1 allowNewFiles=false ---") + clean_test_dir() + srv = init_server() + ini_set("new_files", "off") + assert start_server() + + src = make_file("reject.bin") + r = send_file(src) + check("9.1 upload rejected when new_files=off", r.returncode != 0) + stop_server() + + # 9.2 Basic auth: no password → fails + print(" --- 9.2 basic auth: no password ---") + clean_test_dir() + srv = init_server() + ini_set("new_files", "on") + # Uncomment and set create_password + ini = ini_path() + txt = ini.read_text() + txt, n = re.subn(r"^# create_password:.*$", "create_password: secret123", txt, flags=re.MULTILINE) + assert n > 0, "create_password commented line not found in INI" + ini.write_text(txt) + assert start_server() + + src = make_file("authtest.bin") + r = send_file(src) + check("9.2a upload without password fails", r.returncode != 0) + check("9.2b error is AUTH", "AUTH" in (r.stdout + r.stderr)) + stop_server() + + # 9.3 Basic auth: wrong password → fails + print(" --- 9.3 basic auth: wrong password ---") + # Reinit with password in server address + clean_test_dir() + srv = init_server() + ini_set("new_files", "on") + ini = ini_path() + txt = ini.read_text() + txt, n = re.subn(r"^# 
create_password:.*$", "create_password: secret123", txt, flags=re.MULTILINE) + assert n > 0, "create_password commented line not found in INI" + ini.write_text(txt) + fp = (Path(os.environ["XFTP_SERVER_CFG_PATH"]) / "fingerprint").read_text().strip() + wrong_srv = f"xftp://{fp}:wrongpass@127.0.0.1:{PORT}" + assert start_server() + + src = make_file("authtest.bin") + r = run([XFTP, "send", str(src), str(test_dir / "descriptions"), + "-s", wrong_srv, "-n", "1"], check=False, timeout=30) + output = r.stdout + r.stderr + check("9.3a wrong password prints AUTH error", "PCEProtocolError AUTH" in output) + check("9.3b no descriptor created", not (desc_dir("authtest.bin") / "rcv1.xftp").exists()) + stop_server() + + # 9.4 Basic auth: correct password → succeeds + print(" --- 9.4 basic auth: correct password ---") + clean_test_dir() + srv = init_server() + ini_set("new_files", "on") + ini = ini_path() + txt = ini.read_text() + txt, n = re.subn(r"^# create_password:.*$", "create_password: secret123", txt, flags=re.MULTILINE) + assert n > 0, "create_password commented line not found in INI" + ini.write_text(txt) + fp = (Path(os.environ["XFTP_SERVER_CFG_PATH"]) / "fingerprint").read_text().strip() + correct_srv = f"xftp://{fp}:secret123@127.0.0.1:{PORT}" + assert start_server() + + src = make_file("authok.bin") + r = run([XFTP, "send", str(src), str(test_dir / "descriptions"), + "-s", correct_srv, "-n", "1"], check=False, timeout=60) + check("9.4 upload with correct password succeeds", r.returncode == 0) + stop_server() + + # 9.5 Server no auth, client sends auth → succeeds + print(" --- 9.5 no server auth, client sends auth ---") + clean_test_dir() + srv = init_server() + fp = (Path(os.environ["XFTP_SERVER_CFG_PATH"]) / "fingerprint").read_text().strip() + auth_srv = f"xftp://{fp}:anypass@127.0.0.1:{PORT}" + assert start_server() + + src = make_file("noauth.bin") + r = run([XFTP, "send", str(src), str(test_dir / "descriptions"), + "-s", auth_srv, "-n", "1"], check=False, 
timeout=60) + check("9.5 upload with auth to no-auth server succeeds", r.returncode == 0) + stop_server() + + # 9.6 Storage quota: exact boundary + print(" --- 9.6 storage quota boundary ---") + clean_test_dir() + # Chunk size is 128KB, so 1MB file = ~8 chunks but stored as one padded chunk per server file + # Use small quota: allow exactly 2 files of 1MB + srv = init_server(quota="3mb") + assert start_server() + + src1 = make_file("quota1.bin") + r1 = send_file(src1) + check("9.6a first file within quota", r1.returncode == 0) + + src2 = make_file("quota2.bin") + r2 = send_file(src2) + check("9.6b second file within quota", r2.returncode == 0) + + src3 = make_file("quota3.bin") + r3 = send_file(src3) + check("9.6c third file rejected", r3.returncode != 0) + check("9.6d error is QUOTA", "QUOTA" in (r3.stdout + r3.stderr)) + stop_server() + + # 9.7 File expiration + # Note: createdAt uses hour-level precision (fileTimePrecision = 3600s). + # With expire_files_hours=0, TTL=0, and the check is createdAt + TTL < now. + # Files created in the current hour have createdAt = now (rounded), so + # createdAt + 0 is NOT < now — they won't expire until the next hour. + # The check interval is hardcoded at 2 hours and not configurable via INI. + # This makes expiration untestable in a fast automated test. + # File expiration IS tested in the Haskell test suite (testFileChunkExpiration) + # with a 1-second TTL and 1-second check interval configured programmatically. + print(" --- 9.7 file expiration (skipped: requires hour boundary, tested in Haskell suite) ---") + + +def test_10_control_port_operations(): + """Control port: delete, auth failure, invalid commands, stats.""" + global srv + print("\n=== 10. 
Control port operations ===") + + clean_test_dir() + srv = init_server() + enable_control_port() + assert start_server() + + # 10.1 Control port: command without authentication + # Server should respond with "AUTH" when no auth has been provided + print(" --- 10.1 no auth ---") + src = make_file("ctrldel.bin") + send_file(src, n=1) + dd = desc_dir("ctrldel.bin") + rcv_ids = get_recipient_ids(dd / "rcv1.xftp") + resp = control_cmd(f"delete {rcv_ids[0]}", auth=False) + check("10.1 command without auth returns AUTH", resp == "AUTH") + + # 10.2 Control port: wrong password assigns CPRNone, commands return AUTH + print(" --- 10.2 wrong password ---") + s = control_connect() + auth_resp = control_send_recv(s, "auth wrongpassword") + check("10.2a wrong password gives CPRNone", auth_resp == "Current role is CPRNone") + cmd_resp = control_send_recv(s, f"delete {rcv_ids[0]}") + check("10.2b CPRNone command returns AUTH", cmd_resp == "AUTH") + s.sendall(b"quit\n") + s.close() + + # 10.3 Control port: stats-rts + # Without +RTS -T, returns "unsupported operation (GHC.Stats.getRTSStats: ...)" + # With +RTS -T, returns actual GHC runtime stats with "gcs" field etc. + # Either is a valid non-error response. 
+ print(" --- 10.3 stats-rts ---") + resp = control_cmd("stats-rts") + check("10.3 stats-rts responds", + "getRTSStats" in resp or "gcs" in resp or "allocated_bytes" in resp) + + # 10.4 Control port: delete command removes file + print(" --- 10.4 control port delete ---") + resp = control_cmd(f"delete {rcv_ids[0]}") + check("10.4a delete command returns ok", resp == "ok") + + r = recv_file(dd / "rcv1.xftp") + check("10.4b recv after control port delete fails", r.returncode != 0) + check("10.4c error is AUTH", "AUTH" in (r.stdout + r.stderr)) + + # 10.5 Control port: invalid block reason + print(" --- 10.5 invalid block reason ---") + src2 = make_file("badblock.bin") + send_file(src2, n=1) + dd2 = desc_dir("badblock.bin") + rcv_ids2 = get_recipient_ids(dd2 / "rcv1.xftp") + + resp = control_cmd(f"block {rcv_ids2[0]} reason=invalid_reason") + check("10.5 invalid block reason returns error", resp.startswith("error:")) + + stop_server() + + +def test_11_blocked_file_sender_delete(): + """Blocked file: sender cannot delete it.""" + global srv + print("\n=== 11. Blocked file: sender delete attempt ===") + + clean_test_dir() + srv = init_server() + enable_control_port() + assert start_server() + + src = make_file("blockdel.bin") + send_file(src, n=1) + dd = desc_dir("blockdel.bin") + + rcv_ids = get_recipient_ids(dd / "rcv1.xftp") + resp = control_cmd(f"block {rcv_ids[0]} reason=spam") + check("11.1 block succeeded", resp == "ok") + + # Sender tries to delete — should fail with BLOCKED + r = del_file(dd / "snd.xftp.private") + check("11.2 sender delete of blocked file fails", r.returncode != 0) + check("11.3 error mentions BLOCKED", + "BLOCKED" in (r.stdout + r.stderr)) + + stop_server() + + +def test_12_recipient_cascade_and_storage(): + """Recipient cascade delete and storage accounting.""" + global srv + print("\n=== 12. 
Recipient cascade and storage accounting ===") + + # 12.1 Recipient cascade: delete file, all recipients gone + print(" --- 12.1 recipient cascade delete (PG) ---") + clean_test_dir() + clean_db() + srv = init_server() + enable_db_mode() + store_log = Path(os.environ["XFTP_SERVER_LOG_PATH"]) / "file-server-store.log" + store_log.unlink(missing_ok=True) + create_schema() + ok = start_server("--confirm-migrations", "up") + check("12.1a PG server started", ok) + if not ok: + return + + src = make_file("cascade.bin") + send_file(src, n=3) + + fc_before = db_count("files") + rc_before = db_count("recipients") + check(f"12.1b files before delete ({fc_before})", fc_before > 0) + check(f"12.1c recipients before delete ({rc_before})", rc_before > 0) + + del_file(desc_dir("cascade.bin") / "snd.xftp.private") + + fc_after = db_count("files") + rc_after = db_count("recipients") + check(f"12.1d files after delete ({fc_after})", fc_after == 0) + check(f"12.1e recipients cascade deleted ({rc_after})", rc_after == 0) + + # 12.2 Storage accounting: upload, delete, verify disk + print(" --- 12.2 storage accounting ---") + src1 = make_file("stor1.bin") + r1 = send_file(src1) + check("12.2a stor1 upload succeeded", r1.returncode == 0) + src2 = make_file("stor2.bin") + r2 = send_file(src2) + check("12.2b stor2 upload succeeded", r2.returncode == 0) + + files_on_disk = len(list((test_dir / "files").iterdir())) + check(f"12.2c files on disk after upload ({files_on_disk})", files_on_disk > 0) + + del_file(desc_dir("stor1.bin") / "snd.xftp.private") + del_file(desc_dir("stor2.bin") / "snd.xftp.private") + + files_on_disk = len(list((test_dir / "files").iterdir())) + check(f"12.2d files on disk after delete ({files_on_disk})", files_on_disk == 0) + + fc = db_count("files") + check(f"12.2e DB files after delete ({fc})", fc == 0) + + stop_server() + + +# =================================================================== +# Main +# 
=================================================================== + +if __name__ == "__main__": + XFTP_SERVER = cabal_bin("xftp-server") + XFTP = cabal_bin("xftp") + test_dir = Path.cwd() / "xftp-test" + + os.environ["XFTP_SERVER_CFG_PATH"] = str(test_dir / "etc") + os.environ["XFTP_SERVER_LOG_PATH"] = str(test_dir / "var") + + srv = "" + + print(f"XFTP server: {XFTP_SERVER}") + print(f"XFTP client: {XFTP}") + print(f"Test dir: {test_dir}") + print(f"PGHOST: {os.environ.get('PGHOST', '(default)')}") + + # Verify prerequisites + r = psql("SELECT 1;", check=False) + if r.returncode != 0: + sys.exit(f"Cannot connect to PostgreSQL as {PG_ADMIN_USER}. Is it running?") + r = psql("SELECT 1;", user=DB_USER, db="postgres", check=False) + if r.returncode != 0: + sys.exit(f"PostgreSQL user '{DB_USER}' does not exist.\n" + f"Run: psql -U {PG_ADMIN_USER} -c \"CREATE USER {DB_USER} WITH SUPERUSER;\"") + + try: + test_1_basic_memory() + test_2_basic_postgres() + test_3_migration_memory_to_pg() + test_4_migration_pg_to_memory() # continues from test_3 state + test_4b_send_pg_receive_memory() + test_5_restart_persistence() + test_6_edge_cases() + test_7_blocking() + test_8_migration_edge_cases() + test_9_auth_and_access_control() + test_10_control_port_operations() + test_11_blocked_file_sender_delete() + test_12_recipient_cascade_and_storage() + except Exception: + stop_server() + print("\n [ERROR] Unexpected exception:") + traceback.print_exc() + FAIL += 1 + finally: + stop_server() + # Cleanup + if test_dir.exists(): + shutil.rmtree(test_dir) + psql(f"DROP DATABASE IF EXISTS {DB_NAME};", check=False) + + print(f"\n{'=' * 42}") + print(f"Results: {PASS} passed, {FAIL} failed") + print(f"{'=' * 42}") + sys.exit(1 if FAIL > 0 else 0)