From 736e5b408b4c8079269330be7f6047273331abb8 Mon Sep 17 00:00:00 2001
From: Wojtek Majewski
Date: Mon, 5 Jan 2026 08:43:20 +0100
Subject: [PATCH] feat: add conditional step execution with skip infrastructure

- Add skip_reason/skipped_at columns and _cascade_force_skip_steps() function
- Add condition evaluation via cascade_resolve_conditions(), run before start_ready_steps
- Add whenFailed option for error handling after retries are exhausted
- Add ifNot negative condition pattern
- Add ContainmentPattern and StepMeta types for type-safe DSL
- Add condition fields to FlowShape for auto-compilation
---
 .changeset/add-when-failed-option.md          |    6 +
 .changeset/skip-infrastructure-schema.md      |    5 +
 pkgs/core/schemas/0050_tables_definitions.sql |    8 +-
 pkgs/core/schemas/0060_tables_runtime.sql     |   23 +-
 ...100_function__cascade_force_skip_steps.sql |  105 +
 pkgs/core/schemas/0100_function_add_step.sql  |   21 +-
 ...00_function_cascade_resolve_conditions.sql |  274 +++
 .../0100_function_compare_flow_shapes.sql     |   54 +
 .../schemas/0100_function_complete_task.sql   |   20 +
 .../0100_function_create_flow_from_shape.sql  |   14 +-
 pkgs/core/schemas/0100_function_fail_task.sql |  118 +-
 .../schemas/0100_function_get_flow_shape.sql  |   14 +-
 .../core/schemas/0100_function_start_flow.sql |    8 +
 .../0100_function_start_ready_steps.sql       |   96 +-
 .../schemas/0120_function_start_tasks.sql     |    3 +-
 pkgs/core/src/database-types.ts               |   31 +
 .../20260121095914_pgflow_step_conditions.sql | 1798 +++++++++++++++++
 pkgs/core/supabase/migrations/atlas.sum       |    3 +-
 .../broadcast_order.test.sql                  |   64 +
 .../cascade_through_multiple_levels.test.sql  |   95 +
 .../cascade_to_single_dependent.test.sql      |   86 +
 .../multi_dependency_partial_skip.test.sql    |   80 +
 .../single_step_skip.test.sql                 |   69 +
 .../skipped_event_payload.test.sql            |   88 +
 .../condition_invalid_values.test.sql         |   24 +
 .../add_step/condition_not_pattern.test.sql   |  102 +
 .../add_step/condition_parameters.test.sql    |  125 ++
 .../condition_mode_drift.test.sql             |   60 +
 .../pattern_differences.test.sql              |   42 +
 .../branching_opposite_conditions.test.sql    |  136 ++
 .../combined_if_and_ifnot.test.sql            |   92 +
 .../dependent_step_condition_met.test.sql     |   66 +
 ...pendent_step_condition_unmet_skip.test.sql |   73 +
 .../ifnot_empty_object_absent_dep.test.sql    |   59 +
 ...ot_root_step_pattern_matches_fail.test.sql |   59 +
 ...not_root_step_pattern_not_matches.test.sql |   51 +
 .../ifnot_root_step_skip.test.sql             |   68 +
 .../ifnot_root_step_skip_cascade.test.sql     |   77 +
 .../no_condition_always_executes.test.sql     |   40 +
 ...n_skip_iterates_until_convergence.test.sql |  115 ++
 .../plain_skip_propagates_to_map.test.sql     |  104 +
 .../root_step_condition_met.test.sql          |   53 +
 .../root_step_condition_unmet_fail.test.sql   |   59 +
 .../root_step_condition_unmet_skip.test.sql   |   70 +
 ...step_condition_unmet_skip_cascade.test.sql |   83 +
 .../skipped_deps_excluded_from_input.test.sql |  134 ++
 .../basic_compile.test.sql                    |   21 +-
 .../condition_modes_compile.test.sql          |   61 +
 .../map_step_compile.test.sql                 |   10 +-
 .../options_compile.test.sql                  |    8 +-
 .../allow_data_loss_recompiles.test.sql       |    2 +-
 .../auto_recompiles_when_local.test.sql       |    2 +-
 .../compiles_missing_flow.test.sql            |    2 +-
 .../verifies_matching_shape.test.sql          |    8 +-
 .../skip_decrements_remaining_deps.test.sql   |   84 +
 .../skip_diamond_multiple_dependents.test.sql |   90 +
 .../skip_only_step_completes_run.test.sql     |   59 +
 .../skip_partial_deps_waits.test.sql          |   85 +
 .../skip_propagates_to_map_step.test.sql      |  113 ++
 ...ip_verifies_handler_failed_reason.test.sql |   78 +
 .../type_violation_always_hard_fails.test.sql |   57 +
 ...when_failed_fail_marks_run_failed.test.sql |   37 +
 ...led_skip_cascade_skips_dependents.test.sql |   68 +
 .../when_failed_skip_skips_step.test.sql      |   51 +
 .../tests/get_flow_shape/basic_shape.test.sql |    6 +-
 .../tests/get_flow_shape/map_steps.test.sql   |    4 +-
 .../multiple_deps_sorted.test.sql             |    8 +-
 .../get_flow_shape/pattern_shape.test.sql     |   44 +
 .../runtime/condition-options.test.ts         |  288 +++
 pkgs/dsl/__tests__/runtime/flow-shape.test.ts |  394 +++-
 .../runtime/when-failed-options.test.ts       |  186 ++
 .../types/condition-pattern.test-d.ts         |  618 ++++++
 .../types/extract-flow-steps.test-d.ts        |   56 +-
 pkgs/dsl/__tests__/types/map-method.test-d.ts |   38 +-
 .../types/map-return-type-inference.test-d.ts |   22 +-
 .../__tests__/types/skippable-deps.test-d.ts  |  562 ++++++
 pkgs/dsl/src/compile-flow.ts                  |   24 +-
 pkgs/dsl/src/dsl.ts                           |  587 +++++-
 pkgs/dsl/src/flow-shape.ts                    |   73 +-
 .../flow/compilationAtStartup.test.ts         |  148 +-
 prompt.md                                     |    1 +
 x.md                                          |    1 +
 82 files changed, 8362 insertions(+), 309 deletions(-)
 create mode 100644 .changeset/add-when-failed-option.md
 create mode 100644 .changeset/skip-infrastructure-schema.md
 create mode 100644 pkgs/core/schemas/0100_function__cascade_force_skip_steps.sql
 create mode 100644 pkgs/core/schemas/0100_function_cascade_resolve_conditions.sql
 create mode 100644 pkgs/core/supabase/migrations/20260121095914_pgflow_step_conditions.sql
 create mode 100644 pkgs/core/supabase/tests/_cascade_force_skip_steps/broadcast_order.test.sql
 create mode 100644 pkgs/core/supabase/tests/_cascade_force_skip_steps/cascade_through_multiple_levels.test.sql
 create mode 100644 pkgs/core/supabase/tests/_cascade_force_skip_steps/cascade_to_single_dependent.test.sql
 create mode 100644 pkgs/core/supabase/tests/_cascade_force_skip_steps/multi_dependency_partial_skip.test.sql
 create mode 100644 pkgs/core/supabase/tests/_cascade_force_skip_steps/single_step_skip.test.sql
 create mode 100644 pkgs/core/supabase/tests/_cascade_force_skip_steps/skipped_event_payload.test.sql
 create mode 100644 pkgs/core/supabase/tests/add_step/condition_invalid_values.test.sql
 create mode 100644 pkgs/core/supabase/tests/add_step/condition_not_pattern.test.sql
 create mode 100644 pkgs/core/supabase/tests/add_step/condition_parameters.test.sql
 create mode 100644 pkgs/core/supabase/tests/compare_flow_shapes/condition_mode_drift.test.sql
 create mode 100644 pkgs/core/supabase/tests/compare_flow_shapes/pattern_differences.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/branching_opposite_conditions.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/combined_if_and_ifnot.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/dependent_step_condition_met.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/dependent_step_condition_unmet_skip.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/ifnot_empty_object_absent_dep.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_pattern_matches_fail.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_pattern_not_matches.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_skip.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_skip_cascade.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/no_condition_always_executes.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/plain_skip_iterates_until_convergence.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/plain_skip_propagates_to_map.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/root_step_condition_met.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/root_step_condition_unmet_fail.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/root_step_condition_unmet_skip.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/root_step_condition_unmet_skip_cascade.test.sql
 create mode 100644 pkgs/core/supabase/tests/condition_evaluation/skipped_deps_excluded_from_input.test.sql
 create mode 100644 pkgs/core/supabase/tests/create_flow_from_shape/condition_modes_compile.test.sql
 create mode 100644 pkgs/core/supabase/tests/fail_task_when_failed/skip_decrements_remaining_deps.test.sql
 create mode 100644 pkgs/core/supabase/tests/fail_task_when_failed/skip_diamond_multiple_dependents.test.sql
 create mode 100644 pkgs/core/supabase/tests/fail_task_when_failed/skip_only_step_completes_run.test.sql
 create mode 100644 pkgs/core/supabase/tests/fail_task_when_failed/skip_partial_deps_waits.test.sql
 create mode 100644 pkgs/core/supabase/tests/fail_task_when_failed/skip_propagates_to_map_step.test.sql
 create mode 100644 pkgs/core/supabase/tests/fail_task_when_failed/skip_verifies_handler_failed_reason.test.sql
 create mode 100644 pkgs/core/supabase/tests/fail_task_when_failed/type_violation_always_hard_fails.test.sql
 create mode 100644 pkgs/core/supabase/tests/fail_task_when_failed/when_failed_fail_marks_run_failed.test.sql
 create mode 100644 pkgs/core/supabase/tests/fail_task_when_failed/when_failed_skip_cascade_skips_dependents.test.sql
 create mode 100644 pkgs/core/supabase/tests/fail_task_when_failed/when_failed_skip_skips_step.test.sql
 create mode 100644 pkgs/core/supabase/tests/get_flow_shape/pattern_shape.test.sql
 create mode 100644 pkgs/dsl/__tests__/runtime/condition-options.test.ts
 create mode 100644 pkgs/dsl/__tests__/runtime/when-failed-options.test.ts
 create mode 100644 pkgs/dsl/__tests__/types/condition-pattern.test-d.ts
 create mode 100644 pkgs/dsl/__tests__/types/skippable-deps.test-d.ts
 create mode 100644 prompt.md
 create mode 100644 x.md

diff --git a/.changeset/add-when-failed-option.md b/.changeset/add-when-failed-option.md
new file mode 100644
index 000000000..254800c84
--- /dev/null
+++ b/.changeset/add-when-failed-option.md
@@ -0,0 +1,6 @@
+---
+'@pgflow/core': patch
+'@pgflow/dsl': patch
+---
+
+Add whenFailed option for error handling after retries are exhausted (fail, skip, skip-cascade)
diff --git a/.changeset/skip-infrastructure-schema.md b/.changeset/skip-infrastructure-schema.md
new file mode 100644
index 000000000..5fd952320
--- /dev/null
+++ b/.changeset/skip-infrastructure-schema.md
@@ -0,0 +1,5 @@
+---
+'@pgflow/core': patch
+---
+
+Add skip infrastructure schema for conditional execution - new columns (required_input_pattern, forbidden_input_pattern, when_unmet, when_failed on steps; skip_reason, skipped_at on step_states), 'skipped' status, and the _cascade_force_skip_steps function
diff --git a/pkgs/core/schemas/0050_tables_definitions.sql b/pkgs/core/schemas/0050_tables_definitions.sql
index 42367280c..1ea94b079 100644
--- a/pkgs/core/schemas/0050_tables_definitions.sql
+++ b/pkgs/core/schemas/0050_tables_definitions.sql
@@ -24,6 +24,10 @@ create table pgflow.steps (
   opt_base_delay int,
   opt_timeout int,
   opt_start_delay int,
+  required_input_pattern jsonb, -- JSON pattern for @> containment check (if)
+  forbidden_input_pattern jsonb, -- JSON pattern for NOT @> containment check (ifNot)
+  when_unmet text not null default 'skip', -- What to do when the condition is not met (skip is the natural default)
+  when_failed text not null default 'fail', -- What to do when the handler fails after retries
   created_at timestamptz not null default now(),
   primary key (flow_slug, step_slug),
   unique (flow_slug, step_index), -- Ensure step_index is unique within a flow
@@ -32,7 +36,9 @@ create table pgflow.steps (
   constraint opt_max_attempts_is_nonnegative check (opt_max_attempts is null or opt_max_attempts >= 0),
   constraint opt_base_delay_is_nonnegative check (opt_base_delay is null or opt_base_delay >= 0),
   constraint opt_timeout_is_positive check (opt_timeout is null or opt_timeout > 0),
-  constraint opt_start_delay_is_nonnegative check (opt_start_delay is null or opt_start_delay >= 0)
+  constraint opt_start_delay_is_nonnegative check (opt_start_delay is null or opt_start_delay >= 0),
+  constraint when_unmet_is_valid check (when_unmet in ('fail', 'skip', 'skip-cascade')),
+  constraint when_failed_is_valid check (when_failed in ('fail', 'skip', 'skip-cascade'))
 );
 
 -- Dependencies table - stores relationships between steps
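Reviewer note: the two pattern columns above are evaluated with jsonb containment (`@>`), as cascade_resolve_conditions further down shows. A minimal standalone illustration of those semantics, with made-up values (runnable in any Postgres):

```sql
-- if (required_input_pattern): condition met when the input CONTAINS the pattern
select '{"user": {"plan": "pro", "id": 7}}'::jsonb @> '{"user": {"plan": "pro"}}'::jsonb;
-- => true

-- ifNot (forbidden_input_pattern): condition met when the input does NOT contain the pattern
select not ('{"dry_run": true, "env": "ci"}'::jsonb @> '{"dry_run": true}'::jsonb);
-- => false, so when_unmet decides what happens: fail, skip, or skip-cascade
```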
diff --git a/pkgs/core/schemas/0060_tables_runtime.sql b/pkgs/core/schemas/0060_tables_runtime.sql
index 5d610e3fa..9892828fe 100644
--- a/pkgs/core/schemas/0060_tables_runtime.sql
+++ b/pkgs/core/schemas/0060_tables_runtime.sql
@@ -31,18 +31,20 @@ create table pgflow.step_states (
   remaining_deps int not null default 0 check (remaining_deps >= 0),
   output jsonb, -- Step output: stored atomically with status=completed transition
   error_message text,
+  skip_reason text, -- Why step was skipped: condition_unmet, handler_failed, dependency_skipped
   created_at timestamptz not null default now(),
   started_at timestamptz,
   completed_at timestamptz,
   failed_at timestamptz,
+  skipped_at timestamptz,
   primary key (run_id, step_slug),
   foreign key (flow_slug, step_slug) references pgflow.steps (flow_slug, step_slug),
-  constraint status_is_valid check (status in ('created', 'started', 'completed', 'failed')),
+  constraint status_is_valid check (status in ('created', 'started', 'completed', 'failed', 'skipped')),
   constraint status_and_remaining_tasks_match check (status != 'completed' or remaining_tasks = 0),
   -- Add constraint to ensure remaining_tasks is only set when step has started
   constraint remaining_tasks_state_consistency check (
-    remaining_tasks is null or status != 'created'
+    remaining_tasks is null or status not in ('created', 'skipped')
   ),
   constraint initial_tasks_known_when_started check (
     status != 'started' or initial_tasks is not null
   ),
   constraint output_only_for_completed_or_null check (
     output is null or status = 'completed'
   ),
-  constraint completed_at_or_failed_at check (not (completed_at is not null and failed_at is not null)),
+  -- skip_reason is required for skipped status and forbidden for other statuses
+  constraint skip_reason_matches_status check (
+    (status = 'skipped' and skip_reason is not null) or
+    (status != 'skipped' and skip_reason is null)
+  ),
+  constraint completed_at_or_failed_at_or_skipped_at check (
+    (
+      case when completed_at is not null then 1 else 0 end +
+      case when failed_at is not null then 1 else 0 end +
+      case when skipped_at is not null then 1 else 0 end
+    ) <= 1
+  ),
   constraint started_at_is_after_created_at check (started_at is null or started_at >= created_at),
   constraint completed_at_is_after_started_at check (completed_at is null or completed_at >= started_at),
-  constraint failed_at_is_after_started_at check (failed_at is null or failed_at >= started_at)
+  constraint failed_at_is_after_started_at check (failed_at is null or failed_at >= started_at),
+  constraint skipped_at_is_after_created_at check (skipped_at is null or skipped_at >= created_at)
 );
 
 create index if not exists idx_step_states_ready on pgflow.step_states (run_id, status, remaining_deps) where status = 'created' and remaining_deps = 0;
 create index if not exists idx_step_states_failed on pgflow.step_states (run_id, step_slug) where status = 'failed';
+create index if not exists idx_step_states_skipped on pgflow.step_states (run_id, step_slug) where status = 'skipped';
 create index if not exists idx_step_states_flow_slug on pgflow.step_states (flow_slug);
 create index if not exists idx_step_states_run_id on pgflow.step_states (run_id);
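Reviewer note: a hedged inspection query over the new runtime columns (the run id is hypothetical). The partial index idx_step_states_skipped added above covers exactly this predicate:

```sql
select step_slug, skip_reason, skipped_at
from pgflow.step_states
where run_id = '00000000-0000-0000-0000-000000000000'::uuid
  and status = 'skipped'
order by skipped_at, step_slug;
```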
diff --git a/pkgs/core/schemas/0100_function__cascade_force_skip_steps.sql b/pkgs/core/schemas/0100_function__cascade_force_skip_steps.sql
new file mode 100644
index 000000000..18d481d4e
--- /dev/null
+++ b/pkgs/core/schemas/0100_function__cascade_force_skip_steps.sql
@@ -0,0 +1,105 @@
+-- _cascade_force_skip_steps: Skip a step and cascade to all downstream dependents
+-- Used when a condition is unmet (whenUnmet: skip-cascade) or handler fails (whenFailed: skip-cascade)
+create or replace function pgflow._cascade_force_skip_steps(
+  run_id uuid,
+  step_slug text,
+  skip_reason text
+)
+returns int
+language plpgsql
+as $$
+DECLARE
+  v_flow_slug text;
+  v_total_skipped int := 0;
+BEGIN
+  -- Get flow_slug for this run
+  SELECT r.flow_slug INTO v_flow_slug
+  FROM pgflow.runs r
+  WHERE r.run_id = _cascade_force_skip_steps.run_id;
+
+  IF v_flow_slug IS NULL THEN
+    RAISE EXCEPTION 'Run not found: %', _cascade_force_skip_steps.run_id;
+  END IF;
+
+  -- ==========================================
+  -- SKIP STEPS IN TOPOLOGICAL ORDER
+  -- ==========================================
+  -- Use recursive CTE to find all downstream dependents,
+  -- then skip them in topological order (by step_index)
+  WITH RECURSIVE
+  -- ---------- Find all downstream steps ----------
+  downstream_steps AS (
+    -- Base case: the trigger step
+    SELECT
+      s.flow_slug,
+      s.step_slug,
+      s.step_index,
+      _cascade_force_skip_steps.skip_reason AS reason -- Original reason for trigger step
+    FROM pgflow.steps s
+    WHERE s.flow_slug = v_flow_slug
+      AND s.step_slug = _cascade_force_skip_steps.step_slug
+
+    UNION ALL
+
+    -- Recursive case: steps that depend on already-found steps
+    SELECT
+      s.flow_slug,
+      s.step_slug,
+      s.step_index,
+      'dependency_skipped'::text AS reason -- Downstream steps get this reason
+    FROM pgflow.steps s
+    JOIN pgflow.deps d ON d.flow_slug = s.flow_slug AND d.step_slug = s.step_slug
+    JOIN downstream_steps ds ON ds.flow_slug = d.flow_slug AND ds.step_slug = d.dep_slug
+  ),
+  -- ---------- Deduplicate and order by step_index ----------
+  steps_to_skip AS (
+    SELECT DISTINCT ON (ds.step_slug)
+      ds.flow_slug,
+      ds.step_slug,
+      ds.step_index,
+      ds.reason
+    FROM downstream_steps ds
+    ORDER BY ds.step_slug, ds.step_index -- Keep first occurrence (trigger step has original reason)
+  ),
+  -- ---------- Skip the steps ----------
+  skipped AS (
+    UPDATE pgflow.step_states ss
+    SET status = 'skipped',
+        skip_reason = sts.reason,
+        skipped_at = now(),
+        remaining_tasks = NULL -- Clear remaining_tasks for skipped steps
+    FROM steps_to_skip sts
+    WHERE ss.run_id = _cascade_force_skip_steps.run_id
+      AND ss.step_slug = sts.step_slug
+      AND ss.status IN ('created', 'started') -- Only skip non-terminal steps
+    RETURNING
+      ss.*,
+      -- Broadcast step:skipped event
+      realtime.send(
+        jsonb_build_object(
+          'event_type', 'step:skipped',
+          'run_id', ss.run_id,
+          'flow_slug', ss.flow_slug,
+          'step_slug', ss.step_slug,
+          'status', 'skipped',
+          'skip_reason', ss.skip_reason,
+          'skipped_at', ss.skipped_at
+        ),
+        concat('step:', ss.step_slug, ':skipped'),
+        concat('pgflow:run:', ss.run_id),
+        false
+      ) as _broadcast_result
+  ),
+  -- ---------- Update run counters ----------
+  run_updates AS (
+    UPDATE pgflow.runs r
+    SET remaining_steps = r.remaining_steps - skipped_count.count
+    FROM (SELECT COUNT(*) AS count FROM skipped) skipped_count
+    WHERE r.run_id = _cascade_force_skip_steps.run_id
+      AND skipped_count.count > 0
+  )
+  SELECT COUNT(*) INTO v_total_skipped FROM skipped;
+
+  RETURN v_total_skipped;
+END;
+$$;
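Reviewer note: an invocation sketch for the function above (it is an internal helper, normally reached via cascade_resolve_conditions or fail_task; the run id and slug here are hypothetical). The return value counts step states actually transitioned, since already-terminal steps are excluded by the status filter:

```sql
select pgflow._cascade_force_skip_steps(
  run_id      => '00000000-0000-0000-0000-000000000000'::uuid,
  step_slug   => 'enrich_profile',
  skip_reason => 'condition_unmet'
);
-- => e.g. 3, when the trigger step plus two downstream dependents were skipped
```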
diff --git a/pkgs/core/schemas/0100_function_add_step.sql b/pkgs/core/schemas/0100_function_add_step.sql
index 3fb8fbc54..0ed71bf69 100644
--- a/pkgs/core/schemas/0100_function_add_step.sql
+++ b/pkgs/core/schemas/0100_function_add_step.sql
@@ -6,7 +6,11 @@ create or replace function pgflow.add_step(
   base_delay int default null,
   timeout int default null,
   start_delay int default null,
-  step_type text default 'single'
+  step_type text default 'single',
+  required_input_pattern jsonb default null,
+  forbidden_input_pattern jsonb default null,
+  when_unmet text default 'skip',
+  when_failed text default 'fail'
 )
 returns pgflow.steps
 language plpgsql
@@ -22,7 +26,7 @@ BEGIN
   -- 0 dependencies (root map - maps over flow input array)
   -- 1 dependency (dependent map - maps over dependency output array)
   IF COALESCE(add_step.step_type, 'single') = 'map' AND COALESCE(array_length(add_step.deps_slugs, 1), 0) > 1 THEN
-    RAISE EXCEPTION 'Map step "%" can have at most one dependency, but % were provided: %', 
+    RAISE EXCEPTION 'Map step "%" can have at most one dependency, but % were provided: %',
       add_step.step_slug,
       COALESCE(array_length(add_step.deps_slugs, 1), 0),
       array_to_string(add_step.deps_slugs, ', ');
@@ -36,18 +40,23 @@ BEGIN
   -- Create the step
   INSERT INTO pgflow.steps (
     flow_slug, step_slug, step_type, step_index, deps_count,
-    opt_max_attempts, opt_base_delay, opt_timeout, opt_start_delay
+    opt_max_attempts, opt_base_delay, opt_timeout, opt_start_delay,
+    required_input_pattern, forbidden_input_pattern, when_unmet, when_failed
   )
   VALUES (
     add_step.flow_slug,
     add_step.step_slug,
     COALESCE(add_step.step_type, 'single'),
-    next_idx, 
+    next_idx,
     COALESCE(array_length(add_step.deps_slugs, 1), 0),
     add_step.max_attempts,
     add_step.base_delay,
     add_step.timeout,
-    add_step.start_delay
+    add_step.start_delay,
+    add_step.required_input_pattern,
+    add_step.forbidden_input_pattern,
+    add_step.when_unmet,
+    add_step.when_failed
   )
   ON CONFLICT ON CONSTRAINT steps_pkey
   DO UPDATE SET step_slug = EXCLUDED.step_slug
@@ -59,7 +68,7 @@ BEGIN
   FROM unnest(COALESCE(add_step.deps_slugs, '{}')) AS d(dep_slug)
   WHERE add_step.deps_slugs IS NOT NULL AND array_length(add_step.deps_slugs, 1) > 0
   ON CONFLICT ON CONSTRAINT deps_pkey DO NOTHING;
-  
+
   RETURN result_step;
 END;
 $$;
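Reviewer note: a usage sketch for the extended add_step signature (flow and step slugs hypothetical). For a step with dependencies, the patterns match against the aggregated deps object keyed by dependency slug, which cascade_resolve_conditions below builds with jsonb_object_agg:

```sql
select pgflow.add_step(
  flow_slug               => 'onboarding',
  step_slug               => 'send_upgrade_email',
  deps_slugs              => array['load_user'],
  required_input_pattern  => '{"load_user": {"plan": "free"}}'::jsonb,
  forbidden_input_pattern => '{"load_user": {"unsubscribed": true}}'::jsonb,
  when_unmet              => 'skip-cascade'
);
```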
diff --git a/pkgs/core/schemas/0100_function_cascade_resolve_conditions.sql b/pkgs/core/schemas/0100_function_cascade_resolve_conditions.sql
new file mode 100644
index 000000000..927e4564b
--- /dev/null
+++ b/pkgs/core/schemas/0100_function_cascade_resolve_conditions.sql
@@ -0,0 +1,274 @@
+-- cascade_resolve_conditions: Evaluate step conditions and handle skip/fail modes
+-- Called before cascade_complete_taskless_steps to evaluate conditions on ready steps.
+-- Must iterate until convergence since skipping a step can make dependents ready.
+--
+-- Returns:
+--   true  = run can continue (or nothing to do)
+--   false = run was failed (due to fail mode)
+create or replace function pgflow.cascade_resolve_conditions(run_id uuid)
+returns boolean
+language plpgsql
+set search_path to ''
+as $$
+DECLARE
+  v_run_input jsonb;
+  v_run_status text;
+  v_first_fail record;
+  v_iteration_count int := 0;
+  v_max_iterations int := 50;
+  v_processed_count int;
+BEGIN
+  -- ==========================================
+  -- GUARD: Early return if run is already terminal
+  -- ==========================================
+  SELECT r.status, r.input INTO v_run_status, v_run_input
+  FROM pgflow.runs r
+  WHERE r.run_id = cascade_resolve_conditions.run_id;
+
+  IF v_run_status IN ('failed', 'completed') THEN
+    RETURN v_run_status != 'failed';
+  END IF;
+
+  -- ==========================================
+  -- ITERATE UNTIL CONVERGENCE
+  -- ==========================================
+  -- After skipping steps, dependents may become ready and need evaluation.
+  -- Loop until no more steps are processed.
+  LOOP
+    v_iteration_count := v_iteration_count + 1;
+    IF v_iteration_count > v_max_iterations THEN
+      RAISE EXCEPTION 'cascade_resolve_conditions exceeded safety limit of % iterations', v_max_iterations;
+    END IF;
+
+    v_processed_count := 0;
+
+    -- ==========================================
+    -- PHASE 1a: CHECK FOR FAIL CONDITIONS
+    -- ==========================================
+    -- Find first step (by topological order) with unmet condition and 'fail' mode.
+    -- Condition is unmet when:
+    --   (required_input_pattern is set AND input does NOT contain it) OR
+    --   (forbidden_input_pattern is set AND input DOES contain it)
+    WITH steps_with_conditions AS (
+      SELECT
+        step_state.flow_slug,
+        step_state.step_slug,
+        step.required_input_pattern,
+        step.forbidden_input_pattern,
+        step.when_unmet,
+        step.deps_count,
+        step.step_index
+      FROM pgflow.step_states AS step_state
+      JOIN pgflow.steps AS step
+        ON step.flow_slug = step_state.flow_slug
+       AND step.step_slug = step_state.step_slug
+      WHERE step_state.run_id = cascade_resolve_conditions.run_id
+        AND step_state.status = 'created'
+        AND step_state.remaining_deps = 0
+        AND (step.required_input_pattern IS NOT NULL OR step.forbidden_input_pattern IS NOT NULL)
+    ),
+    step_deps_output AS (
+      SELECT
+        swc.step_slug,
+        jsonb_object_agg(dep_state.step_slug, dep_state.output) AS deps_output
+      FROM steps_with_conditions swc
+      JOIN pgflow.deps dep ON dep.flow_slug = swc.flow_slug AND dep.step_slug = swc.step_slug
+      JOIN pgflow.step_states dep_state
+        ON dep_state.run_id = cascade_resolve_conditions.run_id
+       AND dep_state.step_slug = dep.dep_slug
+       AND dep_state.status = 'completed' -- Only completed deps (not skipped)
+      WHERE swc.deps_count > 0
+      GROUP BY swc.step_slug
+    ),
+    condition_evaluations AS (
+      SELECT
+        swc.*,
+        -- condition_met = (if IS NULL OR input @> if) AND (ifNot IS NULL OR NOT(input @> ifNot))
+        (swc.required_input_pattern IS NULL OR
+         CASE WHEN swc.deps_count = 0 THEN v_run_input ELSE COALESCE(sdo.deps_output, '{}'::jsonb) END @> swc.required_input_pattern)
+        AND
+        (swc.forbidden_input_pattern IS NULL OR
+         NOT (CASE WHEN swc.deps_count = 0 THEN v_run_input ELSE COALESCE(sdo.deps_output, '{}'::jsonb) END @> swc.forbidden_input_pattern))
+        AS condition_met
+      FROM steps_with_conditions swc
+      LEFT JOIN step_deps_output sdo ON sdo.step_slug = swc.step_slug
+    )
+    SELECT flow_slug, step_slug, required_input_pattern, forbidden_input_pattern
+    INTO v_first_fail
+    FROM condition_evaluations
+    WHERE NOT condition_met AND when_unmet = 'fail'
+    ORDER BY step_index
+    LIMIT 1;
+
+    -- Handle fail mode: fail step and run, return false
+    -- Note: "v_first_fail IS NOT NULL" is false whenever any field of the
+    -- record is NULL, so it cannot detect a match here. Use FOUND instead.
+    IF FOUND THEN
+      UPDATE pgflow.step_states
+      SET status = 'failed',
+          failed_at = now(),
+          error_message = 'Condition not met'
+      WHERE pgflow.step_states.run_id = cascade_resolve_conditions.run_id
+        AND pgflow.step_states.step_slug = v_first_fail.step_slug;
+
+      UPDATE pgflow.runs
+      SET status = 'failed',
+          failed_at = now()
+      WHERE pgflow.runs.run_id = cascade_resolve_conditions.run_id;
+
+      RETURN false;
+    END IF;
+
+    -- ==========================================
+    -- PHASE 1b: HANDLE SKIP CONDITIONS (with propagation)
+    -- ==========================================
+    -- Skip steps with unmet conditions and whenUnmet='skip'.
+    -- Also decrement remaining_deps on dependents and set initial_tasks=0 for map dependents.
+    WITH steps_with_conditions AS (
+      SELECT
+        step_state.flow_slug,
+        step_state.step_slug,
+        step.required_input_pattern,
+        step.forbidden_input_pattern,
+        step.when_unmet,
+        step.deps_count,
+        step.step_index
+      FROM pgflow.step_states AS step_state
+      JOIN pgflow.steps AS step
+        ON step.flow_slug = step_state.flow_slug
+       AND step.step_slug = step_state.step_slug
+      WHERE step_state.run_id = cascade_resolve_conditions.run_id
+        AND step_state.status = 'created'
+        AND step_state.remaining_deps = 0
+        AND (step.required_input_pattern IS NOT NULL OR step.forbidden_input_pattern IS NOT NULL)
+    ),
+    step_deps_output AS (
+      SELECT
+        swc.step_slug,
+        jsonb_object_agg(dep_state.step_slug, dep_state.output) AS deps_output
+      FROM steps_with_conditions swc
+      JOIN pgflow.deps dep ON dep.flow_slug = swc.flow_slug AND dep.step_slug = swc.step_slug
+      JOIN pgflow.step_states dep_state
+        ON dep_state.run_id = cascade_resolve_conditions.run_id
+       AND dep_state.step_slug = dep.dep_slug
+       AND dep_state.status = 'completed' -- Only completed deps (not skipped)
+      WHERE swc.deps_count > 0
+      GROUP BY swc.step_slug
+    ),
+    condition_evaluations AS (
+      SELECT
+        swc.*,
+        -- condition_met = (if IS NULL OR input @> if) AND (ifNot IS NULL OR NOT(input @> ifNot))
+        (swc.required_input_pattern IS NULL OR
+         CASE WHEN swc.deps_count = 0 THEN v_run_input ELSE COALESCE(sdo.deps_output, '{}'::jsonb) END @> swc.required_input_pattern)
+        AND
+        (swc.forbidden_input_pattern IS NULL OR
+         NOT (CASE WHEN swc.deps_count = 0 THEN v_run_input ELSE COALESCE(sdo.deps_output, '{}'::jsonb) END @> swc.forbidden_input_pattern))
+        AS condition_met
+      FROM steps_with_conditions swc
+      LEFT JOIN step_deps_output sdo ON sdo.step_slug = swc.step_slug
+    ),
+    unmet_skip_steps AS (
+      SELECT * FROM condition_evaluations
+      WHERE NOT condition_met AND when_unmet = 'skip'
+    ),
+    skipped_steps AS (
+      UPDATE pgflow.step_states ss
+      SET status = 'skipped',
+          skip_reason = 'condition_unmet',
+          skipped_at = now()
+      FROM unmet_skip_steps uss
+      WHERE ss.run_id = cascade_resolve_conditions.run_id
+        AND ss.step_slug = uss.step_slug
+      RETURNING
+        ss.*,
+        realtime.send(
+          jsonb_build_object(
+            'event_type', 'step:skipped',
+            'run_id', ss.run_id,
+            'flow_slug', ss.flow_slug,
+            'step_slug', ss.step_slug,
+            'status', 'skipped',
+            'skip_reason', 'condition_unmet',
+            'skipped_at', ss.skipped_at
+          ),
+          concat('step:', ss.step_slug, ':skipped'),
+          concat('pgflow:run:', ss.run_id),
+          false
+        ) AS _broadcast_result
+    ),
+    -- NEW: Update dependent steps (decrement remaining_deps, set initial_tasks=0 for maps)
+    dependent_updates AS (
+      UPDATE pgflow.step_states child_state
+      SET remaining_deps = child_state.remaining_deps - 1,
+          -- If child is a map step and this skipped step is its only dependency,
+          -- set initial_tasks = 0 (skipped dep = empty array)
+          initial_tasks = CASE
+            WHEN child_step.step_type = 'map' AND child_step.deps_count = 1 THEN 0
+            ELSE child_state.initial_tasks
+          END
+      FROM skipped_steps parent
+      JOIN pgflow.deps dep ON dep.flow_slug = parent.flow_slug AND dep.dep_slug = parent.step_slug
+      JOIN pgflow.steps child_step ON child_step.flow_slug = dep.flow_slug AND child_step.step_slug = dep.step_slug
+      WHERE child_state.run_id = cascade_resolve_conditions.run_id
+        AND child_state.step_slug = dep.step_slug
+    ),
+    run_update AS (
+      UPDATE pgflow.runs r
+      SET remaining_steps = r.remaining_steps - (SELECT COUNT(*) FROM skipped_steps)
+      WHERE r.run_id = cascade_resolve_conditions.run_id
+        AND (SELECT COUNT(*) FROM skipped_steps) > 0
+    )
+    SELECT COUNT(*)::int INTO v_processed_count FROM skipped_steps;
+
+    -- ==========================================
+    -- PHASE 1c: HANDLE SKIP-CASCADE CONDITIONS
+    -- ==========================================
+    -- Call _cascade_force_skip_steps for each step with unmet condition and whenUnmet='skip-cascade'.
+    -- Process in topological order; _cascade_force_skip_steps is idempotent.
+    PERFORM pgflow._cascade_force_skip_steps(cascade_resolve_conditions.run_id, ready_step.step_slug, 'condition_unmet')
+    FROM pgflow.step_states AS ready_step
+    JOIN pgflow.steps AS step
+      ON step.flow_slug = ready_step.flow_slug
+     AND step.step_slug = ready_step.step_slug
+    LEFT JOIN LATERAL (
+      SELECT jsonb_object_agg(dep_state.step_slug, dep_state.output) AS deps_output
+      FROM pgflow.deps dep
+      JOIN pgflow.step_states dep_state
+        ON dep_state.run_id = cascade_resolve_conditions.run_id
+       AND dep_state.step_slug = dep.dep_slug
+       AND dep_state.status = 'completed' -- Only completed deps (not skipped)
+      WHERE dep.flow_slug = ready_step.flow_slug
+        AND dep.step_slug = ready_step.step_slug
+    ) AS agg_deps ON step.deps_count > 0
+    WHERE ready_step.run_id = cascade_resolve_conditions.run_id
+      AND ready_step.status = 'created'
+      AND ready_step.remaining_deps = 0
+      AND (step.required_input_pattern IS NOT NULL OR step.forbidden_input_pattern IS NOT NULL)
+      AND step.when_unmet = 'skip-cascade'
+      -- Condition is NOT met when: (if fails) OR (ifNot fails)
+      AND NOT (
+        (step.required_input_pattern IS NULL OR
+         CASE WHEN step.deps_count = 0 THEN v_run_input ELSE COALESCE(agg_deps.deps_output, '{}'::jsonb) END @> step.required_input_pattern)
+        AND
+        (step.forbidden_input_pattern IS NULL OR
+         NOT (CASE WHEN step.deps_count = 0 THEN v_run_input ELSE COALESCE(agg_deps.deps_output, '{}'::jsonb) END @> step.forbidden_input_pattern))
+      )
+    ORDER BY step.step_index;
+
+    -- Check if run was failed during cascade (e.g., if _cascade_force_skip_steps triggers fail)
+    SELECT r.status INTO v_run_status
+    FROM pgflow.runs r
+    WHERE r.run_id = cascade_resolve_conditions.run_id;
+
+    IF v_run_status IN ('failed', 'completed') THEN
+      RETURN v_run_status != 'failed';
+    END IF;
+
+    -- Exit loop if no steps were processed in this iteration
+    EXIT WHEN v_processed_count = 0;
+  END LOOP;
+
+  RETURN true;
+END;
+$$;
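Reviewer note: to make the evaluation input concrete — for root steps the patterns are tested against the run input, and for dependent steps against an object of completed dependency outputs keyed by dep slug. A self-contained sketch of that aggregation with hypothetical outputs:

```sql
with dep_outputs(step_slug, output) as (
  values ('load_user',  '{"plan": "free"}'::jsonb),
         ('load_flags', '{"beta": true}'::jsonb)
)
-- same shape the function builds with jsonb_object_agg before the @> test
select jsonb_object_agg(step_slug, output) @> '{"load_user": {"plan": "free"}}'::jsonb
from dep_outputs;
-- => true: the required pattern is contained in {"load_user": {...}, "load_flags": {...}}
```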
diff --git a/pkgs/core/schemas/0100_function_compare_flow_shapes.sql b/pkgs/core/schemas/0100_function_compare_flow_shapes.sql
index 193d8c564..b91839faf 100644
--- a/pkgs/core/schemas/0100_function_compare_flow_shapes.sql
+++ b/pkgs/core/schemas/0100_function_compare_flow_shapes.sql
@@ -107,6 +107,60 @@ BEGIN
         )
       );
     END IF;
+
+    -- Compare whenUnmet (structural - affects DAG execution semantics)
+    IF v_local_step->>'whenUnmet' != v_db_step->>'whenUnmet' THEN
+      v_differences := array_append(
+        v_differences,
+        format(
+          $$Step at index %s: whenUnmet differs '%s' vs '%s'$$,
+          v_idx,
+          v_local_step->>'whenUnmet',
+          v_db_step->>'whenUnmet'
+        )
+      );
+    END IF;
+
+    -- Compare whenFailed (structural - affects DAG execution semantics)
+    IF v_local_step->>'whenFailed' != v_db_step->>'whenFailed' THEN
+      v_differences := array_append(
+        v_differences,
+        format(
+          $$Step at index %s: whenFailed differs '%s' vs '%s'$$,
+          v_idx,
+          v_local_step->>'whenFailed',
+          v_db_step->>'whenFailed'
+        )
+      );
+    END IF;
+
+    -- Compare requiredInputPattern (structural - affects DAG execution semantics)
+    -- Uses -> (jsonb) not ->> (text) to properly compare wrapper objects
+    IF v_local_step->'requiredInputPattern' IS DISTINCT FROM v_db_step->'requiredInputPattern' THEN
+      v_differences := array_append(
+        v_differences,
+        format(
+          $$Step at index %s: requiredInputPattern differs '%s' vs '%s'$$,
+          v_idx,
+          v_local_step->'requiredInputPattern',
+          v_db_step->'requiredInputPattern'
+        )
+      );
+    END IF;
+
+    -- Compare forbiddenInputPattern (structural - affects DAG execution semantics)
+    -- Uses -> (jsonb) not ->> (text) to properly compare wrapper objects
+    IF v_local_step->'forbiddenInputPattern' IS DISTINCT FROM v_db_step->'forbiddenInputPattern' THEN
+      v_differences := array_append(
+        v_differences,
+        format(
+          $$Step at index %s: forbiddenInputPattern differs '%s' vs '%s'$$,
+          v_idx,
+          v_local_step->'forbiddenInputPattern',
+          v_db_step->'forbiddenInputPattern'
+        )
+      );
+    END IF;
   END IF;
 END LOOP;
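Reviewer note: a one-query demonstration of why the comparisons above use `->` rather than `->>` — with `->>` a JSON null and an absent key both collapse to SQL NULL, so IS DISTINCT FROM could miss drift between shapes, while `->` keeps the jsonb values apart:

```sql
select ('{"p": null}'::jsonb)->>'p' is distinct from ('{}'::jsonb)->>'p';  -- false: both are SQL NULL
select ('{"p": null}'::jsonb)->'p'  is distinct from ('{}'::jsonb)->'p';   -- true: jsonb null vs missing key
```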
diff --git a/pkgs/core/schemas/0100_function_complete_task.sql b/pkgs/core/schemas/0100_function_complete_task.sql
index 9daa172dc..521e8a3af 100644
--- a/pkgs/core/schemas/0100_function_complete_task.sql
+++ b/pkgs/core/schemas/0100_function_complete_task.sql
@@ -312,6 +312,26 @@ IF v_step_state.status = 'completed' THEN
     false
   );
 
+  -- THEN evaluate conditions on newly-ready dependent steps
+  -- This must happen before cascade_complete_taskless_steps so that
+  -- skipped steps can set initial_tasks=0 for their map dependents
+  IF NOT pgflow.cascade_resolve_conditions(complete_task.run_id) THEN
+    -- Run was failed due to a condition with when_unmet='fail'
+    -- Archive the current task's message before returning
+    PERFORM pgmq.archive(
+      (SELECT r.flow_slug FROM pgflow.runs r WHERE r.run_id = complete_task.run_id),
+      (SELECT st.message_id FROM pgflow.step_tasks st
+       WHERE st.run_id = complete_task.run_id
+         AND st.step_slug = complete_task.step_slug
+         AND st.task_index = complete_task.task_index)
+    );
+    RETURN QUERY SELECT * FROM pgflow.step_tasks
+    WHERE pgflow.step_tasks.run_id = complete_task.run_id
+      AND pgflow.step_tasks.step_slug = complete_task.step_slug
+      AND pgflow.step_tasks.task_index = complete_task.task_index;
+    RETURN;
+  END IF;
+
   -- THEN cascade complete any taskless steps that are now ready
   -- This ensures dependent children broadcast AFTER their parent
   PERFORM pgflow.cascade_complete_taskless_steps(complete_task.run_id);
diff --git a/pkgs/core/schemas/0100_function_create_flow_from_shape.sql b/pkgs/core/schemas/0100_function_create_flow_from_shape.sql
index daf2dc548..51b055482 100644
--- a/pkgs/core/schemas/0100_function_create_flow_from_shape.sql
+++ b/pkgs/core/schemas/0100_function_create_flow_from_shape.sql
@@ -47,7 +47,19 @@ BEGIN
       base_delay => (v_step_options->>'baseDelay')::int,
       timeout => (v_step_options->>'timeout')::int,
       start_delay => (v_step_options->>'startDelay')::int,
-      step_type => v_step->>'stepType'
+      step_type => v_step->>'stepType',
+      when_unmet => v_step->>'whenUnmet',
+      when_failed => v_step->>'whenFailed',
+      required_input_pattern => CASE
+        WHEN (v_step->'requiredInputPattern'->>'defined')::boolean
+        THEN v_step->'requiredInputPattern'->'value'
+        ELSE NULL
+      END,
+      forbidden_input_pattern => CASE
+        WHEN (v_step->'forbiddenInputPattern'->>'defined')::boolean
+        THEN v_step->'forbiddenInputPattern'->'value'
+        ELSE NULL
+      END
     );
   END LOOP;
 END;
diff --git a/pkgs/core/schemas/0100_function_fail_task.sql b/pkgs/core/schemas/0100_function_fail_task.sql
index cab0cd846..b8840d28b 100644
--- a/pkgs/core/schemas/0100_function_fail_task.sql
+++ b/pkgs/core/schemas/0100_function_fail_task.sql
@@ -12,6 +12,10 @@ as $$
 DECLARE
   v_run_failed boolean;
   v_step_failed boolean;
+  v_step_skipped boolean;
+  v_when_failed text;
+  v_task_exhausted boolean; -- True if task has exhausted retries
+  v_flow_slug_for_deps text; -- Used for decrementing remaining_deps on plain skip
 begin
 
 -- If run is already failed, no retries allowed
@@ -62,7 +66,8 @@ flow_info AS (
 config AS (
   SELECT
     COALESCE(s.opt_max_attempts, f.opt_max_attempts) AS opt_max_attempts,
-    COALESCE(s.opt_base_delay, f.opt_base_delay) AS opt_base_delay
+    COALESCE(s.opt_base_delay, f.opt_base_delay) AS opt_base_delay,
+    s.when_failed
   FROM pgflow.steps s
   JOIN pgflow.flows f ON f.flow_slug = s.flow_slug
  JOIN flow_info fi ON fi.flow_slug = s.flow_slug
@@ -90,27 +95,53 @@ fail_or_retry_task as (
     AND task.status = 'started'
   RETURNING *
 ),
+-- Determine if task exhausted retries and get when_failed mode
+task_status AS (
+  SELECT
+    (select status from fail_or_retry_task) AS new_task_status,
+    (select when_failed from config) AS when_failed_mode,
+    -- Task is exhausted when it's failed (no more retries)
+    ((select status from fail_or_retry_task) = 'failed') AS is_exhausted
+),
 maybe_fail_step AS (
   UPDATE pgflow.step_states
   SET
+    -- Status logic:
+    -- - If task not exhausted (retrying): keep current status
+    -- - If exhausted AND when_failed='fail': set to 'failed'
+    -- - If exhausted AND when_failed IN ('skip', 'skip-cascade'): set to 'skipped'
     status = CASE
-      WHEN (select fail_or_retry_task.status from fail_or_retry_task) = 'failed' THEN 'failed'
-      ELSE pgflow.step_states.status
+      WHEN NOT (select is_exhausted from task_status) THEN pgflow.step_states.status
+      WHEN (select when_failed_mode from task_status) = 'fail' THEN 'failed'
+      ELSE 'skipped' -- skip or skip-cascade
     END,
     failed_at = CASE
-      WHEN (select fail_or_retry_task.status from fail_or_retry_task) = 'failed' THEN now()
+      WHEN (select is_exhausted from task_status) AND (select when_failed_mode from task_status) = 'fail' THEN now()
       ELSE NULL
     END,
     error_message = CASE
-      WHEN (select fail_or_retry_task.status from fail_or_retry_task) = 'failed' THEN fail_task.error_message
+      WHEN (select is_exhausted from task_status) THEN fail_task.error_message
      ELSE NULL
-    END
+    END,
+    skip_reason = CASE
+      WHEN (select is_exhausted from task_status) AND (select when_failed_mode from task_status) IN ('skip', 'skip-cascade') THEN 'handler_failed'
+      ELSE pgflow.step_states.skip_reason
+    END,
+    skipped_at = CASE
+      WHEN (select is_exhausted from task_status) AND (select when_failed_mode from task_status) IN ('skip', 'skip-cascade') THEN now()
+      ELSE pgflow.step_states.skipped_at
+    END,
+    -- Clear remaining_tasks when skipping (required by remaining_tasks_state_consistency constraint)
+    remaining_tasks = CASE
+      WHEN (select is_exhausted from task_status) AND (select when_failed_mode from task_status) IN ('skip', 'skip-cascade') THEN NULL
+      ELSE pgflow.step_states.remaining_tasks
+    END
   FROM fail_or_retry_task
   WHERE pgflow.step_states.run_id = fail_task.run_id
     AND pgflow.step_states.step_slug = fail_task.step_slug
   RETURNING pgflow.step_states.*
 )
--- Update run status
+-- Update run status: only fail when when_failed='fail' and step was failed
 UPDATE pgflow.runs
 SET status = CASE
       WHEN (select status from maybe_fail_step) = 'failed' THEN 'failed'
@@ -119,10 +150,27 @@ SET status = CASE
     failed_at = CASE
       WHEN (select status from maybe_fail_step) = 'failed' THEN now()
       ELSE NULL
-    END
+    END,
+    -- Decrement remaining_steps when step was skipped (not failed, run continues)
+    remaining_steps = CASE
+      WHEN (select status from maybe_fail_step) = 'skipped' THEN pgflow.runs.remaining_steps - 1
+      ELSE pgflow.runs.remaining_steps
+    END
 WHERE pgflow.runs.run_id = fail_task.run_id
 RETURNING (status = 'failed') INTO v_run_failed;
 
+-- Capture when_failed mode and check if step was skipped for later processing
+SELECT s.when_failed INTO v_when_failed
+FROM pgflow.steps s
+JOIN pgflow.runs r ON r.flow_slug = s.flow_slug
+WHERE r.run_id = fail_task.run_id
+  AND s.step_slug = fail_task.step_slug;
+
+SELECT (status = 'skipped') INTO v_step_skipped
+FROM pgflow.step_states
+WHERE pgflow.step_states.run_id = fail_task.run_id
+  AND pgflow.step_states.step_slug = fail_task.step_slug;
+
 -- Check if step failed by querying the step_states table
 SELECT (status = 'failed') INTO v_step_failed
 FROM pgflow.step_states
@@ -146,6 +194,60 @@ IF v_step_failed THEN
   );
 END IF;
 
+-- Handle step skipping (when_failed = 'skip' or 'skip-cascade')
+IF v_step_skipped THEN
+  -- Send broadcast event for step skipped
+  PERFORM realtime.send(
+    jsonb_build_object(
+      'event_type', 'step:skipped',
+      'run_id', fail_task.run_id,
+      'step_slug', fail_task.step_slug,
+      'status', 'skipped',
+      'skip_reason', 'handler_failed',
+      'error_message', fail_task.error_message,
+      'skipped_at', now()
+    ),
+    concat('step:', fail_task.step_slug, ':skipped'),
+    concat('pgflow:run:', fail_task.run_id),
+    false
+  );
+
+  -- For skip-cascade: cascade skip to all downstream dependents
+  IF v_when_failed = 'skip-cascade' THEN
+    PERFORM pgflow._cascade_force_skip_steps(fail_task.run_id, fail_task.step_slug, 'handler_failed');
+  ELSE
+    -- For plain 'skip': decrement remaining_deps on dependent steps
+    -- (This mirrors the pattern in cascade_resolve_conditions.sql for when_unmet='skip')
+    SELECT flow_slug INTO v_flow_slug_for_deps
+    FROM pgflow.runs
+    WHERE pgflow.runs.run_id = fail_task.run_id;
+
+    UPDATE pgflow.step_states AS child_state
+    SET remaining_deps = child_state.remaining_deps - 1,
+        -- If child is a map step and this skipped step is its only dependency,
+        -- set initial_tasks = 0 (skipped dep = empty array)
+        initial_tasks = CASE
+          WHEN child_step.step_type = 'map' AND child_step.deps_count = 1 THEN 0
+          ELSE child_state.initial_tasks
+        END
+    FROM pgflow.deps AS dep
+    JOIN pgflow.steps AS child_step ON child_step.flow_slug = dep.flow_slug AND child_step.step_slug = dep.step_slug
+    WHERE child_state.run_id = fail_task.run_id
+      AND dep.flow_slug = v_flow_slug_for_deps
+      AND dep.dep_slug = fail_task.step_slug
+      AND child_state.step_slug = dep.step_slug;
+
+    -- Start any steps that became ready after decrementing remaining_deps
+    PERFORM pgflow.start_ready_steps(fail_task.run_id);
+
+    -- Auto-complete taskless steps (e.g., map steps with initial_tasks=0 from skipped dep)
+    PERFORM pgflow.cascade_complete_taskless_steps(fail_task.run_id);
+  END IF;
+
+  -- Try to complete the run (remaining_steps may now be 0)
+  PERFORM pgflow.maybe_complete_run(fail_task.run_id);
+END IF;
+
 -- Send broadcast event for run failure if the run was failed
 IF v_run_failed THEN
   DECLARE
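Reviewer note: summarizing the outcome matrix this diff implements for a task that exhausts its retries (this mirrors what the fail_task_when_failed tests assert; the run id is hypothetical):

```sql
-- when_failed = 'fail':         step 'failed', run 'failed'
-- when_failed = 'skip':         step 'skipped' with skip_reason = 'handler_failed', run continues
-- when_failed = 'skip-cascade': as 'skip', plus all downstream dependents 'skipped'
select step_slug, status, skip_reason
from pgflow.step_states
where run_id = '00000000-0000-0000-0000-000000000000'::uuid
order by step_slug;
```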
diff --git a/pkgs/core/schemas/0100_function_get_flow_shape.sql b/pkgs/core/schemas/0100_function_get_flow_shape.sql
index a725c820c..985f0d4d1 100644
--- a/pkgs/core/schemas/0100_function_get_flow_shape.sql
+++ b/pkgs/core/schemas/0100_function_get_flow_shape.sql
@@ -22,7 +22,19 @@ as $$
           AND dep.step_slug = step.step_slug
         ),
         '[]'::jsonb
-      )
+      ),
+      'whenUnmet', step.when_unmet,
+      'whenFailed', step.when_failed,
+      'requiredInputPattern', CASE
+        WHEN step.required_input_pattern IS NULL
+        THEN '{"defined": false}'::jsonb
+        ELSE jsonb_build_object('defined', true, 'value', step.required_input_pattern)
+      END,
+      'forbiddenInputPattern', CASE
+        WHEN step.forbidden_input_pattern IS NULL
+        THEN '{"defined": false}'::jsonb
+        ELSE jsonb_build_object('defined', true, 'value', step.forbidden_input_pattern)
+      END
     )
     ORDER BY step.step_index
   ),
diff --git a/pkgs/core/schemas/0100_function_start_flow.sql b/pkgs/core/schemas/0100_function_start_flow.sql
index f0a2bfed3..abae94696 100644
--- a/pkgs/core/schemas/0100_function_start_flow.sql
+++ b/pkgs/core/schemas/0100_function_start_flow.sql
@@ -110,6 +110,14 @@ PERFORM realtime.send(
   false
 );
 
+-- ---------- Evaluate conditions on ready steps ----------
+-- Skip steps with unmet conditions, propagate to dependents
+IF NOT pgflow.cascade_resolve_conditions(v_created_run.run_id) THEN
+  -- Run was failed due to a condition with when_unmet='fail'
+  RETURN QUERY SELECT * FROM pgflow.runs where pgflow.runs.run_id = v_created_run.run_id;
+  RETURN;
+END IF;
+
 -- ---------- Complete taskless steps ----------
 -- Handle empty array maps that should auto-complete
 PERFORM pgflow.cascade_complete_taskless_steps(v_created_run.run_id);
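Reviewer note: an end-to-end sketch of the new early-return path (hypothetical flow: a root step declared with required_input_pattern '{"send_email": true}' and when_unmet => 'fail'; the start_flow signature is assumed from its usage elsewhere in pgflow). Starting the flow with non-matching input should yield a run that is already failed:

```sql
select run_id, status
from pgflow.start_flow('onboarding', '{"send_email": false}'::jsonb);
-- => status = 'failed' before any task was queued
```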
diff --git a/pkgs/core/schemas/0100_function_start_ready_steps.sql b/pkgs/core/schemas/0100_function_start_ready_steps.sql
index 5e82a3659..a70ca9f26 100644
--- a/pkgs/core/schemas/0100_function_start_ready_steps.sql
+++ b/pkgs/core/schemas/0100_function_start_ready_steps.sql
@@ -3,69 +3,25 @@
 returns void
 language plpgsql
 set search_path to ''
 as $$
-begin
+BEGIN
 -- ==========================================
--- GUARD: No mutations on failed runs
+-- GUARD: No mutations on terminal runs
 -- ==========================================
-IF EXISTS (SELECT 1 FROM pgflow.runs WHERE pgflow.runs.run_id = start_ready_steps.run_id AND pgflow.runs.status = 'failed') THEN
+IF EXISTS (
+  SELECT 1 FROM pgflow.runs
+  WHERE pgflow.runs.run_id = start_ready_steps.run_id
+    AND pgflow.runs.status IN ('failed', 'completed')
+) THEN
   RETURN;
 END IF;
 
 -- ==========================================
--- HANDLE EMPTY ARRAY MAPS (initial_tasks = 0)
--- ==========================================
--- These complete immediately without spawning tasks
-WITH empty_map_steps AS (
-  SELECT step_state.*
-  FROM pgflow.step_states AS step_state
-  JOIN pgflow.steps AS step
-    ON step.flow_slug = step_state.flow_slug
-   AND step.step_slug = step_state.step_slug
-  WHERE step_state.run_id = start_ready_steps.run_id
-    AND step_state.status = 'created'
-    AND step_state.remaining_deps = 0
-    AND step.step_type = 'map'
-    AND step_state.initial_tasks = 0
-  ORDER BY step_state.step_slug
-  FOR UPDATE OF step_state
-),
--- ---------- Complete empty map steps ----------
-completed_empty_steps AS (
-  UPDATE pgflow.step_states
-  SET status = 'completed',
-      started_at = now(),
-      completed_at = now(),
-      remaining_tasks = 0,
-      output = '[]'::jsonb -- Empty map produces empty array output
-  FROM empty_map_steps
-  WHERE pgflow.step_states.run_id = start_ready_steps.run_id
-    AND pgflow.step_states.step_slug = empty_map_steps.step_slug
-  RETURNING
-    pgflow.step_states.*,
-    -- Broadcast step:completed event atomically with the UPDATE
-    -- Using RETURNING ensures this executes during row processing
-    -- and cannot be optimized away by the query planner
-    realtime.send(
-      jsonb_build_object(
-        'event_type', 'step:completed',
-        'run_id', pgflow.step_states.run_id,
-        'step_slug', pgflow.step_states.step_slug,
-        'status', 'completed',
-        'started_at', pgflow.step_states.started_at,
-        'completed_at', pgflow.step_states.completed_at,
-        'remaining_tasks', 0,
-        'remaining_deps', 0,
-        'output', pgflow.step_states.output -- Use stored output instead of hardcoded []
-      ),
-      concat('step:', pgflow.step_states.step_slug, ':completed'),
-      concat('pgflow:run:', pgflow.step_states.run_id),
-      false
-    ) as _broadcast_completed -- Prefix with _ to indicate internal use only
-),
-
--- ==========================================
--- HANDLE NORMAL STEPS (initial_tasks > 0)
+-- PHASE 1: START READY STEPS
 -- ==========================================
+-- NOTE: Condition evaluation and empty map handling are done by
+-- cascade_resolve_conditions() and cascade_complete_taskless_steps()
+-- which are called before this function.
+WITH
 -- ---------- Find ready steps ----------
 -- Steps with no remaining deps and known task count
 ready_steps AS (
@@ -74,14 +30,8 @@ ready_steps AS (
   WHERE step_state.run_id = start_ready_steps.run_id
     AND step_state.status = 'created'
     AND step_state.remaining_deps = 0
-    AND step_state.initial_tasks IS NOT NULL -- NEW: Cannot start with unknown count
-    AND step_state.initial_tasks > 0 -- Don't start taskless steps
-    -- Exclude empty map steps already handled
-    AND NOT EXISTS (
-      SELECT 1 FROM empty_map_steps
-      WHERE empty_map_steps.run_id = step_state.run_id
-        AND empty_map_steps.step_slug = step_state.step_slug
-    )
+    AND step_state.initial_tasks IS NOT NULL -- Cannot start with unknown count
+    AND step_state.initial_tasks > 0 -- Don't start taskless steps (handled by cascade_complete_taskless_steps)
   ORDER BY step_state.step_slug
   FOR UPDATE
 ),
@@ -115,7 +65,7 @@ started_step_states AS (
 ),
 
 -- ==========================================
--- TASK GENERATION AND QUEUE MESSAGES
+-- PHASE 2: TASK GENERATION AND QUEUE MESSAGES
 -- ==========================================
 -- ---------- Generate tasks and batch messages ----------
 -- Single steps: 1 task (index 0)
@@ -136,8 +86,8 @@ message_batches AS (
   ) AS messages,
   array_agg(task_idx.task_index ORDER BY task_idx.task_index) AS task_indices
   FROM started_step_states AS started_step
-  JOIN pgflow.steps AS step 
-    ON step.flow_slug = started_step.flow_slug 
+  JOIN pgflow.steps AS step
+    ON step.flow_slug = started_step.flow_slug
    AND step.step_slug = started_step.step_slug
   -- Generate task indices from 0 to initial_tasks-1
   CROSS JOIN LATERAL generate_series(0, started_step.initial_tasks - 1) AS task_idx(task_index)
@@ -159,7 +109,7 @@ sent_messages AS (
 )
 
 -- ==========================================
--- RECORD TASKS IN DATABASE
+-- PHASE 3: RECORD TASKS IN DATABASE
 -- ==========================================
 INSERT INTO pgflow.step_tasks (flow_slug, run_id, step_slug, task_index, message_id)
 SELECT
@@ -170,13 +120,5 @@ SELECT
   sent_messages.msg_id
 FROM sent_messages;
 
--- ==========================================
--- BROADCAST REALTIME EVENTS
--- ==========================================
--- Note: Both step:completed events for empty maps and step:started events
--- are now broadcast atomically in their respective CTEs using RETURNING pattern.
--- This ensures correct ordering, prevents duplicate broadcasts, and guarantees
--- that events are sent for exactly the rows that were updated.
-
-end;
+END;
 $$;
diff --git a/pkgs/core/schemas/0120_function_start_tasks.sql b/pkgs/core/schemas/0120_function_start_tasks.sql
index 9fbe69a49..627497e3a 100644
--- a/pkgs/core/schemas/0120_function_start_tasks.sql
+++ b/pkgs/core/schemas/0120_function_start_tasks.sql
@@ -53,7 +53,8 @@ as $$
       join pgflow.deps dep
         on dep.flow_slug = st.flow_slug and dep.step_slug = st.step_slug
       join pgflow.step_states dep_state
         on dep_state.run_id = st.run_id and
-          dep_state.step_slug = dep.dep_slug
+          dep_state.step_slug = dep.dep_slug and
+          dep_state.status = 'completed' -- Only include completed deps (not skipped)
    ),
    deps_outputs as (
      select
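Reviewer note: the one-line start_tasks filter above means a skipped dependency is simply absent from the handler's input object rather than present as null. A standalone sketch with hypothetical dependency states:

```sql
with dep_states(step_slug, status, output) as (
  values ('load_user',  'completed', '{"id": 7}'::jsonb),
         ('load_flags', 'skipped',   null)
)
-- only completed deps are aggregated into the task input
select jsonb_object_agg(step_slug, output)
from dep_states
where status = 'completed';
-- => {"load_user": {"id": 7}}  (no "load_flags" key)
```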
diff --git a/pkgs/core/src/database-types.ts b/pkgs/core/src/database-types.ts
index ffcdb94a0..690700c01 100644
--- a/pkgs/core/src/database-types.ts
+++ b/pkgs/core/src/database-types.ts
@@ -132,6 +132,8 @@ export type Database = {
           remaining_deps: number
           remaining_tasks: number | null
           run_id: string
+          skip_reason: string | null
+          skipped_at: string | null
           started_at: string | null
           status: string
           step_slug: string
@@ -147,6 +149,8 @@ export type Database = {
           remaining_deps?: number
           remaining_tasks?: number | null
           run_id: string
+          skip_reason?: string | null
+          skipped_at?: string | null
           started_at?: string | null
           status?: string
           step_slug: string
@@ -162,6 +166,8 @@ export type Database = {
           remaining_deps?: number
           remaining_tasks?: number | null
           run_id?: string
+          skip_reason?: string | null
+          skipped_at?: string | null
           started_at?: string | null
           status?: string
           step_slug?: string
@@ -284,37 +290,49 @@ export type Database = {
           created_at: string
           deps_count: number
           flow_slug: string
+          forbidden_input_pattern: Json | null
           opt_base_delay: number | null
           opt_max_attempts: number | null
           opt_start_delay: number | null
           opt_timeout: number | null
+          required_input_pattern: Json | null
           step_index: number
           step_slug: string
           step_type: string
+          when_failed: string
+          when_unmet: string
         }
         Insert: {
           created_at?: string
           deps_count?: number
           flow_slug: string
+          forbidden_input_pattern?: Json | null
          opt_base_delay?: number | null
          opt_max_attempts?: number | null
          opt_start_delay?: number | null
          opt_timeout?: number | null
+          required_input_pattern?: Json | null
          step_index?: number
          step_slug: string
          step_type?: string
+          when_failed?: string
+          when_unmet?: string
        }
        Update: {
          created_at?: string
          deps_count?: number
          flow_slug?: string
+          forbidden_input_pattern?: Json | null
          opt_base_delay?: number | null
          opt_max_attempts?: number | null
          opt_start_delay?: number | null
          opt_timeout?: number | null
+          required_input_pattern?: Json | null
          step_index?: number
          step_slug?: string
          step_type?: string
+          when_failed?: string
+          when_unmet?: string
        }
        Relationships: [
          {
@@ -388,6 +406,10 @@ export type Database = {
      [_ in never]: never
    }
    Functions: {
+      _cascade_force_skip_steps: {
+        Args: { run_id: string; skip_reason: string; step_slug: string }
+        Returns: number
+      }
      _compare_flow_shapes: {
        Args: { p_db: Json; p_local: Json }
        Returns: string[]
      }
@@ -402,23 +424,31 @@ export type Database = {
          base_delay?: number
          deps_slugs?: string[]
          flow_slug: string
+          forbidden_input_pattern?: Json
          max_attempts?: number
+          required_input_pattern?: Json
          start_delay?: number
          step_slug: string
          step_type?: string
          timeout?: number
+          when_failed?: string
+          when_unmet?: string
        }
        Returns: {
          created_at: string
          deps_count: number
          flow_slug: string
+          forbidden_input_pattern: Json | null
          opt_base_delay: number | null
          opt_max_attempts: number | null
          opt_start_delay: number | null
          opt_timeout: number | null
+          required_input_pattern: Json | null
          step_index: number
          step_slug: string
          step_type: string
+          when_failed: string
+          when_unmet: string
        }
        SetofOptions: {
          from: "*"
@@ -435,6 +465,7 @@ export type Database = {
        Args: { run_id: string }
        Returns: number
      }
+      cascade_resolve_conditions: { Args: { run_id: string }; Returns: boolean }
      cleanup_ensure_workers_logs: {
        Args: { retention_hours?: number }
        Returns: {
diff --git a/pkgs/core/supabase/migrations/20260121095914_pgflow_step_conditions.sql b/pkgs/core/supabase/migrations/20260121095914_pgflow_step_conditions.sql
new file mode 100644
index 000000000..ae7702559
--- /dev/null
+++ b/pkgs/core/supabase/migrations/20260121095914_pgflow_step_conditions.sql
@@ -0,0 +1,1798 @@
+-- Modify "step_states" table
+ALTER TABLE "pgflow"."step_states" DROP CONSTRAINT "completed_at_or_failed_at", DROP CONSTRAINT "remaining_tasks_state_consistency", ADD CONSTRAINT "remaining_tasks_state_consistency" CHECK ((remaining_tasks IS NULL) OR (status <> ALL (ARRAY['created'::text, 'skipped'::text]))), DROP CONSTRAINT "status_is_valid", ADD CONSTRAINT "status_is_valid" CHECK (status = ANY (ARRAY['created'::text, 'started'::text, 'completed'::text, 'failed'::text, 'skipped'::text])), ADD CONSTRAINT "completed_at_or_failed_at_or_skipped_at" CHECK (((
+CASE
+    WHEN (completed_at IS NOT NULL) THEN 1
+    ELSE 0
+END +
+CASE
+    WHEN (failed_at IS NOT NULL) THEN 1
+    ELSE 0
+END) +
+CASE
+    WHEN (skipped_at IS NOT NULL) THEN 1
+    ELSE 0
+END) <= 1), ADD CONSTRAINT "skip_reason_matches_status" CHECK (((status = 'skipped'::text) AND (skip_reason IS NOT NULL)) OR ((status <> 'skipped'::text) AND (skip_reason IS NULL))), ADD CONSTRAINT "skipped_at_is_after_created_at" CHECK ((skipped_at IS NULL) OR (skipped_at >= created_at)), ADD COLUMN "skip_reason" text NULL, ADD COLUMN "skipped_at" timestamptz NULL;
+-- Create index "idx_step_states_skipped" to table: "step_states"
+CREATE INDEX "idx_step_states_skipped" ON "pgflow"."step_states" ("run_id", "step_slug") WHERE (status = 'skipped'::text);
+-- Modify "steps" table
+ALTER TABLE "pgflow"."steps" ADD CONSTRAINT "when_failed_is_valid" CHECK (when_failed = ANY (ARRAY['fail'::text, 'skip'::text, 'skip-cascade'::text])), ADD CONSTRAINT "when_unmet_is_valid" CHECK (when_unmet = ANY (ARRAY['fail'::text, 'skip'::text, 'skip-cascade'::text])), ADD COLUMN "required_input_pattern" jsonb NULL, ADD COLUMN "forbidden_input_pattern" jsonb NULL, ADD COLUMN "when_unmet" text NOT NULL DEFAULT 'skip', ADD COLUMN "when_failed" text NOT NULL DEFAULT 'fail';
+-- Modify "_compare_flow_shapes" function
+CREATE OR REPLACE FUNCTION "pgflow"."_compare_flow_shapes" ("p_local" jsonb, "p_db" jsonb) RETURNS text[] LANGUAGE plpgsql STABLE SET "search_path" = '' AS $BODY$
+DECLARE
+  v_differences text[] := '{}';
+  v_local_steps jsonb;
+  v_db_steps jsonb;
+  v_local_count int;
+  v_db_count int;
+  v_max_count int;
+  v_idx int;
+  v_local_step jsonb;
+  v_db_step jsonb;
+  v_local_deps text;
+  v_db_deps text;
+BEGIN
+  v_local_steps := p_local->'steps';
+  v_db_steps := p_db->'steps';
+  v_local_count := jsonb_array_length(COALESCE(v_local_steps, '[]'::jsonb));
+  v_db_count := jsonb_array_length(COALESCE(v_db_steps, '[]'::jsonb));
+
+  -- Compare step counts
+  IF v_local_count != v_db_count THEN
+    v_differences := array_append(
+      v_differences,
+      format('Step count differs: %s vs %s', v_local_count, v_db_count)
+    );
+  END IF;
+
+  -- Compare steps by index
+  v_max_count := GREATEST(v_local_count, v_db_count);
+
+  FOR v_idx IN 0..(v_max_count - 1) LOOP
+    v_local_step := v_local_steps->v_idx;
+    v_db_step := v_db_steps->v_idx;
+
+    IF v_local_step IS NULL THEN
+      v_differences := array_append(
+        v_differences,
+        format(
+          $$Step at index %s: missing in first shape (second has '%s')$$,
+          v_idx,
+          v_db_step->>'slug'
+        )
+      );
+    ELSIF v_db_step IS NULL THEN
+      v_differences := array_append(
+        v_differences,
+        format(
+          $$Step at index %s: missing in second shape (first has '%s')$$,
+          v_idx,
+          v_local_step->>'slug'
+        )
+      );
+    ELSE
+      -- Compare slug
+      IF v_local_step->>'slug' != v_db_step->>'slug' THEN
+        v_differences := array_append(
+          v_differences,
+          format(
+            $$Step at index %s: slug differs '%s' vs '%s'$$,
+            v_idx,
+            v_local_step->>'slug',
+            v_db_step->>'slug'
+          )
+        );
+      END IF;
+
+      -- Compare step type
+      IF v_local_step->>'stepType' != v_db_step->>'stepType' THEN
+        v_differences := array_append(
+          v_differences,
+          format(
+            $$Step at index %s: type differs '%s' vs '%s'$$,
+            v_idx,
+            v_local_step->>'stepType',
+            v_db_step->>'stepType'
+          )
+        );
+      END IF;
+
+      -- Compare dependencies (convert arrays to comma-separated strings)
+      SELECT string_agg(dep, ', ' ORDER BY dep)
+      INTO v_local_deps
+      FROM jsonb_array_elements_text(COALESCE(v_local_step->'dependencies', '[]'::jsonb)) AS dep;
+
+      SELECT string_agg(dep, ', ' ORDER BY dep)
+      INTO v_db_deps
+      FROM jsonb_array_elements_text(COALESCE(v_db_step->'dependencies', '[]'::jsonb)) AS dep;
+
+      IF COALESCE(v_local_deps, '') != COALESCE(v_db_deps, '') THEN
+        v_differences := array_append(
+          v_differences,
+          format(
+            $$Step at index %s: dependencies differ [%s] vs [%s]$$,
+            v_idx,
+            COALESCE(v_local_deps, ''),
+            COALESCE(v_db_deps, '')
+          )
+        );
+      END IF;
+
+      -- Compare whenUnmet (structural - affects DAG execution semantics)
+      IF v_local_step->>'whenUnmet' != v_db_step->>'whenUnmet' THEN
+        v_differences := array_append(
+          v_differences,
+          format(
+            $$Step at index %s: whenUnmet differs '%s' vs '%s'$$,
+            v_idx,
+            v_local_step->>'whenUnmet',
+            v_db_step->>'whenUnmet'
+          )
+        );
+      END IF;
+
+      -- Compare whenFailed (structural - affects DAG execution semantics)
+      IF v_local_step->>'whenFailed' != v_db_step->>'whenFailed' THEN
+        v_differences := array_append(
+          v_differences,
+          format(
+            $$Step at index %s: whenFailed differs '%s' vs '%s'$$,
+            v_idx,
+            v_local_step->>'whenFailed',
+            v_db_step->>'whenFailed'
+          )
+        );
+      END IF;
+
+      -- Compare requiredInputPattern (structural - affects DAG execution semantics)
+      -- Uses -> (jsonb) not ->> (text) to properly compare wrapper objects
+      IF v_local_step->'requiredInputPattern' IS DISTINCT FROM v_db_step->'requiredInputPattern' THEN
+        v_differences := array_append(
+          v_differences,
+          format(
+            $$Step at index %s: requiredInputPattern differs '%s' vs '%s'$$,
+            v_idx,
+            v_local_step->'requiredInputPattern',
+            v_db_step->'requiredInputPattern'
+          )
+        );
+      END IF;
THEN + v_differences := array_append( + v_differences, + format( + $$Step at index %s: forbiddenInputPattern differs '%s' vs '%s'$$, + v_idx, + v_local_step->'forbiddenInputPattern', + v_db_step->'forbiddenInputPattern' + ) + ); + END IF; + END IF; + END LOOP; + + RETURN v_differences; +END; +$BODY$; +-- Create "add_step" function +CREATE FUNCTION "pgflow"."add_step" ("flow_slug" text, "step_slug" text, "deps_slugs" text[] DEFAULT '{}', "max_attempts" integer DEFAULT NULL::integer, "base_delay" integer DEFAULT NULL::integer, "timeout" integer DEFAULT NULL::integer, "start_delay" integer DEFAULT NULL::integer, "step_type" text DEFAULT 'single', "required_input_pattern" jsonb DEFAULT NULL::jsonb, "forbidden_input_pattern" jsonb DEFAULT NULL::jsonb, "when_unmet" text DEFAULT 'skip', "when_failed" text DEFAULT 'fail') RETURNS "pgflow"."steps" LANGUAGE plpgsql SET "search_path" = '' AS $$ +DECLARE + result_step pgflow.steps; + next_idx int; +BEGIN + -- Validate map step constraints + -- Map steps can have either: + -- 0 dependencies (root map - maps over flow input array) + -- 1 dependency (dependent map - maps over dependency output array) + IF COALESCE(add_step.step_type, 'single') = 'map' AND COALESCE(array_length(add_step.deps_slugs, 1), 0) > 1 THEN + RAISE EXCEPTION 'Map step "%" can have at most one dependency, but % were provided: %', + add_step.step_slug, + COALESCE(array_length(add_step.deps_slugs, 1), 0), + array_to_string(add_step.deps_slugs, ', '); + END IF; + + -- Get next step index + SELECT COALESCE(MAX(s.step_index) + 1, 0) INTO next_idx + FROM pgflow.steps s + WHERE s.flow_slug = add_step.flow_slug; + + -- Create the step + INSERT INTO pgflow.steps ( + flow_slug, step_slug, step_type, step_index, deps_count, + opt_max_attempts, opt_base_delay, opt_timeout, opt_start_delay, + required_input_pattern, forbidden_input_pattern, when_unmet, when_failed + ) + VALUES ( + add_step.flow_slug, + add_step.step_slug, + COALESCE(add_step.step_type, 'single'), + next_idx, + COALESCE(array_length(add_step.deps_slugs, 1), 0), + add_step.max_attempts, + add_step.base_delay, + add_step.timeout, + add_step.start_delay, + add_step.required_input_pattern, + add_step.forbidden_input_pattern, + add_step.when_unmet, + add_step.when_failed + ) + ON CONFLICT ON CONSTRAINT steps_pkey + DO UPDATE SET step_slug = EXCLUDED.step_slug + RETURNING * INTO result_step; + + -- Insert dependencies + INSERT INTO pgflow.deps (flow_slug, dep_slug, step_slug) + SELECT add_step.flow_slug, d.dep_slug, add_step.step_slug + FROM unnest(COALESCE(add_step.deps_slugs, '{}')) AS d(dep_slug) + WHERE add_step.deps_slugs IS NOT NULL AND array_length(add_step.deps_slugs, 1) > 0 + ON CONFLICT ON CONSTRAINT deps_pkey DO NOTHING; + + RETURN result_step; +END; +$$; +-- Modify "_create_flow_from_shape" function +CREATE OR REPLACE FUNCTION "pgflow"."_create_flow_from_shape" ("p_flow_slug" text, "p_shape" jsonb) RETURNS void LANGUAGE plpgsql SET "search_path" = '' AS $$ +DECLARE + v_step jsonb; + v_deps text[]; + v_flow_options jsonb; + v_step_options jsonb; +BEGIN + -- Extract flow-level options (may be null) + v_flow_options := p_shape->'options'; + + -- Create the flow with options (NULL = use default) + PERFORM pgflow.create_flow( + p_flow_slug, + (v_flow_options->>'maxAttempts')::int, + (v_flow_options->>'baseDelay')::int, + (v_flow_options->>'timeout')::int + ); + + -- Iterate over steps in order and add each one + FOR v_step IN SELECT * FROM jsonb_array_elements(p_shape->'steps') + LOOP + -- Convert dependencies jsonb array to 
text array + SELECT COALESCE(array_agg(dep), '{}') + INTO v_deps + FROM jsonb_array_elements_text(COALESCE(v_step->'dependencies', '[]'::jsonb)) AS dep; + + -- Extract step options (may be null) + v_step_options := v_step->'options'; + + -- Add the step with options (NULL = use default/inherit) + PERFORM pgflow.add_step( + flow_slug => p_flow_slug, + step_slug => v_step->>'slug', + deps_slugs => v_deps, + max_attempts => (v_step_options->>'maxAttempts')::int, + base_delay => (v_step_options->>'baseDelay')::int, + timeout => (v_step_options->>'timeout')::int, + start_delay => (v_step_options->>'startDelay')::int, + step_type => v_step->>'stepType', + when_unmet => v_step->>'whenUnmet', + when_failed => v_step->>'whenFailed', + required_input_pattern => CASE + WHEN (v_step->'requiredInputPattern'->>'defined')::boolean + THEN v_step->'requiredInputPattern'->'value' + ELSE NULL + END, + forbidden_input_pattern => CASE + WHEN (v_step->'forbiddenInputPattern'->>'defined')::boolean + THEN v_step->'forbiddenInputPattern'->'value' + ELSE NULL + END + ); + END LOOP; +END; +$$; +-- Modify "_get_flow_shape" function +CREATE OR REPLACE FUNCTION "pgflow"."_get_flow_shape" ("p_flow_slug" text) RETURNS jsonb LANGUAGE sql STABLE SET "search_path" = '' AS $$ +SELECT jsonb_build_object( + 'steps', + COALESCE( + jsonb_agg( + jsonb_build_object( + 'slug', step.step_slug, + 'stepType', step.step_type, + 'dependencies', COALESCE( + ( + SELECT jsonb_agg(dep.dep_slug ORDER BY dep.dep_slug) + FROM pgflow.deps AS dep + WHERE dep.flow_slug = step.flow_slug + AND dep.step_slug = step.step_slug + ), + '[]'::jsonb + ), + 'whenUnmet', step.when_unmet, + 'whenFailed', step.when_failed, + 'requiredInputPattern', CASE + WHEN step.required_input_pattern IS NULL + THEN '{"defined": false}'::jsonb + ELSE jsonb_build_object('defined', true, 'value', step.required_input_pattern) + END, + 'forbiddenInputPattern', CASE + WHEN step.forbidden_input_pattern IS NULL + THEN '{"defined": false}'::jsonb + ELSE jsonb_build_object('defined', true, 'value', step.forbidden_input_pattern) + END + ) + ORDER BY step.step_index + ), + '[]'::jsonb + ) + ) + FROM pgflow.steps AS step + WHERE step.flow_slug = p_flow_slug; +$$; +-- Create "_cascade_force_skip_steps" function +CREATE FUNCTION "pgflow"."_cascade_force_skip_steps" ("run_id" uuid, "step_slug" text, "skip_reason" text) RETURNS integer LANGUAGE plpgsql AS $$ +DECLARE + v_flow_slug text; + v_total_skipped int := 0; +BEGIN + -- Get flow_slug for this run + SELECT r.flow_slug INTO v_flow_slug + FROM pgflow.runs r + WHERE r.run_id = _cascade_force_skip_steps.run_id; + + IF v_flow_slug IS NULL THEN + RAISE EXCEPTION 'Run not found: %', _cascade_force_skip_steps.run_id; + END IF; + + -- ========================================== + -- SKIP STEPS IN TOPOLOGICAL ORDER + -- ========================================== + -- Use recursive CTE to find all downstream dependents, + -- then skip them in topological order (by step_index) + WITH RECURSIVE + -- ---------- Find all downstream steps ---------- + downstream_steps AS ( + -- Base case: the trigger step + SELECT + s.flow_slug, + s.step_slug, + s.step_index, + _cascade_force_skip_steps.skip_reason AS reason -- Original reason for trigger step + FROM pgflow.steps s + WHERE s.flow_slug = v_flow_slug + AND s.step_slug = _cascade_force_skip_steps.step_slug + + UNION ALL + + -- Recursive case: steps that depend on already-found steps + SELECT + s.flow_slug, + s.step_slug, + s.step_index, + 'dependency_skipped'::text AS reason -- Downstream steps get this 
reason + FROM pgflow.steps s + JOIN pgflow.deps d ON d.flow_slug = s.flow_slug AND d.step_slug = s.step_slug + JOIN downstream_steps ds ON ds.flow_slug = d.flow_slug AND ds.step_slug = d.dep_slug + ), + -- ---------- Deduplicate and order by step_index ---------- + steps_to_skip AS ( + SELECT DISTINCT ON (ds.step_slug) + ds.flow_slug, + ds.step_slug, + ds.step_index, + ds.reason + FROM downstream_steps ds + ORDER BY ds.step_slug, ds.step_index -- Keep first occurrence (trigger step has original reason) + ), + -- ---------- Skip the steps ---------- + skipped AS ( + UPDATE pgflow.step_states ss + SET status = 'skipped', + skip_reason = sts.reason, + skipped_at = now(), + remaining_tasks = NULL -- Clear remaining_tasks for skipped steps + FROM steps_to_skip sts + WHERE ss.run_id = _cascade_force_skip_steps.run_id + AND ss.step_slug = sts.step_slug + AND ss.status IN ('created', 'started') -- Only skip non-terminal steps + RETURNING + ss.*, + -- Broadcast step:skipped event + realtime.send( + jsonb_build_object( + 'event_type', 'step:skipped', + 'run_id', ss.run_id, + 'flow_slug', ss.flow_slug, + 'step_slug', ss.step_slug, + 'status', 'skipped', + 'skip_reason', ss.skip_reason, + 'skipped_at', ss.skipped_at + ), + concat('step:', ss.step_slug, ':skipped'), + concat('pgflow:run:', ss.run_id), + false + ) as _broadcast_result + ), + -- ---------- Update run counters ---------- + run_updates AS ( + UPDATE pgflow.runs r + SET remaining_steps = r.remaining_steps - skipped_count.count + FROM (SELECT COUNT(*) AS count FROM skipped) skipped_count + WHERE r.run_id = _cascade_force_skip_steps.run_id + AND skipped_count.count > 0 + ) + SELECT COUNT(*) INTO v_total_skipped FROM skipped; + + RETURN v_total_skipped; +END; +$$; +-- Create "cascade_resolve_conditions" function +CREATE FUNCTION "pgflow"."cascade_resolve_conditions" ("run_id" uuid) RETURNS boolean LANGUAGE plpgsql SET "search_path" = '' AS $$ +DECLARE + v_run_input jsonb; + v_run_status text; + v_first_fail record; + v_iteration_count int := 0; + v_max_iterations int := 50; + v_processed_count int; +BEGIN + -- ========================================== + -- GUARD: Early return if run is already terminal + -- ========================================== + SELECT r.status, r.input INTO v_run_status, v_run_input + FROM pgflow.runs r + WHERE r.run_id = cascade_resolve_conditions.run_id; + + IF v_run_status IN ('failed', 'completed') THEN + RETURN v_run_status != 'failed'; + END IF; + + -- ========================================== + -- ITERATE UNTIL CONVERGENCE + -- ========================================== + -- After skipping steps, dependents may become ready and need evaluation. + -- Loop until no more steps are processed. + LOOP + v_iteration_count := v_iteration_count + 1; + IF v_iteration_count > v_max_iterations THEN + RAISE EXCEPTION 'cascade_resolve_conditions exceeded safety limit of % iterations', v_max_iterations; + END IF; + + v_processed_count := 0; + + -- ========================================== + -- PHASE 1a: CHECK FOR FAIL CONDITIONS + -- ========================================== + -- Find first step (by topological order) with unmet condition and 'fail' mode. 
+ -- Condition is unmet when: + -- (required_input_pattern is set AND input does NOT contain it) OR + -- (forbidden_input_pattern is set AND input DOES contain it) + WITH steps_with_conditions AS ( + SELECT + step_state.flow_slug, + step_state.step_slug, + step.required_input_pattern, + step.forbidden_input_pattern, + step.when_unmet, + step.deps_count, + step.step_index + FROM pgflow.step_states AS step_state + JOIN pgflow.steps AS step + ON step.flow_slug = step_state.flow_slug + AND step.step_slug = step_state.step_slug + WHERE step_state.run_id = cascade_resolve_conditions.run_id + AND step_state.status = 'created' + AND step_state.remaining_deps = 0 + AND (step.required_input_pattern IS NOT NULL OR step.forbidden_input_pattern IS NOT NULL) + ), + step_deps_output AS ( + SELECT + swc.step_slug, + jsonb_object_agg(dep_state.step_slug, dep_state.output) AS deps_output + FROM steps_with_conditions swc + JOIN pgflow.deps dep ON dep.flow_slug = swc.flow_slug AND dep.step_slug = swc.step_slug + JOIN pgflow.step_states dep_state + ON dep_state.run_id = cascade_resolve_conditions.run_id + AND dep_state.step_slug = dep.dep_slug + AND dep_state.status = 'completed' -- Only completed deps (not skipped) + WHERE swc.deps_count > 0 + GROUP BY swc.step_slug + ), + condition_evaluations AS ( + SELECT + swc.*, + -- condition_met = (if IS NULL OR input @> if) AND (ifNot IS NULL OR NOT(input @> ifNot)) + (swc.required_input_pattern IS NULL OR + CASE WHEN swc.deps_count = 0 THEN v_run_input ELSE COALESCE(sdo.deps_output, '{}'::jsonb) END @> swc.required_input_pattern) + AND + (swc.forbidden_input_pattern IS NULL OR + NOT (CASE WHEN swc.deps_count = 0 THEN v_run_input ELSE COALESCE(sdo.deps_output, '{}'::jsonb) END @> swc.forbidden_input_pattern)) + AS condition_met + FROM steps_with_conditions swc + LEFT JOIN step_deps_output sdo ON sdo.step_slug = swc.step_slug + ) + SELECT flow_slug, step_slug, required_input_pattern, forbidden_input_pattern + INTO v_first_fail + FROM condition_evaluations + WHERE NOT condition_met AND when_unmet = 'fail' + ORDER BY step_index + LIMIT 1; + + -- Handle fail mode: fail step and run, return false + -- Note: Cannot use "v_first_fail IS NOT NULL" here: a composite value + -- passes IS NOT NULL only when every field is non-null, so a matched row + -- whose pattern fields are NULL would be missed. Use FOUND instead. + IF FOUND THEN + UPDATE pgflow.step_states + SET status = 'failed', + failed_at = now(), + error_message = 'Condition not met' + WHERE pgflow.step_states.run_id = cascade_resolve_conditions.run_id + AND pgflow.step_states.step_slug = v_first_fail.step_slug; + + UPDATE pgflow.runs + SET status = 'failed', + failed_at = now() + WHERE pgflow.runs.run_id = cascade_resolve_conditions.run_id; + + RETURN false; + END IF; + + -- ========================================== + -- PHASE 1b: HANDLE SKIP CONDITIONS (with propagation) + -- ========================================== + -- Skip steps with unmet conditions and whenUnmet='skip'. + -- Also decrement remaining_deps on dependents and set initial_tasks=0 for map dependents.
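+ -- Worked example of the containment checks below (hypothetical values):
+ -- input '{"tier": "pro", "region": "eu"}' satisfies required_input_pattern
+ -- '{"tier": "pro"}' because input @> pattern holds; the same input is
+ -- unmet under forbidden_input_pattern '{"region": "eu"}', since containing
+ -- the forbidden pattern makes the condition unmet, so the step is skipped here.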
+ WITH steps_with_conditions AS ( + SELECT + step_state.flow_slug, + step_state.step_slug, + step.required_input_pattern, + step.forbidden_input_pattern, + step.when_unmet, + step.deps_count, + step.step_index + FROM pgflow.step_states AS step_state + JOIN pgflow.steps AS step + ON step.flow_slug = step_state.flow_slug + AND step.step_slug = step_state.step_slug + WHERE step_state.run_id = cascade_resolve_conditions.run_id + AND step_state.status = 'created' + AND step_state.remaining_deps = 0 + AND (step.required_input_pattern IS NOT NULL OR step.forbidden_input_pattern IS NOT NULL) + ), + step_deps_output AS ( + SELECT + swc.step_slug, + jsonb_object_agg(dep_state.step_slug, dep_state.output) AS deps_output + FROM steps_with_conditions swc + JOIN pgflow.deps dep ON dep.flow_slug = swc.flow_slug AND dep.step_slug = swc.step_slug + JOIN pgflow.step_states dep_state + ON dep_state.run_id = cascade_resolve_conditions.run_id + AND dep_state.step_slug = dep.dep_slug + AND dep_state.status = 'completed' -- Only completed deps (not skipped) + WHERE swc.deps_count > 0 + GROUP BY swc.step_slug + ), + condition_evaluations AS ( + SELECT + swc.*, + -- condition_met = (if IS NULL OR input @> if) AND (ifNot IS NULL OR NOT(input @> ifNot)) + (swc.required_input_pattern IS NULL OR + CASE WHEN swc.deps_count = 0 THEN v_run_input ELSE COALESCE(sdo.deps_output, '{}'::jsonb) END @> swc.required_input_pattern) + AND + (swc.forbidden_input_pattern IS NULL OR + NOT (CASE WHEN swc.deps_count = 0 THEN v_run_input ELSE COALESCE(sdo.deps_output, '{}'::jsonb) END @> swc.forbidden_input_pattern)) + AS condition_met + FROM steps_with_conditions swc + LEFT JOIN step_deps_output sdo ON sdo.step_slug = swc.step_slug + ), + unmet_skip_steps AS ( + SELECT * FROM condition_evaluations + WHERE NOT condition_met AND when_unmet = 'skip' + ), + skipped_steps AS ( + UPDATE pgflow.step_states ss + SET status = 'skipped', + skip_reason = 'condition_unmet', + skipped_at = now() + FROM unmet_skip_steps uss + WHERE ss.run_id = cascade_resolve_conditions.run_id + AND ss.step_slug = uss.step_slug + RETURNING + ss.*, + realtime.send( + jsonb_build_object( + 'event_type', 'step:skipped', + 'run_id', ss.run_id, + 'flow_slug', ss.flow_slug, + 'step_slug', ss.step_slug, + 'status', 'skipped', + 'skip_reason', 'condition_unmet', + 'skipped_at', ss.skipped_at + ), + concat('step:', ss.step_slug, ':skipped'), + concat('pgflow:run:', ss.run_id), + false + ) AS _broadcast_result + ), + -- NEW: Update dependent steps (decrement remaining_deps, set initial_tasks=0 for maps) + dependent_updates AS ( + UPDATE pgflow.step_states child_state + SET remaining_deps = child_state.remaining_deps - 1, + -- If child is a map step and this skipped step is its only dependency, + -- set initial_tasks = 0 (skipped dep = empty array) + initial_tasks = CASE + WHEN child_step.step_type = 'map' AND child_step.deps_count = 1 THEN 0 + ELSE child_state.initial_tasks + END + FROM skipped_steps parent + JOIN pgflow.deps dep ON dep.flow_slug = parent.flow_slug AND dep.dep_slug = parent.step_slug + JOIN pgflow.steps child_step ON child_step.flow_slug = dep.flow_slug AND child_step.step_slug = dep.step_slug + WHERE child_state.run_id = cascade_resolve_conditions.run_id + AND child_state.step_slug = dep.step_slug + ), + run_update AS ( + UPDATE pgflow.runs r + SET remaining_steps = r.remaining_steps - (SELECT COUNT(*) FROM skipped_steps) + WHERE r.run_id = cascade_resolve_conditions.run_id + AND (SELECT COUNT(*) FROM skipped_steps) > 0 + ) + SELECT COUNT(*)::int INTO 
v_processed_count FROM skipped_steps; + + -- ========================================== + -- PHASE 1c: HANDLE SKIP-CASCADE CONDITIONS + -- ========================================== + -- Call _cascade_force_skip_steps for each step with unmet condition and whenUnmet='skip-cascade'. + -- Process in topological order; _cascade_force_skip_steps is idempotent. + PERFORM pgflow._cascade_force_skip_steps(cascade_resolve_conditions.run_id, ready_step.step_slug, 'condition_unmet') + FROM pgflow.step_states AS ready_step + JOIN pgflow.steps AS step + ON step.flow_slug = ready_step.flow_slug + AND step.step_slug = ready_step.step_slug + LEFT JOIN LATERAL ( + SELECT jsonb_object_agg(dep_state.step_slug, dep_state.output) AS deps_output + FROM pgflow.deps dep + JOIN pgflow.step_states dep_state + ON dep_state.run_id = cascade_resolve_conditions.run_id + AND dep_state.step_slug = dep.dep_slug + AND dep_state.status = 'completed' -- Only completed deps (not skipped) + WHERE dep.flow_slug = ready_step.flow_slug + AND dep.step_slug = ready_step.step_slug + ) AS agg_deps ON step.deps_count > 0 + WHERE ready_step.run_id = cascade_resolve_conditions.run_id + AND ready_step.status = 'created' + AND ready_step.remaining_deps = 0 + AND (step.required_input_pattern IS NOT NULL OR step.forbidden_input_pattern IS NOT NULL) + AND step.when_unmet = 'skip-cascade' + -- Condition is NOT met when: (if fails) OR (ifNot fails) + AND NOT ( + (step.required_input_pattern IS NULL OR + CASE WHEN step.deps_count = 0 THEN v_run_input ELSE COALESCE(agg_deps.deps_output, '{}'::jsonb) END @> step.required_input_pattern) + AND + (step.forbidden_input_pattern IS NULL OR + NOT (CASE WHEN step.deps_count = 0 THEN v_run_input ELSE COALESCE(agg_deps.deps_output, '{}'::jsonb) END @> step.forbidden_input_pattern)) + ) + ORDER BY step.step_index; + + -- Check if run was failed during cascade (e.g., if _cascade_force_skip_steps triggers fail) + SELECT r.status INTO v_run_status + FROM pgflow.runs r + WHERE r.run_id = cascade_resolve_conditions.run_id; + + IF v_run_status IN ('failed', 'completed') THEN + RETURN v_run_status != 'failed'; + END IF; + + -- Exit loop if no steps were processed in this iteration + EXIT WHEN v_processed_count = 0; + END LOOP; + + RETURN true; +END; +$$; +-- Modify "start_ready_steps" function +CREATE OR REPLACE FUNCTION "pgflow"."start_ready_steps" ("run_id" uuid) RETURNS void LANGUAGE plpgsql SET "search_path" = '' AS $$ +BEGIN +-- ========================================== +-- GUARD: No mutations on terminal runs +-- ========================================== +IF EXISTS ( + SELECT 1 FROM pgflow.runs + WHERE pgflow.runs.run_id = start_ready_steps.run_id + AND pgflow.runs.status IN ('failed', 'completed') +) THEN + RETURN; +END IF; + +-- ========================================== +-- PHASE 1: START READY STEPS +-- ========================================== +-- NOTE: Condition evaluation and empty map handling are done by +-- cascade_resolve_conditions() and cascade_complete_taskless_steps() +-- which are called before this function. 
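+-- Illustration (hypothetical state): a step_state with remaining_deps = 0
+-- and initial_tasks = 3 is flipped to 'started' below and tasks 0, 1, 2 are
+-- queued; a dependent map whose initial_tasks is still NULL (its parent has
+-- not completed yet) does not match ready_steps and is left untouched.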
+WITH +-- ---------- Find ready steps ---------- +-- Steps with no remaining deps and known task count +ready_steps AS ( + SELECT * + FROM pgflow.step_states AS step_state + WHERE step_state.run_id = start_ready_steps.run_id + AND step_state.status = 'created' + AND step_state.remaining_deps = 0 + AND step_state.initial_tasks IS NOT NULL -- Cannot start with unknown count + AND step_state.initial_tasks > 0 -- Don't start taskless steps (handled by cascade_complete_taskless_steps) + ORDER BY step_state.step_slug + FOR UPDATE +), +-- ---------- Mark steps as started ---------- +started_step_states AS ( + UPDATE pgflow.step_states + SET status = 'started', + started_at = now(), + remaining_tasks = ready_steps.initial_tasks -- Copy initial_tasks to remaining_tasks when starting + FROM ready_steps + WHERE pgflow.step_states.run_id = start_ready_steps.run_id + AND pgflow.step_states.step_slug = ready_steps.step_slug + RETURNING pgflow.step_states.*, + -- Broadcast step:started event atomically with the UPDATE + -- Using RETURNING ensures this executes during row processing + -- and cannot be optimized away by the query planner + realtime.send( + jsonb_build_object( + 'event_type', 'step:started', + 'run_id', pgflow.step_states.run_id, + 'step_slug', pgflow.step_states.step_slug, + 'status', 'started', + 'started_at', pgflow.step_states.started_at, + 'remaining_tasks', pgflow.step_states.remaining_tasks, + 'remaining_deps', pgflow.step_states.remaining_deps + ), + concat('step:', pgflow.step_states.step_slug, ':started'), + concat('pgflow:run:', pgflow.step_states.run_id), + false + ) as _broadcast_result -- Prefix with _ to indicate internal use only +), + +-- ========================================== +-- PHASE 2: TASK GENERATION AND QUEUE MESSAGES +-- ========================================== +-- ---------- Generate tasks and batch messages ---------- +-- Single steps: 1 task (index 0) +-- Map steps: N tasks (indices 0..N-1) +message_batches AS ( + SELECT + started_step.flow_slug, + started_step.run_id, + started_step.step_slug, + COALESCE(step.opt_start_delay, 0) as delay, + array_agg( + jsonb_build_object( + 'flow_slug', started_step.flow_slug, + 'run_id', started_step.run_id, + 'step_slug', started_step.step_slug, + 'task_index', task_idx.task_index + ) ORDER BY task_idx.task_index + ) AS messages, + array_agg(task_idx.task_index ORDER BY task_idx.task_index) AS task_indices + FROM started_step_states AS started_step + JOIN pgflow.steps AS step + ON step.flow_slug = started_step.flow_slug + AND step.step_slug = started_step.step_slug + -- Generate task indices from 0 to initial_tasks-1 + CROSS JOIN LATERAL generate_series(0, started_step.initial_tasks - 1) AS task_idx(task_index) + GROUP BY started_step.flow_slug, started_step.run_id, started_step.step_slug, step.opt_start_delay +), +-- ---------- Send messages to queue ---------- +-- Uses batch sending for performance with large arrays +sent_messages AS ( + SELECT + mb.flow_slug, + mb.run_id, + mb.step_slug, + task_indices.task_index, + msg_ids.msg_id + FROM message_batches mb + CROSS JOIN LATERAL unnest(mb.task_indices) WITH ORDINALITY AS task_indices(task_index, idx_ord) + CROSS JOIN LATERAL pgmq.send_batch(mb.flow_slug, mb.messages, mb.delay) WITH ORDINALITY AS msg_ids(msg_id, msg_ord) + WHERE task_indices.idx_ord = msg_ids.msg_ord +) + +-- ========================================== +-- PHASE 3: RECORD TASKS IN DATABASE +-- ========================================== +INSERT INTO pgflow.step_tasks (flow_slug, run_id, step_slug, 
task_index, message_id) +SELECT + sent_messages.flow_slug, + sent_messages.run_id, + sent_messages.step_slug, + sent_messages.task_index, + sent_messages.msg_id +FROM sent_messages; + +END; +$$; +-- Modify "complete_task" function +CREATE OR REPLACE FUNCTION "pgflow"."complete_task" ("run_id" uuid, "step_slug" text, "task_index" integer, "output" jsonb) RETURNS SETOF "pgflow"."step_tasks" LANGUAGE plpgsql SET "search_path" = '' AS $$ +declare + v_step_state pgflow.step_states%ROWTYPE; + v_dependent_map_slug text; + v_run_record pgflow.runs%ROWTYPE; + v_step_record pgflow.step_states%ROWTYPE; +begin + +-- ========================================== +-- GUARD: No mutations on failed runs +-- ========================================== +IF EXISTS (SELECT 1 FROM pgflow.runs WHERE pgflow.runs.run_id = complete_task.run_id AND pgflow.runs.status = 'failed') THEN + RETURN QUERY SELECT * FROM pgflow.step_tasks + WHERE pgflow.step_tasks.run_id = complete_task.run_id + AND pgflow.step_tasks.step_slug = complete_task.step_slug + AND pgflow.step_tasks.task_index = complete_task.task_index; + RETURN; +END IF; + +-- ========================================== +-- LOCK ACQUISITION AND TYPE VALIDATION +-- ========================================== +-- Acquire locks first to prevent race conditions +SELECT * INTO v_run_record FROM pgflow.runs +WHERE pgflow.runs.run_id = complete_task.run_id +FOR UPDATE; + +SELECT * INTO v_step_record FROM pgflow.step_states +WHERE pgflow.step_states.run_id = complete_task.run_id + AND pgflow.step_states.step_slug = complete_task.step_slug +FOR UPDATE; + +-- Check for type violations AFTER acquiring locks +SELECT child_step.step_slug INTO v_dependent_map_slug +FROM pgflow.deps dependency +JOIN pgflow.steps child_step ON child_step.flow_slug = dependency.flow_slug + AND child_step.step_slug = dependency.step_slug +JOIN pgflow.steps parent_step ON parent_step.flow_slug = dependency.flow_slug + AND parent_step.step_slug = dependency.dep_slug +JOIN pgflow.step_states child_state ON child_state.flow_slug = child_step.flow_slug + AND child_state.step_slug = child_step.step_slug +WHERE dependency.dep_slug = complete_task.step_slug -- parent is the completing step + AND dependency.flow_slug = v_run_record.flow_slug + AND parent_step.step_type = 'single' -- Only validate single steps + AND child_step.step_type = 'map' + AND child_state.run_id = complete_task.run_id + AND child_state.initial_tasks IS NULL + AND (complete_task.output IS NULL OR jsonb_typeof(complete_task.output) != 'array') +LIMIT 1; + +-- Handle type violation if detected +IF v_dependent_map_slug IS NOT NULL THEN + -- Mark run as failed immediately + UPDATE pgflow.runs + SET status = 'failed', + failed_at = now() + WHERE pgflow.runs.run_id = complete_task.run_id; + + -- Broadcast run:failed event + -- Uses PERFORM pattern to ensure execution (proven reliable pattern in this function) + PERFORM realtime.send( + jsonb_build_object( + 'event_type', 'run:failed', + 'run_id', complete_task.run_id, + 'flow_slug', v_run_record.flow_slug, + 'status', 'failed', + 'failed_at', now() + ), + 'run:failed', + concat('pgflow:run:', complete_task.run_id), + false + ); + + -- Archive all active messages (both queued and started) to prevent orphaned messages + PERFORM pgmq.archive( + v_run_record.flow_slug, + array_agg(st.message_id) + ) + FROM pgflow.step_tasks st + WHERE st.run_id = complete_task.run_id + AND st.status IN ('queued', 'started') + AND st.message_id IS NOT NULL + HAVING count(*) > 0; -- Only call archive if there are 
messages to archive + + -- Mark current task as failed and store the output + UPDATE pgflow.step_tasks + SET status = 'failed', + failed_at = now(), + output = complete_task.output, -- Store the output that caused the violation + error_message = '[TYPE_VIOLATION] Produced ' || + CASE WHEN complete_task.output IS NULL THEN 'null' + ELSE jsonb_typeof(complete_task.output) END || + ' instead of array' + WHERE pgflow.step_tasks.run_id = complete_task.run_id + AND pgflow.step_tasks.step_slug = complete_task.step_slug + AND pgflow.step_tasks.task_index = complete_task.task_index; + + -- Mark step state as failed + UPDATE pgflow.step_states + SET status = 'failed', + failed_at = now(), + error_message = '[TYPE_VIOLATION] Map step ' || v_dependent_map_slug || + ' expects array input but dependency ' || complete_task.step_slug || + ' produced ' || CASE WHEN complete_task.output IS NULL THEN 'null' + ELSE jsonb_typeof(complete_task.output) END + WHERE pgflow.step_states.run_id = complete_task.run_id + AND pgflow.step_states.step_slug = complete_task.step_slug; + + -- Broadcast step:failed event + -- Uses PERFORM pattern to ensure execution (proven reliable pattern in this function) + PERFORM realtime.send( + jsonb_build_object( + 'event_type', 'step:failed', + 'run_id', complete_task.run_id, + 'step_slug', complete_task.step_slug, + 'status', 'failed', + 'error_message', '[TYPE_VIOLATION] Map step ' || v_dependent_map_slug || + ' expects array input but dependency ' || complete_task.step_slug || + ' produced ' || CASE WHEN complete_task.output IS NULL THEN 'null' + ELSE jsonb_typeof(complete_task.output) END, + 'failed_at', now() + ), + concat('step:', complete_task.step_slug, ':failed'), + concat('pgflow:run:', complete_task.run_id), + false + ); + + -- Archive the current task's message (it was started, now failed) + PERFORM pgmq.archive( + v_run_record.flow_slug, + st.message_id -- Single message, use scalar form + ) + FROM pgflow.step_tasks st + WHERE st.run_id = complete_task.run_id + AND st.step_slug = complete_task.step_slug + AND st.task_index = complete_task.task_index + AND st.message_id IS NOT NULL; + + -- Return empty result + RETURN QUERY SELECT * FROM pgflow.step_tasks WHERE false; + RETURN; +END IF; + +-- ========================================== +-- MAIN CTE CHAIN: Update task and propagate changes +-- ========================================== +WITH +-- ---------- Task completion ---------- +-- Update the task record with completion status and output +task AS ( + UPDATE pgflow.step_tasks + SET + status = 'completed', + completed_at = now(), + output = complete_task.output + WHERE pgflow.step_tasks.run_id = complete_task.run_id + AND pgflow.step_tasks.step_slug = complete_task.step_slug + AND pgflow.step_tasks.task_index = complete_task.task_index + AND pgflow.step_tasks.status = 'started' + RETURNING * +), +-- ---------- Get step type for output handling ---------- +step_def AS ( + SELECT step.step_type + FROM pgflow.steps step + JOIN pgflow.runs run ON run.flow_slug = step.flow_slug + WHERE run.run_id = complete_task.run_id + AND step.step_slug = complete_task.step_slug +), +-- ---------- Step state update ---------- +-- Decrement remaining_tasks and potentially mark step as completed +-- Also store output atomically with status transition to completed +step_state AS ( + UPDATE pgflow.step_states + SET + status = CASE + WHEN pgflow.step_states.remaining_tasks = 1 THEN 'completed' -- Will be 0 after decrement + ELSE 'started' + END, + completed_at = CASE + WHEN 
pgflow.step_states.remaining_tasks = 1 THEN now() -- Will be 0 after decrement + ELSE NULL + END, + remaining_tasks = pgflow.step_states.remaining_tasks - 1, + -- Store output atomically with completion (only when remaining_tasks = 1, meaning step completes) + output = CASE + -- Single step: store task output directly when completing + WHEN (SELECT step_type FROM step_def) = 'single' AND pgflow.step_states.remaining_tasks = 1 THEN + complete_task.output + -- Map step: aggregate on completion (ordered by task_index) + WHEN (SELECT step_type FROM step_def) = 'map' AND pgflow.step_states.remaining_tasks = 1 THEN + (SELECT COALESCE(jsonb_agg(all_outputs.output ORDER BY all_outputs.task_index), '[]'::jsonb) + FROM ( + -- All previously completed tasks + SELECT st.output, st.task_index + FROM pgflow.step_tasks st + WHERE st.run_id = complete_task.run_id + AND st.step_slug = complete_task.step_slug + AND st.status = 'completed' + UNION ALL + -- Current task being completed (not yet visible as completed in snapshot) + SELECT complete_task.output, complete_task.task_index + ) all_outputs) + ELSE pgflow.step_states.output + END + FROM task + WHERE pgflow.step_states.run_id = complete_task.run_id + AND pgflow.step_states.step_slug = complete_task.step_slug + RETURNING pgflow.step_states.* +), +-- ---------- Dependency resolution ---------- +-- Find all child steps that depend on the completed parent step (only if parent completed) +child_steps AS ( + SELECT deps.step_slug AS child_step_slug + FROM pgflow.deps deps + JOIN step_state parent_state ON parent_state.status = 'completed' AND deps.flow_slug = parent_state.flow_slug + WHERE deps.dep_slug = complete_task.step_slug -- dep_slug is the parent, step_slug is the child + ORDER BY deps.step_slug -- Ensure consistent ordering +), +-- ---------- Lock child steps ---------- +-- Acquire locks on all child steps before updating them +child_steps_lock AS ( + SELECT * FROM pgflow.step_states + WHERE pgflow.step_states.run_id = complete_task.run_id + AND pgflow.step_states.step_slug IN (SELECT child_step_slug FROM child_steps) + FOR UPDATE +), +-- ---------- Update child steps ---------- +-- Decrement remaining_deps and resolve NULL initial_tasks for map steps +child_steps_update AS ( + UPDATE pgflow.step_states child_state + SET remaining_deps = child_state.remaining_deps - 1, + -- Resolve NULL initial_tasks for child map steps + -- This is where child maps learn their array size from the parent + -- This CTE only runs when the parent step is complete (see child_steps JOIN) + initial_tasks = CASE + WHEN child_step.step_type = 'map' AND child_state.initial_tasks IS NULL THEN + CASE + WHEN parent_step.step_type = 'map' THEN + -- Map->map: Count all completed tasks from parent map + -- We add 1 because the current task is being completed in this transaction + -- but isn't yet visible as 'completed' in the step_tasks table + -- TODO: Refactor to use future column step_states.total_tasks + -- Would eliminate the COUNT query and just use parent_state.total_tasks + (SELECT COUNT(*)::int + 1 + FROM pgflow.step_tasks parent_tasks + WHERE parent_tasks.run_id = complete_task.run_id + AND parent_tasks.step_slug = complete_task.step_slug + AND parent_tasks.status = 'completed' + AND parent_tasks.task_index != complete_task.task_index) + ELSE + -- Single->map: Use output array length (single steps complete immediately) + CASE + WHEN complete_task.output IS NOT NULL + AND jsonb_typeof(complete_task.output) = 'array' THEN + jsonb_array_length(complete_task.output) + ELSE 
NULL -- Keep NULL if not an array + END + END + ELSE child_state.initial_tasks -- Keep existing value (including NULL) + END + FROM child_steps children + JOIN pgflow.steps child_step ON child_step.flow_slug = (SELECT r.flow_slug FROM pgflow.runs r WHERE r.run_id = complete_task.run_id) + AND child_step.step_slug = children.child_step_slug + JOIN pgflow.steps parent_step ON parent_step.flow_slug = (SELECT r.flow_slug FROM pgflow.runs r WHERE r.run_id = complete_task.run_id) + AND parent_step.step_slug = complete_task.step_slug + WHERE child_state.run_id = complete_task.run_id + AND child_state.step_slug = children.child_step_slug +) +-- ---------- Update run remaining_steps ---------- +-- Decrement the run's remaining_steps counter if step completed +UPDATE pgflow.runs +SET remaining_steps = pgflow.runs.remaining_steps - 1 +FROM step_state +WHERE pgflow.runs.run_id = complete_task.run_id + AND step_state.status = 'completed'; + +-- ========================================== +-- POST-COMPLETION ACTIONS +-- ========================================== + +-- ---------- Get updated state for broadcasting ---------- +SELECT * INTO v_step_state FROM pgflow.step_states +WHERE pgflow.step_states.run_id = complete_task.run_id AND pgflow.step_states.step_slug = complete_task.step_slug; + +-- ---------- Handle step completion ---------- +IF v_step_state.status = 'completed' THEN + -- Broadcast step:completed event FIRST (before cascade) + -- This ensures parent broadcasts before its dependent children + -- Use stored output from step_states (set atomically during status transition) + PERFORM realtime.send( + jsonb_build_object( + 'event_type', 'step:completed', + 'run_id', complete_task.run_id, + 'step_slug', complete_task.step_slug, + 'status', 'completed', + 'output', v_step_state.output, -- Use stored output instead of re-aggregating + 'completed_at', v_step_state.completed_at + ), + concat('step:', complete_task.step_slug, ':completed'), + concat('pgflow:run:', complete_task.run_id), + false + ); + + -- THEN evaluate conditions on newly-ready dependent steps + -- This must happen before cascade_complete_taskless_steps so that + -- skipped steps can set initial_tasks=0 for their map dependents + IF NOT pgflow.cascade_resolve_conditions(complete_task.run_id) THEN + -- Run was failed due to a condition with when_unmet='fail' + -- Archive the current task's message before returning + PERFORM pgmq.archive( + (SELECT r.flow_slug FROM pgflow.runs r WHERE r.run_id = complete_task.run_id), + (SELECT st.message_id FROM pgflow.step_tasks st + WHERE st.run_id = complete_task.run_id + AND st.step_slug = complete_task.step_slug + AND st.task_index = complete_task.task_index) + ); + RETURN QUERY SELECT * FROM pgflow.step_tasks + WHERE pgflow.step_tasks.run_id = complete_task.run_id + AND pgflow.step_tasks.step_slug = complete_task.step_slug + AND pgflow.step_tasks.task_index = complete_task.task_index; + RETURN; + END IF; + + -- THEN cascade complete any taskless steps that are now ready + -- This ensures dependent children broadcast AFTER their parent + PERFORM pgflow.cascade_complete_taskless_steps(complete_task.run_id); +END IF; + +-- ---------- Archive completed task message ---------- +-- Move message from active queue to archive table +PERFORM ( + WITH completed_tasks AS ( + SELECT r.flow_slug, st.message_id + FROM pgflow.step_tasks st + JOIN pgflow.runs r ON st.run_id = r.run_id + WHERE st.run_id = complete_task.run_id + AND st.step_slug = complete_task.step_slug + AND st.task_index = 
complete_task.task_index + AND st.status = 'completed' + ) + SELECT pgmq.archive(ct.flow_slug, ct.message_id) + FROM completed_tasks ct + WHERE EXISTS (SELECT 1 FROM completed_tasks) +); + +-- ---------- Trigger next steps ---------- +-- Start any steps that are now ready (deps satisfied) +PERFORM pgflow.start_ready_steps(complete_task.run_id); + +-- Check if the entire run is complete +PERFORM pgflow.maybe_complete_run(complete_task.run_id); + +-- ---------- Return completed task ---------- +RETURN QUERY SELECT * +FROM pgflow.step_tasks AS step_task +WHERE step_task.run_id = complete_task.run_id + AND step_task.step_slug = complete_task.step_slug + AND step_task.task_index = complete_task.task_index; + +end; +$$; +-- Modify "fail_task" function +CREATE OR REPLACE FUNCTION "pgflow"."fail_task" ("run_id" uuid, "step_slug" text, "task_index" integer, "error_message" text) RETURNS SETOF "pgflow"."step_tasks" LANGUAGE plpgsql SET "search_path" = '' AS $$ +DECLARE + v_run_failed boolean; + v_step_failed boolean; + v_step_skipped boolean; + v_when_failed text; + v_task_exhausted boolean; -- True if task has exhausted retries + v_flow_slug_for_deps text; -- Used for decrementing remaining_deps on plain skip +begin + +-- If run is already failed, no retries allowed +IF EXISTS (SELECT 1 FROM pgflow.runs WHERE pgflow.runs.run_id = fail_task.run_id AND pgflow.runs.status = 'failed') THEN + UPDATE pgflow.step_tasks + SET status = 'failed', + failed_at = now(), + error_message = fail_task.error_message + WHERE pgflow.step_tasks.run_id = fail_task.run_id + AND pgflow.step_tasks.step_slug = fail_task.step_slug + AND pgflow.step_tasks.task_index = fail_task.task_index + AND pgflow.step_tasks.status = 'started'; + + -- Archive the task's message + PERFORM pgmq.archive(r.flow_slug, ARRAY_AGG(st.message_id)) + FROM pgflow.step_tasks st + JOIN pgflow.runs r ON st.run_id = r.run_id + WHERE st.run_id = fail_task.run_id + AND st.step_slug = fail_task.step_slug + AND st.task_index = fail_task.task_index + AND st.message_id IS NOT NULL + GROUP BY r.flow_slug + HAVING COUNT(st.message_id) > 0; + + RETURN QUERY SELECT * FROM pgflow.step_tasks + WHERE pgflow.step_tasks.run_id = fail_task.run_id + AND pgflow.step_tasks.step_slug = fail_task.step_slug + AND pgflow.step_tasks.task_index = fail_task.task_index; + RETURN; +END IF; + +WITH run_lock AS ( + SELECT * FROM pgflow.runs + WHERE pgflow.runs.run_id = fail_task.run_id + FOR UPDATE +), +step_lock AS ( + SELECT * FROM pgflow.step_states + WHERE pgflow.step_states.run_id = fail_task.run_id + AND pgflow.step_states.step_slug = fail_task.step_slug + FOR UPDATE +), +flow_info AS ( + SELECT r.flow_slug + FROM pgflow.runs r + WHERE r.run_id = fail_task.run_id +), +config AS ( + SELECT + COALESCE(s.opt_max_attempts, f.opt_max_attempts) AS opt_max_attempts, + COALESCE(s.opt_base_delay, f.opt_base_delay) AS opt_base_delay, + s.when_failed + FROM pgflow.steps s + JOIN pgflow.flows f ON f.flow_slug = s.flow_slug + JOIN flow_info fi ON fi.flow_slug = s.flow_slug + WHERE s.flow_slug = fi.flow_slug AND s.step_slug = fail_task.step_slug +), +fail_or_retry_task as ( + UPDATE pgflow.step_tasks as task + SET + status = CASE + WHEN task.attempts_count < (SELECT opt_max_attempts FROM config) THEN 'queued' + ELSE 'failed' + END, + failed_at = CASE + WHEN task.attempts_count >= (SELECT opt_max_attempts FROM config) THEN now() + ELSE NULL + END, + started_at = CASE + WHEN task.attempts_count < (SELECT opt_max_attempts FROM config) THEN NULL + ELSE task.started_at + END, + error_message = 
fail_task.error_message + WHERE task.run_id = fail_task.run_id + AND task.step_slug = fail_task.step_slug + AND task.task_index = fail_task.task_index + AND task.status = 'started' + RETURNING * +), +-- Determine if task exhausted retries and get when_failed mode +task_status AS ( + SELECT + (select status from fail_or_retry_task) AS new_task_status, + (select when_failed from config) AS when_failed_mode, + -- Task is exhausted when it's failed (no more retries) + ((select status from fail_or_retry_task) = 'failed') AS is_exhausted +), +maybe_fail_step AS ( + UPDATE pgflow.step_states + SET + -- Status logic: + -- - If task not exhausted (retrying): keep current status + -- - If exhausted AND when_failed='fail': set to 'failed' + -- - If exhausted AND when_failed IN ('skip', 'skip-cascade'): set to 'skipped' + status = CASE + WHEN NOT (select is_exhausted from task_status) THEN pgflow.step_states.status + WHEN (select when_failed_mode from task_status) = 'fail' THEN 'failed' + ELSE 'skipped' -- skip or skip-cascade + END, + failed_at = CASE + WHEN (select is_exhausted from task_status) AND (select when_failed_mode from task_status) = 'fail' THEN now() + ELSE NULL + END, + error_message = CASE + WHEN (select is_exhausted from task_status) THEN fail_task.error_message + ELSE NULL + END, + skip_reason = CASE + WHEN (select is_exhausted from task_status) AND (select when_failed_mode from task_status) IN ('skip', 'skip-cascade') THEN 'handler_failed' + ELSE pgflow.step_states.skip_reason + END, + skipped_at = CASE + WHEN (select is_exhausted from task_status) AND (select when_failed_mode from task_status) IN ('skip', 'skip-cascade') THEN now() + ELSE pgflow.step_states.skipped_at + END, + -- Clear remaining_tasks when skipping (required by remaining_tasks_state_consistency constraint) + remaining_tasks = CASE + WHEN (select is_exhausted from task_status) AND (select when_failed_mode from task_status) IN ('skip', 'skip-cascade') THEN NULL + ELSE pgflow.step_states.remaining_tasks + END + FROM fail_or_retry_task + WHERE pgflow.step_states.run_id = fail_task.run_id + AND pgflow.step_states.step_slug = fail_task.step_slug + RETURNING pgflow.step_states.* +) +-- Update run status: only fail when when_failed='fail' and step was failed +UPDATE pgflow.runs +SET status = CASE + WHEN (select status from maybe_fail_step) = 'failed' THEN 'failed' + ELSE status + END, + failed_at = CASE + WHEN (select status from maybe_fail_step) = 'failed' THEN now() + ELSE NULL + END, + -- Decrement remaining_steps when step was skipped (not failed, run continues) + remaining_steps = CASE + WHEN (select status from maybe_fail_step) = 'skipped' THEN pgflow.runs.remaining_steps - 1 + ELSE pgflow.runs.remaining_steps + END +WHERE pgflow.runs.run_id = fail_task.run_id +RETURNING (status = 'failed') INTO v_run_failed; + +-- Capture when_failed mode and check if step was skipped for later processing +SELECT s.when_failed INTO v_when_failed +FROM pgflow.steps s +JOIN pgflow.runs r ON r.flow_slug = s.flow_slug +WHERE r.run_id = fail_task.run_id + AND s.step_slug = fail_task.step_slug; + +SELECT (status = 'skipped') INTO v_step_skipped +FROM pgflow.step_states +WHERE pgflow.step_states.run_id = fail_task.run_id + AND pgflow.step_states.step_slug = fail_task.step_slug; + +-- Check if step failed by querying the step_states table +SELECT (status = 'failed') INTO v_step_failed +FROM pgflow.step_states +WHERE pgflow.step_states.run_id = fail_task.run_id + AND pgflow.step_states.step_slug = fail_task.step_slug; + +-- Send broadcast 
event for step failure if the step was failed +IF v_step_failed THEN + PERFORM realtime.send( + jsonb_build_object( + 'event_type', 'step:failed', + 'run_id', fail_task.run_id, + 'step_slug', fail_task.step_slug, + 'status', 'failed', + 'error_message', fail_task.error_message, + 'failed_at', now() + ), + concat('step:', fail_task.step_slug, ':failed'), + concat('pgflow:run:', fail_task.run_id), + false + ); +END IF; + +-- Handle step skipping (when_failed = 'skip' or 'skip-cascade') +IF v_step_skipped THEN + -- Send broadcast event for step skipped + PERFORM realtime.send( + jsonb_build_object( + 'event_type', 'step:skipped', + 'run_id', fail_task.run_id, + 'step_slug', fail_task.step_slug, + 'status', 'skipped', + 'skip_reason', 'handler_failed', + 'error_message', fail_task.error_message, + 'skipped_at', now() + ), + concat('step:', fail_task.step_slug, ':skipped'), + concat('pgflow:run:', fail_task.run_id), + false + ); + + -- For skip-cascade: cascade skip to all downstream dependents + IF v_when_failed = 'skip-cascade' THEN + PERFORM pgflow._cascade_force_skip_steps(fail_task.run_id, fail_task.step_slug, 'handler_failed'); + ELSE + -- For plain 'skip': decrement remaining_deps on dependent steps + -- (This mirrors the pattern in cascade_resolve_conditions.sql for when_unmet='skip') + SELECT flow_slug INTO v_flow_slug_for_deps + FROM pgflow.runs + WHERE pgflow.runs.run_id = fail_task.run_id; + + UPDATE pgflow.step_states AS child_state + SET remaining_deps = child_state.remaining_deps - 1, + -- If child is a map step and this skipped step is its only dependency, + -- set initial_tasks = 0 (skipped dep = empty array) + initial_tasks = CASE + WHEN child_step.step_type = 'map' AND child_step.deps_count = 1 THEN 0 + ELSE child_state.initial_tasks + END + FROM pgflow.deps AS dep + JOIN pgflow.steps AS child_step ON child_step.flow_slug = dep.flow_slug AND child_step.step_slug = dep.step_slug + WHERE child_state.run_id = fail_task.run_id + AND dep.flow_slug = v_flow_slug_for_deps + AND dep.dep_slug = fail_task.step_slug + AND child_state.step_slug = dep.step_slug; + + -- Start any steps that became ready after decrementing remaining_deps + PERFORM pgflow.start_ready_steps(fail_task.run_id); + + -- Auto-complete taskless steps (e.g., map steps with initial_tasks=0 from skipped dep) + PERFORM pgflow.cascade_complete_taskless_steps(fail_task.run_id); + END IF; + + -- Try to complete the run (remaining_steps may now be 0) + PERFORM pgflow.maybe_complete_run(fail_task.run_id); +END IF; + +-- Send broadcast event for run failure if the run was failed +IF v_run_failed THEN + DECLARE + v_flow_slug text; + BEGIN + SELECT flow_slug INTO v_flow_slug FROM pgflow.runs WHERE pgflow.runs.run_id = fail_task.run_id; + + PERFORM realtime.send( + jsonb_build_object( + 'event_type', 'run:failed', + 'run_id', fail_task.run_id, + 'flow_slug', v_flow_slug, + 'status', 'failed', + 'error_message', fail_task.error_message, + 'failed_at', now() + ), + 'run:failed', + concat('pgflow:run:', fail_task.run_id), + false + ); + END; +END IF; + +-- Archive all active messages (both queued and started) when run fails +IF v_run_failed THEN + PERFORM pgmq.archive(r.flow_slug, ARRAY_AGG(st.message_id)) + FROM pgflow.step_tasks st + JOIN pgflow.runs r ON st.run_id = r.run_id + WHERE st.run_id = fail_task.run_id + AND st.status IN ('queued', 'started') + AND st.message_id IS NOT NULL + GROUP BY r.flow_slug + HAVING COUNT(st.message_id) > 0; +END IF; + +-- For queued tasks: delay the message for retry with exponential backoff 
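+-- Illustration (assumed numbers; the exact curve is whatever
+-- calculate_retry_delay implements): with base_delay = 5, a task on
+-- attempts 1, 2, 3 would be re-delayed by roughly 5s, 10s, 20s. Only
+-- tasks left in 'queued' status by fail_or_retry_task reach this branch.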
+PERFORM ( + WITH retry_config AS ( + SELECT + COALESCE(s.opt_base_delay, f.opt_base_delay) AS base_delay + FROM pgflow.steps s + JOIN pgflow.flows f ON f.flow_slug = s.flow_slug + JOIN pgflow.runs r ON r.flow_slug = f.flow_slug + WHERE r.run_id = fail_task.run_id + AND s.step_slug = fail_task.step_slug + ), + queued_tasks AS ( + SELECT + r.flow_slug, + st.message_id, + pgflow.calculate_retry_delay((SELECT base_delay FROM retry_config), st.attempts_count) AS calculated_delay + FROM pgflow.step_tasks st + JOIN pgflow.runs r ON st.run_id = r.run_id + WHERE st.run_id = fail_task.run_id + AND st.step_slug = fail_task.step_slug + AND st.task_index = fail_task.task_index + AND st.status = 'queued' + ) + SELECT pgmq.set_vt(qt.flow_slug, qt.message_id, qt.calculated_delay) + FROM queued_tasks qt + WHERE EXISTS (SELECT 1 FROM queued_tasks) +); + +-- For failed tasks: archive the message +PERFORM pgmq.archive(r.flow_slug, ARRAY_AGG(st.message_id)) +FROM pgflow.step_tasks st +JOIN pgflow.runs r ON st.run_id = r.run_id +WHERE st.run_id = fail_task.run_id + AND st.step_slug = fail_task.step_slug + AND st.task_index = fail_task.task_index + AND st.status = 'failed' + AND st.message_id IS NOT NULL +GROUP BY r.flow_slug +HAVING COUNT(st.message_id) > 0; + +return query select * +from pgflow.step_tasks st +where st.run_id = fail_task.run_id + and st.step_slug = fail_task.step_slug + and st.task_index = fail_task.task_index; + +end; +$$; +-- Modify "start_flow" function +CREATE OR REPLACE FUNCTION "pgflow"."start_flow" ("flow_slug" text, "input" jsonb, "run_id" uuid DEFAULT NULL::uuid) RETURNS SETOF "pgflow"."runs" LANGUAGE plpgsql SET "search_path" = '' AS $$ +declare + v_created_run pgflow.runs%ROWTYPE; + v_root_map_count int; +begin + +-- ========================================== +-- VALIDATION: Root map array input +-- ========================================== +WITH root_maps AS ( + SELECT step_slug + FROM pgflow.steps + WHERE steps.flow_slug = start_flow.flow_slug + AND steps.step_type = 'map' + AND steps.deps_count = 0 +) +SELECT COUNT(*) INTO v_root_map_count FROM root_maps; + +-- If we have root map steps, validate that input is an array +IF v_root_map_count > 0 THEN + -- First check for NULL (should be caught by NOT NULL constraint, but be defensive) + IF start_flow.input IS NULL THEN + RAISE EXCEPTION 'Flow % has root map steps but input is NULL', start_flow.flow_slug; + END IF; + + -- Then check if it's not an array + IF jsonb_typeof(start_flow.input) != 'array' THEN + RAISE EXCEPTION 'Flow % has root map steps but input is not an array (got %)', + start_flow.flow_slug, jsonb_typeof(start_flow.input); + END IF; +END IF; + +-- ========================================== +-- MAIN CTE CHAIN: Create run and step states +-- ========================================== +WITH + -- ---------- Gather flow metadata ---------- + flow_steps AS ( + SELECT steps.flow_slug, steps.step_slug, steps.step_type, steps.deps_count + FROM pgflow.steps + WHERE steps.flow_slug = start_flow.flow_slug + ), + -- ---------- Create run record ---------- + created_run AS ( + INSERT INTO pgflow.runs (run_id, flow_slug, input, remaining_steps) + VALUES ( + COALESCE(start_flow.run_id, gen_random_uuid()), + start_flow.flow_slug, + start_flow.input, + (SELECT count(*) FROM flow_steps) + ) + RETURNING * + ), + -- ---------- Create step states ---------- + -- Sets initial_tasks: known for root maps, NULL for dependent maps + created_step_states AS ( + INSERT INTO pgflow.step_states (flow_slug, run_id, step_slug, remaining_deps, 
initial_tasks) + SELECT + fs.flow_slug, + (SELECT created_run.run_id FROM created_run), + fs.step_slug, + fs.deps_count, + -- Updated logic for initial_tasks: + CASE + WHEN fs.step_type = 'map' AND fs.deps_count = 0 THEN + -- Root map: get array length from input + CASE + WHEN jsonb_typeof(start_flow.input) = 'array' THEN + jsonb_array_length(start_flow.input) + ELSE + 1 + END + WHEN fs.step_type = 'map' AND fs.deps_count > 0 THEN + -- Dependent map: unknown until dependencies complete + NULL + ELSE + -- Single steps: always 1 task + 1 + END + FROM flow_steps fs + ) +SELECT * FROM created_run INTO v_created_run; + +-- ========================================== +-- POST-CREATION ACTIONS +-- ========================================== + +-- ---------- Broadcast run:started event ---------- +PERFORM realtime.send( + jsonb_build_object( + 'event_type', 'run:started', + 'run_id', v_created_run.run_id, + 'flow_slug', v_created_run.flow_slug, + 'input', v_created_run.input, + 'status', 'started', + 'remaining_steps', v_created_run.remaining_steps, + 'started_at', v_created_run.started_at + ), + 'run:started', + concat('pgflow:run:', v_created_run.run_id), + false +); + +-- ---------- Evaluate conditions on ready steps ---------- +-- Skip steps with unmet conditions, propagate to dependents +IF NOT pgflow.cascade_resolve_conditions(v_created_run.run_id) THEN + -- Run was failed due to a condition with when_unmet='fail' + RETURN QUERY SELECT * FROM pgflow.runs where pgflow.runs.run_id = v_created_run.run_id; + RETURN; +END IF; + +-- ---------- Complete taskless steps ---------- +-- Handle empty array maps that should auto-complete +PERFORM pgflow.cascade_complete_taskless_steps(v_created_run.run_id); + +-- ---------- Start initial steps ---------- +-- Start root steps (those with no dependencies) +PERFORM pgflow.start_ready_steps(v_created_run.run_id); + +-- ---------- Check for run completion ---------- +-- If cascade completed all steps (zero-task flows), finalize the run +PERFORM pgflow.maybe_complete_run(v_created_run.run_id); + +RETURN QUERY SELECT * FROM pgflow.runs where pgflow.runs.run_id = v_created_run.run_id; + +end; +$$; +-- Modify "start_tasks" function +CREATE OR REPLACE FUNCTION "pgflow"."start_tasks" ("flow_slug" text, "msg_ids" bigint[], "worker_id" uuid) RETURNS SETOF "pgflow"."step_task_record" LANGUAGE sql SET "search_path" = '' AS $$ +with tasks as ( + select + task.flow_slug, + task.run_id, + task.step_slug, + task.task_index, + task.message_id + from pgflow.step_tasks as task + join pgflow.runs r on r.run_id = task.run_id + where task.flow_slug = start_tasks.flow_slug + and task.message_id = any(msg_ids) + and task.status = 'queued' + -- MVP: Don't start tasks on failed runs + and r.status != 'failed' + ), + start_tasks_update as ( + update pgflow.step_tasks + set + attempts_count = attempts_count + 1, + status = 'started', + started_at = now(), + last_worker_id = worker_id + from tasks + where step_tasks.message_id = tasks.message_id + and step_tasks.flow_slug = tasks.flow_slug + and step_tasks.status = 'queued' + ), + runs as ( + select + r.run_id, + r.input + from pgflow.runs r + where r.run_id in (select run_id from tasks) + ), + deps as ( + select + st.run_id, + st.step_slug, + dep.dep_slug, + -- Read output directly from step_states (already aggregated by writers) + dep_state.output as dep_output + from tasks st + join pgflow.deps dep on dep.flow_slug = st.flow_slug and dep.step_slug = st.step_slug + join pgflow.step_states dep_state on + dep_state.run_id = st.run_id and 
+ dep_state.step_slug = dep.dep_slug and + dep_state.status = 'completed' -- Only include completed deps (not skipped) + ), + deps_outputs as ( + select + d.run_id, + d.step_slug, + jsonb_object_agg(d.dep_slug, d.dep_output) as deps_output, + count(*) as dep_count + from deps d + group by d.run_id, d.step_slug + ), + timeouts as ( + select + task.message_id, + task.flow_slug, + coalesce(step.opt_timeout, flow.opt_timeout) + 2 as vt_delay + from tasks task + join pgflow.flows flow on flow.flow_slug = task.flow_slug + join pgflow.steps step on step.flow_slug = task.flow_slug and step.step_slug = task.step_slug + ), + -- Batch update visibility timeouts for all messages + set_vt_batch as ( + select pgflow.set_vt_batch( + start_tasks.flow_slug, + array_agg(t.message_id order by t.message_id), + array_agg(t.vt_delay order by t.message_id) + ) + from timeouts t + ) + select + st.flow_slug, + st.run_id, + st.step_slug, + -- ========================================== + -- INPUT CONSTRUCTION LOGIC + -- ========================================== + -- This nested CASE statement determines how to construct the input + -- for each task based on the step type (map vs non-map). + -- + -- The fundamental difference: + -- - Map steps: Receive RAW array elements (e.g., just 42 or "hello") + -- - Non-map steps: Receive structured objects with named keys + -- (e.g., {"run": {...}, "dependency1": {...}}) + -- ========================================== + CASE + -- -------------------- MAP STEPS -------------------- + -- Map steps process arrays element-by-element. + -- Each task receives ONE element from the array at its task_index position. + WHEN step.step_type = 'map' THEN + -- Map steps get raw array elements without any wrapper object + CASE + -- ROOT MAP: Gets array from run input + -- Example: run input = [1, 2, 3] + -- task 0 gets: 1 + -- task 1 gets: 2 + -- task 2 gets: 3 + WHEN step.deps_count = 0 THEN + -- Root map (deps_count = 0): no dependencies, reads from run input. + -- Extract the element at task_index from the run's input array. + -- Note: If run input is not an array, this will return NULL + -- and the flow will fail (validated in start_flow). + jsonb_array_element(r.input, st.task_index) + + -- DEPENDENT MAP: Gets array from its single dependency + -- Example: dependency output = ["a", "b", "c"] + -- task 0 gets: "a" + -- task 1 gets: "b" + -- task 2 gets: "c" + ELSE + -- Has dependencies (should be exactly 1 for map steps). + -- Extract the element at task_index from the dependency's output array. + -- + -- Why the subquery with jsonb_each? + -- - The dependency outputs a raw array: [1, 2, 3] + -- - deps_outputs aggregates it into: {"dep_name": [1, 2, 3]} + -- - We need to unwrap and get just the array value + -- - Map steps have exactly 1 dependency (enforced by add_step) + -- - So jsonb_each will return exactly 1 row + -- - We extract the 'value' which is the raw array [1, 2, 3] + -- - Then get the element at task_index from that array + (SELECT jsonb_array_element(value, st.task_index) + FROM jsonb_each(dep_out.deps_output) + LIMIT 1) + END + + -- -------------------- NON-MAP STEPS -------------------- + -- Regular (non-map) steps receive dependency outputs as a structured object. + -- Root steps (no dependencies) get empty object - they access flowInput via context. + -- Dependent steps get only their dependency outputs. 
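+ -- Note: the deps CTE above joins only dependencies whose step_state is
+ -- 'completed', so a skipped dependency contributes no key at all. A step
+ -- depending on (hypothetical) skipped_a and done_b receives
+ -- {"done_b": ...}, not {"skipped_a": null, "done_b": ...}.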
+ ELSE + -- Non-map steps get structured input with dependency keys only + -- Example for dependent step: { + -- "step1": {"output": "from_step1"}, + -- "step2": {"output": "from_step2"} + -- } + -- Example for root step: {} + -- + -- Note: flow_input is available separately in the returned record + -- for workers to access via context.flowInput + coalesce(dep_out.deps_output, '{}'::jsonb) + END as input, + st.message_id as msg_id, + st.task_index as task_index, + -- flow_input: Original run input for worker context + -- Only included for root non-map steps to avoid data duplication. + -- Root map steps: flowInput IS the array, useless to include + -- Dependent steps: lazy load via ctx.flowInput when needed + CASE + WHEN step.step_type != 'map' AND step.deps_count = 0 + THEN r.input + ELSE NULL + END as flow_input + from tasks st + join runs r on st.run_id = r.run_id + join pgflow.steps step on + step.flow_slug = st.flow_slug and + step.step_slug = st.step_slug + left join deps_outputs dep_out on + dep_out.run_id = st.run_id and + dep_out.step_slug = st.step_slug +$$; +-- Drop "add_step" function +DROP FUNCTION "pgflow"."add_step" (text, text, text[], integer, integer, integer, integer, text); diff --git a/pkgs/core/supabase/migrations/atlas.sum b/pkgs/core/supabase/migrations/atlas.sum index 1d66f78c4..3ba07e792 100644 --- a/pkgs/core/supabase/migrations/atlas.sum +++ b/pkgs/core/supabase/migrations/atlas.sum @@ -1,4 +1,4 @@ -h1:dzKOHL+hbunxWTZaGOIDWQG9THDva7Pk7VVDGASJkps= +h1:DjkOzhpajvyHsiYEjEsthnLUW8q+BFDiffT3B06DFYw= 20250429164909_pgflow_initial.sql h1:I3n/tQIg5Q5nLg7RDoU3BzqHvFVjmumQxVNbXTPG15s= 20250517072017_pgflow_fix_poll_for_tasks_to_use_separate_statement_for_polling.sql h1:wTuXuwMxVniCr3ONCpodpVWJcHktoQZIbqMZ3sUHKMY= 20250609105135_pgflow_add_start_tasks_and_started_status.sql h1:ggGanW4Wyt8Kv6TWjnZ00/qVb3sm+/eFVDjGfT8qyPg= @@ -17,3 +17,4 @@ h1:dzKOHL+hbunxWTZaGOIDWQG9THDva7Pk7VVDGASJkps= 20251225163110_pgflow_add_flow_input_column.sql h1:734uCbTgKmPhTK3TY56uNYZ31T8u59yll9ea7nwtEoc= 20260103145141_pgflow_step_output_storage.sql h1:mgVHSFDLdtYy//SZ6C03j9Str1iS9xCM8Rz/wyFwn3o= 20260120205547_pgflow_requeue_stalled_tasks.sql h1:4wCBBvjtETCgJf1eXmlH5wCTKDUhiLi0uzsFG1V528E= +20260121095914_pgflow_step_conditions.sql h1:sM3dEKD2L1OIurVI/Bu8qM7orMNPty+0ID3AvpqfPqI= diff --git a/pkgs/core/supabase/tests/_cascade_force_skip_steps/broadcast_order.test.sql b/pkgs/core/supabase/tests/_cascade_force_skip_steps/broadcast_order.test.sql new file mode 100644 index 000000000..012f3368e --- /dev/null +++ b/pkgs/core/supabase/tests/_cascade_force_skip_steps/broadcast_order.test.sql @@ -0,0 +1,64 @@ +-- Test: _cascade_force_skip_steps - Broadcast order respects dependency graph +-- Verifies step:skipped events are sent in topological order +begin; +select plan(2); + +-- Reset database and create a chain: A -> B -> C +select pgflow_tests.reset_db(); +select pgflow.create_flow('order_test'); +select pgflow.add_step('order_test', 'step_a'); +select pgflow.add_step('order_test', 'step_b', ARRAY['step_a']); +select pgflow.add_step('order_test', 'step_c', ARRAY['step_b']); + +-- Start flow +with flow as ( + select * from pgflow.start_flow('order_test', '{}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Skip step_a (cascades to B and C) +select pgflow._cascade_force_skip_steps( + (select run_id from run_ids), + 'step_a', + 'condition_unmet' +); + +-- Test 1: All 3 step:skipped events should exist +select is( + (select count(*) from realtime.messages + where payload->>'event_type' = 
'step:skipped' + and payload->>'run_id' = (select run_id::text from run_ids)), + 3::bigint, + 'Should have 3 step:skipped events' +); + +-- Test 2: Events should be in dependency order (A before B before C) +with ordered_events as ( + select + inserted_at, + payload->>'step_slug' as step_slug, + row_number() over (order by inserted_at) as event_order + from realtime.messages + where payload->>'event_type' = 'step:skipped' + and payload->>'run_id' = (select run_id::text from run_ids) +), +step_a_event as ( + select event_order from ordered_events where step_slug = 'step_a' +), +step_b_event as ( + select event_order from ordered_events where step_slug = 'step_b' +), +step_c_event as ( + select event_order from ordered_events where step_slug = 'step_c' +) +select ok( + (select event_order from step_a_event) < (select event_order from step_b_event) + AND (select event_order from step_b_event) < (select event_order from step_c_event), + 'Events must be in dependency order (A -> B -> C)' +); + +-- Clean up +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/_cascade_force_skip_steps/cascade_through_multiple_levels.test.sql b/pkgs/core/supabase/tests/_cascade_force_skip_steps/cascade_through_multiple_levels.test.sql new file mode 100644 index 000000000..5faa21cff --- /dev/null +++ b/pkgs/core/supabase/tests/_cascade_force_skip_steps/cascade_through_multiple_levels.test.sql @@ -0,0 +1,95 @@ +-- Test: _cascade_force_skip_steps - Cascade through multiple DAG levels +-- Verifies skipping A cascades through A -> B -> C chain +begin; +select plan(8); + +-- Reset database and create a flow: A -> B -> C +select pgflow_tests.reset_db(); +select pgflow.create_flow('deep_cascade'); +select pgflow.add_step('deep_cascade', 'step_a'); +select pgflow.add_step('deep_cascade', 'step_b', ARRAY['step_a']); +select pgflow.add_step('deep_cascade', 'step_c', ARRAY['step_b']); + +-- Start flow +with flow as ( + select * from pgflow.start_flow('deep_cascade', '{}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Skip step_a (should cascade to step_b and step_c) +select pgflow._cascade_force_skip_steps( + (select run_id from run_ids), + 'step_a', + 'handler_failed' +); + +-- Test 1: step_a should be skipped with handler_failed reason +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_a'), + 'handler_failed', + 'step_a skip_reason should be handler_failed' +); + +-- Test 2: step_b should be skipped +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_b'), + 'skipped', + 'step_b should be skipped (direct dependent of step_a)' +); + +-- Test 3: step_b should have dependency_skipped reason +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_b'), + 'dependency_skipped', + 'step_b skip_reason should be dependency_skipped' +); + +-- Test 4: step_c should also be skipped (transitive) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_c'), + 'skipped', + 'step_c should be skipped (transitive cascade)' +); + +-- Test 5: step_c should have dependency_skipped reason +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_c'), + 'dependency_skipped', + 'step_c skip_reason should be dependency_skipped' +); + +-- Test 6: 
All three steps should be skipped +select is( + (select count(*) from pgflow.step_states + where run_id = (select run_id from run_ids) and status = 'skipped'), + 3::bigint, + 'All 3 steps should be skipped' +); + +-- Test 7: remaining_steps should be 0 +select is( + (select remaining_steps from pgflow.runs + where run_id = (select run_id from run_ids)), + 0::int, + 'remaining_steps should be 0' +); + +-- Test 8: step:skipped events should be sent for all 3 steps +select is( + (select count(*) from realtime.messages + where payload->>'event_type' = 'step:skipped' + and payload->>'run_id' = (select run_id::text from run_ids)), + 3::bigint, + 'Should send 3 step:skipped events' +); + +-- Clean up +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/_cascade_force_skip_steps/cascade_to_single_dependent.test.sql b/pkgs/core/supabase/tests/_cascade_force_skip_steps/cascade_to_single_dependent.test.sql new file mode 100644 index 000000000..e6be5c41f --- /dev/null +++ b/pkgs/core/supabase/tests/_cascade_force_skip_steps/cascade_to_single_dependent.test.sql @@ -0,0 +1,86 @@ +-- Test: _cascade_force_skip_steps - Cascade to single dependent +-- Verifies skipping a step cascades to its direct dependent +begin; +select plan(7); + +-- Reset database and create a flow: A -> B +select pgflow_tests.reset_db(); +select pgflow.create_flow('cascade_flow'); +select pgflow.add_step('cascade_flow', 'step_a'); +select pgflow.add_step('cascade_flow', 'step_b', ARRAY['step_a']); + +-- Start flow +with flow as ( + select * from pgflow.start_flow('cascade_flow', '{}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Skip step_a (should cascade to step_b) +select pgflow._cascade_force_skip_steps( + (select run_id from run_ids), + 'step_a', + 'condition_unmet' +); + +-- Test 1: step_a should be skipped +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_a'), + 'skipped', + 'step_a should be skipped' +); + +-- Test 2: step_a should have skip_reason = condition_unmet +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_a'), + 'condition_unmet', + 'step_a skip_reason should be condition_unmet' +); + +-- Test 3: step_b should also be skipped (cascade) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_b'), + 'skipped', + 'step_b should be skipped due to cascade' +); + +-- Test 4: step_b should have skip_reason = dependency_skipped +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_b'), + 'dependency_skipped', + 'step_b skip_reason should be dependency_skipped' +); + +-- Test 5: Both steps should have skipped_at timestamp set +select ok( + (select count(*) = 2 from pgflow.step_states + where run_id = (select run_id from run_ids) + and skipped_at is not null), + 'Both steps should have skipped_at timestamp' +); + +-- Test 6: remaining_steps should be 0 (both skipped) +select is( + (select remaining_steps from pgflow.runs + where run_id = (select run_id from run_ids)), + 0::int, + 'remaining_steps should be 0 (both steps skipped)' +); + +-- Test 7: step:skipped events should be sent for both steps +select is( + (select count(*) from realtime.messages + where payload->>'event_type' = 'step:skipped' + and payload->>'run_id' = (select run_id::text from run_ids)), + 2::bigint, + 'Should 
send step:skipped events for both steps' +); + +-- Clean up +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/_cascade_force_skip_steps/multi_dependency_partial_skip.test.sql b/pkgs/core/supabase/tests/_cascade_force_skip_steps/multi_dependency_partial_skip.test.sql new file mode 100644 index 000000000..fc14700d5 --- /dev/null +++ b/pkgs/core/supabase/tests/_cascade_force_skip_steps/multi_dependency_partial_skip.test.sql @@ -0,0 +1,80 @@ +-- Test: _cascade_force_skip_steps - Multi-dependency scenario +-- Flow: A -> C, B -> C (C depends on both A and B) +-- Skipping A should cascade to C, even though B is still runnable +begin; +select plan(6); + +-- Reset database and create a diamond-ish flow +select pgflow_tests.reset_db(); +select pgflow.create_flow('multi_dep'); +select pgflow.add_step('multi_dep', 'step_a'); +select pgflow.add_step('multi_dep', 'step_b'); +select pgflow.add_step('multi_dep', 'step_c', ARRAY['step_a', 'step_b']); + +-- Start flow +with flow as ( + select * from pgflow.start_flow('multi_dep', '{}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Skip step_a (should cascade to step_c) +select pgflow._cascade_force_skip_steps( + (select run_id from run_ids), + 'step_a', + 'condition_unmet' +); + +-- Test 1: step_a should be skipped +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_a'), + 'skipped', + 'step_a should be skipped' +); + +-- Test 2: step_b should NOT be skipped (independent of step_a, root step so started) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_b'), + 'started', + 'step_b should remain in started status (independent root step)' +); + +-- Test 3: step_c should be skipped (depends on skipped step_a) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_c'), + 'skipped', + 'step_c should be skipped (one of its deps was skipped)' +); + +-- Test 4: step_c skip_reason should be dependency_skipped +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_c'), + 'dependency_skipped', + 'step_c skip_reason should be dependency_skipped' +); + +-- Test 5: remaining_steps should be 1 (only step_b) +select is( + (select remaining_steps from pgflow.runs + where run_id = (select run_id from run_ids)), + 1::int, + 'remaining_steps should be 1 (only step_b remains)' +); + +-- Test 6: 2 step:skipped events (step_a and step_c) +select is( + (select count(*) from realtime.messages + where payload->>'event_type' = 'step:skipped' + and payload->>'run_id' = (select run_id::text from run_ids)), + 2::bigint, + 'Should send 2 step:skipped events (step_a and step_c)' +); + +-- Clean up +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/_cascade_force_skip_steps/single_step_skip.test.sql b/pkgs/core/supabase/tests/_cascade_force_skip_steps/single_step_skip.test.sql new file mode 100644 index 000000000..e892ef21b --- /dev/null +++ b/pkgs/core/supabase/tests/_cascade_force_skip_steps/single_step_skip.test.sql @@ -0,0 +1,69 @@ +-- Test: _cascade_force_skip_steps - Single step skip (base case) +-- Verifies the function can skip a single step without dependencies +begin; +select plan(5); + +-- Reset database and create a simple flow with no dependencies +select pgflow_tests.reset_db(); +select 
pgflow.create_flow('simple_flow'); +select pgflow.add_step('simple_flow', 'step_a'); +select pgflow.add_step('simple_flow', 'step_b'); + +-- Start flow +with flow as ( + select * from pgflow.start_flow('simple_flow', '{}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Test 1: Verify step_a starts in 'started' status (root steps auto-start) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_a'), + 'started', + 'step_a should start in started status (root step auto-starts)' +); + +-- Skip step_a +select pgflow._cascade_force_skip_steps( + (select run_id from run_ids), + 'step_a', + 'condition_unmet' +); + +-- Test 2: step_a should now have status 'skipped' +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_a'), + 'skipped', + 'step_a should be skipped after _cascade_force_skip_steps' +); + +-- Test 3: step_a should have skip_reason set +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_a'), + 'condition_unmet', + 'step_a should have skip_reason = condition_unmet' +); + +-- Test 4: step_b should remain unaffected (still started, independent root step) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_b'), + 'started', + 'step_b (independent step) should remain in started status' +); + +-- Test 5: remaining_steps on run should be decremented by 1 +select is( + (select remaining_steps from pgflow.runs + where run_id = (select run_id from run_ids)), + 1::int, + 'remaining_steps should be decremented by 1 (was 2, now 1)' +); + +-- Clean up +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/_cascade_force_skip_steps/skipped_event_payload.test.sql b/pkgs/core/supabase/tests/_cascade_force_skip_steps/skipped_event_payload.test.sql new file mode 100644 index 000000000..015f31c29 --- /dev/null +++ b/pkgs/core/supabase/tests/_cascade_force_skip_steps/skipped_event_payload.test.sql @@ -0,0 +1,88 @@ +-- Test: _cascade_force_skip_steps - step:skipped event payload format +-- Verifies the realtime event contains all required fields +begin; +select plan(8); + +-- Reset database and create a simple flow +select pgflow_tests.reset_db(); +select pgflow.create_flow('event_test'); +select pgflow.add_step('event_test', 'step_a'); + +-- Start flow +with flow as ( + select * from pgflow.start_flow('event_test', '{}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Skip step_a +select pgflow._cascade_force_skip_steps( + (select run_id from run_ids), + 'step_a', + 'condition_unmet' +); + +-- Get the event for assertions +select * into temporary skip_event +from pgflow_tests.get_realtime_message('step:skipped', (select run_id from run_ids), 'step_a'); + +-- Test 1: Event type should be step:skipped +select is( + (select payload->>'event_type' from skip_event), + 'step:skipped', + 'Event type should be step:skipped' +); + +-- Test 2: step_slug should be in payload +select is( + (select payload->>'step_slug' from skip_event), + 'step_a', + 'Payload should contain step_slug' +); + +-- Test 3: flow_slug should be in payload +select is( + (select payload->>'flow_slug' from skip_event), + 'event_test', + 'Payload should contain flow_slug' +); + +-- Test 4: run_id should be in payload +select is( + (select payload->>'run_id' from skip_event), + (select 
run_id::text from run_ids),
+  'Payload should contain run_id'
+);
+
+-- Test 5: status should be skipped
+select is(
+  (select payload->>'status' from skip_event),
+  'skipped',
+  'Payload status should be skipped'
+);
+
+-- Test 6: skip_reason should be in payload
+select is(
+  (select payload->>'skip_reason' from skip_event),
+  'condition_unmet',
+  'Payload should contain skip_reason'
+);
+
+-- Test 7: skipped_at timestamp should be present
+select ok(
+  (select (payload->>'skipped_at')::timestamptz is not null from skip_event),
+  'Payload should include skipped_at timestamp'
+);
+
+-- Test 8: Event name format should be step:<step_slug>:skipped
+select is(
+  (select event from skip_event),
+  'step:step_a:skipped',
+  'Event name should be step:<step_slug>:skipped'
+);
+
+-- Clean up
+drop table if exists run_ids;
+drop table if exists skip_event;
+
+select finish();
+rollback;
diff --git a/pkgs/core/supabase/tests/add_step/condition_invalid_values.test.sql b/pkgs/core/supabase/tests/add_step/condition_invalid_values.test.sql
new file mode 100644
index 000000000..17458192e
--- /dev/null
+++ b/pkgs/core/supabase/tests/add_step/condition_invalid_values.test.sql
@@ -0,0 +1,24 @@
+-- Test: add_step - Invalid condition parameter values
+-- Verifies CHECK constraints reject invalid when_unmet and when_failed values
+begin;
+select plan(2);
+
+select pgflow_tests.reset_db();
+select pgflow.create_flow('invalid_test');
+
+-- Test 1: Invalid when_unmet value should fail
+select throws_ok(
+  $$ SELECT pgflow.add_step('invalid_test', 'bad_step', when_unmet => 'invalid_value') $$,
+  'new row for relation "steps" violates check constraint "when_unmet_is_valid"',
+  'Invalid when_unmet value should be rejected'
+);
+
+-- Test 2: Invalid when_failed value should fail
+select throws_ok(
+  $$ SELECT pgflow.add_step('invalid_test', 'bad_step2', when_failed => 'invalid_value') $$,
+  'new row for relation "steps" violates check constraint "when_failed_is_valid"',
+  'Invalid when_failed value should be rejected'
+);
+
+select finish();
+rollback;
diff --git a/pkgs/core/supabase/tests/add_step/condition_not_pattern.test.sql b/pkgs/core/supabase/tests/add_step/condition_not_pattern.test.sql
new file mode 100644
index 000000000..5451537fe
--- /dev/null
+++ b/pkgs/core/supabase/tests/add_step/condition_not_pattern.test.sql
@@ -0,0 +1,102 @@
+-- Test: add_step - forbidden_input_pattern parameter
+-- Verifies the ifNot pattern (forbidden_input_pattern) is stored correctly
+begin;
+select plan(6);
+
+select pgflow_tests.reset_db();
+select pgflow.create_flow('ifnot_test');
+
+-- Test 1: Add step with forbidden_input_pattern only
+select pgflow.add_step(
+  'ifnot_test',
+  'step_with_ifnot',
+  forbidden_input_pattern => '{"role": "admin"}'::jsonb
+);
+
+select is(
+  (select forbidden_input_pattern from pgflow.steps
+   where flow_slug = 'ifnot_test' and step_slug = 'step_with_ifnot'),
+  '{"role": "admin"}'::jsonb,
+  'forbidden_input_pattern should be stored correctly'
+);
+
+-- Test 2: Default forbidden_input_pattern should be NULL
+select pgflow.add_step('ifnot_test', 'step_default_not');
+
+select is(
+  (select forbidden_input_pattern from pgflow.steps
+   where flow_slug = 'ifnot_test' and step_slug = 'step_default_not'),
+  NULL::jsonb,
+  'Default forbidden_input_pattern should be NULL'
+);
+
+-- Test 3: Both required_input_pattern and forbidden_input_pattern together
+select pgflow.add_step(
+  'ifnot_test',
+  'step_with_both',
+  required_input_pattern => '{"active": true}'::jsonb,
+  forbidden_input_pattern => '{"suspended": true}'::jsonb
+);
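+
+-- A minimal sketch of the jsonb containment check these patterns feed into,
+-- assuming only stock PostgreSQL @> semantics (not pgflow internals): a
+-- required pattern must be contained in the input, a forbidden one must not be.
+select
+  '{"active": true, "suspended": true}'::jsonb @> '{"active": true}'::jsonb
+    as required_matches,   -- true: the required pattern is contained in the input
+  '{"active": true, "suspended": true}'::jsonb @> '{"suspended": true}'::jsonb
+    as forbidden_matches;  -- true: the forbidden pattern matches too, so ifNot would fail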
+ +select ok( + (select + required_input_pattern = '{"active": true}'::jsonb + AND forbidden_input_pattern = '{"suspended": true}'::jsonb + from pgflow.steps + where flow_slug = 'ifnot_test' and step_slug = 'step_with_both'), + 'Both required_input_pattern and forbidden_input_pattern should be stored together' +); + +-- Test 4: forbidden_input_pattern with all other options +select pgflow.add_step( + 'ifnot_test', + 'step_all_options', + max_attempts => 5, + timeout => 30, + forbidden_input_pattern => '{"status": "disabled"}'::jsonb, + when_unmet => 'skip' +); + +select ok( + (select + opt_max_attempts = 5 + AND opt_timeout = 30 + AND forbidden_input_pattern = '{"status": "disabled"}'::jsonb + AND when_unmet = 'skip' + from pgflow.steps + where flow_slug = 'ifnot_test' and step_slug = 'step_all_options'), + 'forbidden_input_pattern should work with all other step options' +); + +-- Test 5: Complex nested forbidden_input_pattern +select pgflow.add_step( + 'ifnot_test', + 'step_nested_not', + forbidden_input_pattern => '{"user": {"role": "admin", "department": "IT"}}'::jsonb +); + +select is( + (select forbidden_input_pattern from pgflow.steps + where flow_slug = 'ifnot_test' and step_slug = 'step_nested_not'), + '{"user": {"role": "admin", "department": "IT"}}'::jsonb, + 'Nested forbidden_input_pattern should be stored correctly' +); + +-- Test 6: forbidden_input_pattern on dependent step +select pgflow.add_step('ifnot_test', 'first_step'); +select pgflow.add_step( + 'ifnot_test', + 'dependent_step', + deps_slugs => ARRAY['first_step'], + forbidden_input_pattern => '{"first_step": {"error": true}}'::jsonb +); + +select is( + (select forbidden_input_pattern from pgflow.steps + where flow_slug = 'ifnot_test' and step_slug = 'dependent_step'), + '{"first_step": {"error": true}}'::jsonb, + 'forbidden_input_pattern should be stored for dependent step' +); + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/add_step/condition_parameters.test.sql b/pkgs/core/supabase/tests/add_step/condition_parameters.test.sql new file mode 100644 index 000000000..04344ca0a --- /dev/null +++ b/pkgs/core/supabase/tests/add_step/condition_parameters.test.sql @@ -0,0 +1,125 @@ +-- Test: add_step - New condition parameters +-- Verifies required_input_pattern, when_unmet, when_failed parameters work correctly +begin; +select plan(9); + +select pgflow_tests.reset_db(); +select pgflow.create_flow('condition_test'); + +-- Test 1: Add step with required_input_pattern +select pgflow.add_step( + 'condition_test', + 'step_with_condition', + required_input_pattern => '{"type": "premium"}'::jsonb +); + +select is( + (select required_input_pattern from pgflow.steps + where flow_slug = 'condition_test' and step_slug = 'step_with_condition'), + '{"type": "premium"}'::jsonb, + 'required_input_pattern should be stored correctly' +); + +-- Test 2: Add step with when_unmet = skip +select pgflow.add_step( + 'condition_test', + 'step_skip_unmet', + when_unmet => 'skip' +); + +select is( + (select when_unmet from pgflow.steps + where flow_slug = 'condition_test' and step_slug = 'step_skip_unmet'), + 'skip', + 'when_unmet should be skip' +); + +-- Test 3: Add step with when_unmet = skip-cascade +select pgflow.add_step( + 'condition_test', + 'step_skip_cascade_unmet', + when_unmet => 'skip-cascade' +); + +select is( + (select when_unmet from pgflow.steps + where flow_slug = 'condition_test' and step_slug = 'step_skip_cascade_unmet'), + 'skip-cascade', + 'when_unmet should be skip-cascade' +); + +-- Test 4: Add step with 
when_failed = skip +select pgflow.add_step( + 'condition_test', + 'step_skip_failed', + when_failed => 'skip' +); + +select is( + (select when_failed from pgflow.steps + where flow_slug = 'condition_test' and step_slug = 'step_skip_failed'), + 'skip', + 'when_failed should be skip' +); + +-- Test 5: Add step with when_failed = skip-cascade +select pgflow.add_step( + 'condition_test', + 'step_skip_cascade_failed', + when_failed => 'skip-cascade' +); + +select is( + (select when_failed from pgflow.steps + where flow_slug = 'condition_test' and step_slug = 'step_skip_cascade_failed'), + 'skip-cascade', + 'when_failed should be skip-cascade' +); + +-- Test 6: Default when_unmet should be skip (natural default for conditions) +select pgflow.add_step('condition_test', 'step_default_unmet'); + +select is( + (select when_unmet from pgflow.steps + where flow_slug = 'condition_test' and step_slug = 'step_default_unmet'), + 'skip', + 'Default when_unmet should be skip' +); + +-- Test 7: Default when_failed should be fail +select is( + (select when_failed from pgflow.steps + where flow_slug = 'condition_test' and step_slug = 'step_default_unmet'), + 'fail', + 'Default when_failed should be fail' +); + +-- Test 8: Default required_input_pattern should be NULL +select is( + (select required_input_pattern from pgflow.steps + where flow_slug = 'condition_test' and step_slug = 'step_default_unmet'), + NULL::jsonb, + 'Default required_input_pattern should be NULL' +); + +-- Test 9: Add step with all condition parameters +select pgflow.add_step( + 'condition_test', + 'step_all_params', + required_input_pattern => '{"status": "active"}'::jsonb, + when_unmet => 'skip', + when_failed => 'skip-cascade' +); + +select ok( + (select + required_input_pattern = '{"status": "active"}'::jsonb + AND when_unmet = 'skip' + AND when_failed = 'skip-cascade' + from pgflow.steps + where flow_slug = 'condition_test' and step_slug = 'step_all_params'), + 'All condition parameters should be stored correctly together' +); + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/compare_flow_shapes/condition_mode_drift.test.sql b/pkgs/core/supabase/tests/compare_flow_shapes/condition_mode_drift.test.sql new file mode 100644 index 000000000..328c29cce --- /dev/null +++ b/pkgs/core/supabase/tests/compare_flow_shapes/condition_mode_drift.test.sql @@ -0,0 +1,60 @@ +begin; +select plan(3); +select pgflow_tests.reset_db(); + +-- Setup: Create a flow with specific condition modes +select pgflow.create_flow('drift_test'); +select pgflow.add_step( + flow_slug => 'drift_test', + step_slug => 'step1', + when_unmet => 'skip', + when_failed => 'fail' +); + +-- Test: Detect whenUnmet drift +select is( + pgflow._compare_flow_shapes( + '{ + "steps": [ + {"slug": "step1", "stepType": "single", "dependencies": [], "whenUnmet": "skip-cascade", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}} + ] + }'::jsonb, + pgflow._get_flow_shape('drift_test') + ), + ARRAY[$$Step at index 0: whenUnmet differs 'skip-cascade' vs 'skip'$$], + 'Should detect whenUnmet mismatch' +); + +-- Test: Detect whenFailed drift +select is( + pgflow._compare_flow_shapes( + '{ + "steps": [ + {"slug": "step1", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "skip-cascade", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}} + ] + }'::jsonb, + pgflow._get_flow_shape('drift_test') + ), + ARRAY[$$Step at index 0: whenFailed differs 'skip-cascade' 
vs 'fail'$$], + 'Should detect whenFailed mismatch' +); + +-- Test: Detect both whenUnmet and whenFailed drift simultaneously +select is( + pgflow._compare_flow_shapes( + '{ + "steps": [ + {"slug": "step1", "stepType": "single", "dependencies": [], "whenUnmet": "fail", "whenFailed": "skip", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}} + ] + }'::jsonb, + pgflow._get_flow_shape('drift_test') + ), + ARRAY[ + $$Step at index 0: whenUnmet differs 'fail' vs 'skip'$$, + $$Step at index 0: whenFailed differs 'skip' vs 'fail'$$ + ], + 'Should detect both condition mode mismatches' +); + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/compare_flow_shapes/pattern_differences.test.sql b/pkgs/core/supabase/tests/compare_flow_shapes/pattern_differences.test.sql new file mode 100644 index 000000000..fea92b812 --- /dev/null +++ b/pkgs/core/supabase/tests/compare_flow_shapes/pattern_differences.test.sql @@ -0,0 +1,42 @@ +begin; +select plan(2); +select pgflow_tests.reset_db(); + +-- Test: Patterns with same value should match +select is( + pgflow._compare_flow_shapes( + '{ + "steps": [ + {"slug": "step1", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": true, "value": {"status": "active"}}, "forbiddenInputPattern": {"defined": false}} + ] + }'::jsonb, + '{ + "steps": [ + {"slug": "step1", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": true, "value": {"status": "active"}}, "forbiddenInputPattern": {"defined": false}} + ] + }'::jsonb + ), + '{}'::text[], + 'Shapes with identical patterns should have no differences' +); + +-- Test: Different requiredInputPattern should be detected +select is( + pgflow._compare_flow_shapes( + '{ + "steps": [ + {"slug": "step1", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": true, "value": {"status": "active"}}, "forbiddenInputPattern": {"defined": false}} + ] + }'::jsonb, + '{ + "steps": [ + {"slug": "step1", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": true, "value": {"status": "pending"}}, "forbiddenInputPattern": {"defined": false}} + ] + }'::jsonb + ), + ARRAY['Step at index 0: requiredInputPattern differs ''{"value": {"status": "active"}, "defined": true}'' vs ''{"value": {"status": "pending"}, "defined": true}''']::text[], + 'Different requiredInputPattern values should be detected' +); + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/condition_evaluation/branching_opposite_conditions.test.sql b/pkgs/core/supabase/tests/condition_evaluation/branching_opposite_conditions.test.sql new file mode 100644 index 000000000..c24beb5b2 --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/branching_opposite_conditions.test.sql @@ -0,0 +1,136 @@ +-- Test: Branching pattern - two steps with opposite conditions +-- For any input, exactly ONE step runs (mutual exclusion) +-- step1: if: { role: 'admin' } whenUnmet: 'skip' +-- step2: ifNot: { role: 'admin' } whenUnmet: 'skip' +begin; +select plan(9); + +select pgflow_tests.reset_db(); + +-- Create flow with two mutually exclusive branches +select pgflow.create_flow('branch_flow'); +-- Admin branch: only runs when role=admin +select pgflow.add_step( + flow_slug => 'branch_flow', + step_slug => 'admin_branch', + required_input_pattern => '{"role": 
"admin"}'::jsonb, -- if: role=admin + when_unmet => 'skip' +); +-- Regular branch: only runs when role!=admin +select pgflow.add_step( + flow_slug => 'branch_flow', + step_slug => 'regular_branch', + forbidden_input_pattern => '{"role": "admin"}'::jsonb, -- ifNot: role=admin + when_unmet => 'skip' +); + +-- Test case 1: Admin user -> admin_branch runs, regular_branch skipped +with flow as ( + select * from pgflow.start_flow('branch_flow', '{"role": "admin", "name": "Alice"}'::jsonb) +) + +select run_id into temporary run1 from flow; + +select is( + ( + select status from pgflow.step_states + where run_id = (select run_id from run1) and step_slug = 'admin_branch' + ), + 'started', + 'Admin user: admin_branch should start' +); + +select is( + ( + select status from pgflow.step_states + where run_id = (select run_id from run1) and step_slug = 'regular_branch' + ), + 'skipped', + 'Admin user: regular_branch should be skipped' +); + +-- Verify exactly one step started for admin user +select is( + ( + select count(*)::int from pgflow.step_states + where run_id = (select run_id from run1) and status = 'started' + ), + 1, + 'Admin user: exactly one step should start' +); + +-- Test case 2: Regular user -> admin_branch skipped, regular_branch runs +with flow as ( + select * from pgflow.start_flow('branch_flow', '{"role": "user", "name": "Bob"}'::jsonb) +) + +select run_id into temporary run2 from flow; + +select is( + ( + select status from pgflow.step_states + where run_id = (select run_id from run2) and step_slug = 'admin_branch' + ), + 'skipped', + 'Regular user: admin_branch should be skipped' +); + +select is( + ( + select status from pgflow.step_states + where run_id = (select run_id from run2) and step_slug = 'regular_branch' + ), + 'started', + 'Regular user: regular_branch should start' +); + +-- Verify exactly one step started for regular user +select is( + ( + select count(*)::int from pgflow.step_states + where run_id = (select run_id from run2) and status = 'started' + ), + 1, + 'Regular user: exactly one step should start' +); + +-- Test case 3: No role field -> admin_branch skipped, regular_branch runs +-- (Missing field means input does NOT contain role=admin) +with flow as ( + select * from pgflow.start_flow('branch_flow', '{"name": "Charlie"}'::jsonb) +) + +select run_id into temporary run3 from flow; + +select is( + ( + select status from pgflow.step_states + where run_id = (select run_id from run3) and step_slug = 'admin_branch' + ), + 'skipped', + 'No role: admin_branch should be skipped (pattern not matched)' +); + +select is( + ( + select status from pgflow.step_states + where run_id = (select run_id from run3) and step_slug = 'regular_branch' + ), + 'started', + 'No role: regular_branch should start (pattern not matched = ifNot passes)' +); + +-- Verify exactly one step started for no-role user +select is( + ( + select count(*)::int from pgflow.step_states + where run_id = (select run_id from run3) and status = 'started' + ), + 1, + 'No role: exactly one step should start' +); + +drop table if exists run1, run2, run3; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/condition_evaluation/combined_if_and_ifnot.test.sql b/pkgs/core/supabase/tests/condition_evaluation/combined_if_and_ifnot.test.sql new file mode 100644 index 000000000..03cf35f68 --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/combined_if_and_ifnot.test.sql @@ -0,0 +1,92 @@ +-- Test: Combined if+ifNot - BOTH conditions must pass (AND semantics) +-- Pattern: "active admin who 
is NOT suspended" +-- if: { role: 'admin', active: true } +-- ifNot: { suspended: true } +begin; +select plan(6); + +select pgflow_tests.reset_db(); + +-- Create flow with step that has both if and ifNot conditions +select pgflow.create_flow('combined_flow'); +select pgflow.add_step( + flow_slug => 'combined_flow', + step_slug => 'admin_action', + required_input_pattern => '{"role": "admin", "active": true}'::jsonb, -- if + forbidden_input_pattern => '{"suspended": true}'::jsonb, -- ifNot + when_unmet => 'skip' +); +-- Add another step without conditions +select pgflow.add_step('combined_flow', 'always_step'); + +-- Test case 1: Active admin NOT suspended -> BOTH conditions met -> step runs +with flow as ( + select * from pgflow.start_flow('combined_flow', '{"role": "admin", "active": true}'::jsonb) +) +select run_id into temporary run1 from flow; + +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run1) and step_slug = 'admin_action'), + 'started', + 'Active admin not suspended: both conditions met, step should start' +); + +-- Test case 2: Active admin BUT suspended -> if passes, ifNot fails -> step skipped +with flow as ( + select * from pgflow.start_flow('combined_flow', '{"role": "admin", "active": true, "suspended": true}'::jsonb) +) +select run_id into temporary run2 from flow; + +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run2) and step_slug = 'admin_action'), + 'skipped', + 'Active admin but suspended: ifNot fails, step should be skipped' +); + +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run2) and step_slug = 'admin_action'), + 'condition_unmet', + 'Skip reason should be condition_unmet' +); + +-- Test case 3: Regular user NOT suspended -> if fails -> step skipped +with flow as ( + select * from pgflow.start_flow('combined_flow', '{"role": "user", "active": true}'::jsonb) +) +select run_id into temporary run3 from flow; + +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run3) and step_slug = 'admin_action'), + 'skipped', + 'Regular user: if condition fails, step should be skipped' +); + +-- Test case 4: Inactive admin -> if fails -> step skipped +with flow as ( + select * from pgflow.start_flow('combined_flow', '{"role": "admin", "active": false}'::jsonb) +) +select run_id into temporary run4 from flow; + +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run4) and step_slug = 'admin_action'), + 'skipped', + 'Inactive admin: if condition fails (active!=true), step should be skipped' +); + +-- Test case 5: always_step should run in all cases (checking last run) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run4) and step_slug = 'always_step'), + 'started', + 'Step without condition should always start' +); + +drop table if exists run1, run2, run3, run4; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/condition_evaluation/dependent_step_condition_met.test.sql b/pkgs/core/supabase/tests/condition_evaluation/dependent_step_condition_met.test.sql new file mode 100644 index 000000000..8a60c18da --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/dependent_step_condition_met.test.sql @@ -0,0 +1,66 @@ +-- Test: Dependent step condition met - step executes normally +-- Verifies that a dependent step with a condition pattern that matches +-- the aggregated dependency outputs starts normally +begin; 
+select plan(3); + +-- Reset database +select pgflow_tests.reset_db(); + +-- Create flow with a root step and a conditional dependent step +select pgflow.create_flow('conditional_flow'); +select pgflow.add_step('conditional_flow', 'first'); +select pgflow.add_step( + flow_slug => 'conditional_flow', + step_slug => 'checked_step', + deps_slugs => ARRAY['first'], + required_input_pattern => '{"first": {"success": true}}'::jsonb, -- first.success must be true + when_unmet => 'skip' +); + +-- Start flow +with flow as ( + select * from pgflow.start_flow('conditional_flow', '{}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Read and start the first step's task +select pgflow_tests.read_and_start('conditional_flow'); + +-- Complete first step with output that MATCHES condition +select pgflow.complete_task( + (select run_id from run_ids), + 'first', + 0, + '{"success": true, "data": "hello"}'::jsonb +); + +-- Test 1: checked_step should be 'started' (condition met) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'checked_step'), + 'started', + 'Dependent step with met condition should start' +); + +-- Test 2: skip_reason should be NULL +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'checked_step'), + NULL, + 'Step with met condition should have no skip_reason' +); + +-- Test 3: Task should be created +select is( + (select count(*)::int from pgflow.step_tasks + where run_id = (select run_id from run_ids) and step_slug = 'checked_step'), + 1, + 'Task should be created for step with met condition' +); + +-- Clean up +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/condition_evaluation/dependent_step_condition_unmet_skip.test.sql b/pkgs/core/supabase/tests/condition_evaluation/dependent_step_condition_unmet_skip.test.sql new file mode 100644 index 000000000..0e0d7d81f --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/dependent_step_condition_unmet_skip.test.sql @@ -0,0 +1,73 @@ +-- Test: Dependent step condition unmet with whenUnmet='skip' +-- Verifies that a dependent step with unmet condition is skipped +-- when its dependency output doesn't match the pattern +begin; +select plan(4); + +-- Reset database +select pgflow_tests.reset_db(); + +-- Create flow with a root step and a conditional dependent step +select pgflow.create_flow('conditional_flow'); +select pgflow.add_step('conditional_flow', 'first'); +select pgflow.add_step( + flow_slug => 'conditional_flow', + step_slug => 'checked_step', + deps_slugs => ARRAY['first'], + required_input_pattern => '{"first": {"success": true}}'::jsonb, -- first.success must be true + when_unmet => 'skip' +); + +-- Start flow +with flow as ( + select * from pgflow.start_flow('conditional_flow', '{}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Read and start the first step's task +select pgflow_tests.read_and_start('conditional_flow'); + +-- Complete first step with output that does NOT match condition +select pgflow.complete_task( + (select run_id from run_ids), + 'first', + 0, + '{"success": false, "error": "something went wrong"}'::jsonb +); + +-- Test 1: checked_step should be 'skipped' (condition unmet) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'checked_step'), + 'skipped', + 'Dependent step with unmet condition should be skipped' +); + +-- 
Test 2: skip_reason should be 'condition_unmet' +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'checked_step'), + 'condition_unmet', + 'Step should have skip_reason = condition_unmet' +); + +-- Test 3: No task should be created +select is( + (select count(*)::int from pgflow.step_tasks + where run_id = (select run_id from run_ids) and step_slug = 'checked_step'), + 0, + 'No task should be created for skipped step' +); + +-- Test 4: Run should complete (all steps done) +select is( + (select status from pgflow.runs where run_id = (select run_id from run_ids)), + 'completed', + 'Run should complete when skipped step was the last step' +); + +-- Clean up +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/condition_evaluation/ifnot_empty_object_absent_dep.test.sql b/pkgs/core/supabase/tests/condition_evaluation/ifnot_empty_object_absent_dep.test.sql new file mode 100644 index 000000000..f8450fa10 --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/ifnot_empty_object_absent_dep.test.sql @@ -0,0 +1,59 @@ +-- Test: ifNot empty object pattern - step runs when dependency is absent/skipped +-- +-- Verifies that ifNot: { dep: {} } (empty object pattern) correctly detects +-- when a dependency is absent (skipped) and causes the fallback step to run. +-- +-- PostgreSQL containment semantics: +-- - When dep is skipped, deps_output is {} (empty object) +-- - {} @> { dep: {} } = FALSE (empty object doesn't contain dep key) +-- - NOT(FALSE) = TRUE, so ifNot condition is met -> step runs +begin; +select plan(2); + +select pgflow_tests.reset_db(); + +-- Create flow: skippable_dep -> fallback (with ifNot: { skippable_dep: {} }) +select pgflow.create_flow('empty_pattern_test'); + +-- Step A: Skippable based on input pattern +select pgflow.add_step( + flow_slug => 'empty_pattern_test', + step_slug => 'skippable_dep', + required_input_pattern => '{"run_dep": true}'::jsonb, + when_unmet => 'skip' +); + +-- Step B: Fallback - runs when A is absent (empty object pattern) +select pgflow.add_step( + flow_slug => 'empty_pattern_test', + step_slug => 'fallback', + deps_slugs => ARRAY['skippable_dep'], + forbidden_input_pattern => '{"skippable_dep": {}}'::jsonb, + when_unmet => 'skip' +); + +-- Start flow with input that causes dep to skip (run_dep: false) +with flow as ( + select * from pgflow.start_flow('empty_pattern_test', '{"run_dep": false}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Test 1: skippable_dep should be skipped +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'skippable_dep'), + 'skipped', + 'Dependency with unmet condition should be skipped' +); + +-- Test 2: fallback should be started (empty object pattern matched -> ifNot passed) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'fallback'), + 'started', + 'Step with ifNot: {dep: {}} should start when dep is absent' +); + +drop table if exists run_ids; +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_pattern_matches_fail.test.sql b/pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_pattern_matches_fail.test.sql new file mode 100644 index 000000000..fe7a7111b --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_pattern_matches_fail.test.sql @@ -0,0 +1,59 @@ +-- Test: ifNot 
pattern MATCHES (negative condition fails) with whenUnmet='fail'
+-- When the ifNot pattern MATCHES the input, the condition is NOT met (the pattern should NOT match)
+-- With whenUnmet='fail', this should fail both the step and the run
+begin;
+select plan(4);
+
+select pgflow_tests.reset_db();
+
+-- Create flow with a root step that has ifNot condition
+select pgflow.create_flow('ifnot_fail_flow');
+select pgflow.add_step(
+  flow_slug => 'ifnot_fail_flow',
+  step_slug => 'no_admin_step',
+  forbidden_input_pattern => '{"role": "admin"}'::jsonb, -- must NOT contain role=admin
+  when_unmet => 'fail'
+);
+
+-- Start flow with input that MATCHES the ifNot pattern (role=admin)
+-- Since input @> pattern, the ifNot condition is NOT met
+with flow as (
+  select * from pgflow.start_flow('ifnot_fail_flow', '{"role": "admin", "name": "Alice"}'::jsonb)
+)
+select run_id into temporary run_ids from flow;
+
+-- Test 1: Step should be 'failed' (ifNot condition not met because pattern matched)
+select is(
+  (select status from pgflow.step_states
+   where run_id = (select run_id from run_ids) and step_slug = 'no_admin_step'),
+  'failed',
+  'Step with matched ifNot pattern and whenUnmet=fail should be failed'
+);
+
+-- Test 2: Error message should indicate condition not met
+select is(
+  (select error_message from pgflow.step_states
+   where run_id = (select run_id from run_ids) and step_slug = 'no_admin_step'),
+  'Condition not met',
+  'Error message should indicate condition not met'
+);
+
+-- Test 3: No task should be created for failed step
+select is(
+  (select count(*)::int from pgflow.step_tasks
+   where run_id = (select run_id from run_ids) and step_slug = 'no_admin_step'),
+  0,
+  'No task should be created for failed step'
+);
+
+-- Test 4: Run should be 'failed'
+select is(
+  (select status from pgflow.runs where run_id = (select run_id from run_ids)),
+  'failed',
+  'Run should be failed when step fails due to unmet ifNot condition'
+);
+
+drop table if exists run_ids;
+
+select finish();
+rollback;
diff --git a/pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_pattern_not_matches.test.sql b/pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_pattern_not_matches.test.sql
new file mode 100644
index 000000000..cc9dcbc81
--- /dev/null
+++ b/pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_pattern_not_matches.test.sql
@@ -0,0 +1,51 @@
+-- Test: ifNot pattern does NOT match - step should execute
+-- When the ifNot pattern does NOT match the input, the condition IS met
+-- The step should execute normally
+begin;
+select plan(3);
+
+select pgflow_tests.reset_db();
+
+-- Create flow with a root step that has ifNot condition
+select pgflow.create_flow('ifnot_pass_flow');
+select pgflow.add_step(
+  flow_slug => 'ifnot_pass_flow',
+  step_slug => 'no_admin_step',
+  forbidden_input_pattern => '{"role": "admin"}'::jsonb, -- must NOT contain role=admin
+  when_unmet => 'fail' -- (doesn't matter for this test since condition is met)
+);
+
+-- Start flow with input that does NOT match the ifNot pattern (role=user)
+-- Since input does NOT contain role=admin, the ifNot condition IS met
+with flow as (
+  select * from pgflow.start_flow('ifnot_pass_flow', '{"role": "user", "name": "Bob"}'::jsonb)
+)
+select run_id into temporary run_ids from flow;
+
+-- Test 1: Step should be 'started' (condition met, step executes)
+select is(
+  (select status from pgflow.step_states
+   where run_id = (select run_id from run_ids) and step_slug = 'no_admin_step'),
+  'started',
+  'Step should start when ifNot pattern does
not match input' +); + +-- Test 2: Task should be created for the step +select is( + (select count(*)::int from pgflow.step_tasks + where run_id = (select run_id from run_ids) and step_slug = 'no_admin_step'), + 1, + 'Task should be created for step when condition is met' +); + +-- Test 3: Run should be 'started' +select is( + (select status from pgflow.runs where run_id = (select run_id from run_ids)), + 'started', + 'Run should continue when ifNot condition is met' +); + +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_skip.test.sql b/pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_skip.test.sql new file mode 100644 index 000000000..d340f822f --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_skip.test.sql @@ -0,0 +1,68 @@ +-- Test: ifNot pattern MATCHES (condition not met) with whenUnmet='skip' +-- Step should be skipped but run continues +begin; +select plan(5); + +select pgflow_tests.reset_db(); + +-- Create flow with a root step that has ifNot condition +select pgflow.create_flow('ifnot_skip_flow'); +select pgflow.add_step( + flow_slug => 'ifnot_skip_flow', + step_slug => 'no_admin_step', + forbidden_input_pattern => '{"role": "admin"}'::jsonb, -- must NOT contain role=admin + when_unmet => 'skip' +); +-- Add another root step without condition +select pgflow.add_step('ifnot_skip_flow', 'other_step'); + +-- Start flow with input that MATCHES the ifNot pattern (role=admin) +-- The ifNot condition is NOT met, so step should be skipped +with flow as ( + select * from pgflow.start_flow('ifnot_skip_flow', '{"role": "admin", "name": "Alice"}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Test 1: no_admin_step should be 'skipped' +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'no_admin_step'), + 'skipped', + 'Step with matched ifNot pattern and whenUnmet=skip should be skipped' +); + +-- Test 2: skip_reason should be 'condition_unmet' +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'no_admin_step'), + 'condition_unmet', + 'Skip reason should be condition_unmet' +); + +-- Test 3: No task should be created for skipped step +select is( + (select count(*)::int from pgflow.step_tasks + where run_id = (select run_id from run_ids) and step_slug = 'no_admin_step'), + 0, + 'No task should be created for skipped step' +); + +-- Test 4: other_step should be started normally +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'other_step'), + 'started', + 'Other step without condition should start normally' +); + +-- Test 5: Run should continue (not failed) +select is( + (select status from pgflow.runs where run_id = (select run_id from run_ids)), + 'started', + 'Run should continue when step is skipped' +); + +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_skip_cascade.test.sql b/pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_skip_cascade.test.sql new file mode 100644 index 000000000..6d79fa2cb --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/ifnot_root_step_skip_cascade.test.sql @@ -0,0 +1,77 @@ +-- Test: ifNot pattern MATCHES (condition not met) with whenUnmet='skip-cascade' +-- Step and all dependents should be skipped +begin; 
+select plan(6); + +select pgflow_tests.reset_db(); + +-- Create flow with ifNot step and a dependent +select pgflow.create_flow('ifnot_cascade_flow'); +select pgflow.add_step( + flow_slug => 'ifnot_cascade_flow', + step_slug => 'no_admin_step', + forbidden_input_pattern => '{"role": "admin"}'::jsonb, -- must NOT contain role=admin + when_unmet => 'skip-cascade' +); +-- Add a dependent step +select pgflow.add_step('ifnot_cascade_flow', 'dependent_step', ARRAY['no_admin_step']); +-- Add an independent step +select pgflow.add_step('ifnot_cascade_flow', 'independent_step'); + +-- Start flow with input that MATCHES the ifNot pattern (role=admin) +with flow as ( + select * from pgflow.start_flow('ifnot_cascade_flow', '{"role": "admin", "name": "Alice"}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Test 1: no_admin_step should be 'skipped' +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'no_admin_step'), + 'skipped', + 'Step with matched ifNot pattern and whenUnmet=skip-cascade should be skipped' +); + +-- Test 2: skip_reason for no_admin_step should be 'condition_unmet' +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'no_admin_step'), + 'condition_unmet', + 'Skip reason should be condition_unmet' +); + +-- Test 3: dependent_step should also be 'skipped' (cascade) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'dependent_step'), + 'skipped', + 'Dependent step should be skipped due to cascade' +); + +-- Test 4: skip_reason for dependent_step should be 'dependency_skipped' +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'dependent_step'), + 'dependency_skipped', + 'Dependent skip reason should be dependency_skipped' +); + +-- Test 5: independent_step should be started normally +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'independent_step'), + 'started', + 'Independent step should start normally' +); + +-- Test 6: Run should continue +select is( + (select status from pgflow.runs where run_id = (select run_id from run_ids)), + 'started', + 'Run should continue when step is skip-cascaded' +); + +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/condition_evaluation/no_condition_always_executes.test.sql b/pkgs/core/supabase/tests/condition_evaluation/no_condition_always_executes.test.sql new file mode 100644 index 000000000..78d0de5a2 --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/no_condition_always_executes.test.sql @@ -0,0 +1,40 @@ +-- Test: Step with no condition (NULL pattern) always executes +-- Verifies that steps without required_input_pattern execute normally +-- regardless of input content +begin; +select plan(2); + +-- Reset database +select pgflow_tests.reset_db(); + +-- Create flow with a step that has no condition (default) +select pgflow.create_flow('simple_flow'); +select pgflow.add_step('simple_flow', 'unconditioned'); + +-- Start flow with any input +with flow as ( + select * from pgflow.start_flow('simple_flow', '{"anything": "goes"}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Test 1: Step should be started (no condition means always execute) +select is( + (select status from pgflow.step_states + where run_id = (select run_id 
from run_ids) and step_slug = 'unconditioned'), + 'started', + 'Step with no condition should start regardless of input' +); + +-- Test 2: Task should be created +select is( + (select count(*)::int from pgflow.step_tasks + where run_id = (select run_id from run_ids) and step_slug = 'unconditioned'), + 1, + 'Task should be created for step with no condition' +); + +-- Clean up +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/condition_evaluation/plain_skip_iterates_until_convergence.test.sql b/pkgs/core/supabase/tests/condition_evaluation/plain_skip_iterates_until_convergence.test.sql new file mode 100644 index 000000000..a35ce708d --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/plain_skip_iterates_until_convergence.test.sql @@ -0,0 +1,115 @@ +-- Test: Plain skip iterates until convergence +-- Verifies that after skipping a step: +-- 1. Dependents' remaining_deps are decremented +-- 2. Those newly-ready dependents get their conditions evaluated +-- 3. If they also have unmet conditions, they're also skipped +-- 4. Process repeats until no more steps need skipping +-- +-- Flow: a (skip) -> b (skip) -> c (no condition) +-- When 'a' is skipped, 'b' becomes ready and should also be skipped +-- Then 'c' becomes ready (but has no condition, so starts normally) +begin; +select plan(8); + +-- Reset database +select pgflow_tests.reset_db(); + +-- Create flow with chain: a -> b -> c +-- a has unmet condition (skip) +-- b depends on a.success (also skip) +-- c has no condition +select pgflow.create_flow('chain_skip'); +select pgflow.add_step( + flow_slug => 'chain_skip', + step_slug => 'step_a', + required_input_pattern => '{"enabled": true}'::jsonb, -- requires enabled=true + when_unmet => 'skip' -- plain skip +); +select pgflow.add_step( + flow_slug => 'chain_skip', + step_slug => 'step_b', + deps_slugs => ARRAY['step_a'], + required_input_pattern => '{"step_a": {"success": true}}'::jsonb, -- a.success must be true + when_unmet => 'skip' -- plain skip (won't be met since a was skipped) +); +select pgflow.add_step( + flow_slug => 'chain_skip', + step_slug => 'step_c', + deps_slugs => ARRAY['step_b'] + -- no condition +); + +-- Start flow with input that does NOT match step_a's condition +with flow as ( + select * from pgflow.start_flow('chain_skip', '{"enabled": false}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Test 1: step_a should be skipped +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_a'), + 'skipped', + 'step_a with unmet condition should be skipped' +); + +-- Test 2: step_b should also be skipped (its condition references skipped step_a) +-- The condition '{"step_a": {"success": true}}' cannot be met when step_a is skipped +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_b'), + 'skipped', + 'step_b should be skipped (condition references skipped dependency)' +); + +-- Test 3: step_b skip_reason should be 'condition_unmet' +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_b'), + 'condition_unmet', + 'step_b should have skip_reason = condition_unmet' +); + +-- Test 4: step_c remaining_deps should be 0 +select is( + (select remaining_deps from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_c'), + 0, + 'step_c remaining_deps should be 0 (both a and 
b skipped)' +); + +-- Test 5: step_c should be started (has no condition) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_c'), + 'started', + 'step_c with no condition should be started' +); + +-- Test 6: step_c should have a task created +select is( + (select count(*)::int from pgflow.step_tasks + where run_id = (select run_id from run_ids) and step_slug = 'step_c'), + 1, + 'step_c should have one task created' +); + +-- Test 7: Run remaining_steps should be 1 (only step_c) +select is( + (select remaining_steps from pgflow.runs where run_id = (select run_id from run_ids)), + 1, + 'Run remaining_steps should be 1 (only step_c remaining)' +); + +-- Test 8: Run should be started (not completed yet, step_c still running) +select is( + (select status from pgflow.runs where run_id = (select run_id from run_ids)), + 'started', + 'Run should be started while step_c is running' +); + +-- Clean up +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/condition_evaluation/plain_skip_propagates_to_map.test.sql b/pkgs/core/supabase/tests/condition_evaluation/plain_skip_propagates_to_map.test.sql new file mode 100644 index 000000000..fa2c88c8f --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/plain_skip_propagates_to_map.test.sql @@ -0,0 +1,104 @@ +-- Test: Plain skip (whenUnmet='skip') propagates correctly to dependent map step +-- Verifies that when a step is skipped with plain 'skip' mode: +-- 1. remaining_deps on dependents is decremented +-- 2. initial_tasks is set to 0 for map dependents +-- 3. The run completes properly (not hanging) +-- +-- This tests the bug fix: Before this fix, plain skip didn't update +-- remaining_deps on dependents, causing runs to hang forever. 
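+--
+-- How the unmet check works here, for orientation (a sketch -- the
+-- required_input_pattern jsonb column suggests plain jsonb containment,
+-- i.e. input @> pattern; treat the operator choice as an assumption):
+--
+--   select '{"enabled": false}'::jsonb @> '{"enabled": true}'::jsonb;
+--   -- => false, so the condition is unmet and when_unmet = 'skip' applies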
+begin; +select plan(8); + +-- Reset database +select pgflow_tests.reset_db(); + +-- Create flow: +-- producer (conditional, skip) -> map_consumer (map step) +select pgflow.create_flow('skip_to_map'); +select pgflow.add_step( + flow_slug => 'skip_to_map', + step_slug => 'producer', + required_input_pattern => '{"enabled": true}'::jsonb, -- requires enabled=true + when_unmet => 'skip' -- plain skip (not skip-cascade) +); +-- Map consumer: no condition, just depends on producer +select pgflow.add_step( + flow_slug => 'skip_to_map', + step_slug => 'map_consumer', + deps_slugs => ARRAY['producer'], + step_type => 'map' +); + +-- Start flow with input that does NOT match producer's condition +with flow as ( + select * from pgflow.start_flow('skip_to_map', '{"enabled": false}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Test 1: producer should be skipped +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'producer'), + 'skipped', + 'Producer with unmet condition should be skipped' +); + +-- Test 2: producer skip_reason should be 'condition_unmet' +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'producer'), + 'condition_unmet', + 'Producer should have skip_reason = condition_unmet' +); + +-- Test 3: map_consumer remaining_deps should be 0 (decremented from 1) +select is( + (select remaining_deps from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'map_consumer'), + 0, + 'Map consumer remaining_deps should be decremented to 0' +); + +-- Test 4: map_consumer initial_tasks should be 0 (skipped parent = empty array) +select is( + (select initial_tasks from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'map_consumer'), + 0, + 'Map consumer initial_tasks should be 0 (skipped dep = empty array)' +); + +-- Test 5: map_consumer should be completed (cascade_complete_taskless_steps handles 0 tasks) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'map_consumer'), + 'completed', + 'Map consumer should be completed (empty map auto-completes)' +); + +-- Test 6: map_consumer output should be empty array +select is( + (select output from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'map_consumer'), + '[]'::jsonb, + 'Map consumer output should be empty array' +); + +-- Test 7: Run remaining_steps should be 0 +select is( + (select remaining_steps from pgflow.runs where run_id = (select run_id from run_ids)), + 0, + 'Run remaining_steps should be 0' +); + +-- Test 8: Run should be completed (not hanging!) 
+select is( + (select status from pgflow.runs where run_id = (select run_id from run_ids)), + 'completed', + 'Run should complete (not hang) when skip propagates to map' +); + +-- Clean up +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/condition_evaluation/root_step_condition_met.test.sql b/pkgs/core/supabase/tests/condition_evaluation/root_step_condition_met.test.sql new file mode 100644 index 000000000..03063a4a2 --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/root_step_condition_met.test.sql @@ -0,0 +1,53 @@ +-- Test: Root step condition met - step executes normally +-- Verifies that a root step with a condition pattern that matches the flow input +-- starts normally without being skipped +begin; +select plan(3); + +-- Reset database +select pgflow_tests.reset_db(); + +-- Create flow with a root step that has a condition +select pgflow.create_flow('conditional_flow'); +select pgflow.add_step( + flow_slug => 'conditional_flow', + step_slug => 'checked_step', + required_input_pattern => '{"enabled": true}'::jsonb, -- requires enabled=true + when_unmet => 'skip' +); + +-- Start flow with input that matches condition +with flow as ( + select * from pgflow.start_flow('conditional_flow', '{"enabled": true, "value": 42}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Test 1: Step should be in 'started' status (condition met, step executes) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'checked_step'), + 'started', + 'Step with met condition should start normally' +); + +-- Test 2: skip_reason should be NULL (not skipped) +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'checked_step'), + NULL, + 'Step with met condition should have no skip_reason' +); + +-- Test 3: Task should be created +select is( + (select count(*)::int from pgflow.step_tasks + where run_id = (select run_id from run_ids) and step_slug = 'checked_step'), + 1, + 'Task should be created for step with met condition' +); + +-- Clean up +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/condition_evaluation/root_step_condition_unmet_fail.test.sql b/pkgs/core/supabase/tests/condition_evaluation/root_step_condition_unmet_fail.test.sql new file mode 100644 index 000000000..97ae0039a --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/root_step_condition_unmet_fail.test.sql @@ -0,0 +1,59 @@ +-- Test: Root step condition unmet with whenUnmet='fail' - run fails +-- Verifies that a root step with unmet condition and whenUnmet='fail' +-- causes the run to fail immediately +begin; +select plan(4); + +-- Reset database +select pgflow_tests.reset_db(); + +-- Create flow with a root step that has a condition with fail mode +select pgflow.create_flow('conditional_flow'); +select pgflow.add_step( + flow_slug => 'conditional_flow', + step_slug => 'checked_step', + required_input_pattern => '{"enabled": true}'::jsonb, -- requires enabled=true + when_unmet => 'fail' -- causes run to fail +); + +-- Start flow with input that does NOT match condition +with flow as ( + select * from pgflow.start_flow('conditional_flow', '{"enabled": false, "value": 42}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Test 1: checked_step should be 'failed' (condition unmet + fail mode) +select is( + (select status from pgflow.step_states + where run_id = 
(select run_id from run_ids) and step_slug = 'checked_step'), + 'failed', + 'Step with unmet condition and whenUnmet=fail should be failed' +); + +-- Test 2: error_message should indicate condition unmet +select ok( + (select error_message from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'checked_step') ILIKE '%condition%', + 'Failed step should have error message about condition' +); + +-- Test 3: No task should be created +select is( + (select count(*)::int from pgflow.step_tasks + where run_id = (select run_id from run_ids) and step_slug = 'checked_step'), + 0, + 'No task should be created for failed step' +); + +-- Test 4: Run should be failed +select is( + (select status from pgflow.runs where run_id = (select run_id from run_ids)), + 'failed', + 'Run should fail when step condition fails with fail mode' +); + +-- Clean up +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/condition_evaluation/root_step_condition_unmet_skip.test.sql b/pkgs/core/supabase/tests/condition_evaluation/root_step_condition_unmet_skip.test.sql new file mode 100644 index 000000000..67970f326 --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/root_step_condition_unmet_skip.test.sql @@ -0,0 +1,70 @@ +-- Test: Root step condition unmet with whenUnmet='skip' - step skipped +-- Verifies that a root step with unmet condition and whenUnmet='skip' +-- is skipped but the run continues +begin; +select plan(5); + +-- Reset database +select pgflow_tests.reset_db(); + +-- Create flow with a root step that has a condition +select pgflow.create_flow('conditional_flow'); +select pgflow.add_step( + flow_slug => 'conditional_flow', + step_slug => 'checked_step', + required_input_pattern => '{"enabled": true}'::jsonb, -- requires enabled=true + when_unmet => 'skip' +); +-- Add another root step without condition +select pgflow.add_step('conditional_flow', 'other_step'); + +-- Start flow with input that does NOT match condition +with flow as ( + select * from pgflow.start_flow('conditional_flow', '{"enabled": false, "value": 42}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Test 1: checked_step should be 'skipped' (condition unmet) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'checked_step'), + 'skipped', + 'Step with unmet condition and whenUnmet=skip should be skipped' +); + +-- Test 2: skip_reason should be 'condition_unmet' +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'checked_step'), + 'condition_unmet', + 'Step with unmet condition should have skip_reason = condition_unmet' +); + +-- Test 3: No task should be created for skipped step +select is( + (select count(*)::int from pgflow.step_tasks + where run_id = (select run_id from run_ids) and step_slug = 'checked_step'), + 0, + 'No task should be created for skipped step' +); + +-- Test 4: other_step should be started (independent root step) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'other_step'), + 'started', + 'Other step without condition should start normally' +); + +-- Test 5: Run should continue (not failed) +select is( + (select status from pgflow.runs where run_id = (select run_id from run_ids)), + 'started', + 'Run should continue when step is skipped' +); + +-- Clean up +drop table if exists run_ids; + +select finish(); +rollback; 
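+
+-- For reference, the three when_unmet modes exercised in this directory
+-- (a sketch using the same add_step signature as above; 'f' and 's1'..'s3'
+-- are placeholder slugs, not part of this test):
+--   select pgflow.add_step('f', 's1', required_input_pattern => '{"on": true}'::jsonb, when_unmet => 'skip');          -- skip just this step
+--   select pgflow.add_step('f', 's2', required_input_pattern => '{"on": true}'::jsonb, when_unmet => 'skip-cascade');  -- skip this step and all dependents
+--   select pgflow.add_step('f', 's3', required_input_pattern => '{"on": true}'::jsonb, when_unmet => 'fail');          -- fail the whole run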
diff --git a/pkgs/core/supabase/tests/condition_evaluation/root_step_condition_unmet_skip_cascade.test.sql b/pkgs/core/supabase/tests/condition_evaluation/root_step_condition_unmet_skip_cascade.test.sql new file mode 100644 index 000000000..c9104f5e2 --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/root_step_condition_unmet_skip_cascade.test.sql @@ -0,0 +1,83 @@ +-- Test: Root step condition unmet with whenUnmet='skip-cascade' - step and dependents skipped +-- Verifies that a root step with unmet condition and whenUnmet='skip-cascade' +-- skips the step AND all its dependents +begin; +select plan(6); + +-- Reset database +select pgflow_tests.reset_db(); + +-- Create flow with a root step that has a condition and a dependent +select pgflow.create_flow('conditional_flow'); +select pgflow.add_step( + flow_slug => 'conditional_flow', + step_slug => 'checked_step', + required_input_pattern => '{"enabled": true}'::jsonb, + when_unmet => 'skip-cascade' -- skip this AND dependents +); +select pgflow.add_step( + flow_slug => 'conditional_flow', + step_slug => 'dependent_step', + deps_slugs => ARRAY['checked_step'] +); +-- Add an independent root step that should still run +select pgflow.add_step('conditional_flow', 'other_step'); + +-- Start flow with input that does NOT match condition +with flow as ( + select * from pgflow.start_flow('conditional_flow', '{"enabled": false}'::jsonb) +) +select run_id into temporary run_ids from flow; + +-- Test 1: checked_step should be 'skipped' +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'checked_step'), + 'skipped', + 'Step with unmet condition and skip-cascade should be skipped' +); + +-- Test 2: checked_step skip_reason should be 'condition_unmet' +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'checked_step'), + 'condition_unmet', + 'Original step should have skip_reason = condition_unmet' +); + +-- Test 3: dependent_step should be 'skipped' (cascaded) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'dependent_step'), + 'skipped', + 'Dependent step should be skipped due to cascade' +); + +-- Test 4: dependent_step skip_reason should be 'dependency_skipped' +select is( + (select skip_reason from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'dependent_step'), + 'dependency_skipped', + 'Cascaded step should have skip_reason = dependency_skipped' +); + +-- Test 5: other_step should be started (independent) +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'other_step'), + 'started', + 'Independent step should start normally' +); + +-- Test 6: Run should continue (remaining_steps decremented by skipped steps) +select is( + (select status from pgflow.runs where run_id = (select run_id from run_ids)), + 'started', + 'Run should continue after skip-cascade' +); + +-- Clean up +drop table if exists run_ids; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/condition_evaluation/skipped_deps_excluded_from_input.test.sql b/pkgs/core/supabase/tests/condition_evaluation/skipped_deps_excluded_from_input.test.sql new file mode 100644 index 000000000..52b342d4c --- /dev/null +++ b/pkgs/core/supabase/tests/condition_evaluation/skipped_deps_excluded_from_input.test.sql @@ -0,0 +1,134 @@ +-- Test: Skipped deps are excluded from handler 
input (missing key, not null)
+-- Verifies that when a dependency is skipped:
+-- 1. The handler receives deps_output WITHOUT the skipped dep key
+-- 2. The key is missing entirely, not present with null value
+--
+-- Flow:
+--   step_a (conditional, skip) \
+--                               -> step_c (no condition)
+--   step_b (always runs)       /
+--
+-- When step_a is skipped, step_c should receive: {"step_b": ...}
+-- (NOT: {"step_a": null, "step_b": ...})
+begin;
+select plan(5);
+
+-- Reset database
+select pgflow_tests.reset_db();
+
+-- Create flow with diamond: a + b -> c
+-- a has unmet condition (will be skipped)
+-- b always runs
+-- c depends on both
+select pgflow.create_flow('skip_diamond');
+select pgflow.add_step(
+  flow_slug => 'skip_diamond',
+  step_slug => 'step_a',
+  required_input_pattern => '{"enabled": true}'::jsonb, -- requires enabled=true
+  when_unmet => 'skip' -- plain skip
+);
+select pgflow.add_step(
+  flow_slug => 'skip_diamond',
+  step_slug => 'step_b'
+  -- root step, no condition
+);
+select pgflow.add_step(
+  flow_slug => 'skip_diamond',
+  step_slug => 'step_c',
+  deps_slugs => ARRAY['step_a', 'step_b']
+  -- no condition
+);
+
+-- Start flow with input that skips step_a
+with flow as (
+  select * from pgflow.start_flow('skip_diamond', '{"enabled": false}'::jsonb)
+)
+select run_id into temporary run_ids from flow;
+
+-- Test 1: step_a should be skipped
+select is(
+  (select status from pgflow.step_states
+   where run_id = (select run_id from run_ids) and step_slug = 'step_a'),
+  'skipped',
+  'step_a with unmet condition should be skipped'
+);
+
+-- Test 2: step_b should be started
+select is(
+  (select status from pgflow.step_states
+   where run_id = (select run_id from run_ids) and step_slug = 'step_b'),
+  'started',
+  'step_b without condition should be started'
+);
+
+-- Read and start step_b's task
+select pgflow_tests.read_and_start('skip_diamond');
+
+-- Complete step_b with some output
+select pgflow.complete_task(
+  (select run_id from run_ids),
+  'step_b',
+  0,
+  '{"data": "from_b"}'::jsonb
+);
+
+-- Test 3: Verify step_c remaining_deps is 0 (ready to start)
+select is(
+  (select remaining_deps from pgflow.step_states
+   where run_id = (select run_id from run_ids) and step_slug = 'step_c'),
+  0,
+  'step_c remaining_deps should be 0 (a skipped + b completed)'
+);
+
+-- Now read and start step_c - this replicates what read_and_start does
+-- and allows us to inspect the returned input value
+--
+-- We need to do this in steps:
+-- 1. Read the message from the queue
+-- 2. Start the task with start_tasks
+-- 3.
Inspect the input returned by start_tasks + +-- Read the message and store msg_id +with read_msg as ( + select * from pgmq.read_with_poll('skip_diamond', 1, 1, 1, 50) + limit 1 +), +msg_ids as ( + select array_agg(msg_id) as ids from read_msg +), +-- Start the task and get the input +start_result as ( + select st.input, st.step_slug, st.run_id + from pgflow.start_tasks( + 'skip_diamond', + (select ids from msg_ids), + pgflow_tests.ensure_worker('skip_diamond') + ) st +) +-- Store the input for later testing +select input, step_slug, run_id into temporary step_c_inputs +from start_result +where step_slug = 'step_c'; + +-- Test 4: Verify step_c was started +select is( + (select status from pgflow.step_states + where run_id = (select run_id from run_ids) and step_slug = 'step_c'), + 'started', + 'step_c should be started after read_and_start' +); + +-- Test 5: Verify the input does NOT contain step_a key +-- The handler input should only have step_b, NOT step_a +select is( + (select input from step_c_inputs), + '{"step_b": {"data": "from_b"}}'::jsonb, + 'step_c input should only contain step_b, not step_a (skipped deps are excluded)' +); + +-- Clean up +drop table if exists run_ids; +drop table if exists step_c_inputs; + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/create_flow_from_shape/basic_compile.test.sql b/pkgs/core/supabase/tests/create_flow_from_shape/basic_compile.test.sql index ee059957b..e74c19675 100644 --- a/pkgs/core/supabase/tests/create_flow_from_shape/basic_compile.test.sql +++ b/pkgs/core/supabase/tests/create_flow_from_shape/basic_compile.test.sql @@ -1,5 +1,5 @@ begin; -select plan(4); +select plan(5); select pgflow_tests.reset_db(); -- Test: Compile a simple sequential flow from shape @@ -7,9 +7,9 @@ select pgflow._create_flow_from_shape( 'test_flow', '{ "steps": [ - {"slug": "first", "stepType": "single", "dependencies": []}, - {"slug": "second", "stepType": "single", "dependencies": ["first"]}, - {"slug": "third", "stepType": "single", "dependencies": ["second"]} + {"slug": "first", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "second", "stepType": "single", "dependencies": ["first"], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "third", "stepType": "single", "dependencies": ["second"], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}} ] }'::jsonb ); @@ -42,5 +42,18 @@ select results_eq( 'Dependencies should be created correctly' ); +-- Verify shape round-trips correctly +select is( + pgflow._get_flow_shape('test_flow'), + '{ + "steps": [ + {"slug": "first", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "second", "stepType": "single", "dependencies": ["first"], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "third", "stepType": "single", "dependencies": ["second"], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}} + ] + }'::jsonb, + 'Shape should round-trip correctly' +); + select finish(); rollback; diff --git 
a/pkgs/core/supabase/tests/create_flow_from_shape/condition_modes_compile.test.sql b/pkgs/core/supabase/tests/create_flow_from_shape/condition_modes_compile.test.sql new file mode 100644 index 000000000..99ff0942f --- /dev/null +++ b/pkgs/core/supabase/tests/create_flow_from_shape/condition_modes_compile.test.sql @@ -0,0 +1,61 @@ +begin; +select plan(4); +select pgflow_tests.reset_db(); + +-- Test: Compile flow with non-default whenUnmet/whenFailed values +select pgflow._create_flow_from_shape( + 'condition_flow', + '{ + "steps": [ + {"slug": "always_run", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "cascade_skip", "stepType": "single", "dependencies": ["always_run"], "whenUnmet": "skip-cascade", "whenFailed": "skip", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "fail_on_unmet", "stepType": "single", "dependencies": ["always_run"], "whenUnmet": "fail", "whenFailed": "skip-cascade", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}} + ] + }'::jsonb +); + +-- Verify when_unmet values were stored correctly +select results_eq( + $$ SELECT step_slug, when_unmet FROM pgflow.steps WHERE flow_slug = 'condition_flow' ORDER BY step_index $$, + $$ VALUES ('always_run', 'skip'), ('cascade_skip', 'skip-cascade'), ('fail_on_unmet', 'fail') $$, + 'when_unmet values should be stored correctly' +); + +-- Verify when_failed values were stored correctly +select results_eq( + $$ SELECT step_slug, when_failed FROM pgflow.steps WHERE flow_slug = 'condition_flow' ORDER BY step_index $$, + $$ VALUES ('always_run', 'fail'), ('cascade_skip', 'skip'), ('fail_on_unmet', 'skip-cascade') $$, + 'when_failed values should be stored correctly' +); + +-- Verify shape round-trips correctly with all condition mode variants +select is( + pgflow._get_flow_shape('condition_flow'), + '{ + "steps": [ + {"slug": "always_run", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "cascade_skip", "stepType": "single", "dependencies": ["always_run"], "whenUnmet": "skip-cascade", "whenFailed": "skip", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "fail_on_unmet", "stepType": "single", "dependencies": ["always_run"], "whenUnmet": "fail", "whenFailed": "skip-cascade", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}} + ] + }'::jsonb, + 'Shape with condition modes should round-trip correctly' +); + +-- Verify comparison detects no differences for matching shape +select is( + pgflow._compare_flow_shapes( + pgflow._get_flow_shape('condition_flow'), + '{ + "steps": [ + {"slug": "always_run", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "cascade_skip", "stepType": "single", "dependencies": ["always_run"], "whenUnmet": "skip-cascade", "whenFailed": "skip", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "fail_on_unmet", "stepType": "single", "dependencies": ["always_run"], "whenUnmet": "fail", "whenFailed": "skip-cascade", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": 
{"defined": false}} + ] + }'::jsonb + ), + '{}'::text[], + 'Matching shapes should have no differences' +); + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/create_flow_from_shape/map_step_compile.test.sql b/pkgs/core/supabase/tests/create_flow_from_shape/map_step_compile.test.sql index a0e26bb41..0c71d3a6d 100644 --- a/pkgs/core/supabase/tests/create_flow_from_shape/map_step_compile.test.sql +++ b/pkgs/core/supabase/tests/create_flow_from_shape/map_step_compile.test.sql @@ -7,8 +7,8 @@ select pgflow._create_flow_from_shape( 'map_flow', '{ "steps": [ - {"slug": "root_map", "stepType": "map", "dependencies": []}, - {"slug": "process", "stepType": "single", "dependencies": ["root_map"]} + {"slug": "root_map", "stepType": "map", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "process", "stepType": "single", "dependencies": ["root_map"], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}} ] }'::jsonb ); @@ -25,11 +25,11 @@ select is( pgflow._get_flow_shape('map_flow'), '{ "steps": [ - {"slug": "root_map", "stepType": "map", "dependencies": []}, - {"slug": "process", "stepType": "single", "dependencies": ["root_map"]} + {"slug": "root_map", "stepType": "map", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "process", "stepType": "single", "dependencies": ["root_map"], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}} ] }'::jsonb, - 'Shape should round-trip correctly' + 'Shape should round-trips correctly' ); select finish(); diff --git a/pkgs/core/supabase/tests/create_flow_from_shape/options_compile.test.sql b/pkgs/core/supabase/tests/create_flow_from_shape/options_compile.test.sql index 3e57bdd89..72a1542b6 100644 --- a/pkgs/core/supabase/tests/create_flow_from_shape/options_compile.test.sql +++ b/pkgs/core/supabase/tests/create_flow_from_shape/options_compile.test.sql @@ -7,7 +7,7 @@ select pgflow._create_flow_from_shape( 'flow_with_options', '{ "steps": [ - {"slug": "step1", "stepType": "single", "dependencies": []} + {"slug": "step1", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail"} ], "options": { "maxAttempts": 5, @@ -33,6 +33,8 @@ select pgflow._create_flow_from_shape( "slug": "step1", "stepType": "single", "dependencies": [], + "whenUnmet": "skip", + "whenFailed": "fail", "options": { "maxAttempts": 7, "baseDelay": 15, @@ -56,7 +58,7 @@ select pgflow._create_flow_from_shape( 'flow_no_options', '{ "steps": [ - {"slug": "step1", "stepType": "single", "dependencies": []} + {"slug": "step1", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail"} ] }'::jsonb ); @@ -84,6 +86,8 @@ select pgflow._create_flow_from_shape( "slug": "step1", "stepType": "single", "dependencies": [], + "whenUnmet": "skip", + "whenFailed": "fail", "options": { "timeout": 30 } diff --git a/pkgs/core/supabase/tests/ensure_flow_compiled/allow_data_loss_recompiles.test.sql b/pkgs/core/supabase/tests/ensure_flow_compiled/allow_data_loss_recompiles.test.sql index d77bda21d..41cd12f81 100644 --- a/pkgs/core/supabase/tests/ensure_flow_compiled/allow_data_loss_recompiles.test.sql +++ 
b/pkgs/core/supabase/tests/ensure_flow_compiled/allow_data_loss_recompiles.test.sql @@ -17,7 +17,7 @@ select is( 'allow_loss_flow', '{ "steps": [ - {"slug": "new_step", "stepType": "single", "dependencies": []} + {"slug": "new_step", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail"} ] }'::jsonb, true -- allow_data_loss = true diff --git a/pkgs/core/supabase/tests/ensure_flow_compiled/auto_recompiles_when_local.test.sql b/pkgs/core/supabase/tests/ensure_flow_compiled/auto_recompiles_when_local.test.sql index e354d4741..895dc70b6 100644 --- a/pkgs/core/supabase/tests/ensure_flow_compiled/auto_recompiles_when_local.test.sql +++ b/pkgs/core/supabase/tests/ensure_flow_compiled/auto_recompiles_when_local.test.sql @@ -17,7 +17,7 @@ select is( 'local_flow', '{ "steps": [ - {"slug": "new_step", "stepType": "single", "dependencies": []} + {"slug": "new_step", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail"} ] }'::jsonb ) as result diff --git a/pkgs/core/supabase/tests/ensure_flow_compiled/compiles_missing_flow.test.sql b/pkgs/core/supabase/tests/ensure_flow_compiled/compiles_missing_flow.test.sql index ac4f6cb91..1d083066c 100644 --- a/pkgs/core/supabase/tests/ensure_flow_compiled/compiles_missing_flow.test.sql +++ b/pkgs/core/supabase/tests/ensure_flow_compiled/compiles_missing_flow.test.sql @@ -10,7 +10,7 @@ select is( 'new_flow', '{ "steps": [ - {"slug": "first", "stepType": "single", "dependencies": []} + {"slug": "first", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail"} ] }'::jsonb ) as result diff --git a/pkgs/core/supabase/tests/ensure_flow_compiled/verifies_matching_shape.test.sql b/pkgs/core/supabase/tests/ensure_flow_compiled/verifies_matching_shape.test.sql index 80ca90ab6..34738ac94 100644 --- a/pkgs/core/supabase/tests/ensure_flow_compiled/verifies_matching_shape.test.sql +++ b/pkgs/core/supabase/tests/ensure_flow_compiled/verifies_matching_shape.test.sql @@ -15,8 +15,8 @@ select is( 'existing_flow', '{ "steps": [ - {"slug": "first", "stepType": "single", "dependencies": []}, - {"slug": "second", "stepType": "single", "dependencies": ["first"]} + {"slug": "first", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "second", "stepType": "single", "dependencies": ["first"], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}} ] }'::jsonb ) as result @@ -33,8 +33,8 @@ select is( 'existing_flow', '{ "steps": [ - {"slug": "first", "stepType": "single", "dependencies": []}, - {"slug": "second", "stepType": "single", "dependencies": ["first"]} + {"slug": "first", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "second", "stepType": "single", "dependencies": ["first"], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}} ] }'::jsonb ) as result diff --git a/pkgs/core/supabase/tests/fail_task_when_failed/skip_decrements_remaining_deps.test.sql b/pkgs/core/supabase/tests/fail_task_when_failed/skip_decrements_remaining_deps.test.sql new file mode 100644 index 000000000..c0d221d08 --- /dev/null +++ 
b/pkgs/core/supabase/tests/fail_task_when_failed/skip_decrements_remaining_deps.test.sql @@ -0,0 +1,84 @@ +-- Test: when_failed='skip' (non-cascade) should decrement remaining_deps on dependent steps +-- This mirrors the behavior in cascade_resolve_conditions.sql for when_unmet='skip' +-- +-- Flow structure: +-- step_a (when_failed='skip', max_attempts=0) → step_b +-- +-- Expected behavior: +-- 1. step_a fails, gets skipped (when_failed='skip') +-- 2. step_b.remaining_deps decremented from 1 to 0 +-- 3. step_b becomes ready and starts +-- 4. Run continues (status != 'failed') + +begin; +select plan(5); +select pgflow_tests.reset_db(); + +-- Create flow with step_a → step_b where step_a has when_failed='skip' and max_attempts=0 +select pgflow.create_flow('skip_test'); +select pgflow.add_step('skip_test', 'step_a', max_attempts => 0, when_failed => 'skip'); +select pgflow.add_step('skip_test', 'step_b', array['step_a']); + +-- Start the flow +select pgflow.start_flow('skip_test', '"input"'::jsonb); + +-- Verify step_b starts with remaining_deps = 1 +select is( + ( + select remaining_deps from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_b' + ), + 1::int, + 'step_b should start with remaining_deps = 1' +); + +-- Poll and fail step_a (it has max_attempts=0, so it will be skipped immediately) +select pgflow_tests.poll_and_fail('skip_test'); + +-- Test 1: step_a should be skipped with handler_failed reason +select is( + ( + select status from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_a' + ), + 'skipped', + 'step_a should be skipped after failure' +); + +-- Test 2: step_b.remaining_deps should be decremented to 0 +select is( + ( + select remaining_deps from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_b' + ), + 0::int, + 'step_b.remaining_deps should be decremented to 0' +); + +-- Test 3: step_b should be started (became ready when remaining_deps hit 0) +select is( + ( + select status from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_b' + ), + 'started', + 'step_b should be started (became ready)' +); + +-- Test 4: Run should NOT be failed (continues with step_b) +select isnt( + (select status from pgflow.runs limit 1), + 'failed', + 'Run should not be failed (continues with step_b)' +); + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/fail_task_when_failed/skip_diamond_multiple_dependents.test.sql b/pkgs/core/supabase/tests/fail_task_when_failed/skip_diamond_multiple_dependents.test.sql new file mode 100644 index 000000000..0703908e9 --- /dev/null +++ b/pkgs/core/supabase/tests/fail_task_when_failed/skip_diamond_multiple_dependents.test.sql @@ -0,0 +1,90 @@ +-- Test: when_failed='skip' decrements remaining_deps on MULTIPLE dependent steps +-- +-- Flow structure (diamond pattern): +-- step_a (when_failed='skip', max_attempts=0) +-- ├── step_b (depends on step_a) +-- └── step_c (depends on step_a) +-- +-- Expected behavior: +-- 1. step_a fails and gets skipped +-- 2. BOTH step_b and step_c have remaining_deps decremented from 1 to 0 +-- 3. 
BOTH step_b and step_c become ready and start + +begin; +select plan(5); +select pgflow_tests.reset_db(); + +-- Create diamond flow: step_a -> step_b, step_a -> step_c +select pgflow.create_flow('diamond_skip'); +select pgflow.add_step('diamond_skip', 'step_a', max_attempts => 0, when_failed => 'skip'); +select pgflow.add_step('diamond_skip', 'step_b', array['step_a']); +select pgflow.add_step('diamond_skip', 'step_c', array['step_a']); + +-- Start the flow +select pgflow.start_flow('diamond_skip', '"input"'::jsonb); + +-- Poll and fail step_a +select pgflow_tests.poll_and_fail('diamond_skip'); + +-- Test 1: step_a should be skipped +select is( + ( + select status from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_a' + ), + 'skipped', + 'step_a should be skipped after failure' +); + +-- Test 2: step_b.remaining_deps should be decremented to 0 +select is( + ( + select remaining_deps from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_b' + ), + 0::int, + 'step_b.remaining_deps should be decremented to 0' +); + +-- Test 3: step_c.remaining_deps should be decremented to 0 +select is( + ( + select remaining_deps from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_c' + ), + 0::int, + 'step_c.remaining_deps should be decremented to 0' +); + +-- Test 4: step_b should be started +select is( + ( + select status from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_b' + ), + 'started', + 'step_b should be started' +); + +-- Test 5: step_c should be started +select is( + ( + select status from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_c' + ), + 'started', + 'step_c should be started' +); + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/fail_task_when_failed/skip_only_step_completes_run.test.sql b/pkgs/core/supabase/tests/fail_task_when_failed/skip_only_step_completes_run.test.sql new file mode 100644 index 000000000..9f82d53eb --- /dev/null +++ b/pkgs/core/supabase/tests/fail_task_when_failed/skip_only_step_completes_run.test.sql @@ -0,0 +1,59 @@ +-- Test: when_failed='skip' on the only step should complete the run +-- +-- Flow structure: +-- step_a (when_failed='skip', max_attempts=0) - only step in flow +-- +-- Expected behavior: +-- 1. step_a fails and gets skipped +-- 2. remaining_steps decremented to 0 +-- 3. 
Run completes (status='completed') + +begin; +select plan(4); +select pgflow_tests.reset_db(); + +-- Create flow with single step +select pgflow.create_flow('single_skip'); +select pgflow.add_step('single_skip', 'step_a', max_attempts => 0, when_failed => 'skip'); + +-- Start the flow +select pgflow.start_flow('single_skip', '"input"'::jsonb); + +-- Verify run starts with remaining_steps = 1 +select is( + (select remaining_steps from pgflow.runs limit 1), + 1::int, + 'Run should start with remaining_steps = 1' +); + +-- Poll and fail step_a +select pgflow_tests.poll_and_fail('single_skip'); + +-- Test 1: step_a should be skipped +select is( + ( + select status from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_a' + ), + 'skipped', + 'step_a should be skipped after failure' +); + +-- Test 2: remaining_steps should be 0 +select is( + (select remaining_steps from pgflow.runs limit 1), + 0::int, + 'Run remaining_steps should be 0' +); + +-- Test 3: Run should be completed +select is( + (select status from pgflow.runs limit 1), + 'completed', + 'Run should be completed when only step is skipped' +); + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/fail_task_when_failed/skip_partial_deps_waits.test.sql b/pkgs/core/supabase/tests/fail_task_when_failed/skip_partial_deps_waits.test.sql new file mode 100644 index 000000000..f265b56da --- /dev/null +++ b/pkgs/core/supabase/tests/fail_task_when_failed/skip_partial_deps_waits.test.sql @@ -0,0 +1,85 @@ +-- Test: when_failed='skip' decrements remaining_deps but step waits if other deps remain +-- +-- Flow structure: +-- step_a (when_failed='skip', max_attempts=0) ─┐ +-- step_b ───────────────────────────────────────┼──> step_c (depends on both) +-- +-- Expected behavior: +-- 1. step_a fails and gets skipped +-- 2. step_c.remaining_deps decremented from 2 to 1 +-- 3. step_c does NOT start yet (still waiting for step_b) +-- 4. 
Run continues (not failed) + +begin; +select plan(5); +select pgflow_tests.reset_db(); + +-- Create flow: step_a + step_b -> step_c +select pgflow.create_flow('partial_skip'); +select pgflow.add_step('partial_skip', 'step_a', max_attempts => 0, when_failed => 'skip'); +select pgflow.add_step('partial_skip', 'step_b'); +select pgflow.add_step('partial_skip', 'step_c', array['step_a', 'step_b']); + +-- Start the flow +select pgflow.start_flow('partial_skip', '"input"'::jsonb); + +-- Verify step_c starts with remaining_deps = 2 +select is( + ( + select remaining_deps from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_c' + ), + 2::int, + 'step_c should start with remaining_deps = 2' +); + +-- Poll and fail step_a (step_b is still running) +select pgflow_tests.poll_and_fail('partial_skip'); + +-- Test 1: step_a should be skipped +select is( + ( + select status from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_a' + ), + 'skipped', + 'step_a should be skipped after failure' +); + +-- Test 2: step_c.remaining_deps should be decremented to 1 (not 0) +select is( + ( + select remaining_deps from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_c' + ), + 1::int, + 'step_c.remaining_deps should be decremented to 1 (waiting for step_b)' +); + +-- Test 3: step_c should NOT be started yet (still 'created') +select is( + ( + select status from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_c' + ), + 'created', + 'step_c should still be created (waiting for step_b)' +); + +-- Test 4: Run should continue (not failed) +select isnt( + (select status from pgflow.runs limit 1), + 'failed', + 'Run should not be failed (continues with step_b)' +); + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/fail_task_when_failed/skip_propagates_to_map_step.test.sql b/pkgs/core/supabase/tests/fail_task_when_failed/skip_propagates_to_map_step.test.sql new file mode 100644 index 000000000..eb6d4bc58 --- /dev/null +++ b/pkgs/core/supabase/tests/fail_task_when_failed/skip_propagates_to_map_step.test.sql @@ -0,0 +1,113 @@ +-- Test: when_failed='skip' propagates correctly to map step dependent +-- +-- This mirrors the behavior of when_unmet='skip' for conditions: +-- - Map step with skipped dependency gets initial_tasks=0 +-- - Map step auto-completes with output=[] +-- +-- Flow structure: +-- producer (when_failed='skip', max_attempts=0) → map_consumer (map step) +-- +-- Expected behavior: +-- 1. producer fails and gets skipped +-- 2. map_consumer.remaining_deps decremented to 0 +-- 3. map_consumer.initial_tasks set to 0 (skipped dep = empty array) +-- 4. map_consumer auto-completes with status='completed', output='[]' +-- 5. 
Run completes + +begin; +select plan(7); +select pgflow_tests.reset_db(); + +-- Create flow with producer -> map_consumer +select pgflow.create_flow('map_skip_test'); +select pgflow.add_step('map_skip_test', 'producer', max_attempts => 0, when_failed => 'skip'); +-- Map consumer: step_type='map' handles empty array from skipped producer +select pgflow.add_step('map_skip_test', 'map_consumer', array['producer'], step_type => 'map'); + +-- Start the flow +select pgflow.start_flow('map_skip_test', '"input"'::jsonb); + +-- Verify initial state +select is( + ( + select remaining_deps from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'map_consumer' + ), + 1::int, + 'map_consumer should start with remaining_deps = 1' +); + +-- Poll and fail producer +select pgflow_tests.poll_and_fail('map_skip_test'); + +-- Test 1: producer should be skipped +select is( + ( + select status from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'producer' + ), + 'skipped', + 'producer should be skipped after failure' +); + +-- Test 2: map_consumer.remaining_deps should be 0 +select is( + ( + select remaining_deps from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'map_consumer' + ), + 0::int, + 'map_consumer.remaining_deps should be decremented to 0' +); + +-- Test 3: map_consumer.initial_tasks should be 0 (skipped dep = empty array) +select is( + ( + select initial_tasks from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'map_consumer' + ), + 0::int, + 'map_consumer.initial_tasks should be 0' +); + +-- Test 4: map_consumer should be completed (auto-completed with zero tasks) +select is( + ( + select status from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'map_consumer' + ), + 'completed', + 'map_consumer should be auto-completed' +); + +-- Test 5: map_consumer.output should be empty array +select is( + ( + select output from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'map_consumer' + ), + '[]'::jsonb, + 'map_consumer.output should be empty array' +); + +-- Test 6: Run should be completed +select is( + (select status from pgflow.runs limit 1), + 'completed', + 'Run should be completed' +); + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/fail_task_when_failed/skip_verifies_handler_failed_reason.test.sql b/pkgs/core/supabase/tests/fail_task_when_failed/skip_verifies_handler_failed_reason.test.sql new file mode 100644 index 000000000..3a1d7c21c --- /dev/null +++ b/pkgs/core/supabase/tests/fail_task_when_failed/skip_verifies_handler_failed_reason.test.sql @@ -0,0 +1,78 @@ +-- Test: when_failed='skip' sets skip_reason to 'handler_failed' +-- +-- This distinguishes handler-failure skips from condition-unmet skips. +-- +-- Flow structure: +-- step_a (when_failed='skip', max_attempts=0) → step_b +-- +-- Expected behavior: +-- 1. step_a fails and gets skipped +-- 2. step_a.skip_reason = 'handler_failed' +-- 3. step_a.skipped_at is set +-- 4. 
step_a.error_message contains the failure reason + +begin; +select plan(4); +select pgflow_tests.reset_db(); + +-- Create flow +select pgflow.create_flow('skip_reason_test'); +select pgflow.add_step('skip_reason_test', 'step_a', max_attempts => 0, when_failed => 'skip'); +select pgflow.add_step('skip_reason_test', 'step_b', array['step_a']); + +-- Start the flow +select pgflow.start_flow('skip_reason_test', '"input"'::jsonb); + +-- Poll and fail step_a +select pgflow_tests.poll_and_fail('skip_reason_test'); + +-- Test 1: step_a should be skipped +select is( + ( + select status from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_a' + ), + 'skipped', + 'step_a should be skipped' +); + +-- Test 2: skip_reason should be 'handler_failed' +select is( + ( + select skip_reason from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_a' + ), + 'handler_failed', + 'skip_reason should be handler_failed' +); + +-- Test 3: skipped_at should be set +select isnt( + ( + select skipped_at from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_a' + ), + null, + 'skipped_at should be set' +); + +-- Test 4: error_message should be set (poll_and_fail generates "step_a FAILED") +select is( + ( + select error_message from pgflow.step_states + where + run_id = (select run_id from pgflow.runs limit 1) + and step_slug = 'step_a' + ), + 'step_a FAILED', + 'error_message should be set' +); + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/fail_task_when_failed/type_violation_always_hard_fails.test.sql b/pkgs/core/supabase/tests/fail_task_when_failed/type_violation_always_hard_fails.test.sql new file mode 100644 index 000000000..6d5e642a4 --- /dev/null +++ b/pkgs/core/supabase/tests/fail_task_when_failed/type_violation_always_hard_fails.test.sql @@ -0,0 +1,57 @@ +-- Test: TYPE_VIOLATION in complete_task always hard fails regardless of when_failed +-- TYPE_VIOLATION is a programming error (wrong return type), not a runtime condition +-- It should always cause the run to fail, even with when_failed='skip' or 'skip-cascade' +begin; +select plan(4); +select pgflow_tests.reset_db(); + +-- SETUP: Create a flow where step_a feeds into a map step +-- step_a has when_failed='skip-cascade' but TYPE_VIOLATION should override this +select pgflow.create_flow('test_flow'); +select pgflow.add_step('test_flow', 'step_a', when_failed => 'skip-cascade'); +select pgflow.add_step('test_flow', 'step_b', ARRAY['step_a'], step_type => 'map'); + +-- Start flow +select pgflow.start_flow('test_flow', '{}'::jsonb); + +-- Poll for step_a's task and complete it with non-array output (causes TYPE_VIOLATION) +with task as ( + select * from pgflow_tests.read_and_start('test_flow', 1, 1) +) +select pgflow.complete_task( + (select run_id from task), + (select step_slug from task), + 0, + '"not_an_array"'::jsonb -- String instead of array - TYPE_VIOLATION +); + +-- TEST 1: step_a should be marked as failed (not skipped) +select is( + (select status from pgflow.step_states where flow_slug = 'test_flow' and step_slug = 'step_a'), + 'failed', + 'step_a should be failed on TYPE_VIOLATION (not skipped despite when_failed=skip-cascade)' +); + +-- TEST 2: error_message should contain TYPE_VIOLATION +select ok( + (select error_message from pgflow.step_states + where flow_slug = 'test_flow' and step_slug = 'step_a') LIKE '%TYPE_VIOLATION%', + 'Error message should indicate 
TYPE_VIOLATION' +); + +-- TEST 3: Run should be failed +select is( + (select status from pgflow.runs where flow_slug = 'test_flow'), + 'failed', + 'Run should be failed on TYPE_VIOLATION regardless of when_failed setting' +); + +-- TEST 4: step_b should NOT be skipped (run failed before cascade could happen) +select isnt( + (select status from pgflow.step_states where flow_slug = 'test_flow' and step_slug = 'step_b'), + 'skipped', + 'step_b should not be skipped - run failed before any cascade' +); + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/fail_task_when_failed/when_failed_fail_marks_run_failed.test.sql b/pkgs/core/supabase/tests/fail_task_when_failed/when_failed_fail_marks_run_failed.test.sql new file mode 100644 index 000000000..057581b3b --- /dev/null +++ b/pkgs/core/supabase/tests/fail_task_when_failed/when_failed_fail_marks_run_failed.test.sql @@ -0,0 +1,37 @@ +-- Test: fail_task with when_failed='fail' (default) marks run as failed +-- This is the current behavior and should remain unchanged +begin; +select plan(3); +select pgflow_tests.reset_db(); + +-- SETUP: Create a flow with default when_failed='fail' (0 retries so it fails immediately) +select pgflow.create_flow('test_flow'); +select pgflow.add_step('test_flow', 'step_a', max_attempts => 0); + +-- Start flow and fail the task +select pgflow.start_flow('test_flow', '{}'::jsonb); +select pgflow_tests.poll_and_fail('test_flow'); + +-- TEST 1: Task should be failed +select is( + (select status from pgflow.step_tasks where flow_slug = 'test_flow' and step_slug = 'step_a'), + 'failed', + 'Task should be marked as failed' +); + +-- TEST 2: Step should be failed +select is( + (select status from pgflow.step_states where flow_slug = 'test_flow' and step_slug = 'step_a'), + 'failed', + 'Step should be marked as failed' +); + +-- TEST 3: Run should be failed +select is( + (select status from pgflow.runs where flow_slug = 'test_flow'), + 'failed', + 'Run should be marked as failed when when_failed=fail (default)' +); + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/fail_task_when_failed/when_failed_skip_cascade_skips_dependents.test.sql b/pkgs/core/supabase/tests/fail_task_when_failed/when_failed_skip_cascade_skips_dependents.test.sql new file mode 100644 index 000000000..ea0ecef9a --- /dev/null +++ b/pkgs/core/supabase/tests/fail_task_when_failed/when_failed_skip_cascade_skips_dependents.test.sql @@ -0,0 +1,68 @@ +-- Test: fail_task with when_failed='skip-cascade' skips step and cascades to dependents +begin; +select plan(7); +select pgflow_tests.reset_db(); + +-- SETUP: Create a flow with when_failed='skip-cascade' +-- step_a (will fail) -> step_b (depends on a) -> step_c (depends on b) +select pgflow.create_flow('test_flow'); +select pgflow.add_step('test_flow', 'step_a', max_attempts => 0, when_failed => 'skip-cascade'); +select pgflow.add_step('test_flow', 'step_b', ARRAY['step_a']); +select pgflow.add_step('test_flow', 'step_c', ARRAY['step_b']); +select pgflow.add_step('test_flow', 'step_d'); -- Independent step to verify run continues + +-- Start flow and fail step_a's task +select pgflow.start_flow('test_flow', '{}'::jsonb); +select pgflow_tests.poll_and_fail('test_flow'); + +-- TEST 1: step_a should be skipped (not failed) +select is( + (select status from pgflow.step_states where flow_slug = 'test_flow' and step_slug = 'step_a'), + 'skipped', + 'step_a should be marked as skipped when when_failed=skip-cascade' +); + +-- TEST 2: step_a skip reason should be handler_failed +select is( 
+ (select skip_reason from pgflow.step_states where flow_slug = 'test_flow' and step_slug = 'step_a'), + 'handler_failed', + 'step_a skip reason should be handler_failed' +); + +-- TEST 3: step_b (dependent) should be skipped via cascade +select is( + (select status from pgflow.step_states where flow_slug = 'test_flow' and step_slug = 'step_b'), + 'skipped', + 'step_b should be cascaded-skipped' +); + +-- TEST 4: step_b skip reason should indicate dependency skipped +select is( + (select skip_reason from pgflow.step_states where flow_slug = 'test_flow' and step_slug = 'step_b'), + 'dependency_skipped', + 'step_b skip reason should be dependency_skipped' +); + +-- TEST 5: step_c (transitive dependent) should also be skipped +select is( + (select status from pgflow.step_states where flow_slug = 'test_flow' and step_slug = 'step_c'), + 'skipped', + 'step_c should be transitively cascade-skipped' +); + +-- TEST 6: step_d (independent) should remain started (not affected) +select is( + (select status from pgflow.step_states where flow_slug = 'test_flow' and step_slug = 'step_d'), + 'started', + 'step_d (independent step) should remain started' +); + +-- TEST 7: Run should NOT be failed (continues running) +select isnt( + (select status from pgflow.runs where flow_slug = 'test_flow'), + 'failed', + 'Run should NOT be marked as failed when when_failed=skip-cascade' +); + +select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/fail_task_when_failed/when_failed_skip_skips_step.test.sql b/pkgs/core/supabase/tests/fail_task_when_failed/when_failed_skip_skips_step.test.sql new file mode 100644 index 000000000..08d029cf1 --- /dev/null +++ b/pkgs/core/supabase/tests/fail_task_when_failed/when_failed_skip_skips_step.test.sql @@ -0,0 +1,51 @@ +-- Test: fail_task with when_failed='skip' skips the step and continues run +begin; +select plan(5); +select pgflow_tests.reset_db(); + +-- SETUP: Create a flow with when_failed='skip' (0 retries so it fails immediately) +select pgflow.create_flow('test_flow'); +select pgflow.add_step('test_flow', 'step_a', max_attempts => 0, when_failed => 'skip'); +select pgflow.add_step('test_flow', 'step_b'); -- Independent step to verify run continues + +-- Start flow and fail step_a's task +select pgflow.start_flow('test_flow', '{}'::jsonb); +select pgflow_tests.poll_and_fail('test_flow'); + +-- TEST 1: Task should be failed (it still failed, but skip mode affects step/run) +select is( + (select status from pgflow.step_tasks where flow_slug = 'test_flow' and step_slug = 'step_a'), + 'failed', + 'Task should be marked as failed' +); + +-- TEST 2: Step should be skipped (not failed) +select is( + (select status from pgflow.step_states where flow_slug = 'test_flow' and step_slug = 'step_a'), + 'skipped', + 'Step should be marked as skipped when when_failed=skip' +); + +-- TEST 3: Skip reason should indicate handler failure +select is( + (select skip_reason from pgflow.step_states where flow_slug = 'test_flow' and step_slug = 'step_a'), + 'handler_failed', + 'Skip reason should be handler_failed' +); + +-- TEST 4: Run should NOT be failed (continues running) +select isnt( + (select status from pgflow.runs where flow_slug = 'test_flow'), + 'failed', + 'Run should NOT be marked as failed when when_failed=skip' +); + +-- TEST 5: Error message should be preserved in step_states +select is( + (select error_message from pgflow.step_states where flow_slug = 'test_flow' and step_slug = 'step_a'), + 'step_a FAILED', + 'Error message should be preserved on skipped step' +); + 
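+
+-- Summary of the state verified above (illustration only, mirrors the
+-- assertions in TESTs 1-4): when_failed = 'skip' leaves an asymmetric record:
+--   step_tasks.status  = 'failed'   -- the task keeps its failure
+--   step_states.status = 'skipped'  -- skip_reason = 'handler_failed'
+--   runs.status       <> 'failed'   -- the run continues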
+select finish(); +rollback; diff --git a/pkgs/core/supabase/tests/get_flow_shape/basic_shape.test.sql b/pkgs/core/supabase/tests/get_flow_shape/basic_shape.test.sql index 573e276dc..840af07c0 100644 --- a/pkgs/core/supabase/tests/get_flow_shape/basic_shape.test.sql +++ b/pkgs/core/supabase/tests/get_flow_shape/basic_shape.test.sql @@ -13,9 +13,9 @@ select is( pgflow._get_flow_shape('test_flow'), '{ "steps": [ - {"slug": "first", "stepType": "single", "dependencies": []}, - {"slug": "second", "stepType": "single", "dependencies": ["first"]}, - {"slug": "third", "stepType": "single", "dependencies": ["second"]} + {"slug": "first", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "second", "stepType": "single", "dependencies": ["first"], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "third", "stepType": "single", "dependencies": ["second"], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}} ] }'::jsonb, 'Should return correct shape for simple sequential flow' diff --git a/pkgs/core/supabase/tests/get_flow_shape/map_steps.test.sql b/pkgs/core/supabase/tests/get_flow_shape/map_steps.test.sql index 292bb9a3d..67c26e206 100644 --- a/pkgs/core/supabase/tests/get_flow_shape/map_steps.test.sql +++ b/pkgs/core/supabase/tests/get_flow_shape/map_steps.test.sql @@ -20,8 +20,8 @@ select is( pgflow._get_flow_shape('map_flow'), '{ "steps": [ - {"slug": "root_map", "stepType": "map", "dependencies": []}, - {"slug": "process", "stepType": "single", "dependencies": ["root_map"]} + {"slug": "root_map", "stepType": "map", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "process", "stepType": "single", "dependencies": ["root_map"], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}} ] }'::jsonb, 'Should correctly identify map step type' diff --git a/pkgs/core/supabase/tests/get_flow_shape/multiple_deps_sorted.test.sql b/pkgs/core/supabase/tests/get_flow_shape/multiple_deps_sorted.test.sql index a92436dd1..099275036 100644 --- a/pkgs/core/supabase/tests/get_flow_shape/multiple_deps_sorted.test.sql +++ b/pkgs/core/supabase/tests/get_flow_shape/multiple_deps_sorted.test.sql @@ -16,10 +16,10 @@ select is( pgflow._get_flow_shape('multi_deps'), '{ "steps": [ - {"slug": "alpha", "stepType": "single", "dependencies": []}, - {"slug": "beta", "stepType": "single", "dependencies": []}, - {"slug": "gamma", "stepType": "single", "dependencies": []}, - {"slug": "final", "stepType": "single", "dependencies": ["alpha", "beta", "gamma"]} + {"slug": "alpha", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "beta", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "gamma", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}}, + {"slug": 
"final", "stepType": "single", "dependencies": ["alpha", "beta", "gamma"], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": false}} ] }'::jsonb, 'Dependencies should be sorted alphabetically' diff --git a/pkgs/core/supabase/tests/get_flow_shape/pattern_shape.test.sql b/pkgs/core/supabase/tests/get_flow_shape/pattern_shape.test.sql new file mode 100644 index 000000000..481d15a1e --- /dev/null +++ b/pkgs/core/supabase/tests/get_flow_shape/pattern_shape.test.sql @@ -0,0 +1,44 @@ +begin; +select plan(2); +select pgflow_tests.reset_db(); + +-- Setup: Create a flow with pattern conditions +select pgflow.create_flow('test_flow'); +select pgflow.add_step('test_flow', 'step_with_if', max_attempts := 1, required_input_pattern := '{"status": "active"}'::jsonb); +select pgflow.add_step('test_flow', 'step_with_ifnot', max_attempts := 1, forbidden_input_pattern := '{"type": "deleted"}'::jsonb); +select pgflow.add_step('test_flow', 'step_with_both', max_attempts := 1, required_input_pattern := '{"status": "active"}'::jsonb, forbidden_input_pattern := '{"type": "archived"}'::jsonb); + +-- Test: Get flow shape with patterns (order matches insertion order: if, ifnot, both) +select is( + pgflow._get_flow_shape('test_flow'), + '{ + "steps": [ + {"slug": "step_with_if", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": true, "value": {"status": "active"}}, "forbiddenInputPattern": {"defined": false}}, + {"slug": "step_with_ifnot", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": false}, "forbiddenInputPattern": {"defined": true, "value": {"type": "deleted"}}}, + {"slug": "step_with_both", "stepType": "single", "dependencies": [], "whenUnmet": "skip", "whenFailed": "fail", "requiredInputPattern": {"defined": true, "value": {"status": "active"}}, "forbiddenInputPattern": {"defined": true, "value": {"type": "archived"}}} + ] + }'::jsonb, + 'Should return correct shape with pattern conditions' +); + +-- Test: Verify patterns are stored in steps table +select results_eq( + $$ + SELECT step_slug, required_input_pattern, forbidden_input_pattern + FROM pgflow.steps + WHERE flow_slug = 'test_flow' + ORDER BY step_slug + $$, + $$ + SELECT * + FROM (VALUES + ('step_with_both', '{"status": "active"}'::jsonb, '{"type": "archived"}'::jsonb), + ('step_with_if', '{"status": "active"}'::jsonb, NULL::jsonb), + ('step_with_ifnot', NULL::jsonb, '{"type": "deleted"}'::jsonb) + ) AS t(step_slug, required_input_pattern, forbidden_input_pattern) + $$, + 'Pattern columns should be correctly stored in steps table' +); + +select finish(); +rollback; diff --git a/pkgs/dsl/__tests__/runtime/condition-options.test.ts b/pkgs/dsl/__tests__/runtime/condition-options.test.ts new file mode 100644 index 000000000..1dcd89ea7 --- /dev/null +++ b/pkgs/dsl/__tests__/runtime/condition-options.test.ts @@ -0,0 +1,288 @@ +import { describe, it, expect } from 'vitest'; +import { Flow } from '../../src/dsl.js'; +import { compileFlow } from '../../src/compile-flow.js'; + +describe('Condition Options', () => { + describe('DSL accepts if and whenUnmet', () => { + it('should accept if option on a step', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { slug: 'conditional_step', if: { enabled: true } }, + () => 'result' + ); + + const step = flow.getStepDefinition('conditional_step'); + expect(step.options.if).toEqual({ enabled: 
true }); + }); + + it('should accept whenUnmet option on a step', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { slug: 'conditional_step', whenUnmet: 'skip' }, + () => 'result' + ); + + const step = flow.getStepDefinition('conditional_step'); + expect(step.options.whenUnmet).toBe('skip'); + }); + + it('should accept both if and whenUnmet together', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { + slug: 'conditional_step', + if: { status: 'active' }, + whenUnmet: 'skip-cascade', + }, + () => 'result' + ); + + const step = flow.getStepDefinition('conditional_step'); + expect(step.options.if).toEqual({ status: 'active' }); + expect(step.options.whenUnmet).toBe('skip-cascade'); + }); + + it('should accept if on dependent steps', () => { + const flow = new Flow({ slug: 'test_flow' }) + .step({ slug: 'first' }, () => ({ success: true })) + .step( + { + slug: 'conditional_step', + dependsOn: ['first'], + if: { first: { success: true } }, + whenUnmet: 'skip', + }, + () => 'result' + ); + + const step = flow.getStepDefinition('conditional_step'); + expect(step.options.if).toEqual({ first: { success: true } }); + expect(step.options.whenUnmet).toBe('skip'); + }); + }); + + describe('compileFlow includes condition parameters', () => { + it('should compile required_input_pattern for root step', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { slug: 'step1', if: { enabled: true } }, + () => 'result' + ); + + const statements = compileFlow(flow); + + expect(statements).toHaveLength(2); + expect(statements[1]).toContain( + 'required_input_pattern => \'{"enabled":true}\'' + ); + }); + + it('should compile when_unmet for step', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { slug: 'step1', whenUnmet: 'fail' }, + () => 'result' + ); + + const statements = compileFlow(flow); + + expect(statements).toHaveLength(2); + expect(statements[1]).toContain("when_unmet => 'fail'"); + }); + + it('should compile both required_input_pattern and when_unmet together', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { + slug: 'step1', + if: { active: true, type: 'premium' }, + whenUnmet: 'skip-cascade', + }, + () => 'result' + ); + + const statements = compileFlow(flow); + + expect(statements).toHaveLength(2); + expect(statements[1]).toContain( + 'required_input_pattern => \'{"active":true,"type":"premium"}\'' + ); + expect(statements[1]).toContain("when_unmet => 'skip-cascade'"); + }); + + it('should compile step with all options including condition', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { + slug: 'step1', + maxAttempts: 3, + timeout: 60, + if: { enabled: true }, + whenUnmet: 'skip', + }, + () => 'result' + ); + + const statements = compileFlow(flow); + + expect(statements).toHaveLength(2); + expect(statements[1]).toContain('max_attempts => 3'); + expect(statements[1]).toContain('timeout => 60'); + expect(statements[1]).toContain( + 'required_input_pattern => \'{"enabled":true}\'' + ); + expect(statements[1]).toContain("when_unmet => 'skip'"); + }); + + it('should compile dependent step with condition checking deps output', () => { + const flow = new Flow({ slug: 'test_flow' }) + .step({ slug: 'first' }, () => ({ success: true })) + .step( + { + slug: 'second', + dependsOn: ['first'], + if: { first: { success: true } }, + whenUnmet: 'skip', + }, + () => 'result' + ); + + const statements = compileFlow(flow); + + expect(statements).toHaveLength(3); + expect(statements[2]).toContain("ARRAY['first']"); + 
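+      // For reference, the compiled statement should look roughly like this
+      // (a sketch -- exact formatting is up to compileFlow):
+      //   SELECT pgflow.add_step('test_flow', 'second', ARRAY['first'],
+      //     required_input_pattern => '{"first":{"success":true}}',
+      //     when_unmet => 'skip');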
expect(statements[2]).toContain( + 'required_input_pattern => \'{"first":{"success":true}}\'' + ); + expect(statements[2]).toContain("when_unmet => 'skip'"); + }); + }); + + describe('whenUnmet validation', () => { + it('should only accept valid whenUnmet values', () => { + // Valid values should work + expect(() => + new Flow({ slug: 'test' }).step( + { slug: 's1', whenUnmet: 'fail' }, + () => 1 + ) + ).not.toThrow(); + + expect(() => + new Flow({ slug: 'test' }).step( + { slug: 's1', whenUnmet: 'skip' }, + () => 1 + ) + ).not.toThrow(); + + expect(() => + new Flow({ slug: 'test' }).step( + { slug: 's1', whenUnmet: 'skip-cascade' }, + () => 1 + ) + ).not.toThrow(); + }); + }); + + describe('DSL accepts ifNot', () => { + it('should accept ifNot option on a step', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { slug: 'no_admin_step', ifNot: { role: 'admin' } }, + () => 'result' + ); + + const step = flow.getStepDefinition('no_admin_step'); + expect(step.options.ifNot).toEqual({ role: 'admin' }); + }); + + it('should accept both if and ifNot together', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { + slug: 'admin_action', + if: { role: 'admin', active: true }, + ifNot: { suspended: true }, + whenUnmet: 'skip', + }, + () => 'result' + ); + + const step = flow.getStepDefinition('admin_action'); + expect(step.options.if).toEqual({ role: 'admin', active: true }); + expect(step.options.ifNot).toEqual({ suspended: true }); + expect(step.options.whenUnmet).toBe('skip'); + }); + + it('should accept ifNot on dependent steps', () => { + const flow = new Flow({ slug: 'test_flow' }) + .step({ slug: 'first' }, () => ({ error: false })) + .step( + { + slug: 'continue_step', + dependsOn: ['first'], + ifNot: { first: { error: true } }, + whenUnmet: 'skip', + }, + () => 'result' + ); + + const step = flow.getStepDefinition('continue_step'); + expect(step.options.ifNot).toEqual({ first: { error: true } }); + expect(step.options.whenUnmet).toBe('skip'); + }); + }); + + describe('compileFlow includes ifNot parameters', () => { + it('should compile forbidden_input_pattern for root step', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { slug: 'step1', ifNot: { role: 'admin' } }, + () => 'result' + ); + + const statements = compileFlow(flow); + + expect(statements).toHaveLength(2); + expect(statements[1]).toContain( + 'forbidden_input_pattern => \'{"role":"admin"}\'' + ); + }); + + it('should compile both if and ifNot together', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { + slug: 'step1', + if: { active: true }, + ifNot: { suspended: true }, + whenUnmet: 'skip', + }, + () => 'result' + ); + + const statements = compileFlow(flow); + + expect(statements).toHaveLength(2); + expect(statements[1]).toContain( + 'required_input_pattern => \'{"active":true}\'' + ); + expect(statements[1]).toContain( + 'forbidden_input_pattern => \'{"suspended":true}\'' + ); + expect(statements[1]).toContain("when_unmet => 'skip'"); + }); + + it('should compile ifNot for dependent step', () => { + const flow = new Flow({ slug: 'test_flow' }) + .step({ slug: 'first' }, () => ({ error: false })) + .step( + { + slug: 'second', + dependsOn: ['first'], + ifNot: { first: { error: true } }, + whenUnmet: 'skip', + }, + () => 'result' + ); + + const statements = compileFlow(flow); + + expect(statements).toHaveLength(3); + expect(statements[2]).toContain("ARRAY['first']"); + expect(statements[2]).toContain( + 'forbidden_input_pattern => \'{"first":{"error":true}}\'' + ); + 
expect(statements[2]).toContain("when_unmet => 'skip'"); + }); + }); +}); diff --git a/pkgs/dsl/__tests__/runtime/flow-shape.test.ts b/pkgs/dsl/__tests__/runtime/flow-shape.test.ts index 6b7971568..50691a4b2 100644 --- a/pkgs/dsl/__tests__/runtime/flow-shape.test.ts +++ b/pkgs/dsl/__tests__/runtime/flow-shape.test.ts @@ -61,6 +61,10 @@ describe('extractFlowShape', () => { slug: 'step1', stepType: 'single', dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }); }); @@ -108,6 +112,10 @@ describe('extractFlowShape', () => { slug: 'step1', stepType: 'single', dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, options: { maxAttempts: 3, baseDelay: 5, @@ -129,6 +137,10 @@ describe('extractFlowShape', () => { slug: 'step1', stepType: 'single', dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }); expect('options' in shape.steps[0]).toBe(false); }); @@ -160,6 +172,10 @@ describe('extractFlowShape', () => { slug: 'process_items', stepType: 'map', dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }); }); @@ -175,6 +191,10 @@ describe('extractFlowShape', () => { slug: 'process', stepType: 'map', dependencies: ['get_items'], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }); }); }); @@ -189,7 +209,12 @@ describe('extractFlowShape', () => { }) .step({ slug: 'website' }, (flowInput) => ({ content: flowInput.url })) .step( - { slug: 'sentiment', dependsOn: ['website'], maxAttempts: 5, timeout: 30 }, + { + slug: 'sentiment', + dependsOn: ['website'], + maxAttempts: 5, + timeout: 30, + }, () => ({ score: 0.8 }) ) .step({ slug: 'summary', dependsOn: ['website'] }, () => ({ @@ -209,11 +234,19 @@ describe('extractFlowShape', () => { slug: 'website', stepType: 'single', dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }, { slug: 'sentiment', stepType: 'single', dependencies: ['website'], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, options: { maxAttempts: 5, timeout: 30, @@ -223,11 +256,19 @@ describe('extractFlowShape', () => { slug: 'summary', stepType: 'single', dependencies: ['website'], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }, { slug: 'save_to_db', stepType: 'single', dependencies: ['sentiment', 'summary'], // sorted alphabetically + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }, ], options: { @@ -251,6 +292,89 @@ describe('extractFlowShape', () => { 'third', ]); }); + + describe('pattern extraction', () => { + it('should extract requiredInputPattern from step with if option', () => { + const flow = new Flow<{ status: string }>({ slug: 'test_flow' }).step( + { slug: 'step1', if: { status: 'active' } }, + (flowInput) => flowInput + ); + const shape = extractFlowShape(flow); + + expect(shape.steps[0]).toEqual({ + slug: 'step1', + stepType: 'single', + dependencies: [], + 
whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: true, value: { status: 'active' } }, + forbiddenInputPattern: { defined: false }, + }); + }); + + it('should extract forbiddenInputPattern from step with ifNot option', () => { + const flow = new Flow<{ status: string }>({ slug: 'test_flow' }).step( + { slug: 'step1', ifNot: { status: 'deleted' } }, + (flowInput) => flowInput + ); + const shape = extractFlowShape(flow); + + expect(shape.steps[0]).toEqual({ + slug: 'step1', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: true, value: { status: 'deleted' } }, + }); + }); + + it('should extract both pattern fields when both if and ifNot are set', () => { + const flow = new Flow<{ status: string; type: string }>({ + slug: 'test_flow', + }).step( + { + slug: 'step1', + if: { status: 'active' }, + ifNot: { type: 'archived' }, + }, + (flowInput) => flowInput + ); + const shape = extractFlowShape(flow); + + expect(shape.steps[0]).toEqual({ + slug: 'step1', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: true, value: { status: 'active' } }, + forbiddenInputPattern: { defined: true, value: { type: 'archived' } }, + }); + }); + + it('should include pattern keys with defined:false when no patterns are set', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { slug: 'step1' }, + (flowInput) => flowInput + ); + const shape = extractFlowShape(flow); + + expect(shape.steps[0]).toEqual({ + slug: 'step1', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, + }); + // Pattern keys are now always present with the wrapper format + expect('requiredInputPattern' in shape.steps[0]).toBe(true); + expect('forbiddenInputPattern' in shape.steps[0]).toBe(true); + }); + }); }); }); @@ -263,6 +387,10 @@ describe('compareFlowShapes', () => { slug: 'step1', stepType: 'single', dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }, ], }; @@ -290,7 +418,15 @@ describe('compareFlowShapes', () => { }; const b: FlowShape = { steps: [ - { slug: 'step1', stepType: 'single', dependencies: [] }, + { + slug: 'step1', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, + }, ], }; @@ -305,7 +441,15 @@ describe('compareFlowShapes', () => { it('should detect extra step at end', () => { const a: FlowShape = { steps: [ - { slug: 'step1', stepType: 'single', dependencies: [] }, + { + slug: 'step1', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, + }, ], }; const b: FlowShape = { steps: [] }; @@ -321,14 +465,46 @@ describe('compareFlowShapes', () => { it('should detect different steps at same positions', () => { const a: FlowShape = { steps: [ - { slug: 'step_a', stepType: 'single', dependencies: [] }, - { slug: 'step_b', stepType: 'single', dependencies: [] }, + { + slug: 'step_a', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, + }, 
+ { + slug: 'step_b', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, + }, ], }; const b: FlowShape = { steps: [ - { slug: 'step_c', stepType: 'single', dependencies: [] }, - { slug: 'step_d', stepType: 'single', dependencies: [] }, + { + slug: 'step_c', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, + }, + { + slug: 'step_d', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, + }, ], }; @@ -347,14 +523,46 @@ describe('compareFlowShapes', () => { it('should detect steps in different order', () => { const a: FlowShape = { steps: [ - { slug: 'step_a', stepType: 'single', dependencies: [] }, - { slug: 'step_b', stepType: 'single', dependencies: [] }, + { + slug: 'step_a', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, + }, + { + slug: 'step_b', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, + }, ], }; const b: FlowShape = { steps: [ - { slug: 'step_b', stepType: 'single', dependencies: [] }, - { slug: 'step_a', stepType: 'single', dependencies: [] }, + { + slug: 'step_b', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, + }, + { + slug: 'step_a', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, + }, ], }; @@ -373,12 +581,28 @@ describe('compareFlowShapes', () => { it('should detect stepType difference', () => { const a: FlowShape = { steps: [ - { slug: 'step1', stepType: 'single', dependencies: [] }, + { + slug: 'step1', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, + }, ], }; const b: FlowShape = { steps: [ - { slug: 'step1', stepType: 'map', dependencies: [] }, + { + slug: 'step1', + stepType: 'map', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, + }, ], }; @@ -394,7 +618,15 @@ describe('compareFlowShapes', () => { it('should detect added dependency', () => { const a: FlowShape = { steps: [ - { slug: 'step1', stepType: 'single', dependencies: [] }, + { + slug: 'step1', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, + }, ], }; const b: FlowShape = { @@ -403,6 +635,10 @@ describe('compareFlowShapes', () => { slug: 'step1', stepType: 'single', dependencies: ['step0'], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }, ], }; @@ -421,6 +657,10 @@ describe('compareFlowShapes', () => { slug: 'step1', stepType: 'single', dependencies: ['dep1', 'dep2'], + whenUnmet: 'skip', + whenFailed: 'fail', + 
requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }, ], }; @@ -430,6 +670,10 @@ describe('compareFlowShapes', () => { slug: 'step1', stepType: 'single', dependencies: ['dep1'], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }, ], }; @@ -448,6 +692,10 @@ describe('compareFlowShapes', () => { slug: 'step1', stepType: 'single', dependencies: ['old_dep'], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }, ], }; @@ -457,6 +705,10 @@ describe('compareFlowShapes', () => { slug: 'step1', stepType: 'single', dependencies: ['new_dep'], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }, ], }; @@ -473,12 +725,15 @@ describe('compareFlowShapes', () => { it('should match flows with same structure but different DSL options', () => { // This is the key behavior: options are in shape for creation, // but don't affect shape matching (runtime tunable via SQL) - const flowA = new Flow({ slug: 'test_flow', maxAttempts: 3 }).step( - { slug: 'step1', timeout: 60 }, - (flowInput) => flowInput - ); + const flowA = new Flow({ + slug: 'test_flow', + maxAttempts: 3, + }).step({ slug: 'step1', timeout: 60 }, (flowInput) => flowInput); - const flowB = new Flow({ slug: 'test_flow', maxAttempts: 10 }).step( + const flowB = new Flow({ + slug: 'test_flow', + maxAttempts: 10, + }).step( { slug: 'step1', timeout: 300, startDelay: 100 }, (flowInput) => flowInput ); @@ -490,7 +745,10 @@ describe('compareFlowShapes', () => { expect(shapeA.options).toEqual({ maxAttempts: 3 }); expect(shapeB.options).toEqual({ maxAttempts: 10 }); expect(shapeA.steps[0].options).toEqual({ timeout: 60 }); - expect(shapeB.steps[0].options).toEqual({ timeout: 300, startDelay: 100 }); + expect(shapeB.steps[0].options).toEqual({ + timeout: 300, + startDelay: 100, + }); // But comparison ignores options - only structure matters const result = compareFlowShapes(shapeA, shapeB); @@ -507,6 +765,10 @@ describe('compareFlowShapes', () => { slug: 'step1', stepType: 'single', dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }, ], }; @@ -516,11 +778,19 @@ describe('compareFlowShapes', () => { slug: 'step1', stepType: 'map', dependencies: ['dep1'], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }, { slug: 'step2', stepType: 'single', dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, }, ], }; @@ -615,5 +885,89 @@ describe('compareFlowShapes', () => { 'Step at index 1: dependencies differ [] vs [step1]' ); }); + + describe('pattern comparison', () => { + it('should detect requiredInputPattern difference', () => { + const flowA = new Flow<{ status: string }>({ slug: 'test_flow' }).step( + { slug: 'step1', if: { status: 'active' } }, + (flowInput) => flowInput + ); + + const flowB = new Flow<{ status: string }>({ slug: 'test_flow' }).step( + { slug: 'step1', if: { status: 'pending' } }, + (flowInput) => flowInput + ); + + const shapeA = extractFlowShape(flowA); + const shapeB = extractFlowShape(flowB); + + const result = compareFlowShapes(shapeA, shapeB); + expect(result.match).toBe(false); + 
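+      // Patterns are compared via their JSON-serialized wrapper form
+      // ({"defined": ..., "value": ...}), so value-level edits surface
+      // as shape differences: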
expect(result.differences).toContain( + 'Step at index 0: requiredInputPattern differs \'{"defined":true,"value":{"status":"active"}}\' vs \'{"defined":true,"value":{"status":"pending"}}\'' + ); + }); + + it('should detect forbiddenInputPattern difference', () => { + const flowA = new Flow<{ status: string }>({ slug: 'test_flow' }).step( + { slug: 'step1', ifNot: { status: 'deleted' } }, + (flowInput) => flowInput + ); + + const flowB = new Flow<{ status: string }>({ slug: 'test_flow' }).step( + { slug: 'step1', ifNot: { status: 'archived' } }, + (flowInput) => flowInput + ); + + const shapeA = extractFlowShape(flowA); + const shapeB = extractFlowShape(flowB); + + const result = compareFlowShapes(shapeA, shapeB); + expect(result.match).toBe(false); + expect(result.differences).toContain( + 'Step at index 0: forbiddenInputPattern differs \'{"defined":true,"value":{"status":"deleted"}}\' vs \'{"defined":true,"value":{"status":"archived"}}\'' + ); + }); + + it('should match flows with identical patterns', () => { + const createFlow = () => + new Flow<{ status: string }>({ slug: 'test_flow' }).step( + { + slug: 'step1', + if: { status: 'active' }, + ifNot: { status: 'deleted' }, + }, + (flowInput) => flowInput + ); + + const shapeA = extractFlowShape(createFlow()); + const shapeB = extractFlowShape(createFlow()); + + const result = compareFlowShapes(shapeA, shapeB); + expect(result.match).toBe(true); + expect(result.differences).toEqual([]); + }); + + it('should detect missing requiredInputPattern', () => { + const flowA = new Flow<{ status: string }>({ slug: 'test_flow' }).step( + { slug: 'step1' }, + (flowInput) => flowInput + ); + + const flowB = new Flow<{ status: string }>({ slug: 'test_flow' }).step( + { slug: 'step1', if: { status: 'active' } }, + (flowInput) => flowInput + ); + + const shapeA = extractFlowShape(flowA); + const shapeB = extractFlowShape(flowB); + + const result = compareFlowShapes(shapeA, shapeB); + expect(result.match).toBe(false); + expect(result.differences).toContain( + 'Step at index 0: requiredInputPattern differs \'{"defined":false}\' vs \'{"defined":true,"value":{"status":"active"}}\'' + ); + }); + }); }); }); diff --git a/pkgs/dsl/__tests__/runtime/when-failed-options.test.ts b/pkgs/dsl/__tests__/runtime/when-failed-options.test.ts new file mode 100644 index 000000000..527bfb7bb --- /dev/null +++ b/pkgs/dsl/__tests__/runtime/when-failed-options.test.ts @@ -0,0 +1,186 @@ +import { describe, it, expect } from 'vitest'; +import { Flow } from '../../src/dsl.js'; +import { compileFlow } from '../../src/compile-flow.js'; + +describe('retriesExhausted Options', () => { + describe('DSL accepts retriesExhausted option', () => { + it('should accept retriesExhausted option on a step', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { slug: 'step1', retriesExhausted: 'skip' }, + () => 'result' + ); + + const step = flow.getStepDefinition('step1'); + expect(step.options.retriesExhausted).toBe('skip'); + }); + + it('should accept retriesExhausted: fail (default behavior)', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { slug: 'step1', retriesExhausted: 'fail' }, + () => 'result' + ); + + const step = flow.getStepDefinition('step1'); + expect(step.options.retriesExhausted).toBe('fail'); + }); + + it('should accept retriesExhausted: skip-cascade', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { slug: 'step1', retriesExhausted: 'skip-cascade' }, + () => 'result' + ); + + const step = flow.getStepDefinition('step1'); + 
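+      // 'skip-cascade' skips the step once retries are exhausted AND
+      // cascades the skip to its dependents (the runtime behavior is
+      // covered by the fail_task_when_failed SQL tests).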
expect(step.options.retriesExhausted).toBe('skip-cascade'); + }); + + it('should accept retriesExhausted on dependent steps', () => { + const flow = new Flow({ slug: 'test_flow' }) + .step({ slug: 'first' }, () => ({ data: 'test' })) + .step( + { + slug: 'second', + dependsOn: ['first'], + retriesExhausted: 'skip', + }, + () => 'result' + ); + + const step = flow.getStepDefinition('second'); + expect(step.options.retriesExhausted).toBe('skip'); + }); + + it('should accept retriesExhausted together with other options', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { + slug: 'step1', + maxAttempts: 3, + timeout: 60, + retriesExhausted: 'skip-cascade', + }, + () => 'result' + ); + + const step = flow.getStepDefinition('step1'); + expect(step.options.maxAttempts).toBe(3); + expect(step.options.timeout).toBe(60); + expect(step.options.retriesExhausted).toBe('skip-cascade'); + }); + + it('should accept both whenUnmet and retriesExhausted together', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { + slug: 'step1', + if: { enabled: true }, + whenUnmet: 'skip', + retriesExhausted: 'skip-cascade', + }, + () => 'result' + ); + + const step = flow.getStepDefinition('step1'); + expect(step.options.if).toEqual({ enabled: true }); + expect(step.options.whenUnmet).toBe('skip'); + expect(step.options.retriesExhausted).toBe('skip-cascade'); + }); + }); + + describe('compileFlow includes when_failed parameter', () => { + it('should compile when_failed for step', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { slug: 'step1', retriesExhausted: 'skip' }, + () => 'result' + ); + + const statements = compileFlow(flow); + + expect(statements).toHaveLength(2); + expect(statements[1]).toContain("when_failed => 'skip'"); + }); + + it('should compile when_failed: fail', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { slug: 'step1', retriesExhausted: 'fail' }, + () => 'result' + ); + + const statements = compileFlow(flow); + + expect(statements).toHaveLength(2); + expect(statements[1]).toContain("when_failed => 'fail'"); + }); + + it('should compile when_failed: skip-cascade', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { slug: 'step1', retriesExhausted: 'skip-cascade' }, + () => 'result' + ); + + const statements = compileFlow(flow); + + expect(statements).toHaveLength(2); + expect(statements[1]).toContain("when_failed => 'skip-cascade'"); + }); + + it('should compile step with all options including retriesExhausted', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { + slug: 'step1', + maxAttempts: 3, + timeout: 60, + if: { enabled: true }, + whenUnmet: 'skip', + retriesExhausted: 'skip-cascade', + }, + () => 'result' + ); + + const statements = compileFlow(flow); + + expect(statements).toHaveLength(2); + expect(statements[1]).toContain('max_attempts => 3'); + expect(statements[1]).toContain('timeout => 60'); + expect(statements[1]).toContain( + 'required_input_pattern => \'{"enabled":true}\'' + ); + expect(statements[1]).toContain("when_unmet => 'skip'"); + expect(statements[1]).toContain("when_failed => 'skip-cascade'"); + }); + + it('should not include when_failed when not specified', () => { + const flow = new Flow({ slug: 'test_flow' }).step( + { slug: 'step1' }, + () => 'result' + ); + + const statements = compileFlow(flow); + + expect(statements).toHaveLength(2); + expect(statements[1]).not.toContain('when_failed'); + }); + }); + + describe('retriesExhausted on map steps', () => { + it('should accept 
retriesExhausted on map step', () => {
+      const flow = new Flow<string[]>({ slug: 'test_flow' }).map(
+        { slug: 'map_step', retriesExhausted: 'skip' },
+        (item) => item.toUpperCase()
+      );
+
+      const step = flow.getStepDefinition('map_step');
+      expect(step.options.retriesExhausted).toBe('skip');
+    });
+
+    it('should compile when_failed for map step', () => {
+      const flow = new Flow<string[]>({ slug: 'test_flow' }).map(
+        { slug: 'map_step', retriesExhausted: 'skip-cascade' },
+        (item) => item.toUpperCase()
+      );
+
+      const statements = compileFlow(flow);
+
+      expect(statements).toHaveLength(2);
+      expect(statements[1]).toContain("when_failed => 'skip-cascade'");
+    });
+  });
+});
diff --git a/pkgs/dsl/__tests__/types/condition-pattern.test-d.ts b/pkgs/dsl/__tests__/types/condition-pattern.test-d.ts
new file mode 100644
index 000000000..a19f0bfea
--- /dev/null
+++ b/pkgs/dsl/__tests__/types/condition-pattern.test-d.ts
@@ -0,0 +1,618 @@
+import { Flow, type ContainmentPattern } from '../../src/index.js';
+import { describe, it, expectTypeOf } from 'vitest';
+
+describe('ContainmentPattern utility type', () => {
+  describe('primitive types', () => {
+    it('should allow exact value match for string', () => {
+      type Pattern = ContainmentPattern<string>;
+      expectTypeOf<Pattern>().toEqualTypeOf<string>();
+    });
+
+    it('should allow exact value match for number', () => {
+      type Pattern = ContainmentPattern<number>;
+      expectTypeOf<Pattern>().toEqualTypeOf<number>();
+    });
+
+    it('should allow exact value match for boolean', () => {
+      type Pattern = ContainmentPattern<boolean>;
+      expectTypeOf<Pattern>().toEqualTypeOf<boolean>();
+    });
+
+    it('should allow exact value match for null', () => {
+      type Pattern = ContainmentPattern<null>;
+      expectTypeOf<Pattern>().toEqualTypeOf<null>();
+    });
+  });
+
+  describe('object types', () => {
+    it('should make all keys optional for simple objects', () => {
+      type Input = { name: string; age: number };
+      type Pattern = ContainmentPattern<Input>;
+
+      // All keys should be optional
+      expectTypeOf<Pattern>().toEqualTypeOf<{ name?: string; age?: number }>();
+    });
+
+    it('should allow empty object pattern (always matches)', () => {
+      type Input = { name: string; age: number };
+      type Pattern = ContainmentPattern<Input>;
+
+      // Empty object should be assignable to pattern
+      // eslint-disable-next-line @typescript-eslint/no-empty-object-type
+      expectTypeOf<{}>().toMatchTypeOf<Pattern>();
+    });
+
+    it('should handle nested objects recursively', () => {
+      type Input = { user: { name: string; role: string } };
+      type Pattern = ContainmentPattern<Input>;
+
+      // Nested object should have optional keys
+      expectTypeOf<Pattern>().toEqualTypeOf<{
+        user?: { name?: string; role?: string };
+      }>();
+    });
+
+    it('should allow partial patterns for nested objects', () => {
+      type Input = { user: { name: string; role: string; age: number } };
+      type Pattern = ContainmentPattern<Input>;
+
+      // Should be able to specify only some nested keys
+      const validPattern: Pattern = { user: { role: 'admin' } };
+      expectTypeOf(validPattern).toMatchTypeOf<Pattern>();
+    });
+  });
+
+  describe('array types', () => {
+    it('should allow array containment patterns', () => {
+      type Input = string[];
+      type Pattern = ContainmentPattern<Input>;
+
+      // Array pattern should be ContainmentPattern<string>[]
+      expectTypeOf<Pattern>().toEqualTypeOf<ContainmentPattern<string>[]>();
+    });
+
+    it('should handle arrays of objects', () => {
+      type Input = { type: string; value: number }[];
+      type Pattern = ContainmentPattern<Input>;
+
+      // Should allow partial object patterns in array
+      expectTypeOf<Pattern>().toEqualTypeOf<
+        { type?: string; value?: number }[]
+      >();
+    });
+
+    it('should allow array pattern with specific elements', () => {
+      type Input = { type: string; value: number }[];
+      type Pattern = ContainmentPattern<Input>;
+
+      // Should be able to check for specific elements
+      const validPattern: Pattern = [{ type: 'error' }];
+      expectTypeOf(validPattern).toMatchTypeOf<Pattern>();
+    });
+
+    it('should handle readonly arrays', () => {
+      type Input = readonly string[];
+      type Pattern = ContainmentPattern<Input>;
+
+      // Should work with readonly arrays
+      expectTypeOf<Pattern>().toEqualTypeOf<readonly string[]>();
+    });
+  });
+
+  describe('complex nested structures', () => {
+    it('should handle deeply nested objects', () => {
+      type Input = {
+        level1: {
+          level2: {
+            level3: { value: string };
+          };
+        };
+      };
+      type Pattern = ContainmentPattern<Input>;
+
+      // All levels should have optional keys
+      expectTypeOf<Pattern>().toEqualTypeOf<{
+        level1?: {
+          level2?: {
+            level3?: { value?: string };
+          };
+        };
+      }>();
+    });
+
+    it('should handle objects with array properties', () => {
+      type Input = {
+        items: { id: number; name: string }[];
+        meta: { count: number };
+      };
+      type Pattern = ContainmentPattern<Input>;
+
+      expectTypeOf<Pattern>().toEqualTypeOf<{
+        items?: { id?: number; name?: string }[];
+        meta?: { count?: number };
+      }>();
+    });
+  });
+});
+
+describe('if option typing in step methods', () => {
+  describe('root step if', () => {
+    it('should type if as ContainmentPattern', () => {
+      type FlowInput = { userId: string; role: string };
+
+      // This should compile - valid partial pattern
+      const flow = new Flow<FlowInput>({ slug: 'test_flow' }).step(
+        { slug: 'check', if: { role: 'admin' } },
+        (input) => input.userId
+      );
+
+      expectTypeOf(flow).toBeObject();
+    });
+
+    it('should reject invalid keys in if', () => {
+      type FlowInput = { userId: string; role: string };
+
+      // @ts-expect-error - 'invalidKey' does not exist on FlowInput
+      new Flow<FlowInput>({ slug: 'test_flow' }).step(
+        { slug: 'check', if: { invalidKey: 'value' } },
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        (input: any) => input.userId
+      );
+    });
+
+    it('should reject wrong value types in if', () => {
+      type FlowInput = { userId: string; role: string };
+
+      // @ts-expect-error - role should be string, not number
+      new Flow<FlowInput>({ slug: 'test_flow' }).step(
+        { slug: 'check', if: { role: 123 } },
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        (input: any) => input.userId
+      );
+    });
+
+    it('should allow empty object if (always matches)', () => {
+      type FlowInput = { userId: string; role: string };
+
+      // Empty object should be valid
+      const flow = new Flow<FlowInput>({ slug: 'test_flow' }).step(
+        { slug: 'check', if: {} },
+        (input) => input.userId
+      );
+
+      expectTypeOf(flow).toBeObject();
+    });
+
+    it('should allow nested object patterns', () => {
+      type FlowInput = { user: { name: string; role: string } };
+
+      const flow = new Flow<FlowInput>({ slug: 'test_flow' }).step(
+        { slug: 'check', if: { user: { role: 'admin' } } },
+        (input) => input.user.name
+      );
+
+      expectTypeOf(flow).toBeObject();
+    });
+  });
+
+  describe('dependent step if', () => {
+    it('should type if as ContainmentPattern', () => {
+      const flow = new Flow<{ initial: string }>({ slug: 'test_flow' })
+        .step({ slug: 'fetch' }, () => ({ status: 'ok', data: 'result' }))
+        .step(
+          {
+            slug: 'process',
+            dependsOn: ['fetch'],
+            if: { fetch: { status: 'ok' } },
+          },
+          (deps) => deps.fetch.data
+        );
+
+      expectTypeOf(flow).toBeObject();
+    });
+
+    it('should reject invalid dep slug in if', () => {
+      new Flow<{ initial: string }>({ slug: 'test_flow' })
+        .step({ slug: 'fetch' }, () => ({ status: 'ok' }))
+        .step(
+          {
+            slug: 'process',
+            dependsOn: ['fetch'],
+            // @ts-expect-error - 'nonexistent' is not a dependency
+            if: { nonexistent: { status: 'ok' } },
+          },
+          // eslint-disable-next-line @typescript-eslint/no-explicit-any
+          (deps: any) => deps.fetch.status
+        );
+    });
+
+    it('should reject invalid keys within dep output', () => {
+      new Flow<{ initial: string }>({ slug: 'test_flow' })
+        .step({ slug: 'fetch' }, () => ({ status: 'ok' }))
+        .step(
+          {
+            slug: 'process',
+            dependsOn: ['fetch'],
+            // @ts-expect-error - 'invalidField' does not exist on fetch output
+            if: { fetch: { invalidField: 'value' } },
+          },
+          // eslint-disable-next-line @typescript-eslint/no-explicit-any
+          (deps: any) => deps.fetch.status
+        );
+    });
+
+    it('should handle multiple dependencies in if', () => {
+      const flow = new Flow<{ initial: string }>({ slug: 'test_flow' })
+        .step({ slug: 'step1' }, () => ({ ready: true }))
+        .step({ slug: 'step2' }, () => ({ valid: true }))
+        .step(
+          {
+            slug: 'final',
+            dependsOn: ['step1', 'step2'],
+            if: { step1: { ready: true }, step2: { valid: true } },
+          },
+          (deps) => deps.step1.ready && deps.step2.valid
+        );
+
+      expectTypeOf(flow).toBeObject();
+    });
+  });
+
+  describe('array step if', () => {
+    it('should type if for root array step', () => {
+      type FlowInput = { items: string[]; enabled: boolean };
+
+      const flow = new Flow<FlowInput>({ slug: 'test_flow' }).array(
+        { slug: 'getItems', if: { enabled: true } },
+        (input) => input.items
+      );
+
+      expectTypeOf(flow).toBeObject();
+    });
+
+    it('should type if for dependent array step', () => {
+      const flow = new Flow<{ initial: string }>({ slug: 'test_flow' })
+        .step({ slug: 'fetch' }, () => ({ ready: true, items: ['a', 'b'] }))
+        .array(
+          {
+            slug: 'process',
+            dependsOn: ['fetch'],
+            if: { fetch: { ready: true } },
+          },
+          (deps) => deps.fetch.items
+        );
+
+      expectTypeOf(flow).toBeObject();
+    });
+  });
+
+  describe('map step if', () => {
+    it('should type if for root map step', () => {
+      type FlowInput = { type: string; value: number }[];
+
+      const flow = new Flow<FlowInput>({ slug: 'test_flow' }).map(
+        // Root map if checks the array itself
+        { slug: 'process', if: [{ type: 'active' }] },
+        (item) => item.value * 2
+      );
+
+      expectTypeOf(flow).toBeObject();
+    });
+
+    it('should type if for dependent map step', () => {
+      const flow = new Flow<{ initial: string }>({ slug: 'test_flow' })
+        .step({ slug: 'fetch' }, () => [
+          { id: 1, active: true },
+          { id: 2, active: false },
+        ])
+        .map(
+          {
+            slug: 'process',
+            array: 'fetch',
+            // Condition checks the array dep
+            if: { fetch: [{ active: true }] },
+          },
+          (item) => item.id
+        );
+
+      expectTypeOf(flow).toBeObject();
+    });
+  });
+});
+
+describe('ifNot option typing in step methods', () => {
+  describe('root step ifNot', () => {
+    it('should type ifNot as ContainmentPattern', () => {
+      type FlowInput = { userId: string; role: string };
+
+      // This should compile - valid partial pattern
+      const flow = new Flow<FlowInput>({ slug: 'test_flow' }).step(
+        { slug: 'check', ifNot: { role: 'admin' } },
+        (input) => input.userId
+      );
+
+      expectTypeOf(flow).toBeObject();
+    });
+
+    it('should reject invalid keys in ifNot', () => {
+      type FlowInput = { userId: string; role: string };
+
+      // @ts-expect-error - 'invalidKey' does not exist on FlowInput
+      new Flow<FlowInput>({ slug: 'test_flow' }).step(
+        { slug: 'check', ifNot: { invalidKey: 'value' } },
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        (input: any) => input.userId
+      );
+    });
+
+    it('should reject wrong value types in ifNot', () => {
+      type FlowInput = { userId: string; role: string };
+
+      // @ts-expect-error - role should be string, not number
+      new Flow<FlowInput>({ slug: 'test_flow' }).step(
+        { slug: 'check', ifNot: { role: 123 } },
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        (input: any) => input.userId
+      );
+    });
+
+    it('should allow combined if and ifNot', () => {
+      type FlowInput = { role: string; active: boolean; suspended?: boolean };
+
+      const flow = new Flow<FlowInput>({ slug: 'test_flow' }).step(
+        {
+          slug: 'admin_action',
+          if: { role: 'admin', active: true },
+          ifNot: { suspended: true },
+        },
+        (input) => input.role
+      );
+
+      expectTypeOf(flow).toBeObject();
+    });
+  });
+
+  describe('dependent step ifNot', () => {
+    it('should type ifNot as ContainmentPattern', () => {
+      const flow = new Flow<{ initial: string }>({ slug: 'test_flow' })
+        .step({ slug: 'fetch' }, () => ({ hasError: true, data: 'result' }))
+        .step(
+          {
+            slug: 'process',
+            dependsOn: ['fetch'],
+            ifNot: { fetch: { hasError: true } },
+          },
+          (deps) => deps.fetch.data
+        );
+
+      expectTypeOf(flow).toBeObject();
+    });
+
+    it('should reject invalid dep slug in ifNot', () => {
+      new Flow<{ initial: string }>({ slug: 'test_flow' })
+        .step({ slug: 'fetch' }, () => ({ status: 'ok' }))
+        .step(
+          {
+            slug: 'process',
+            dependsOn: ['fetch'],
+            // @ts-expect-error - 'nonexistent' is not a dependency
+            ifNot: { nonexistent: { status: 'error' } },
+          },
+          // eslint-disable-next-line @typescript-eslint/no-explicit-any
+          (deps: any) => deps.fetch.status
+        );
+    });
+
+    it('should reject invalid keys within dep output for ifNot', () => {
+      new Flow<{ initial: string }>({ slug: 'test_flow' })
+        .step({ slug: 'fetch' }, () => ({ status: 'ok' }))
+        .step(
+          {
+            slug: 'process',
+            dependsOn: ['fetch'],
+            // @ts-expect-error - 'invalidField' does not exist on fetch output
+            ifNot: { fetch: { invalidField: 'value' } },
+          },
+          // eslint-disable-next-line @typescript-eslint/no-explicit-any
+          (deps: any) => deps.fetch.status
+        );
+    });
+  });
+
+  describe('array step ifNot', () => {
+    it('should type ifNot for root array step', () => {
+      type FlowInput = { items: string[]; disabled: boolean };
+
+      const flow = new Flow<FlowInput>({ slug: 'test_flow' }).array(
+        { slug: 'getItems', ifNot: { disabled: true } },
+        (input) => input.items
+      );
+
+      expectTypeOf(flow).toBeObject();
+    });
+
+    it('should type ifNot for dependent array step', () => {
+      const flow = new Flow<{ initial: string }>({ slug: 'test_flow' })
+        .step({ slug: 'fetch' }, () => ({ error: false, items: ['a', 'b'] }))
+        .array(
+          {
+            slug: 'process',
+            dependsOn: ['fetch'],
+            ifNot: { fetch: { error: true } },
+          },
+          (deps) => deps.fetch.items
+        );
+
+      expectTypeOf(flow).toBeObject();
+    });
+  });
+
+  describe('map step ifNot', () => {
+    it('should type ifNot for root map step', () => {
+      type FlowInput = { type: string; value: number }[];
+
+      const flow = new Flow<FlowInput>({ slug: 'test_flow' }).map(
+        // Root map ifNot checks the array itself
+        { slug: 'process', ifNot: [{ type: 'disabled' }] },
+        (item) => item.value * 2
+      );
+
+      expectTypeOf(flow).toBeObject();
+    });
+
+    it('should type ifNot for dependent map step', () => {
+      const flow = new Flow<{ initial: string }>({ slug: 'test_flow' })
+        .step({ slug: 'fetch' }, () => [
+          { id: 1, deleted: false },
+          { id: 2, deleted: true },
+        ])
+        .map(
+          {
+            slug: 'process',
+            array: 'fetch',
+            // Condition checks the array dep
+            ifNot: { fetch: [{ deleted: true }] },
+          },
+          (item) => item.id
+        );
+
+      expectTypeOf(flow).toBeObject();
+    });
+  });
+});
+
+describe('whenUnmet requires if or ifNot', () => {
+  describe('step method', () => {
+    it('should allow whenUnmet with if', () => {
+      type FlowInput = { role: string };
+
+      const flow = new Flow<FlowInput>({ slug: 'test_flow' }).step(
+        { slug: 'admin', if: { role: 'admin' }, whenUnmet: 'skip' },
+        (input) => input.role
+      );
+
+      expectTypeOf(flow).toBeObject();
+    });
+
+    it('should allow whenUnmet with ifNot', () => {
+      type FlowInput = { role: string };
+
+      const flow = new Flow<FlowInput>({ slug: 'test_flow' }).step(
+        { slug: 'non_admin', ifNot: { role: 'admin' }, whenUnmet: 'skip' },
+        (input) => input.role
+      );
+
+      expectTypeOf(flow).toBeObject();
+    });
+
+    it('should allow whenUnmet with both if and ifNot', () => {
+      type FlowInput = { role: string; suspended: boolean };
+
+      const flow = new Flow<FlowInput>({ slug: 'test_flow' }).step(
+        {
+          slug: 'active_admin',
+          if: { role: 'admin' },
+          ifNot: { suspended: true },
+          whenUnmet: 'skip-cascade',
+        },
+        (input) => input.role
+      );
+
+      expectTypeOf(flow).toBeObject();
+    });
+  });
+
+  describe('array method', () => {
+    it('should allow whenUnmet with if on array step', () => {
+      type FlowInput = { items: string[]; enabled: boolean };
+
+      const flow = new Flow<FlowInput>({ slug: 'test_flow' }).array(
+        { slug: 'getItems', if: { enabled: true }, whenUnmet: 'skip' },
+        (input) => input.items
+      );
+
+      expectTypeOf(flow).toBeObject();
+    });
+  });
+
+  describe('map method', () => {
+    it('should allow whenUnmet with ifNot on map step', () => {
+      type FlowInput = { type: string; value: number }[];
+
+      const flow = new Flow<FlowInput>({ slug: 'test_flow' }).map(
+        { slug: 'process', ifNot: [{ type: 'disabled' }], whenUnmet: 'skip' },
+        (item) => item.value
+      );
+
+      expectTypeOf(flow).toBeObject();
+    });
+  });
+
+  describe('whenUnmet rejection tests', () => {
+    it('should reject whenUnmet without if or ifNot on root step', () => {
+      type FlowInput = { role: string };
+
+      new Flow<FlowInput>({ slug: 'test_flow' })
+        // @ts-expect-error - whenUnmet requires if or ifNot
+        .step({ slug: 'step', whenUnmet: 'skip' }, (input) => input.role);
+    });
+
+    it('should reject whenUnmet without if or ifNot on dependent step', () => {
+      const flow = new Flow<{ initial: string }>({ slug: 'test_flow' }).step(
+        { slug: 'first' },
+        () => ({ done: true })
+      );
+
+      // @ts-expect-error - whenUnmet requires if or ifNot
+      flow.step(
+        { slug: 'second', dependsOn: ['first'], whenUnmet: 'skip' },
+        // Handler typed as any to suppress cascading error from failed overload
+        (deps: any) => deps.first.done
+      );
+    });
+
+    it('should reject whenUnmet without if or ifNot on root array step', () => {
+      type FlowInput = { items: string[] };
+
+      new Flow<FlowInput>({ slug: 'test_flow' })
+        // @ts-expect-error - whenUnmet requires if or ifNot
+        .array({ slug: 'getItems', whenUnmet: 'skip' }, (input) => input.items);
+    });
+
+    it('should reject whenUnmet without if or ifNot on dependent array step', () => {
+      const flow = new Flow<{ initial: string }>({ slug: 'test_flow' }).step(
+        { slug: 'fetch' },
+        () => ({ items: ['a', 'b'] })
+      );
+
+      // @ts-expect-error - whenUnmet requires if or ifNot
+      flow.array(
+        { slug: 'process', dependsOn: ['fetch'], whenUnmet: 'skip' },
+        // Handler typed as any to suppress cascading error from failed overload
+        (deps: any) => deps.fetch.items
+      );
+    });
+
+    it('should reject whenUnmet without if or ifNot on root map step', () => {
+      type FlowInput = { value: number }[];
+
+      new Flow<FlowInput>({ slug: 'test_flow' })
+        // @ts-expect-error - whenUnmet requires if or ifNot
+        .map({ slug: 'process', whenUnmet: 'skip' }, (item) => item.value);
+    });
+
+    it('should reject whenUnmet without if or ifNot on dependent map step', () => {
+      const flow = new Flow<{ initial: string }>({ slug: 'test_flow' }).step(
+        { slug: 'fetch' },
+        () => [{ id: 1 }, { id: 2 }]
+      );
+
+      // @ts-expect-error - whenUnmet requires if or ifNot
+      flow.map(
+        { slug: 'process', array: 'fetch', whenUnmet: 'skip' },
+        // Handler typed as any to suppress cascading error from failed overload
+        (item: any) => item.id
+      );
+    });
+  });
+});
diff --git a/pkgs/dsl/__tests__/types/extract-flow-steps.test-d.ts b/pkgs/dsl/__tests__/types/extract-flow-steps.test-d.ts
index 6b8143703..ed5f3f92e 100644
--- a/pkgs/dsl/__tests__/types/extract-flow-steps.test-d.ts
+++ b/pkgs/dsl/__tests__/types/extract-flow-steps.test-d.ts
@@ -1,8 +1,10 @@
-import { Flow, type ExtractFlowSteps } from '../../src/index.js';
+import { Flow, type ExtractFlowSteps, type StepOutput } from '../../src/index.js';
 import { describe, it, expectTypeOf } from 'vitest';
 
+// ExtractFlowSteps returns step slugs as keys
+// Use StepOutput<> to get the output type from a step
 describe('ExtractFlowSteps utility type', () => {
-  it('should correctly extract steps from a flow with defined input', () => {
+  it('should correctly extract step slugs from a flow', () => {
     const flow = new Flow<{ userId: number }>({ slug: 'user_flow' })
       .step({ slug: 'fetchUser' }, () => ({ name: 'John', age: 30 }))
       .step({ slug: 'fetchPosts', dependsOn: ['fetchUser'] }, () => [
@@ -12,15 +14,17 @@ describe('ExtractFlowSteps utility type', () => {
 
     type Steps = ExtractFlowSteps<typeof flow>;
 
-    expectTypeOf<Steps>().toMatchTypeOf<{
-      fetchUser: { name: string; age: number };
-      fetchPosts: Array<{ id: number; title: string }>;
-    }>();
+    // Keys are step slugs
+    expectTypeOf<keyof Steps>().toEqualTypeOf<'fetchUser' | 'fetchPosts'>();
 
-    // ensure it doesn't extract non-existent fields
-    expectTypeOf<Steps>().not.toMatchTypeOf<{
-      nonExistentStep: number;
+    // Use StepOutput to get output types (public API)
+    expectTypeOf<StepOutput<typeof flow, 'fetchUser'>>().toMatchTypeOf<{
+      name: string;
+      age: number;
     }>();
+    expectTypeOf<StepOutput<typeof flow, 'fetchPosts'>>().toMatchTypeOf<
+      Array<{ id: number; title: string }>
+    >();
   });
 
   it('should work with AnyFlow to extract steps from a generic flow', () => {
@@ -31,15 +35,14 @@ describe('ExtractFlowSteps utility type', () => {
 
     type Steps = ExtractFlowSteps<typeof flow>;
 
-    expectTypeOf<Steps>().toMatchTypeOf<{
-      step1: number;
-      step2: string;
-      step3: { complex: { nested: boolean } };
-    }>();
+    // Keys are step slugs
+    expectTypeOf<keyof Steps>().toEqualTypeOf<'step1' | 'step2' | 'step3'>();
 
-    // ensure it doesn't extract non-existent fields
-    expectTypeOf<Steps>().not.toMatchTypeOf<{
-      nonExistentStep: number;
+    // Use StepOutput to verify output types
+    expectTypeOf<StepOutput<typeof flow, 'step1'>>().toEqualTypeOf<number>();
+    expectTypeOf<StepOutput<typeof flow, 'step2'>>().toEqualTypeOf<string>();
+    expectTypeOf<StepOutput<typeof flow, 'step3'>>().toMatchTypeOf<{
+      complex: { nested: boolean };
     }>();
   });
 
@@ -59,16 +62,15 @@ describe('ExtractFlowSteps utility type', () => {
 
     type Steps = ExtractFlowSteps<typeof flow>;
 
-    expectTypeOf<Steps>().toMatchTypeOf<{
-      numberStep: number;
-      stringStep: string;
-      booleanStep: boolean;
-      nullStep: null;
-    }>();
+    // Keys are step slugs
+    expectTypeOf<keyof Steps>().toEqualTypeOf<
+      'numberStep' | 'stringStep' | 'booleanStep' | 'nullStep'
+    >();
 
-    // ensure it doesn't extract non-existent fields
-    expectTypeOf<Steps>().not.toMatchTypeOf<{
-      nonExistentStep: number;
-    }>();
+    // Use StepOutput to verify output types
+    expectTypeOf<StepOutput<typeof flow, 'numberStep'>>().toEqualTypeOf<number>();
+    expectTypeOf<StepOutput<typeof flow, 'stringStep'>>().toEqualTypeOf<string>();
+    expectTypeOf<StepOutput<typeof flow, 'booleanStep'>>().toEqualTypeOf<boolean>();
+    expectTypeOf<StepOutput<typeof flow, 'nullStep'>>().toEqualTypeOf<null>();
   });
 });
diff --git a/pkgs/dsl/__tests__/types/map-method.test-d.ts b/pkgs/dsl/__tests__/types/map-method.test-d.ts
index cb52f264a..960ef03ef 100644
--- a/pkgs/dsl/__tests__/types/map-method.test-d.ts
+++ b/pkgs/dsl/__tests__/types/map-method.test-d.ts
@@ -1,4 +1,4 @@
-import { Flow, type Json, type StepInput, type ExtractFlowContext } from '../../src/index.js';
+import { Flow, type Json, type StepInput, type ExtractFlowContext, type ExtractFlowSteps } from '../../src/index.js';
 import { describe, it, expectTypeOf } from 'vitest';
 
 describe('.map() method type constraints', () => {
@@ -11,9 +11,7 @@ describe('.map() method type constraints', () => {
     });
 
     // The map step should return an array of the handler return type
-    type ProcessOutput = typeof flow extends Flow<any, infer Steps>
-      ? Steps['process']
-      : never;
+    type ProcessOutput = ExtractFlowSteps<typeof flow>['process'];
 
     expectTypeOf<ProcessOutput>().toEqualTypeOf<{ processed: string }[]>();
   });
@@ -34,9 +32,7 @@ describe('.map() method type constraints', () => {
       return item.length;
     });
 
-    type FlattenOutput = typeof flow extends Flow<any, infer Steps>
-      ? Steps['flatten']
-      : never;
+    type FlattenOutput = ExtractFlowSteps<typeof flow>['flatten'];
 
     expectTypeOf<FlattenOutput>().toEqualTypeOf<number[]>();
   });
@@ -47,9 +43,7 @@ describe('.map() method type constraints', () => {
       return String(item);
     });
 
-    type StringifyOutput = typeof flow extends Flow<any, infer Steps>
-      ? Steps['stringify']
-      : never;
+    type StringifyOutput = ExtractFlowSteps<typeof flow>['stringify'];
 
     expectTypeOf<StringifyOutput>().toEqualTypeOf<string[]>();
   });
 });
@@ -65,9 +59,7 @@ describe('.map() method type constraints', () => {
       return item * 2;
     });
 
-    type DoubleOutput = typeof flow extends Flow<any, infer Steps>
-      ? Steps['double']
-      : never;
+    type DoubleOutput = ExtractFlowSteps<typeof flow>['double'];
 
     expectTypeOf<DoubleOutput>().toEqualTypeOf<number[]>();
   });
@@ -105,9 +97,7 @@ describe('.map() method type constraints', () => {
       return user.name;
     });
 
-    type NamesOutput = typeof flow extends Flow<any, infer Steps>
-      ? Steps['extractNames']
-      : never;
+    type NamesOutput = ExtractFlowSteps<typeof flow>['extractNames'];
 
    expectTypeOf<NamesOutput>().toEqualTypeOf<string[]>();
   });
 });
@@ -149,9 +139,7 @@ describe('.map() method type constraints', () => {
       return item.length;
     });
 
-    type LengthsOutput = typeof flow extends Flow<any, infer Steps>
-      ? Steps['lengths']
-      : never;
+    type LengthsOutput = ExtractFlowSteps<typeof flow>['lengths'];
 
     expectTypeOf<LengthsOutput>().toEqualTypeOf<number[]>();
   });
@@ -163,9 +151,7 @@ describe('.map() method type constraints', () => {
       return deps.double.reduce((a, b) => a + b, 0);
     });
 
-    type SumOutput = typeof flow extends Flow<any, infer Steps>
-      ? Steps['sum']
-      : never;
+    type SumOutput = ExtractFlowSteps<typeof flow>['sum'];
 
     expectTypeOf<SumOutput>().toEqualTypeOf<number>();
   });
 });
@@ -239,9 +225,7 @@ describe('.map() method type constraints', () => {
       return String(item);
     });
 
-    type StringifyOutput = typeof flow extends Flow<any, infer Steps>
-      ? Steps['stringify']
-      : never;
+    type StringifyOutput = ExtractFlowSteps<typeof flow>['stringify'];
 
     expectTypeOf<StringifyOutput>().toEqualTypeOf<string[]>();
   });
@@ -252,9 +236,7 @@ describe('.map() method type constraints', () => {
       return item !== null;
     });
 
-    type FilterOutput = typeof flow extends Flow<any, infer Steps>
-      ? Steps['filter']
-      : never;
+    type FilterOutput = ExtractFlowSteps<typeof flow>['filter'];
 
     expectTypeOf<FilterOutput>().toEqualTypeOf<boolean[]>();
   });
 });
diff --git a/pkgs/dsl/__tests__/types/map-return-type-inference.test-d.ts b/pkgs/dsl/__tests__/types/map-return-type-inference.test-d.ts
index 907be82c5..a266fe01c 100644
--- a/pkgs/dsl/__tests__/types/map-return-type-inference.test-d.ts
+++ b/pkgs/dsl/__tests__/types/map-return-type-inference.test-d.ts
@@ -1,4 +1,4 @@
-import { Flow } from '../../src/index.js';
+import { Flow, type ExtractFlowSteps } from '../../src/index.js';
 import { describe, it, expectTypeOf } from 'vitest';
 
 describe('map step return type inference bug', () => {
@@ -38,9 +38,7 @@ describe('map step return type inference bug', () => {
     );
 
     // Verify the map step output type is not any[]
-    type ProcessChunksOutput = typeof flow extends Flow<any, infer Steps>
-      ? Steps['processChunks']
-      : never;
+    type ProcessChunksOutput = ExtractFlowSteps<typeof flow>['processChunks'];
 
     expectTypeOf<ProcessChunksOutput>().not.toEqualTypeOf<any[]>();
   });
@@ -73,9 +71,7 @@ describe('map step return type inference bug', () => {
       return { ok: true };
     });
 
-    type TransformOutput = typeof flow extends Flow<any, infer Steps>
-      ? Steps['transform']
-      : never;
+    type TransformOutput = ExtractFlowSteps<typeof flow>['transform'];
 
     expectTypeOf<TransformOutput>().toEqualTypeOf();
     expectTypeOf<TransformOutput>().not.toEqualTypeOf<any[]>();
@@ -100,9 +96,7 @@ describe('map step return type inference bug', () => {
       return { done: true };
     });
 
-    type ProcessOutput = typeof flow extends Flow<any, infer Steps>
-      ? Steps['process']
-      : never;
+    type ProcessOutput = ExtractFlowSteps<typeof flow>['process'];
 
     expectTypeOf<ProcessOutput>().not.toEqualTypeOf<any[]>();
   });
@@ -127,9 +121,7 @@ describe('map step return type inference bug', () => {
      return { ok: true };
     });
 
-    type TransformOutput = typeof flow extends Flow<any, infer Steps>
-      ? Steps['transform']
-      : never;
+    type TransformOutput = ExtractFlowSteps<typeof flow>['transform'];
 
     expectTypeOf<TransformOutput>().toEqualTypeOf<{ value: string; length: number }[]>();
     expectTypeOf<TransformOutput>().not.toEqualTypeOf<any[]>();
@@ -155,9 +147,7 @@ describe('map step return type inference bug', () => {
       return { count: deps.uppercase.length };
     });
 
-    type UppercaseOutput = typeof flow extends Flow<any, infer Steps>
-      ? Steps['uppercase']
-      : never;
+    type UppercaseOutput = ExtractFlowSteps<typeof flow>['uppercase'];
 
     expectTypeOf<UppercaseOutput>().toEqualTypeOf<{ original: string; transformed: string }[]>();
     expectTypeOf<UppercaseOutput>().not.toEqualTypeOf<any[]>();
diff --git a/pkgs/dsl/__tests__/types/skippable-deps.test-d.ts b/pkgs/dsl/__tests__/types/skippable-deps.test-d.ts
new file mode 100644
index 000000000..eba36fd6d
--- /dev/null
+++ b/pkgs/dsl/__tests__/types/skippable-deps.test-d.ts
@@ -0,0 +1,562 @@
+import { Flow, type StepInput, type StepOutput } from '../../src/index.js';
+import { describe, it, expectTypeOf } from 'vitest';
+
+/**
+ * Type tests for skippable step dependencies
+ *
+ * When a step has `whenUnmet: 'skip' | 'skip-cascade'` or `retriesExhausted: 'skip' | 'skip-cascade'`,
+ * it may not execute. With plain 'skip', dependent steps receive that step's
+ * output as an optional key; with 'skip-cascade', dependents are themselves
+ * skipped, so the key stays required.
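+ *
+ * A minimal sketch (hypothetical handlers) of the 'skip' case:
+ *
+ *   new Flow<{ value: number }>({ slug: 'example' })
+ *     .step({ slug: 'a', if: { value: 1 }, whenUnmet: 'skip' }, () => ({ ok: true }))
+ *     .step({ slug: 'b', dependsOn: ['a'] }, (deps) => deps.a?.ok ?? false);
+ *
+ * Here `deps.a` is typed `{ ok: boolean } | undefined`.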
+ */ + +describe('skippable deps type safety', () => { + describe('core skippability - whenUnmet', () => { + it('step with whenUnmet: skip makes output optional for dependents', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step( + { slug: 'conditional', if: { value: 42 }, whenUnmet: 'skip' }, + (input) => ({ result: input.value * 2 }) + ) + .step({ slug: 'dependent', dependsOn: ['conditional'] }, (deps) => { + // conditional should be optional - can't access without null check + expectTypeOf(deps.conditional).toEqualTypeOf< + { result: number } | undefined + >(); + return { done: true }; + }); + + type DepInput = StepInput; + expectTypeOf().toEqualTypeOf<{ + conditional?: { result: number }; + }>(); + }); + + it('step with whenUnmet: skip-cascade keeps output required (cascade skips dependents)', () => { + // skip-cascade means if the step is skipped, its dependents are ALSO skipped + // So if the dependent handler runs at all, the parent must have succeeded + // Therefore the dependency should be required, not optional + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step( + { slug: 'conditional', if: { value: 42 }, whenUnmet: 'skip-cascade' }, + (input) => ({ result: input.value * 2 }) + ) + .step({ slug: 'dependent', dependsOn: ['conditional'] }, (deps) => { + // With skip-cascade, if we're running, the dependency succeeded + expectTypeOf(deps.conditional).toEqualTypeOf<{ result: number }>(); + return { done: true }; + }); + + type DepInput = StepInput; + expectTypeOf().toEqualTypeOf<{ + conditional: { result: number }; + }>(); + }); + + it('step with whenUnmet: fail keeps output required (default behavior)', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step( + { slug: 'conditional', if: { value: 42 }, whenUnmet: 'fail' }, + (input) => ({ result: input.value * 2 }) + ) + .step({ slug: 'dependent', dependsOn: ['conditional'] }, (deps) => { + // whenUnmet: 'fail' means step either runs or flow fails - output is guaranteed + expectTypeOf(deps.conditional).toEqualTypeOf<{ result: number }>(); + return { done: true }; + }); + + type DepInput = StepInput; + expectTypeOf().toEqualTypeOf<{ + conditional: { result: number }; + }>(); + }); + + it('step without whenUnmet keeps output required', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'normal' }, (input) => ({ result: input.value * 2 })) + .step({ slug: 'dependent', dependsOn: ['normal'] }, (deps) => { + expectTypeOf(deps.normal).toEqualTypeOf<{ result: number }>(); + return { done: true }; + }); + + type DepInput = StepInput; + expectTypeOf().toEqualTypeOf<{ + normal: { result: number }; + }>(); + }); + }); + + describe('core skippability - retriesExhausted', () => { + it('step with retriesExhausted: skip makes output optional for dependents', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'risky', retriesExhausted: 'skip' }, (input) => ({ + result: input.value * 2, + })) + .step({ slug: 'dependent', dependsOn: ['risky'] }, (deps) => { + expectTypeOf(deps.risky).toEqualTypeOf< + { result: number } | undefined + >(); + return { done: true }; + }); + + type DepInput = StepInput; + expectTypeOf().toEqualTypeOf<{ + risky?: { result: number }; + }>(); + }); + + it('step with retriesExhausted: skip-cascade keeps output required (cascade skips dependents)', () => { + // skip-cascade means if the step is skipped, its dependents are ALSO skipped + // So if the dependent handler runs at all, the parent must 
have succeeded + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'risky', retriesExhausted: 'skip-cascade' }, (input) => ({ + result: input.value * 2, + })) + .step({ slug: 'dependent', dependsOn: ['risky'] }, (deps) => { + // With skip-cascade, if we're running, the dependency succeeded + expectTypeOf(deps.risky).toEqualTypeOf<{ result: number }>(); + return { done: true }; + }); + + type DepInput = StepInput; + expectTypeOf().toEqualTypeOf<{ + risky: { result: number }; + }>(); + }); + + it('step with retriesExhausted: fail keeps output required', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'risky', retriesExhausted: 'fail' }, (input) => ({ + result: input.value * 2, + })) + .step({ slug: 'dependent', dependsOn: ['risky'] }, (deps) => { + expectTypeOf(deps.risky).toEqualTypeOf<{ result: number }>(); + return { done: true }; + }); + + type DepInput = StepInput; + expectTypeOf().toEqualTypeOf<{ + risky: { result: number }; + }>(); + }); + }); + + describe('multiple dependencies - mixed skippability', () => { + it('mixed deps: some optional, some required', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step( + { slug: 'skippable', if: { value: 42 }, whenUnmet: 'skip' }, + () => ({ + a: 1, + }) + ) + .step({ slug: 'required' }, () => ({ b: 2 })) + .step( + { slug: 'dependent', dependsOn: ['skippable', 'required'] }, + (deps) => { + expectTypeOf(deps.skippable).toEqualTypeOf< + { a: number } | undefined + >(); + expectTypeOf(deps.required).toEqualTypeOf<{ b: number }>(); + return { done: true }; + } + ); + + type DepInput = StepInput; + expectTypeOf().toEqualTypeOf<{ + skippable?: { a: number }; + required: { b: number }; + }>(); + }); + + it('all deps skippable: all optional', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'skip1', retriesExhausted: 'skip' }, () => ({ a: 1 })) + .step({ slug: 'skip2', retriesExhausted: 'skip' }, () => ({ b: 2 })) + .step({ slug: 'dependent', dependsOn: ['skip1', 'skip2'] }, (deps) => { + expectTypeOf(deps.skip1).toEqualTypeOf<{ a: number } | undefined>(); + expectTypeOf(deps.skip2).toEqualTypeOf<{ b: number } | undefined>(); + return { done: true }; + }); + + type DepInput = StepInput; + expectTypeOf().toEqualTypeOf<{ + skip1?: { a: number }; + skip2?: { b: number }; + }>(); + }); + + it('all deps required: none optional', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'req1' }, () => ({ a: 1 })) + .step({ slug: 'req2' }, () => ({ b: 2 })) + .step({ slug: 'dependent', dependsOn: ['req1', 'req2'] }, (deps) => { + expectTypeOf(deps.req1).toEqualTypeOf<{ a: number }>(); + expectTypeOf(deps.req2).toEqualTypeOf<{ b: number }>(); + return { done: true }; + }); + + type DepInput = StepInput; + expectTypeOf().toEqualTypeOf<{ + req1: { a: number }; + req2: { b: number }; + }>(); + }); + }); + + describe('chains and graphs', () => { + it('chain A(skip) -> B -> C: A optional in B, B required in C', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'a', retriesExhausted: 'skip' }, () => ({ aVal: 1 })) + .step({ slug: 'b', dependsOn: ['a'] }, (deps) => { + expectTypeOf(deps.a).toEqualTypeOf<{ aVal: number } | undefined>(); + return { bVal: 2 }; + }) + .step({ slug: 'c', dependsOn: ['b'] }, (deps) => { + // B is not skippable, so B's output is required + expectTypeOf(deps.b).toEqualTypeOf<{ bVal: number }>(); + return { cVal: 3 }; + }); + + type BInput = StepInput; + 
expectTypeOf().toEqualTypeOf<{ a?: { aVal: number } }>(); + + type CInput = StepInput; + expectTypeOf().toEqualTypeOf<{ b: { bVal: number } }>(); + }); + + it('diamond: A(skip) -> B, A -> C, B+C -> D: A optional in B and C', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'a', retriesExhausted: 'skip' }, () => ({ aVal: 1 })) + .step({ slug: 'b', dependsOn: ['a'] }, (deps) => { + expectTypeOf(deps.a).toEqualTypeOf<{ aVal: number } | undefined>(); + return { bVal: 2 }; + }) + .step({ slug: 'c', dependsOn: ['a'] }, (deps) => { + expectTypeOf(deps.a).toEqualTypeOf<{ aVal: number } | undefined>(); + return { cVal: 3 }; + }) + .step({ slug: 'd', dependsOn: ['b', 'c'] }, (deps) => { + // B and C are not skippable themselves + expectTypeOf(deps.b).toEqualTypeOf<{ bVal: number }>(); + expectTypeOf(deps.c).toEqualTypeOf<{ cVal: number }>(); + return { dVal: 4 }; + }); + + type BInput = StepInput; + expectTypeOf().toEqualTypeOf<{ a?: { aVal: number } }>(); + + type CInput = StepInput; + expectTypeOf().toEqualTypeOf<{ a?: { aVal: number } }>(); + + type DInput = StepInput; + expectTypeOf().toEqualTypeOf<{ + b: { bVal: number }; + c: { cVal: number }; + }>(); + }); + + it('cascade does NOT propagate: A(skip-cascade) -> B: B sees A as required', () => { + // skip-cascade means A and its dependents get skipped at RUNTIME + // If A is skipped, B is also skipped (cascade), so B never runs with undefined A + // Therefore B should see A as required, not optional + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'a', retriesExhausted: 'skip-cascade' }, () => ({ + aVal: 1, + })) + .step({ slug: 'b', dependsOn: ['a'] }, (deps) => { + // With skip-cascade, if B runs, A must have succeeded + expectTypeOf(deps.a).toEqualTypeOf<{ aVal: number }>(); + return { bVal: 2 }; + }) + .step({ slug: 'c', dependsOn: ['b'] }, (deps) => { + // B is not skippable in its own definition, so its output is required + expectTypeOf(deps.b).toEqualTypeOf<{ bVal: number }>(); + return { cVal: 3 }; + }); + + type BInput = StepInput; + expectTypeOf().toEqualTypeOf<{ a: { aVal: number } }>(); + + type CInput = StepInput; + expectTypeOf().toEqualTypeOf<{ b: { bVal: number } }>(); + }); + }); + + describe('edge cases', () => { + it('root step with skip: valid config, no dependents affected (no deps)', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }).step( + { slug: 'root', retriesExhausted: 'skip' }, + (input) => ({ result: input.value }) + ); + + // Root step has no deps, so StepInput is the flow input + type RootInput = StepInput; + expectTypeOf().toEqualTypeOf<{ value: number }>(); + }); + + it('map step with skip: entire output array is optional type', () => { + const flow = new Flow({ slug: 'test' }) + .map({ slug: 'process', retriesExhausted: 'skip' }, (item) => + item.toUpperCase() + ) + .step({ slug: 'aggregate', dependsOn: ['process'] }, (deps) => { + expectTypeOf(deps.process).toEqualTypeOf(); + return { done: true }; + }); + + type AggInput = StepInput; + expectTypeOf().toEqualTypeOf<{ + process?: string[]; + }>(); + }); + + it('array step with skip: entire output array is optional type', () => { + const flow = new Flow<{ count: number }>({ slug: 'test' }) + .array({ slug: 'generate', retriesExhausted: 'skip' }, (input) => + Array(input.count) + .fill(0) + .map((_, i) => i) + ) + .step({ slug: 'sum', dependsOn: ['generate'] }, (deps) => { + expectTypeOf(deps.generate).toEqualTypeOf(); + return { done: true }; + }); + + type SumInput = StepInput; + 
expectTypeOf().toEqualTypeOf<{ + generate?: number[]; + }>(); + }); + + it('both whenUnmet and retriesExhausted set: still skippable', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step( + { + slug: 'both', + if: { value: 42 }, + whenUnmet: 'skip', + retriesExhausted: 'skip', + }, + () => ({ result: 1 }) + ) + .step({ slug: 'dependent', dependsOn: ['both'] }, (deps) => { + expectTypeOf(deps.both).toEqualTypeOf< + { result: number } | undefined + >(); + return { done: true }; + }); + + type DepInput = StepInput; + expectTypeOf().toEqualTypeOf<{ + both?: { result: number }; + }>(); + }); + }); + + describe('type inference and narrowing', () => { + it('cannot access property on optional dep without null check', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'skippable', retriesExhausted: 'skip' }, () => ({ + foo: 'bar', + })) + .step({ slug: 'dependent', dependsOn: ['skippable'] }, (deps) => { + // Direct property access should be a compile error - we test via runtime pattern + // The type system should make deps.skippable potentially undefined + expectTypeOf(deps.skippable).toEqualTypeOf< + { foo: string } | undefined + >(); + return { done: true }; + }); + + // Type verification + type DepInput = StepInput; + expectTypeOf().toEqualTypeOf<{ skippable?: { foo: string } }>(); + }); + + it('type narrowing works after existence check', () => { + new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'skippable', retriesExhausted: 'skip' }, () => ({ + foo: 'bar', + })) + .step({ slug: 'dependent', dependsOn: ['skippable'] }, (deps) => { + if (deps.skippable) { + // After narrowing, foo is accessible + expectTypeOf(deps.skippable.foo).toEqualTypeOf(); + } + return { done: true }; + }); + }); + + it('handler receives correctly typed deps object', () => { + new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'skip1', retriesExhausted: 'skip' }, () => ({ a: 1 })) + .step({ slug: 'req1' }, () => ({ b: 'str' })) + .step({ slug: 'dependent', dependsOn: ['skip1', 'req1'] }, (deps) => { + // Handler parameter should have correct mixed optionality + expectTypeOf(deps).toEqualTypeOf<{ + skip1?: { a: number }; + req1: { b: string }; + }>(); + return { done: true }; + }); + }); + }); + + describe('utility types', () => { + it('StepOutput returns output type (not metadata)', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'normal' }, () => ({ result: 42 })) + .step({ slug: 'skippable', retriesExhausted: 'skip' }, () => ({ + other: 'str', + })); + + // StepOutput should return the actual output type, not the metadata structure + type NormalOutput = StepOutput; + expectTypeOf().toEqualTypeOf<{ result: number }>(); + + type SkippableOutput = StepOutput; + expectTypeOf().toEqualTypeOf<{ other: string }>(); + }); + + it('keyof ExtractFlowSteps still returns slug union', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'a' }, () => 1) + .step({ slug: 'b', retriesExhausted: 'skip' }, () => 2) + .step({ slug: 'c', dependsOn: ['a', 'b'] }, () => 3); + + type StepSlugs = keyof import('../../src/index.js').ExtractFlowSteps< + typeof flow + >; + expectTypeOf().toEqualTypeOf<'a' | 'b' | 'c'>(); + }); + }); + + describe('dependent map with skippable array source', () => { + it('dependent map on skippable array: deps should be optional', () => { + const flow = new Flow<{ value: number }>({ slug: 'test' }) + .array({ slug: 'items', retriesExhausted: 'skip' }, () => [1, 2, 
3]) + .map({ slug: 'double', array: 'items' }, (item) => item * 2) + .step({ slug: 'sum', dependsOn: ['double'] }, (deps) => { + // The map step itself doesn't have skip, but its source does + // This is an interesting edge case - map depends on skippable array + // For now, map's own skippability determines its output optionality + expectTypeOf(deps.double).toEqualTypeOf(); + return { done: true }; + }); + + type SumInput = StepInput; + expectTypeOf().toEqualTypeOf<{ double: number[] }>(); + }); + }); +}); + +/** + * Compile-time error tests for skippable deps + * + * These tests use @ts-expect-error to verify that TypeScript correctly + * rejects invalid patterns when accessing skippable dependencies. + */ +describe('skippable deps compile-time errors', () => { + describe('direct property access on optional deps', () => { + it('should reject direct property access on skippable dep without null check', () => { + new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'maybeSkipped', retriesExhausted: 'skip' }, () => ({ + data: 'result', + })) + .step({ slug: 'consumer', dependsOn: ['maybeSkipped'] }, (deps) => { + // @ts-expect-error - deps.maybeSkipped is optional, cannot access .data directly + const result: string = deps.maybeSkipped.data; + return { result }; + }); + }); + + it('should reject direct property access with whenUnmet: skip', () => { + new Flow<{ value: number }>({ slug: 'test' }) + .step( + { slug: 'conditional', if: { value: 42 }, whenUnmet: 'skip' }, + () => ({ processed: true }) + ) + .step({ slug: 'next', dependsOn: ['conditional'] }, (deps) => { + // @ts-expect-error - deps.conditional is optional due to whenUnmet: skip + const flag: boolean = deps.conditional.processed; + return { flag }; + }); + }); + + it('should ALLOW direct property access with whenUnmet: skip-cascade (cascade skips dependents)', () => { + // With skip-cascade, if the dependent runs, the parent must have succeeded + // So direct property access should be allowed + new Flow<{ value: number }>({ slug: 'test' }) + .step( + { slug: 'cascading', if: { value: 42 }, whenUnmet: 'skip-cascade' }, + () => ({ count: 10 }) + ) + .step({ slug: 'next', dependsOn: ['cascading'] }, (deps) => { + // No error - deps.cascading is required with skip-cascade + const num: number = deps.cascading.count; + return { num }; + }); + }); + + it('should ALLOW direct property access with retriesExhausted: skip-cascade (cascade skips dependents)', () => { + // With skip-cascade, if the dependent runs, the parent must have succeeded + new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'risky', retriesExhausted: 'skip-cascade' }, () => ({ + status: 'ok', + })) + .step({ slug: 'next', dependsOn: ['risky'] }, (deps) => { + // No error - deps.risky is required with skip-cascade + const s: string = deps.risky.status; + return { s }; + }); + }); + }); + + describe('mixed deps - optional and required', () => { + it('should allow direct access on required dep but reject on optional', () => { + new Flow<{ value: number }>({ slug: 'test' }) + .step({ slug: 'required' }, () => ({ reqData: 'always' })) + .step({ slug: 'optional', retriesExhausted: 'skip' }, () => ({ + optData: 'maybe', + })) + .step( + { slug: 'consumer', dependsOn: ['required', 'optional'] }, + (deps) => { + // This is fine - required dep is always present + const req: string = deps.required.reqData; + + // @ts-expect-error - deps.optional is optional, cannot access .optData directly + const opt: string = deps.optional.optData; + + return { req, opt }; + 
}
+        );
+    });
+  });
+
+  describe('array and map steps with skip modes', () => {
+    it('should reject direct access on skippable array step output', () => {
+      new Flow<{ items: string[] }>({ slug: 'test' })
+        .array({ slug: 'processed', retriesExhausted: 'skip' }, (input) =>
+          input.items.map((s) => s.toUpperCase())
+        )
+        .step({ slug: 'consumer', dependsOn: ['processed'] }, (deps) => {
+          // @ts-expect-error - deps.processed is optional, cannot access .length directly
+          const len: number = deps.processed.length;
+          return { len };
+        });
+    });
+
+    it('should reject direct access on skippable map step output', () => {
+      new Flow<string[]>({ slug: 'test' })
+        .map(
+          { slug: 'doubled', retriesExhausted: 'skip' },
+          (item) => item + item
+        )
+        .step({ slug: 'consumer', dependsOn: ['doubled'] }, (deps) => {
+          // @ts-expect-error - deps.doubled is optional, cannot access [0] directly
+          const first: string = deps.doubled[0];
+          return { first };
+        });
+    });
+  });
+});
diff --git a/pkgs/dsl/src/compile-flow.ts b/pkgs/dsl/src/compile-flow.ts
index bbf169fe1..1286e2203 100644
--- a/pkgs/dsl/src/compile-flow.ts
+++ b/pkgs/dsl/src/compile-flow.ts
@@ -43,7 +43,9 @@ export function compileFlow(flow: AnyFlow): string[] {
 /**
  * Formats runtime options into SQL parameter string
  */
-function formatRuntimeOptions(options: RuntimeOptions | StepRuntimeOptions): string {
+function formatRuntimeOptions(
+  options: RuntimeOptions | StepRuntimeOptions
+): string {
   const parts: string[] = [];

   if (options.maxAttempts !== undefined) {
@@ -62,5 +64,25 @@ function formatRuntimeOptions(options: RuntimeOptions | StepRuntimeOptions): str
     parts.push(`start_delay => ${options.startDelay}`);
   }

+  if ('if' in options && options.if !== undefined) {
+    // Serialize JSON pattern and escape single quotes for the SQL literal
+    const jsonStr = JSON.stringify(options.if).replace(/'/g, "''");
+    parts.push(`required_input_pattern => '${jsonStr}'`);
+  }
+
+  if ('ifNot' in options && options.ifNot !== undefined) {
+    // Serialize JSON pattern and escape single quotes for the SQL literal
+    const jsonStr = JSON.stringify(options.ifNot).replace(/'/g, "''");
+    parts.push(`forbidden_input_pattern => '${jsonStr}'`);
+  }
+
+  if ('whenUnmet' in options && options.whenUnmet !== undefined) {
+    parts.push(`when_unmet => '${options.whenUnmet}'`);
+  }
+
+  if ('retriesExhausted' in options && options.retriesExhausted !== undefined) {
+    parts.push(`when_failed => '${options.retriesExhausted}'`);
+  }
+
   return parts.length > 0 ? `, ${parts.join(', ')}` : '';
 }
diff --git a/pkgs/dsl/src/dsl.ts b/pkgs/dsl/src/dsl.ts
index 13d05d885..c4a44d501 100644
--- a/pkgs/dsl/src/dsl.ts
+++ b/pkgs/dsl/src/dsl.ts
@@ -16,14 +16,29 @@ export type Json =
 // Used to flatten the types of a union of objects for readability
 export type Simplify<T> = { [KeyType in keyof T]: T[KeyType] } & {};

+/**
+ * ContainmentPattern - Type for JSON containment (@>) patterns
+ *
+ * Matches PostgreSQL's @> containment semantics where a pattern is a
+ * recursive partial structure that the target must contain:
+ * - Primitives: exact value match
+ * - Objects: all keys optional, recursively applied
+ * - Arrays: elements expected to be present in target array
+ */
+export type ContainmentPattern<T> = T extends readonly (infer U)[]
+  ? ContainmentPattern<U>[] // Array: elements expected to be present
+  : T extends object
+  ?
{ [K in keyof T]?: ContainmentPattern } // Object: all keys optional + : T; // Primitive: exact value match + // Utility that unwraps Promise and keeps plain values unchanged // Note: `any[]` is required here for proper type inference in conditional types // `unknown[]` would be too restrictive and break type matching type AwaitedReturn = T extends (...args: any[]) => Promise ? R : T extends (...args: any[]) => infer R - ? R - : never; + ? R + : never; // ======================== // ENVIRONMENT TYPE SYSTEM @@ -51,8 +66,22 @@ export type AnyInput = Json; export type AnyOutput = Json; // Step Types +// Skippable mode: 'skip' makes deps optional, 'skip-cascade' keeps deps required +// (because cascade skips dependents at runtime, so if handler runs, dep succeeded) +export type SkippableMode = 'skip' | 'skip-cascade' | false; + +// Step metadata structure - enriched type that tracks output and skippability +export interface StepMeta< + TOutput = AnyOutput, + TSkippable extends SkippableMode = SkippableMode +> { + output: TOutput; + skippable: TSkippable; +} + export type EmptySteps = Record; -export type AnySteps = Record; // Could use unknown if needed +// AnySteps now uses StepMeta structure for enriched step information +export type AnySteps = Record; // Dependency Types export type EmptyDeps = Record; @@ -99,7 +128,7 @@ export type ExtractFlowInput = TFlow extends Flow< * This creates a union of all step input types */ export type AllStepInputs = { - [K in keyof ExtractFlowSteps & string]: StepInput + [K in keyof ExtractFlowSteps & string]: StepInput; }[keyof ExtractFlowSteps & string]; /** @@ -121,7 +150,7 @@ export type ExtractFlowOutput = TFlow extends Flow< : never; /** - * Extracts the steps type from a Flow + * Extracts the steps type from a Flow (unwraps StepMeta to just output types) * @template TFlow - The Flow type to extract from */ export type ExtractFlowSteps = TFlow extends Flow< @@ -130,6 +159,20 @@ export type ExtractFlowSteps = TFlow extends Flow< infer TS, infer _TD, infer _TEnv +> + ? { [K in keyof TS]: TS[K]['output'] } + : never; + +/** + * Extracts the raw steps type from a Flow (includes StepMeta structure with skippable info) + * @template TFlow - The Flow type to extract from + */ +export type ExtractFlowStepsRaw = TFlow extends Flow< + infer _TI, + infer _TC, + infer TS, + infer _TD, + infer _TEnv > ? TS : never; @@ -204,10 +247,11 @@ export type CompatibleFlow< F extends AnyFlow, PlatformResources extends Record, UserResources extends Record = Record -> = - (FlowContext> & PlatformResources & UserResources) extends ExtractFlowContext - ? F - : never; +> = FlowContext> & + PlatformResources & + UserResources extends ExtractFlowContext + ? F + : never; /** * Extracts the dependencies type from a Flow @@ -222,6 +266,7 @@ type StepDepsOf< /** * Extracts only the leaf steps from a Flow (steps that are not dependencies of any other steps) + * Returns the output types, not the full StepMeta structure * @template TFlow - The Flow type to extract from */ export type ExtractFlowLeafSteps = { @@ -235,6 +280,7 @@ export type ExtractFlowLeafSteps = { // Utility type to extract the output type of a step handler from a Flow // Usage: // StepOutput +// Returns the output type (ExtractFlowSteps already unwraps StepMeta) export type StepOutput< TFlow extends AnyFlow, TStepSlug extends string @@ -242,23 +288,66 @@ export type StepOutput< ? 
ExtractFlowSteps[TStepSlug] : never; +/** + * Gets the skippable mode for a step ('skip' | 'skip-cascade' | false) + * @template TFlow - The Flow type + * @template TStepSlug - The step slug to check + */ +export type GetSkippableMode< + TFlow extends AnyFlow, + TStepSlug extends string +> = TStepSlug extends keyof ExtractFlowStepsRaw + ? ExtractFlowStepsRaw[TStepSlug]['skippable'] + : false; + +/** + * Checks if a step makes its dependents' deps optional (only 'skip' mode, not 'skip-cascade') + * With 'skip-cascade', dependents are also skipped at runtime, so if handler runs, dep succeeded. + */ +export type IsStepSkippable< + TFlow extends AnyFlow, + TStepSlug extends string +> = GetSkippableMode extends 'skip' ? true : false; + +// Helper types for StepInput with optional skippable deps +// Only 'skip' mode makes deps optional (dependents run with undefined value) +// 'skip-cascade' keeps deps required (dependents also skipped, so value guaranteed if running) +type RequiredDeps = { + [K in Extract< + keyof ExtractFlowSteps, + StepDepsOf + > as GetSkippableMode extends 'skip' + ? never + : K]: ExtractFlowSteps[K]; +}; + +type OptionalDeps = { + [K in Extract< + keyof ExtractFlowSteps, + StepDepsOf + > as GetSkippableMode extends 'skip' + ? K + : never]?: ExtractFlowSteps[K]; +}; + /** * Asymmetric step input type: * - Root steps (no dependencies): receive flow input directly * - Dependent steps: receive only their dependencies (flow input available via context) + * - Skippable deps (whenUnmet/retriesExhausted: 'skip') are optional + * - Cascade deps (whenUnmet/retriesExhausted: 'skip-cascade') are required + * (because if handler runs, the dependency must have succeeded) + * - All other deps are required * * This enables functional composition where subflows can receive typed inputs * without the 'run' wrapper that previously blocked type matching. */ -export type StepInput = - StepDepsOf extends never - ? ExtractFlowInput // Root step: flow input directly - : { - [K in Extract< - keyof ExtractFlowSteps, - StepDepsOf - >]: ExtractFlowSteps[K]; - }; // Dependent step: only deps +export type StepInput< + TFlow extends AnyFlow, + TStepSlug extends string +> = StepDepsOf extends never + ? ExtractFlowInput // Root step: flow input directly + : Simplify & OptionalDeps>; // Runtime options interface for flow-level options export interface RuntimeOptions { @@ -309,19 +398,223 @@ export interface BaseContext { * receive flow_input from SQL; other step types lazy-load it on demand. * Use `await ctx.flowInput` to access the original flow input. 
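 *
 * @example
 * // Illustrative dependent-step handler (slugs invented): non-root handlers
 * // get only their deps, and lazy-load the flow input via the context promise.
 * .step({ slug: 'report', dependsOn: ['analyze'] }, async (deps, ctx) => {
 *   const original = await ctx.flowInput;
 *   return { analysis: deps.analyze, original };
 * })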
*/ -export interface FlowContext extends BaseContext { +export interface FlowContext< + TEnv extends Env = Env, + TFlowInput extends AnyInput = AnyInput +> extends BaseContext { stepTask: StepTaskRecord; flowInput: Promise; } // Generic context type helper (uses FlowContext for flow handlers) -export type Context = Record, TEnv extends Env = Env> = FlowContext & T; +export type Context< + T extends Record = Record, + TEnv extends Env = Env +> = FlowContext & T; + +/** + * Options for handling unmet conditions (when 'if' pattern doesn't match input) + * + * @example + * // Fail the step (and run) when pattern doesn't match + * { if: { enabled: true }, whenUnmet: 'fail' } + * + * @example + * // Skip this step only when pattern doesn't match + * { if: { enabled: true }, whenUnmet: 'skip' } + * + * @example + * // Skip this step and all dependents when pattern doesn't match + * { if: { enabled: true }, whenUnmet: 'skip-cascade' } + * + * @remarks + * - `'fail'`: When pattern doesn't match, step fails -> run fails (default) + * - `'skip'`: When pattern doesn't match, skip step and continue (step key omitted from dependent inputs) + * - `'skip-cascade'`: When pattern doesn't match, skip step + mark all dependents as skipped + */ +export type WhenUnmetMode = 'fail' | 'skip' | 'skip-cascade'; + +/** + * Options for handling errors after all retries are exhausted + * + * @example + * // Fail the run after retries exhausted (default) + * { retriesExhausted: 'fail' } + * + * @example + * // Skip this step after retries exhausted, continue run + * { retriesExhausted: 'skip' } + * + * @example + * // Skip this step and all dependents after retries exhausted + * { retriesExhausted: 'skip-cascade' } + * + * @remarks + * - `'fail'`: Step fails -> run fails (default behavior) + * - `'skip'`: Mark step as skipped, continue run (step key omitted from dependent inputs) + * - `'skip-cascade'`: Skip step + mark all dependents as skipped too + * + * @note + * TYPE_VIOLATION errors (e.g., single step returns non-array for map dependent) + * are NOT subject to retriesExhausted - these always hard fail as they indicate + * programming errors, not runtime conditions. + */ +export type RetriesExhaustedMode = 'fail' | 'skip' | 'skip-cascade'; + +/** + * Helper type for dependent step handlers - creates deps object with correct optionality. + * Only steps with 'skip' mode (not 'skip-cascade') make deps optional. + * With 'skip-cascade', dependents are also skipped at runtime, so if handler runs, dep succeeded. + */ +type DepsWithOptionalSkippable< + TSteps extends AnySteps, + TDeps extends string +> = { + // Required deps: either not skippable or skip-cascade (cascade skips dependents, so value guaranteed) + [K in TDeps as K extends keyof TSteps + ? TSteps[K]['skippable'] extends 'skip' + ? never + : K + : K]: K extends keyof TSteps ? TSteps[K]['output'] : never; +} & { + // Optional deps: only 'skip' mode (dependents run with undefined value) + [K in TDeps as K extends keyof TSteps + ? TSteps[K]['skippable'] extends 'skip' + ? K + : never + : never]?: K extends keyof TSteps ? 
TSteps[K]['output'] : never; +}; // Step runtime options interface that extends flow options with step-specific options +// Note: 'if' is typed as Json here for internal storage; overloads provide type safety export interface StepRuntimeOptions extends RuntimeOptions { startDelay?: number; + + /** + * Pattern to match using PostgreSQL's @> (contains) operator + * + * @example + * // Root step: match against flow input + * { if: { role: 'admin', active: true } } + * + * @example + * // Dependent step: match against dependency outputs + * { if: { prevStep: { status: 'success' } } } + * + * @remarks + * - Primitives: exact value match + * - Objects: all keys optional, recursively applied + * - Arrays: elements expected to be present in target array + * + * @see WhenUnmetMode for controlling what happens when pattern doesn't match + */ + if?: Json; + + /** + * Negative pattern - step executes when input does NOT match this pattern + * + * @example + * // Root step: execute when NOT an admin + * { ifNot: { role: 'admin' } } + * + * @example + * // Combined with 'if' for AND semantics: "active admin who is NOT suspended" + * { if: { role: 'admin', active: true }, ifNot: { suspended: true } } + * + * @remarks + * - Uses PostgreSQL's @> containment check, negated + * - When combined with 'if', BOTH must pass (AND semantics) + * - For mutual exclusion: use same pattern with if on one step, ifNot on another + * + * @see WhenUnmetMode for controlling what happens when condition not met + */ + ifNot?: Json; + + /** + * What to do when the 'if' pattern doesn't match the input + * + * @default 'skip' + * + * @example + * { whenUnmet: 'fail' } // Pattern doesn't match -> step fails -> run fails + * { whenUnmet: 'skip' } // Pattern doesn't match -> skip step, continue run + * { whenUnmet: 'skip-cascade' } // Pattern doesn't match -> skip step + all dependents + * + * @see WhenUnmetMode for detailed documentation of each mode + */ + whenUnmet?: WhenUnmetMode; + + /** + * What to do when handler throws an error after all retries are exhausted + * + * @default 'fail' + * + * @example + * { retriesExhausted: 'fail' } // Step fails -> run fails + * { retriesExhausted: 'skip' } // Skip step, continue run + * { retriesExhausted: 'skip-cascade' } // Skip step + all dependents + * + * @remarks + * Only applies after maxAttempts retries are exhausted. + * TYPE_VIOLATION errors always fail regardless of this setting. + * + * @see RetriesExhaustedMode for detailed documentation of each mode + */ + retriesExhausted?: RetriesExhaustedMode; +} + +// Base runtime options without condition-related fields +interface BaseStepRuntimeOptions extends RuntimeOptions { + startDelay?: number; + retriesExhausted?: RetriesExhaustedMode; } +/** + * Condition with 'if' required (ifNot optional) - allows whenUnmet. + * whenUnmet only makes sense when there's a condition to be "unmet". + */ +type WithIfCondition = { + if: ContainmentPattern; + ifNot?: ContainmentPattern; + whenUnmet?: WhenUnmetMode; +}; + +/** + * Condition with 'ifNot' required (if optional) - allows whenUnmet. + */ +type WithIfNotCondition = { + if?: ContainmentPattern; + ifNot: ContainmentPattern; + whenUnmet?: WhenUnmetMode; +}; + +/** + * No condition - if, ifNot, and whenUnmet are all forbidden. + * This ensures whenUnmet can only be used with a condition. + */ +type WithoutCondition = { + if?: never; + ifNot?: never; + whenUnmet?: never; +}; + +/** + * Discriminated union for condition options. + * whenUnmet is only allowed when if or ifNot is provided. 
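+ *
+ * @example
+ * // OK - whenUnmet paired with a condition. Containment intuition: the
+ * // pattern { role: 'admin' } matches { role: 'admin', active: true }
+ * // but not { role: 'user' }.
+ * { if: { role: 'admin' }, whenUnmet: 'skip' }
+ *
+ * // Compile-time error - whenUnmet without if/ifNot (WithoutCondition
+ * // types all three fields as never):
+ * { whenUnmet: 'skip' }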
+ */ +type ConditionOpts = + | WithIfCondition + | WithIfNotCondition + | WithoutCondition; + +// Typed step options for root steps (if/ifNot match FlowInput pattern) +export type RootStepOptions = BaseStepRuntimeOptions & + ConditionOpts; + +// Typed step options for dependent steps (if/ifNot match deps object pattern) +export type DependentStepOptions = BaseStepRuntimeOptions & + ConditionOpts; + // Define the StepDefinition interface with integrated options export interface StepDefinition< TInput extends AnyInput, @@ -383,6 +676,7 @@ export class Flow< * Returns the step definition with asymmetric input typing: * - Root steps (no dependencies): input is flowInput directly * - Dependent steps: input is deps object only (flowInput available via context) + * - Skippable deps are optional, required deps are required * * @throws Error if the step with the given slug doesn't exist */ @@ -391,12 +685,22 @@ export class Flow< ): StepDefinition< StepDependencies[SlugType] extends [] | readonly [] ? TFlowInput // Root step: flow input directly - : Simplify<{ - [K in StepDependencies[SlugType][number]]: K extends keyof Steps - ? Steps[K] - : never; - }>, // Dependent step: only deps - Steps[SlugType], + : Simplify< + { + [K in StepDependencies[SlugType][number] as K extends keyof Steps + ? Steps[K]['skippable'] extends true + ? never + : K + : never]: K extends keyof Steps ? Steps[K]['output'] : never; + } & { + [K in StepDependencies[SlugType][number] as K extends keyof Steps + ? Steps[K]['skippable'] extends true + ? K + : never + : never]?: K extends keyof Steps ? Steps[K]['output'] : never; + } + >, // Dependent step: only deps (skippable deps optional) + Steps[SlugType]['output'], FlowContext & TContext > { // Check if the slug exists in stepDefinitions using a more explicit pattern @@ -406,18 +710,30 @@ export class Flow< ); } - // Use a type assertion directive to tell TypeScript that this is safe - // @ts-expect-error The type system cannot track that this.stepDefinitions[slug] has the correct type - // but we know it's safe because we only add steps through the strongly-typed `step` method return this.stepDefinitions[slug as string]; } // Overload 1: Root step (no dependsOn) - receives flowInput directly + // if is typed as ContainmentPattern + // whenUnmet is only allowed when if or ifNot is provided (enforced by ConditionOpts union) step< Slug extends string, - TOutput + TOutput, + TWhenUnmet extends WhenUnmetMode | undefined = undefined, + TRetries extends RetriesExhaustedMode | undefined = undefined >( - opts: Simplify<{ slug: Slug extends keyof Steps ? never : Slug; dependsOn?: never } & StepRuntimeOptions>, + opts: Simplify< + { + slug: Slug extends keyof Steps ? never : Slug; + dependsOn?: never; + retriesExhausted?: TRetries; + } & ( + | (WithIfCondition & { whenUnmet?: TWhenUnmet }) + | (WithIfNotCondition & { whenUnmet?: TWhenUnmet }) + | WithoutCondition + ) & + Omit + >, handler: ( flowInput: TFlowInput, context: FlowContext & TContext @@ -425,27 +741,65 @@ export class Flow< ): Flow< TFlowInput, TContext, - Steps & { [K in Slug]: Awaited }, + Steps & { + [K in Slug]: StepMeta< + Awaited, + TWhenUnmet extends 'skip' | 'skip-cascade' + ? TWhenUnmet + : TRetries extends 'skip' | 'skip-cascade' + ? 
TRetries + : false + >; + }, StepDependencies & { [K in Slug]: [] }, TEnv >; // Overload 2: Dependent step (with dependsOn) - receives deps, flowInput via context + // if is typed as ContainmentPattern // Note: [Deps, ...Deps[]] requires at least one dependency - empty arrays are rejected at compile time + // Handler receives deps with correct optionality based on upstream steps' skippability + // whenUnmet is only allowed when if or ifNot is provided (enforced by ConditionOpts union) step< Slug extends string, Deps extends Extract, - TOutput + TOutput, + TWhenUnmet extends WhenUnmetMode | undefined = undefined, + TRetries extends RetriesExhaustedMode | undefined = undefined >( - opts: Simplify<{ slug: Slug extends keyof Steps ? never : Slug; dependsOn: [Deps, ...Deps[]] } & StepRuntimeOptions>, + opts: Simplify< + { + slug: Slug extends keyof Steps ? never : Slug; + dependsOn: [Deps, ...Deps[]]; + retriesExhausted?: TRetries; + } & ( + | (WithIfCondition>> & { + whenUnmet?: TWhenUnmet; + }) + | (WithIfNotCondition< + Simplify> + > & { whenUnmet?: TWhenUnmet }) + | WithoutCondition + ) & + Omit + >, handler: ( - deps: { [K in Deps]: K extends keyof Steps ? Steps[K] : never }, + deps: Simplify>, context: FlowContext & TContext ) => TOutput | Promise ): Flow< TFlowInput, TContext, - Steps & { [K in Slug]: Awaited }, + Steps & { + [K in Slug]: StepMeta< + Awaited, + TWhenUnmet extends 'skip' | 'skip-cascade' + ? TWhenUnmet + : TRetries extends 'skip' | 'skip-cascade' + ? TRetries + : false + >; + }, StepDependencies & { [K in Slug]: Deps[] }, TEnv >; @@ -477,6 +831,11 @@ export class Flow< if (opts.baseDelay !== undefined) options.baseDelay = opts.baseDelay; if (opts.timeout !== undefined) options.timeout = opts.timeout; if (opts.startDelay !== undefined) options.startDelay = opts.startDelay; + if (opts.if !== undefined) options.if = opts.if; + if (opts.ifNot !== undefined) options.ifNot = opts.ifNot; + if (opts.whenUnmet !== undefined) options.whenUnmet = opts.whenUnmet; + if (opts.retriesExhausted !== undefined) + options.retriesExhausted = opts.retriesExhausted; // Validate runtime options (optional for step level) validateRuntimeOptions(options, { optional: true }); @@ -520,11 +879,26 @@ export class Flow< * @returns A new Flow instance with the array step added */ // Overload 1: Root array (no dependsOn) - receives flowInput directly + // if is typed as ContainmentPattern + // whenUnmet is only allowed when if or ifNot is provided (enforced by ConditionOpts union) array< Slug extends string, - TOutput extends readonly any[] + TOutput extends readonly any[], + TWhenUnmet extends WhenUnmetMode | undefined = undefined, + TRetries extends RetriesExhaustedMode | undefined = undefined >( - opts: Simplify<{ slug: Slug extends keyof Steps ? never : Slug; dependsOn?: never } & StepRuntimeOptions>, + opts: Simplify< + { + slug: Slug extends keyof Steps ? never : Slug; + dependsOn?: never; + retriesExhausted?: TRetries; + } & ( + | (WithIfCondition & { whenUnmet?: TWhenUnmet }) + | (WithIfNotCondition & { whenUnmet?: TWhenUnmet }) + | WithoutCondition + ) & + Omit + >, handler: ( flowInput: TFlowInput, context: FlowContext & TContext @@ -532,27 +906,64 @@ export class Flow< ): Flow< TFlowInput, TContext, - Steps & { [K in Slug]: Awaited }, + Steps & { + [K in Slug]: StepMeta< + Awaited, + TWhenUnmet extends 'skip' | 'skip-cascade' + ? TWhenUnmet + : TRetries extends 'skip' | 'skip-cascade' + ? 
TRetries + : false + >; + }, StepDependencies & { [K in Slug]: [] }, TEnv >; // Overload 2: Dependent array (with dependsOn) - receives deps, flowInput via context + // if is typed as ContainmentPattern // Note: [Deps, ...Deps[]] requires at least one dependency - empty arrays are rejected at compile time + // whenUnmet is only allowed when if or ifNot is provided (enforced by ConditionOpts union) array< Slug extends string, Deps extends Extract, - TOutput extends readonly any[] + TOutput extends readonly any[], + TWhenUnmet extends WhenUnmetMode | undefined = undefined, + TRetries extends RetriesExhaustedMode | undefined = undefined >( - opts: Simplify<{ slug: Slug extends keyof Steps ? never : Slug; dependsOn: [Deps, ...Deps[]] } & StepRuntimeOptions>, + opts: Simplify< + { + slug: Slug extends keyof Steps ? never : Slug; + dependsOn: [Deps, ...Deps[]]; + retriesExhausted?: TRetries; + } & ( + | (WithIfCondition>> & { + whenUnmet?: TWhenUnmet; + }) + | (WithIfNotCondition< + Simplify> + > & { whenUnmet?: TWhenUnmet }) + | WithoutCondition + ) & + Omit + >, handler: ( - deps: { [K in Deps]: K extends keyof Steps ? Steps[K] : never }, + deps: Simplify>, context: FlowContext & TContext ) => TOutput | Promise ): Flow< TFlowInput, TContext, - Steps & { [K in Slug]: Awaited }, + Steps & { + [K in Slug]: StepMeta< + Awaited, + TWhenUnmet extends 'skip' | 'skip-cascade' + ? TWhenUnmet + : TRetries extends 'skip' | 'skip-cascade' + ? TRetries + : false + >; + }, StepDependencies & { [K in Slug]: Deps[] }, TEnv >; @@ -575,36 +986,93 @@ export class Flow< * @returns A new Flow instance with the map step added */ // Overload for root map - handler receives item, context includes flowInput + // if is typed as ContainmentPattern (checks the array itself) + // whenUnmet is only allowed when if or ifNot is provided (enforced by ConditionOpts union) map< Slug extends string, THandler extends TFlowInput extends readonly (infer Item)[] - ? (item: Item, context: FlowContext & TContext) => Json | Promise - : never + ? ( + item: Item, + context: FlowContext & TContext + ) => Json | Promise + : never, + TWhenUnmet extends WhenUnmetMode | undefined = undefined, + TRetries extends RetriesExhaustedMode | undefined = undefined >( - opts: Simplify<{ slug: Slug extends keyof Steps ? never : Slug } & StepRuntimeOptions>, + opts: Simplify< + { + slug: Slug extends keyof Steps ? never : Slug; + retriesExhausted?: TRetries; + } & ( + | (WithIfCondition & { whenUnmet?: TWhenUnmet }) + | (WithIfNotCondition & { whenUnmet?: TWhenUnmet }) + | WithoutCondition + ) & + Omit + >, handler: THandler ): Flow< TFlowInput, TContext, - Steps & { [K in Slug]: AwaitedReturn[] }, + Steps & { + [K in Slug]: StepMeta< + AwaitedReturn[], + TWhenUnmet extends 'skip' | 'skip-cascade' + ? TWhenUnmet + : TRetries extends 'skip' | 'skip-cascade' + ? TRetries + : false + >; + }, StepDependencies & { [K in Slug]: [] }, TEnv >; // Overload for dependent map - handler receives item, context includes flowInput + // if is typed as ContainmentPattern<{ arrayDep: ArrayOutput }> (checks the dep object) + // whenUnmet is only allowed when if or ifNot is provided (enforced by ConditionOpts union) map< Slug extends string, TArrayDep extends Extract, - THandler extends Steps[TArrayDep] extends readonly (infer Item)[] - ? (item: Item, context: FlowContext & TContext) => Json | Promise - : never + THandler extends Steps[TArrayDep]['output'] extends readonly (infer Item)[] + ? 
( + item: Item, + context: FlowContext & TContext + ) => Json | Promise + : never, + TWhenUnmet extends WhenUnmetMode | undefined = undefined, + TRetries extends RetriesExhaustedMode | undefined = undefined >( - opts: Simplify<{ slug: Slug extends keyof Steps ? never : Slug; array: TArrayDep } & StepRuntimeOptions>, + opts: Simplify< + { + slug: Slug extends keyof Steps ? never : Slug; + array: TArrayDep; + retriesExhausted?: TRetries; + } & ( + | (WithIfCondition<{ [K in TArrayDep]: Steps[K]['output'] }> & { + whenUnmet?: TWhenUnmet; + }) + | (WithIfNotCondition<{ [K in TArrayDep]: Steps[K]['output'] }> & { + whenUnmet?: TWhenUnmet; + }) + | WithoutCondition + ) & + Omit + >, handler: THandler ): Flow< TFlowInput, TContext, - Steps & { [K in Slug]: AwaitedReturn[] }, + Steps & { + [K in Slug]: StepMeta< + AwaitedReturn[], + TWhenUnmet extends 'skip' | 'skip-cascade' + ? TWhenUnmet + : TRetries extends 'skip' | 'skip-cascade' + ? TRetries + : false + >; + }, StepDependencies & { [K in Slug]: [TArrayDep] }, TEnv >; @@ -626,7 +1094,9 @@ export class Flow< if (arrayDep) { // Dependent map - validate single dependency exists and returns array if (!this.stepDefinitions[arrayDep]) { - throw new Error(`Step "${slug}" depends on undefined step "${arrayDep}"`); + throw new Error( + `Step "${slug}" depends on undefined step "${arrayDep}"` + ); } dependencies = [arrayDep]; } else { @@ -640,13 +1110,22 @@ export class Flow< if (opts.baseDelay !== undefined) options.baseDelay = opts.baseDelay; if (opts.timeout !== undefined) options.timeout = opts.timeout; if (opts.startDelay !== undefined) options.startDelay = opts.startDelay; + if (opts.if !== undefined) options.if = opts.if; + if (opts.ifNot !== undefined) options.ifNot = opts.ifNot; + if (opts.whenUnmet !== undefined) options.whenUnmet = opts.whenUnmet; + if (opts.retriesExhausted !== undefined) + options.retriesExhausted = opts.retriesExhausted; // Validate runtime options validateRuntimeOptions(options, { optional: true }); // Create the map step definition with stepType // Note: We use AnyInput/AnyOutput here because the actual types are handled at the type level via overloads - const newStepDefinition: StepDefinition = { + const newStepDefinition: StepDefinition< + AnyInput, + AnyOutput, + BaseContext & TContext + > = { slug, handler: handler as any, // Type assertion needed due to complex generic constraints dependencies, diff --git a/pkgs/dsl/src/flow-shape.ts b/pkgs/dsl/src/flow-shape.ts index d6ce295c7..108fc3ab6 100644 --- a/pkgs/dsl/src/flow-shape.ts +++ b/pkgs/dsl/src/flow-shape.ts @@ -1,9 +1,18 @@ -import { AnyFlow } from './dsl.js'; +import { AnyFlow, WhenUnmetMode, RetriesExhaustedMode, Json } from './dsl.js'; // ======================== // SHAPE TYPE DEFINITIONS // ======================== +/** + * Input pattern wrapper - explicit representation to avoid null vs JSON-null ambiguity. + * - { defined: false } means no pattern (don't check) + * - { defined: true, value: Json } means pattern is set (check against value) + */ +export type InputPattern = + | { defined: false } + | { defined: true; value: Json }; + /** * Step-level options that can be included in the shape for creation, * but are NOT compared during shape comparison (runtime tunable). @@ -31,11 +40,18 @@ export interface FlowShapeOptions { * The `options` field is included for flow creation but NOT compared during * shape comparison. Options can be tuned at runtime via SQL without * requiring recompilation. 
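 * (For example, maxAttempts or timeout can be retuned in the database without
 * triggering a shape mismatch.)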
See: /deploy/tune-flow-config/ + * + * `whenUnmet`, `whenFailed`, and pattern fields ARE structural - they affect + * DAG execution semantics and must match between worker and database. */ export interface StepShape { slug: string; stepType: 'single' | 'map'; dependencies: string[]; // sorted alphabetically for deterministic comparison + whenUnmet: WhenUnmetMode; + whenFailed: RetriesExhaustedMode; + requiredInputPattern: InputPattern; + forbiddenInputPattern: InputPattern; options?: StepShapeOptions; } @@ -107,6 +123,18 @@ export function extractFlowShape(flow: AnyFlow): FlowShape { stepType: stepDef.stepType ?? 'single', // Sort dependencies alphabetically for deterministic comparison dependencies: [...stepDef.dependencies].sort(), + // Condition modes are structural - they affect DAG execution semantics + whenUnmet: stepDef.options.whenUnmet ?? 'skip', + whenFailed: stepDef.options.retriesExhausted ?? 'fail', + // Input patterns use explicit wrapper to avoid null vs JSON-null ambiguity + requiredInputPattern: + stepDef.options.if !== undefined + ? { defined: true, value: stepDef.options.if } + : { defined: false }, + forbiddenInputPattern: + stepDef.options.ifNot !== undefined + ? { defined: true, value: stepDef.options.ifNot } + : { defined: false }, }; // Only include options if at least one is defined @@ -175,9 +203,13 @@ export function compareFlowShapes( const bStep = b.steps[i]; if (!aStep) { - differences.push(`Step at index ${i}: missing in first shape (second has '${bStep.slug}')`); + differences.push( + `Step at index ${i}: missing in first shape (second has '${bStep.slug}')` + ); } else if (!bStep) { - differences.push(`Step at index ${i}: missing in second shape (first has '${aStep.slug}')`); + differences.push( + `Step at index ${i}: missing in second shape (first has '${aStep.slug}')` + ); } else { compareSteps(aStep, bStep, i, differences); } @@ -217,7 +249,40 @@ function compareSteps( const bDeps = b.dependencies.join(','); if (aDeps !== bDeps) { differences.push( - `Step at index ${index}: dependencies differ [${a.dependencies.join(', ')}] vs [${b.dependencies.join(', ')}]` + `Step at index ${index}: dependencies differ [${a.dependencies.join( + ', ' + )}] vs [${b.dependencies.join(', ')}]` + ); + } + + // Compare condition modes (structural - affects DAG execution semantics) + if (a.whenUnmet !== b.whenUnmet) { + differences.push( + `Step at index ${index}: whenUnmet differs '${a.whenUnmet}' vs '${b.whenUnmet}'` + ); + } + + if (a.whenFailed !== b.whenFailed) { + differences.push( + `Step at index ${index}: whenFailed differs '${a.whenFailed}' vs '${b.whenFailed}'` + ); + } + + // Compare pattern fields (structural - affects DAG execution semantics) + // Uses wrapper objects: { defined: false } or { defined: true, value: Json } + const aReqPattern = JSON.stringify(a.requiredInputPattern); + const bReqPattern = JSON.stringify(b.requiredInputPattern); + if (aReqPattern !== bReqPattern) { + differences.push( + `Step at index ${index}: requiredInputPattern differs '${aReqPattern}' vs '${bReqPattern}'` + ); + } + + const aForbPattern = JSON.stringify(a.forbiddenInputPattern); + const bForbPattern = JSON.stringify(b.forbiddenInputPattern); + if (aForbPattern !== bForbPattern) { + differences.push( + `Step at index ${index}: forbiddenInputPattern differs '${aForbPattern}' vs '${bForbPattern}'` ); } } diff --git a/pkgs/edge-worker/tests/integration/flow/compilationAtStartup.test.ts b/pkgs/edge-worker/tests/integration/flow/compilationAtStartup.test.ts index 
3fa616090..2a61052d3 100644 --- a/pkgs/edge-worker/tests/integration/flow/compilationAtStartup.test.ts +++ b/pkgs/edge-worker/tests/integration/flow/compilationAtStartup.test.ts @@ -9,11 +9,12 @@ import postgresLib from 'postgres'; import { integrationConfig } from '../../config.ts'; // Define a minimal test flow -const TestCompilationFlow = new Flow<{ value: number }>({ slug: 'test_compilation_flow' }) - .step({ slug: 'double' }, async (flowInput) => { - await delay(1); - return flowInput.value * 2; - }); +const TestCompilationFlow = new Flow<{ value: number }>({ + slug: 'test_compilation_flow', +}).step({ slug: 'double' }, async (flowInput) => { + await delay(1); + return flowInput.value * 2; +}); const noop = () => {}; @@ -43,7 +44,9 @@ function createPlatformAdapterWithLocalEnv( return { ...baseAdapter, - get isLocalEnvironment() { return isLocal; }, + get isLocalEnvironment() { + return isLocal; + }, }; } @@ -56,7 +59,11 @@ Deno.test( const [flowBefore] = await sql` SELECT * FROM pgflow.flows WHERE flow_slug = 'test_compilation_flow' `; - assertEquals(flowBefore, undefined, 'Flow should not exist before worker startup'); + assertEquals( + flowBefore, + undefined, + 'Flow should not exist before worker startup' + ); // Create worker (compilation happens during acknowledgeStart) const worker = createFlowWorker( @@ -86,7 +93,11 @@ Deno.test( const [flowAfter] = await sql` SELECT * FROM pgflow.flows WHERE flow_slug = 'test_compilation_flow' `; - assertEquals(flowAfter?.flow_slug, 'test_compilation_flow', 'Flow should be created'); + assertEquals( + flowAfter?.flow_slug, + 'test_compilation_flow', + 'Flow should be created' + ); // Verify step was created const steps = await sql` @@ -113,7 +124,11 @@ Deno.test( const [flowBefore] = await sql` SELECT * FROM pgflow.flows WHERE flow_slug = 'test_compilation_flow' `; - assertEquals(flowBefore?.flow_slug, 'test_compilation_flow', 'Flow should exist'); + assertEquals( + flowBefore?.flow_slug, + 'test_compilation_flow', + 'Flow should exist' + ); // Create and start worker const worker = createFlowWorker( @@ -143,7 +158,11 @@ Deno.test( const [flowAfter] = await sql` SELECT * FROM pgflow.flows WHERE flow_slug = 'test_compilation_flow' `; - assertEquals(flowAfter?.flow_slug, 'test_compilation_flow', 'Flow should still exist'); + assertEquals( + flowAfter?.flow_slug, + 'test_compilation_flow', + 'Flow should still exist' + ); } finally { await worker.stop(); } @@ -196,9 +215,17 @@ Deno.test( await delay(200); // Verify error was thrown - assertEquals(caughtErrors.length > 0, true, 'Should have caught an error'); + assertEquals( + caughtErrors.length > 0, + true, + 'Should have caught an error' + ); const caughtError = caughtErrors[0]; - assertEquals(caughtError.name, 'FlowShapeMismatchError', 'Error should be FlowShapeMismatchError'); + assertEquals( + caughtError.name, + 'FlowShapeMismatchError', + 'Error should be FlowShapeMismatchError' + ); assertEquals( caughtError.message.includes('shape mismatch'), true, @@ -254,7 +281,11 @@ Deno.test( SELECT step_slug FROM pgflow.steps WHERE flow_slug = 'test_compilation_flow' ORDER BY step_slug `; assertEquals(steps.length, 1, 'Should have 1 step after recompilation'); - assertEquals(steps[0].step_slug, 'double', 'Step should be "double" after recompilation'); + assertEquals( + steps[0].step_slug, + 'double', + 'Step should be "double" after recompilation' + ); } finally { await worker.stop(); } @@ -269,7 +300,17 @@ Deno.test( const CONCURRENT = 50; // 50 separate connections const flowSlug = 
`concurrent_test_${Date.now()}`; const shape = { - steps: [{ slug: 'step1', stepType: 'single', dependencies: [] }], + steps: [ + { + slug: 'step1', + stepType: 'single', + dependencies: [], + whenUnmet: 'skip', + whenFailed: 'fail', + requiredInputPattern: { defined: false }, + forbiddenInputPattern: { defined: false }, + }, + ], }; // Create N SEPARATE connections (critical for true concurrency) @@ -283,8 +324,9 @@ Deno.test( // Fire all compilations simultaneously on separate connections // Note: Must use conn.json() for proper jsonb parameter passing const results = await Promise.all( - connections.map((conn) => - conn`SELECT pgflow.ensure_flow_compiled( + connections.map( + (conn) => + conn`SELECT pgflow.ensure_flow_compiled( ${flowSlug}, ${conn.json(shape)} ) as result` @@ -314,7 +356,9 @@ Deno.test( assertEquals(stepCount.count, 1, 'Exactly 1 step should exist'); } finally { // Cleanup - await sql`SELECT pgflow.delete_flow_and_data(${flowSlug})`.catch(() => {}); + await sql`SELECT pgflow.delete_flow_and_data(${flowSlug})`.catch( + () => {} + ); await Promise.all(connections.map((c) => c.end())); } }) @@ -331,14 +375,18 @@ Deno.test( const [flowBefore] = await sql` SELECT * FROM pgflow.flows WHERE flow_slug = 'test_compilation_flow' `; - assertEquals(flowBefore, undefined, 'Flow should not exist before worker startup'); + assertEquals( + flowBefore, + undefined, + 'Flow should not exist before worker startup' + ); // Create worker with compilation: false const worker = createFlowWorker( TestCompilationFlow, { sql, - compilation: false, // SKIP compilation + compilation: false, // SKIP compilation maxConcurrent: 1, batchSize: 10, maxPollSeconds: 1, @@ -359,7 +407,11 @@ Deno.test( const [flowAfter] = await sql` SELECT * FROM pgflow.flows WHERE flow_slug = 'test_compilation_flow' `; - assertEquals(flowAfter, undefined, 'Flow should NOT be created when compilation skipped'); + assertEquals( + flowAfter, + undefined, + 'Flow should NOT be created when compilation skipped' + ); } finally { await worker.stop(); } @@ -375,14 +427,18 @@ Deno.test( const [flowBefore] = await sql` SELECT * FROM pgflow.flows WHERE flow_slug = 'test_compilation_flow' `; - assertEquals(flowBefore, undefined, 'Flow should not exist before worker startup'); + assertEquals( + flowBefore, + undefined, + 'Flow should not exist before worker startup' + ); // Create worker with compilation: {} (explicit) const worker = createFlowWorker( TestCompilationFlow, { sql, - compilation: {}, // EXPLICIT empty object = enable compilation + compilation: {}, // EXPLICIT empty object = enable compilation maxConcurrent: 1, batchSize: 10, maxPollSeconds: 1, @@ -403,7 +459,11 @@ Deno.test( const [flowAfter] = await sql` SELECT * FROM pgflow.flows WHERE flow_slug = 'test_compilation_flow' `; - assertEquals(flowAfter?.flow_slug, 'test_compilation_flow', 'Flow should be created when compilation: {}'); + assertEquals( + flowAfter?.flow_slug, + 'test_compilation_flow', + 'Flow should be created when compilation: {}' + ); } finally { await worker.stop(); } @@ -426,7 +486,7 @@ Deno.test( TestCompilationFlow, { sql, - compilation: false, // Skip compilation check + compilation: false, // Skip compilation check maxConcurrent: 1, batchSize: 10, maxPollSeconds: 1, @@ -447,8 +507,16 @@ Deno.test( const workers = await sql` SELECT * FROM pgflow.workers WHERE worker_id = ${workerId} `; - assertEquals(workers.length, 1, 'Worker should be registered even when skipping compilation'); - assertEquals(workers[0].queue_name, 'test_compilation_flow', 'Worker 
+    assertEquals(
+      workers.length,
+      1,
+      'Worker should be registered even when skipping compilation'
+    );
+    assertEquals(
+      workers[0].queue_name,
+      'test_compilation_flow',
+      'Worker should be registered for the correct queue'
+    );
  } finally {
    await worker.stop();
  }
@@ -474,7 +542,7 @@ Deno.test(
      TestCompilationFlow, // Has 'double' step, not 'old_step'
      {
        sql,
-        compilation: { allowDataLoss: true }, // Allow destructive recompile in production
+        compilation: { allowDataLoss: true }, // Allow destructive recompile in production
        maxConcurrent: 1,
        batchSize: 10,
        maxPollSeconds: 1,
@@ -497,8 +565,16 @@ Deno.test(
    const steps = await sql`
      SELECT step_slug FROM pgflow.steps WHERE flow_slug = 'test_compilation_flow' ORDER BY step_slug
    `;
-    assertEquals(steps.length, 1, 'Should have 1 step after recompilation with allowDataLoss');
-    assertEquals(steps[0].step_slug, 'double', 'Step should be "double" after recompilation');
+    assertEquals(
+      steps.length,
+      1,
+      'Should have 1 step after recompilation with allowDataLoss'
+    );
+    assertEquals(
+      steps[0].step_slug,
+      'double',
+      'Step should be "double" after recompilation'
+    );
  } finally {
    await worker.stop();
  }
@@ -524,7 +600,7 @@ Deno.test(
      TestCompilationFlow, // Has only 'double' step
      {
        sql,
-        compilation: { allowDataLoss: false }, // Explicit false
+        compilation: { allowDataLoss: false }, // Explicit false
        maxConcurrent: 1,
        batchSize: 10,
        maxPollSeconds: 1,
@@ -552,9 +628,17 @@ Deno.test(
    await delay(200);
 
    // Verify error was thrown
-    assertEquals(caughtErrors.length > 0, true, 'Should have caught an error');
+    assertEquals(
+      caughtErrors.length > 0,
+      true,
+      'Should have caught an error'
+    );
    const caughtError = caughtErrors[0];
-    assertEquals(caughtError.name, 'FlowShapeMismatchError', 'Error should be FlowShapeMismatchError');
+    assertEquals(
+      caughtError.name,
+      'FlowShapeMismatchError',
+      'Error should be FlowShapeMismatchError'
+    );
    assertEquals(
      caughtError.message.includes('shape mismatch'),
      true,
diff --git a/prompt.md b/prompt.md
new file mode 100644
index 000000000..e27ca6a39
--- /dev/null
+++ b/prompt.md
@@ -0,0 +1 @@
+your job is to read the beads related to the pgf-3hs epic about conditionals, verify the current implementation and its possibilities in @pkgs/dsl/__tests__/types/condition-pattern.test-d.ts @pkgs/dsl/__tests__/types/skippable-deps.test-d.ts and also some parts of the schema @pkgs/core/schemas/0100_function__cascade_force_skip_steps.sql @pkgs/core/schemas/0100_function_complete_task.sql and the tests in @pkgs/dsl/__tests__/runtime/condition-options.test.ts and @pkgs/core/schemas/0100_function_cascade_resolve_conditions.sql and @pkgs/core/supabase/tests/condition_evaluation/ and @pkgs/core/supabase/tests/_cascade_force_skip_steps/ and the @.changeset/add-when-failed-option.md @.changeset/skip-infrastructure-schema.md and @pkgs/dsl/__tests__/runtime/when-failed-options.test.ts @pkgs/core/supabase/tests/fail_task_when_failed/ and anything else you think is related. then you should read the current docs, especially the onboarding in @pkgs/website/src/content/docs/get-started/ , @pkgs/website/src/content/docs/concepts/ , and @pkgs/website/src/content/docs/build/ , and come up with a plan for how best to explain conditions, skipping, skip cascades, whenFailed, json containment matching and all that in a way that is easy to understand, visual, and full of examples. i think we would need DAG d2 diagrams showing step by step what happens for each skip/fail mode, showing the differences in what happens when steps are skipped/failed and what happens to dependents and the run based on which whenUnmet/retriesExhausted modes are selected. we probably need a new color for the skipped status in @pkgs/website/src/assets/pgflow-theme.d2 , a new explanation in @.claude/skills/writing-d2-diagrams/SKILL.md , and some updates to @.claude/skills/writing-pgflow-flows/SKILL.md . you should leverage parallel task agents for as much work as possible, so you can answer as many of the questions you come up with as possible. do the research in multiple stages: first, think about what you need to understand, then spawn multiple task agents that will read, summarize, and explain things to you; from there you can plan additional stages of tasks for the parallel subagents. the result of your work should be a comprehensive plan
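
For the "json containment matching" mentioned in prompt.md above, here is a minimal TypeScript sketch of the matching semantics. It assumes the condition patterns use Postgres-style jsonb containment (suggested by schema names such as 0100_function_cascade_resolve_conditions.sql, but not spelled out in this diff); `contains` is a hypothetical helper written only for illustration, not part of the pgflow API:

// Simplified jsonb-style containment (Postgres `left @> right`):
// - objects: every key in the pattern must exist in the value and match recursively
// - arrays: every pattern element must be contained in some value element
// - primitives: strict equality
type Json = null | boolean | number | string | Json[] | { [key: string]: Json };

function contains(value: Json, pattern: Json): boolean {
  if (Array.isArray(pattern)) {
    return (
      Array.isArray(value) &&
      pattern.every((p) => value.some((v) => contains(v, p)))
    );
  }
  if (pattern !== null && typeof pattern === 'object') {
    return (
      value !== null &&
      typeof value === 'object' &&
      !Array.isArray(value) &&
      Object.entries(pattern).every(
        ([key, p]) => key in value && contains(value[key], p)
      )
    );
  }
  return value === pattern;
}

// An `if` pattern matches when containment holds; `ifNot` inverts the result.
console.assert(contains({ plan: 'pro', seats: 5 }, { plan: 'pro' }));
console.assert(!contains({ plan: 'free' }, { plan: 'pro' }));

Note this is a simplification: real Postgres @> has extra rules (for example, a scalar pattern is contained in an array that holds it), so treat the sketch purely as intuition for how condition patterns match step input.
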
diff --git a/x.md b/x.md
new file mode 100644
index 000000000..04632a2e8
--- /dev/null
+++ b/x.md
@@ -0,0 +1 @@
+please go through all the docs we have updated in this PR and make sure that we are adhering to the writing-pgflow-flows skill - there is no input.run anymore, only flowInput, and dependent steps need to await ctx.flowInput since they don't have direct access to it anymore; input.run is no longer available
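
For the docs work both notes point at, a compact sketch of what a conditional flow might look like at the DSL level. This is a hedged illustration only: the option names (`if`, `ifNot`, `whenUnmet`, `whenFailed`) are inferred from this patch's commit message and the shape fields in the test diff, and `ctx.flowInput` comes from the x.md note — verify the exact signatures against pkgs/dsl/src/dsl.ts before documenting:

// Sketch only — option names are assumptions, not confirmed API.
import { Flow } from '@pgflow/dsl';

type Input = { plan: string; sendEmail: boolean };

export const ConditionalFlow = new Flow<Input>({ slug: 'conditional_flow' })
  .step(
    // Runs only when the flow input contains { plan: 'pro' } (containment);
    // whenUnmet: 'skip' skips the step (and cascades to dependents)
    // instead of failing the run when the pattern does not match.
    { slug: 'provision', if: { plan: 'pro' }, whenUnmet: 'skip' },
    async () => ({ provisioned: true })
  )
  .step(
    // ifNot is the negative pattern: run only when containment does NOT hold;
    // whenFailed: 'skip' downgrades "failed after all retries" to a skip.
    {
      slug: 'notify',
      dependsOn: ['provision'],
      ifNot: { sendEmail: false },
      whenFailed: 'skip',
    },
    async (input, ctx) => {
      // Dependent steps no longer receive input.run — per x.md they must
      // await ctx.flowInput to read the original flow input.
      const flowInput = await ctx.flowInput;
      return { notifiedPlan: flowInput.plan };
    }
  );

If the names hold, a skipped 'provision' would cascade a skip through 'notify', which is the behavior exercised by the _cascade_force_skip_steps and condition_evaluation test suites added in this patch.
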