  setActivityQueryData,
  resetActivityQueryCache,
  isEntryStale,
+  setErrorOnlyCacheEntry,
+  _retryTestHelpers,
} from '../use-activity-query'

describe('use-activity-query utilities', () => {
@@ -765,3 +767,348 @@ describe('cache edge cases and error handling', () => {
    expect(getActivityQueryData<string>(testKey)).toBe('second')
  })
})
+
+/**
+ * Tests for error-only cache entries and persistent error scenarios.
+ * This test suite was added to debug and fix an issue where fetchSubscriptionData
+ * was being called every second when the endpoint returned errors.
+ */
+describe('error-only entries and persistent error handling', () => {
+  let originalDateNow: typeof Date.now
+  let mockNow: number
+
+  beforeEach(() => {
+    resetActivityQueryCache()
+    originalDateNow = Date.now
+    mockNow = 1000000
+    Date.now = () => mockNow
+  })
+
+  afterEach(() => {
+    Date.now = originalDateNow
+  })
+
+  test('setErrorOnlyCacheEntry creates entry with no data and error', () => {
+    const testKey = ['error-entry-test']
+    const error = new Error('Network error')
+
+    setErrorOnlyCacheEntry(testKey, error)
+
+    // Data should be undefined (error-only entry)
+    expect(getActivityQueryData(testKey)).toBeUndefined()
+  })
+
+  test('error-only entry with recent errorUpdatedAt should NOT be stale', () => {
+    // This test verifies the fix for the infinite refetch loop bug.
+    //
+    // Scenario:
+    // 1. Fetch fails with no prior data
+    // 2. Error is stored with errorUpdatedAt = now
+    // 3. Polling tick fires
+    // 4. isEntryStale should return FALSE if errorUpdatedAt is recent
+    // 5. This prevents immediate refetch loop
+
+    const testKey = ['error-only-fresh-test']
+    const serializedKey = JSON.stringify(testKey)
+    const staleTime = 30000 // 30 seconds
+    const error = new Error('API error')
+
+    // Create error-only entry at current time (mockNow = 1000000)
+    setErrorOnlyCacheEntry(testKey, error, mockNow)
+
+    // Entry has errorUpdatedAt = 1000000, current time = 1000000
+    // Time since error: 0ms, staleTime: 30000ms
+    // Should NOT be stale because error is recent
+    expect(isEntryStale(serializedKey, staleTime)).toBe(false)
+  })
+
+  test('error-only entry becomes stale after staleTime passes', () => {
+    const testKey = ['error-stale-after-time-test']
+    const serializedKey = JSON.stringify(testKey)
+    const staleTime = 30000 // 30 seconds
+    const error = new Error('API error')
+
+    // Create error-only entry at current time
+    setErrorOnlyCacheEntry(testKey, error, mockNow)
+
+    // Initially not stale
+    expect(isEntryStale(serializedKey, staleTime)).toBe(false)
+
+    // Advance time by 25 seconds - still fresh
+    mockNow += 25000
+    expect(isEntryStale(serializedKey, staleTime)).toBe(false)
+
+    // Advance time past staleTime (now 35 seconds since error)
+    mockNow += 10000
+    expect(isEntryStale(serializedKey, staleTime)).toBe(true)
+  })
+
+  test('simulates subscription query polling with persistent errors', () => {
+    // This test simulates the exact bug scenario:
+    // - useSubscriptionQuery with staleTime=30s, refetchInterval=60s
+    // - Endpoint returns errors
+    // - Without fix: isEntryStale returns true immediately, causing rapid refetches
+    // - With fix: isEntryStale uses errorUpdatedAt, preventing rapid refetches
+
+    const subscriptionKey = ['subscription', 'current']
+    const serializedKey = JSON.stringify(subscriptionKey)
+    const staleTime = 30000 // 30 seconds (matches useSubscriptionQuery)
+    const refetchInterval = 60000 // 60 seconds
+    const error = new Error('Failed to fetch subscription: 500')
+
+    // Simulate first fetch failure at t=0
+    setErrorOnlyCacheEntry(subscriptionKey, error, mockNow)
+
+    // Immediately after error, entry should NOT be stale
+    // This is the critical fix - prevents immediate refetch loop
+    expect(isEntryStale(serializedKey, staleTime)).toBe(false)
+
+    // Simulate polling interval at t=1s (as reported in bug)
+    mockNow += 1000
+    // Entry should still NOT be stale (only 1s since error, staleTime is 30s)
+    expect(isEntryStale(serializedKey, staleTime)).toBe(false)
+
+    // Simulate many 1-second intervals - none should trigger refetch until staleTime
+    for (let i = 0; i < 28; i++) {
+      mockNow += 1000
+      expect(isEntryStale(serializedKey, staleTime)).toBe(false)
+    }
+
+    // Now at t=29s - should still be fresh (29s is not > 30s)
+    expect(isEntryStale(serializedKey, staleTime)).toBe(false)
+
+    // At t=30s - should still be fresh (30s is not > 30s, need strictly greater)
+    mockNow += 1000
+    expect(isEntryStale(serializedKey, staleTime)).toBe(false)
+
+    // At t=31s - now stale, refetch should be allowed (31s > 30s)
+    mockNow += 1000
+    expect(isEntryStale(serializedKey, staleTime)).toBe(true)
+  })
+
+  test('staleTime of 0 means always stale even for error-only entries', () => {
+    const testKey = ['zero-stale-error-test']
+    const serializedKey = JSON.stringify(testKey)
+    const error = new Error('Some error')
+
+    setErrorOnlyCacheEntry(testKey, error, mockNow)
+
+    // With staleTime=0, entry is always considered stale
+    expect(isEntryStale(serializedKey, 0)).toBe(true)
+  })
+
+  test('error-only entry with null errorUpdatedAt is stale', () => {
+    // Edge case: if errorUpdatedAt were somehow null, the entry should be
+    // treated as stale. This shouldn't happen in practice, but it documents
+    // the defensive behavior.
+    const testKey = ['null-error-time-test']
+    const serializedKey = JSON.stringify(testKey)
+    const staleTime = 30000
+
+    // setErrorOnlyCacheEntry always sets errorUpdatedAt, so the closest case
+    // we can exercise directly is a missing entry, which must also be stale.
+    expect(isEntryStale(serializedKey, staleTime)).toBe(true)
+  })
+
+  test('successful data takes precedence over errorUpdatedAt for staleness', () => {
+    const testKey = ['data-precedence-test']
+    const serializedKey = JSON.stringify(testKey)
+    const staleTime = 30000
+
+    // First, set an error-only entry
+    setErrorOnlyCacheEntry(testKey, new Error('Initial error'), mockNow)
+    expect(isEntryStale(serializedKey, staleTime)).toBe(false) // Fresh error
+
+    // Now set successful data (this is what happens on successful retry)
+    setActivityQueryData(testKey, { subscription: 'active' })
+
+    // Staleness should now be based on dataUpdatedAt, not errorUpdatedAt
+    expect(isEntryStale(serializedKey, staleTime)).toBe(false) // Fresh data
+
+    // Advance time past staleTime
+    mockNow += 35000
+    expect(isEntryStale(serializedKey, staleTime)).toBe(true) // Stale based on dataUpdatedAt
+  })
+})
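
The tests above pin down the staleness contract for error-only entries: a missing entry is always stale, staleTime 0 is always stale, fresh data or a fresh error is not stale, and the comparison is strictly greater-than. The diff does not show the isEntryStale implementation itself, so the following is only a minimal sketch of that contract, assuming a cache entry shape with dataUpdatedAt and errorUpdatedAt fields (the names, the Map-based cache, and isEntryStaleSketch are placeholders, not the hook's actual internals):

// Sketch only: assumed entry shape, not the real use-activity-query internals.
type CacheEntry<T> = {
  data?: T
  dataUpdatedAt: number | null  // timestamp of the last successful fetch
  error?: Error
  errorUpdatedAt: number | null // timestamp of the last failed fetch
}

const cache = new Map<string, CacheEntry<unknown>>()

function isEntryStaleSketch(serializedKey: string, staleTime: number): boolean {
  const entry = cache.get(serializedKey)
  if (!entry) return true          // no entry: always stale
  if (staleTime === 0) return true // staleTime 0: always stale

  // Prefer dataUpdatedAt; fall back to errorUpdatedAt for error-only entries,
  // so a fetch that just failed does not immediately trigger another fetch.
  const updatedAt = entry.dataUpdatedAt ?? entry.errorUpdatedAt
  if (updatedAt == null) return true

  // Strictly greater-than, matching the t=30s case above (an entry exactly
  // staleTime old is still treated as fresh).
  return Date.now() - updatedAt > staleTime
}

Falling back to errorUpdatedAt is what breaks the refetch-every-second loop: a polling tick sees a fresh error and skips the fetch until staleTime has actually elapsed.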
+
+/**
+ * Tests for the retry infinite loop bug.
+ *
+ * BUG: When useSubscriptionQuery fetched /api/user/subscription and got a 401,
+ * it would retry every ~1 second infinitely instead of respecting retry:1.
+ *
+ * ROOT CAUSE: In doFetch's catch block, when scheduling a retry:
+ * 1. retryCounts.set(key, next) // Sets count to 1
+ * 2. clearRetryState(key) // Deletes retryCounts → count back to 0!
+ * 3. setTimeout to retry in 1s
+ * When the retry fires, currentRetries reads as 0 again → thinks it still has
+ * retries left → schedules another retry → infinite loop.
+ *
+ * FIX: Split clearRetryState into clearRetryTimeout (only clears timeout)
+ * and clearRetryState (clears both). The retry scheduling block now uses
+ * clearRetryTimeout so the retry count is preserved.
+ */
+describe('retry infinite loop bug fix (subscription 401 scenario)', () => {
+  beforeEach(() => {
+    resetActivityQueryCache()
+  })
+
+  test('retry count is preserved after scheduling a retry', () => {
+    const queryKey = ['subscription', 'current']
+    const maxRetries = 1
+
+    // Simulate a mounted component (refCount > 0)
+    _retryTestHelpers.setRefCount(queryKey, 1)
+
+    // Initially, no retries have been attempted
+    expect(_retryTestHelpers.getRetryCount(queryKey)).toBe(0)
+
+    // First fetch fails → should schedule a retry
+    const result1 = _retryTestHelpers.simulateFailedFetch(queryKey, maxRetries)
+    expect(result1.retryScheduled).toBe(true)
+    expect(result1.retryCount).toBe(1)
+
+    // CRITICAL: Retry count must be preserved (not reset to 0)
+    expect(_retryTestHelpers.getRetryCount(queryKey)).toBe(1)
+  })
+
+  test('retries are exhausted after maxRetries attempts', () => {
+    const queryKey = ['subscription', 'current']
+    const maxRetries = 1
+
+    _retryTestHelpers.setRefCount(queryKey, 1)
+
+    // First fetch fails → retry scheduled (count becomes 1)
+    const result1 = _retryTestHelpers.simulateFailedFetch(queryKey, maxRetries)
+    expect(result1.retryScheduled).toBe(true)
+    expect(result1.retryCount).toBe(1)
+
+    // Retry fires, also fails → retries exhausted (count = 1, not < maxRetries=1)
+    const result2 = _retryTestHelpers.simulateFailedFetch(queryKey, maxRetries)
+    expect(result2.retryScheduled).toBe(false)
+    expect(result2.retryCount).toBe(0) // Reset after exhaustion
+  })
+
+  test('simulates full subscription 401 scenario: fetch + 1 retry + stop', () => {
+    // This reproduces the exact bug scenario:
+    // useSubscriptionQuery with retry:1 hitting a 401 on /api/user/subscription
+    const queryKey = ['subscription', 'current']
+    const maxRetries = 1
+
+    // Component is mounted
+    _retryTestHelpers.setRefCount(queryKey, 1)
+
+    // === Fetch #1: Initial fetch fails with 401 ===
+    const fetch1 = _retryTestHelpers.simulateFailedFetch(queryKey, maxRetries)
+    expect(fetch1.retryScheduled).toBe(true)
+    expect(fetch1.retryCount).toBe(1)
+
+    // === Fetch #2: Retry fires after 1s, also fails with 401 ===
+    const fetch2 = _retryTestHelpers.simulateFailedFetch(queryKey, maxRetries)
+    expect(fetch2.retryScheduled).toBe(false) // Retries exhausted!
+    expect(fetch2.retryCount).toBe(0)
+
+    // === Fetch #3: A later fetch (e.g. a polling tick) starts a new cycle ===
+    // With the fix, the error from fetch #2 is stored and no retry is pending.
+    // Because the count was reset to 0 on exhaustion, a new fetch cycle gets a
+    // fresh retry budget: retry:1 means ONE retry per fetch cycle, not in total.
+    const fetch3 = _retryTestHelpers.simulateFailedFetch(queryKey, maxRetries)
+    expect(fetch3.retryScheduled).toBe(true) // New fetch cycle starts fresh
+    expect(fetch3.retryCount).toBe(1)
+
+    // The retry for fetch3 fires and fails
+    const fetch4 = _retryTestHelpers.simulateFailedFetch(queryKey, maxRetries)
+    expect(fetch4.retryScheduled).toBe(false) // Exhausted again
+  })
+
+  test('demonstrates the old bug: clearRetryState would reset count causing infinite loop', () => {
+    // This test documents the OLD buggy behavior.
+    // The old code called clearRetryState (which deletes retryCounts) right after
+    // setting the retry count, effectively resetting it to 0 every time.
+    const queryKey = ['subscription', 'current']
+
+    _retryTestHelpers.setRefCount(queryKey, 1)
+
+    // Step 1: Simulate first fetch failure setting retry count to 1
+    _retryTestHelpers.setRetryCount(queryKey, 1)
+    expect(_retryTestHelpers.getRetryCount(queryKey)).toBe(1)
+
+    // Step 2: OLD CODE would call clearRetryState here, which resets count to 0:
+    // clearRetryState(key) → retryCounts.delete(key) → count = 0
+    // Simulate the old bug by manually resetting:
+    _retryTestHelpers.setRetryCount(queryKey, 0)
+    expect(_retryTestHelpers.getRetryCount(queryKey)).toBe(0)
+
+    // Step 3: When the retry fires after 1s, it reads count as 0
+    // 0 < maxRetries(1) → true → schedules ANOTHER retry (should have been exhausted!)
+    const result = _retryTestHelpers.simulateFailedFetch(queryKey, 1)
+    expect(result.retryScheduled).toBe(true) // BUG: should have been false!
+    expect(result.retryCount).toBe(1) // Count set to 1 again...
+
+    // And the cycle repeats: count gets reset → retry fires → count is 0 → retry...
+    // With the fix (clearRetryTimeout instead of clearRetryState), count stays at 1
+    // so the next attempt correctly sees 1 >= maxRetries(1) → exhausted.
+  })
+
+  test('retry count resets to 0 when retries are exhausted', () => {
+    const queryKey = ['retry-reset-test']
+    const maxRetries = 2
+
+    _retryTestHelpers.setRefCount(queryKey, 1)
+
+    // First fail → retry scheduled, count=1
+    const r1 = _retryTestHelpers.simulateFailedFetch(queryKey, maxRetries)
+    expect(r1).toEqual({ retryScheduled: true, retryCount: 1 })
+
+    // Second fail → retry scheduled, count=2
+    const r2 = _retryTestHelpers.simulateFailedFetch(queryKey, maxRetries)
+    expect(r2).toEqual({ retryScheduled: true, retryCount: 2 })
+
+    // Third fail → retries exhausted, count reset to 0
+    const r3 = _retryTestHelpers.simulateFailedFetch(queryKey, maxRetries)
+    expect(r3).toEqual({ retryScheduled: false, retryCount: 0 })
+  })
+
+  test('no retries when retry is 0 or false', () => {
+    const queryKey = ['no-retry-test']
+    _retryTestHelpers.setRefCount(queryKey, 1)
+
+    // retry: 0 (equivalent to retry: false)
+    const result = _retryTestHelpers.simulateFailedFetch(queryKey, 0)
+    expect(result.retryScheduled).toBe(false)
+    expect(result.retryCount).toBe(0)
+  })
+
+  test('no retries when component is unmounted (refCount=0)', () => {
+    const queryKey = ['unmounted-test']
+    // Don't set refCount (defaults to 0 = no mounted components)
+
+    const result = _retryTestHelpers.simulateFailedFetch(queryKey, 1)
+    expect(result.retryScheduled).toBe(false)
+  })
+
+  test('error-only entry is created after retries exhausted', () => {
+    const queryKey = ['error-entry-after-retry']
+    _retryTestHelpers.setRefCount(queryKey, 1)
+
+    // First fail → retry
+    _retryTestHelpers.simulateFailedFetch(queryKey, 1)
+
+    // No cache entry yet during retry phase
+    expect(getActivityQueryData(queryKey)).toBeUndefined()
+
+    // Second fail → exhausted, error entry created
+    _retryTestHelpers.simulateFailedFetch(queryKey, 1)
+
+    // The error-only entry should now exist: data stays undefined, but the
+    // entry holds the error. We verify this indirectly via isEntryStale, since
+    // a missing entry is always stale, while an entry whose errorUpdatedAt was
+    // just set to Date.now() stays fresh for any reasonable staleTime.
+    const serializedKey = JSON.stringify(queryKey)
+    expect(isEntryStale(serializedKey, 30000)).toBe(false)
+  })
+})
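
The doc comment above describes the fix as splitting clearRetryState into clearRetryTimeout and clearRetryState. Those helpers are not shown in this diff, so here is only a rough sketch of the scheduling logic the retry tests imply, assuming Map-based retryCounts/retryTimeouts bookkeeping keyed by the serialized query key (names, signatures, and the 1s delay are assumptions; refCount checks and error storage are omitted):

// Sketch only: assumed bookkeeping, not the actual use-activity-query code.
const retryCounts = new Map<string, number>()
const retryTimeouts = new Map<string, ReturnType<typeof setTimeout>>()

function clearRetryTimeout(key: string) {
  const pending = retryTimeouts.get(key)
  if (pending !== undefined) clearTimeout(pending)
  retryTimeouts.delete(key) // leaves retryCounts intact
}

function clearRetryState(key: string) {
  clearRetryTimeout(key)
  retryCounts.delete(key) // full reset, only once retries are truly done
}

function handleFetchFailure(key: string, maxRetries: number, refetch: () => void) {
  const currentRetries = retryCounts.get(key) ?? 0

  if (currentRetries < maxRetries) {
    retryCounts.set(key, currentRetries + 1)
    // The old bug: calling clearRetryState here wiped the count just set above,
    // so the retry that fired a second later always read 0 and rescheduled forever.
    clearRetryTimeout(key)
    retryTimeouts.set(key, setTimeout(refetch, 1000))
    return { retryScheduled: true, retryCount: currentRetries + 1 }
  }

  // Retries exhausted: the error-only entry is stored elsewhere, then the count
  // is reset so the next fetch cycle (e.g. the next polling tick) starts fresh.
  clearRetryState(key)
  return { retryScheduled: false, retryCount: 0 }
}

With the count preserved between the scheduling step and the retry itself, the retried fetch sees 1 < 1 as false and stops, which is exactly what the 'retries are exhausted after maxRetries attempts' test asserts.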