From e085ab165d0a9c8293288c0136c69c8f07ffd981 Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Thu, 22 Jan 2026 03:49:03 +0700 Subject: [PATCH 01/23] refactor: normalize logstore driver interface - Rename interface methods: InsertManyDeliveryEvent -> InsertMany, ListDeliveryEvent -> ListDelivery, RetrieveDeliveryEvent -> RetrieveDelivery - Rename request types: ListDeliveryEventRequest -> ListDeliveryRequest, etc. - Add DeliveryRecord type for query results with Event and Delivery - Update memlogstore, pglogstore, chlogstore implementations - Update all API handlers and tests to use new interface - Remove DeliveryEventID field from Delivery struct Co-Authored-By: Claude Opus 4.5 --- internal/apirouter/legacy_handlers.go | 24 +- internal/apirouter/legacy_handlers_test.go | 8 +- internal/apirouter/log_handlers.go | 71 +++-- internal/apirouter/log_handlers_test.go | 10 +- internal/apirouter/retry_handlers.go | 12 +- internal/apirouter/retry_handlers_test.go | 4 +- internal/deliverymq/mock_test.go | 45 ++- internal/logstore/chlogstore/chlogstore.go | 310 +++++++++---------- internal/logstore/driver/driver.go | 20 +- internal/logstore/drivertest/crud.go | 83 +++-- internal/logstore/drivertest/misc.go | 95 +++--- internal/logstore/drivertest/pagination.go | 215 ++++++------- internal/logstore/logstore.go | 13 +- internal/logstore/memlogstore/memlogstore.go | 213 ++++++------- internal/logstore/pglogstore/pglogstore.go | 278 +++++++---------- internal/models/event.go | 18 +- internal/services/builder.go | 15 +- internal/util/testutil/event.go | 30 +- 18 files changed, 712 insertions(+), 752 deletions(-) diff --git a/internal/apirouter/legacy_handlers.go b/internal/apirouter/legacy_handlers.go index 18195f5e..c94f9bcb 100644 --- a/internal/apirouter/legacy_handlers.go +++ b/internal/apirouter/legacy_handlers.go @@ -137,7 +137,7 @@ func (h *LegacyHandlers) ListEventsByDestination(c *gin.Context) { } // Query deliveries for this destination with pagination - response, err := 
h.logStore.ListDeliveryEvent(c.Request.Context(), logstore.ListDeliveryEventRequest{ + response, err := h.logStore.ListDelivery(c.Request.Context(), logstore.ListDeliveryRequest{ TenantID: tenant.ID, DestinationIDs: []string{destinationID}, Limit: limit, @@ -153,10 +153,10 @@ func (h *LegacyHandlers) ListEventsByDestination(c *gin.Context) { // Extract unique events (by event ID, keep first occurrence) seen := make(map[string]bool) events := []models.Event{} - for _, de := range response.Data { - if !seen[de.Event.ID] { - seen[de.Event.ID] = true - events = append(events, de.Event) + for _, dr := range response.Data { + if !seen[dr.Event.ID] { + seen[dr.Event.ID] = true + events = append(events, *dr.Event) } } @@ -232,7 +232,7 @@ func (h *LegacyHandlers) ListDeliveriesByEvent(c *gin.Context) { eventID := c.Param("eventID") // Query deliveries for this event - response, err := h.logStore.ListDeliveryEvent(c.Request.Context(), logstore.ListDeliveryEventRequest{ + response, err := h.logStore.ListDelivery(c.Request.Context(), logstore.ListDeliveryRequest{ TenantID: tenant.ID, EventID: eventID, Limit: 100, @@ -251,13 +251,13 @@ func (h *LegacyHandlers) ListDeliveriesByEvent(c *gin.Context) { // Transform to legacy delivery response format (bare array) deliveries := make([]LegacyDeliveryResponse, len(response.Data)) - for i, de := range response.Data { + for i, dr := range response.Data { deliveries[i] = LegacyDeliveryResponse{ - ID: de.Delivery.ID, - DeliveredAt: de.Delivery.Time.UTC().Format("2006-01-02T15:04:05Z07:00"), - Status: de.Delivery.Status, - Code: de.Delivery.Code, - ResponseData: de.Delivery.ResponseData, + ID: dr.Delivery.ID, + DeliveredAt: dr.Delivery.Time.UTC().Format("2006-01-02T15:04:05Z07:00"), + Status: dr.Delivery.Status, + Code: dr.Delivery.Code, + ResponseData: dr.Delivery.ResponseData, } } diff --git a/internal/apirouter/legacy_handlers_test.go b/internal/apirouter/legacy_handlers_test.go index 883eac63..98822806 100644 --- 
a/internal/apirouter/legacy_handlers_test.go +++ b/internal/apirouter/legacy_handlers_test.go @@ -65,7 +65,7 @@ func TestLegacyRetryByEventDestination(t *testing.T) { Delivery: delivery, } - require.NoError(t, result.logStore.InsertManyDeliveryEvent(context.Background(), []*models.DeliveryEvent{de})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) t.Run("should retry via legacy endpoint and return deprecation header", func(t *testing.T) { w := httptest.NewRecorder() @@ -167,7 +167,7 @@ func TestLegacyListEventsByDestination(t *testing.T) { Delivery: delivery, } - require.NoError(t, result.logStore.InsertManyDeliveryEvent(context.Background(), []*models.DeliveryEvent{de})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) t.Run("should list events for destination with deprecation header", func(t *testing.T) { w := httptest.NewRecorder() @@ -238,7 +238,7 @@ func TestLegacyRetrieveEventByDestination(t *testing.T) { Delivery: delivery, } - require.NoError(t, result.logStore.InsertManyDeliveryEvent(context.Background(), []*models.DeliveryEvent{de})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) t.Run("should retrieve event by destination with deprecation header", func(t *testing.T) { w := httptest.NewRecorder() @@ -313,7 +313,7 @@ func TestLegacyListDeliveriesByEvent(t *testing.T) { Delivery: delivery, } - require.NoError(t, result.logStore.InsertManyDeliveryEvent(context.Background(), []*models.DeliveryEvent{de})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) t.Run("should list deliveries for event with deprecation header", func(t *testing.T) { w := httptest.NewRecorder() diff --git a/internal/apirouter/log_handlers.go 
b/internal/apirouter/log_handlers.go index 7e2ee0b2..478a4460 100644 --- a/internal/apirouter/log_handlers.go +++ b/internal/apirouter/log_handlers.go @@ -11,7 +11,6 @@ import ( "github.com/hookdeck/outpost/internal/cursor" "github.com/hookdeck/outpost/internal/logging" "github.com/hookdeck/outpost/internal/logstore" - "github.com/hookdeck/outpost/internal/models" ) type LogHandlers struct { @@ -143,43 +142,47 @@ type EventPaginatedResult struct { Pagination SeekPagination `json:"pagination"` } -// toAPIDelivery converts a DeliveryEvent to APIDelivery with expand options -func toAPIDelivery(de *models.DeliveryEvent, opts IncludeOptions) APIDelivery { +// toAPIDelivery converts a DeliveryRecord to APIDelivery with expand options +func toAPIDelivery(dr *logstore.DeliveryRecord, opts IncludeOptions) APIDelivery { api := APIDelivery{ - Attempt: de.Attempt, - Manual: de.Manual, - Destination: de.DestinationID, + Attempt: dr.Delivery.Attempt, + Manual: dr.Delivery.Manual, + Destination: dr.Delivery.DestinationID, } - if de.Delivery != nil { - api.ID = de.Delivery.ID - api.Status = de.Delivery.Status - api.DeliveredAt = de.Delivery.Time - api.Code = de.Delivery.Code + if dr.Delivery != nil { + api.ID = dr.Delivery.ID + api.Status = dr.Delivery.Status + api.DeliveredAt = dr.Delivery.Time + api.Code = dr.Delivery.Code if opts.ResponseData { - api.ResponseData = de.Delivery.ResponseData + api.ResponseData = dr.Delivery.ResponseData } } - if opts.EventData { - api.Event = APIEventFull{ - ID: de.Event.ID, - Topic: de.Event.Topic, - Time: de.Event.Time, - EligibleForRetry: de.Event.EligibleForRetry, - Metadata: de.Event.Metadata, - Data: de.Event.Data, - } - } else if opts.Event { - api.Event = APIEventSummary{ - ID: de.Event.ID, - Topic: de.Event.Topic, - Time: de.Event.Time, - EligibleForRetry: de.Event.EligibleForRetry, - Metadata: de.Event.Metadata, + if dr.Event != nil { + if opts.EventData { + api.Event = APIEventFull{ + ID: dr.Event.ID, + Topic: dr.Event.Topic, + Time: 
dr.Event.Time, + EligibleForRetry: dr.Event.EligibleForRetry, + Metadata: dr.Event.Metadata, + Data: dr.Event.Data, + } + } else if opts.Event { + api.Event = APIEventSummary{ + ID: dr.Event.ID, + Topic: dr.Event.Topic, + Time: dr.Event.Time, + EligibleForRetry: dr.Event.EligibleForRetry, + Metadata: dr.Event.Metadata, + } + } else { + api.Event = dr.Event.ID } } else { - api.Event = de.Event.ID + api.Event = dr.Delivery.EventID } // TODO: Handle destination expansion @@ -244,7 +247,7 @@ func (h *LogHandlers) listDeliveriesInternal(c *gin.Context, tenantID string) { destinationIDs = []string{destID} } - req := logstore.ListDeliveryEventRequest{ + req := logstore.ListDeliveryRequest{ TenantID: tenantID, EventID: c.Query("event_id"), DestinationIDs: destinationIDs, @@ -262,7 +265,7 @@ func (h *LogHandlers) listDeliveriesInternal(c *gin.Context, tenantID string) { SortOrder: dir, } - response, err := h.logStore.ListDeliveryEvent(c.Request.Context(), req) + response, err := h.logStore.ListDelivery(c.Request.Context(), req) if err != nil { if errors.Is(err, cursor.ErrInvalidCursor) || errors.Is(err, cursor.ErrVersionMismatch) { AbortWithError(c, http.StatusBadRequest, NewErrBadRequest(err)) @@ -328,7 +331,7 @@ func (h *LogHandlers) RetrieveDelivery(c *gin.Context) { } deliveryID := c.Param("deliveryID") - deliveryEvent, err := h.logStore.RetrieveDeliveryEvent(c.Request.Context(), logstore.RetrieveDeliveryEventRequest{ + deliveryRecord, err := h.logStore.RetrieveDelivery(c.Request.Context(), logstore.RetrieveDeliveryRequest{ TenantID: tenant.ID, DeliveryID: deliveryID, }) @@ -336,14 +339,14 @@ func (h *LogHandlers) RetrieveDelivery(c *gin.Context) { AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) return } - if deliveryEvent == nil { + if deliveryRecord == nil { AbortWithError(c, http.StatusNotFound, NewErrNotFound("delivery")) return } includeOpts := parseIncludeOptions(c) - c.JSON(http.StatusOK, toAPIDelivery(deliveryEvent, includeOpts)) + 
c.JSON(http.StatusOK, toAPIDelivery(deliveryRecord, includeOpts)) } // AdminListEvents handles GET /events (admin-only, cross-tenant) diff --git a/internal/apirouter/log_handlers_test.go b/internal/apirouter/log_handlers_test.go index 3bfc69ac..54196fd5 100644 --- a/internal/apirouter/log_handlers_test.go +++ b/internal/apirouter/log_handlers_test.go @@ -80,7 +80,7 @@ func TestListDeliveries(t *testing.T) { Delivery: delivery, } - require.NoError(t, result.logStore.InsertManyDeliveryEvent(context.Background(), []*models.DeliveryEvent{de})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) w := httptest.NewRecorder() req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries", nil) @@ -228,7 +228,7 @@ func TestListDeliveries(t *testing.T) { Delivery: delivery, } - require.NoError(t, result.logStore.InsertManyDeliveryEvent(context.Background(), []*models.DeliveryEvent{de})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) w := httptest.NewRecorder() req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?include=response_data", nil) @@ -351,7 +351,7 @@ func TestRetrieveDelivery(t *testing.T) { Delivery: delivery, } - require.NoError(t, result.logStore.InsertManyDeliveryEvent(context.Background(), []*models.DeliveryEvent{de})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) t.Run("should retrieve delivery by ID", func(t *testing.T) { w := httptest.NewRecorder() @@ -473,7 +473,7 @@ func TestRetrieveEvent(t *testing.T) { Delivery: delivery, } - require.NoError(t, result.logStore.InsertManyDeliveryEvent(context.Background(), []*models.DeliveryEvent{de})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) 
t.Run("should retrieve event by ID", func(t *testing.T) { w := httptest.NewRecorder() @@ -577,7 +577,7 @@ func TestListEvents(t *testing.T) { Delivery: delivery, } - require.NoError(t, result.logStore.InsertManyDeliveryEvent(context.Background(), []*models.DeliveryEvent{de})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) w := httptest.NewRecorder() req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/events", nil) diff --git a/internal/apirouter/retry_handlers.go b/internal/apirouter/retry_handlers.go index 1fa2bd5a..302854d8 100644 --- a/internal/apirouter/retry_handlers.go +++ b/internal/apirouter/retry_handlers.go @@ -44,7 +44,7 @@ func (h *RetryHandlers) RetryDelivery(c *gin.Context) { deliveryID := c.Param("deliveryID") // 1. Look up delivery by ID - deliveryEvent, err := h.logStore.RetrieveDeliveryEvent(c.Request.Context(), logstore.RetrieveDeliveryEventRequest{ + deliveryRecord, err := h.logStore.RetrieveDelivery(c.Request.Context(), logstore.RetrieveDeliveryRequest{ TenantID: tenant.ID, DeliveryID: deliveryID, }) @@ -52,13 +52,13 @@ func (h *RetryHandlers) RetryDelivery(c *gin.Context) { AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) return } - if deliveryEvent == nil { + if deliveryRecord == nil { AbortWithError(c, http.StatusNotFound, NewErrNotFound("delivery")) return } // 2. Check destination exists and is enabled - destination, err := h.entityStore.RetrieveDestination(c.Request.Context(), tenant.ID, deliveryEvent.DestinationID) + destination, err := h.entityStore.RetrieveDestination(c.Request.Context(), tenant.ID, deliveryRecord.Delivery.DestinationID) if err != nil { AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) return @@ -79,7 +79,7 @@ func (h *RetryHandlers) RetryDelivery(c *gin.Context) { } // 3. 
Create and publish retry delivery event - retryDeliveryEvent := models.NewManualDeliveryEvent(deliveryEvent.Event, deliveryEvent.DestinationID) + retryDeliveryEvent := models.NewManualDeliveryEvent(*deliveryRecord.Event, deliveryRecord.Delivery.DestinationID) if err := h.deliveryMQ.Publish(c.Request.Context(), retryDeliveryEvent); err != nil { AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) @@ -88,9 +88,9 @@ func (h *RetryHandlers) RetryDelivery(c *gin.Context) { h.logger.Ctx(c.Request.Context()).Audit("manual retry initiated", zap.String("delivery_id", deliveryID), - zap.String("event_id", deliveryEvent.Event.ID), + zap.String("event_id", deliveryRecord.Event.ID), zap.String("tenant_id", tenant.ID), - zap.String("destination_id", deliveryEvent.DestinationID), + zap.String("destination_id", deliveryRecord.Delivery.DestinationID), zap.String("destination_type", destination.Type)) c.JSON(http.StatusAccepted, gin.H{ diff --git a/internal/apirouter/retry_handlers_test.go b/internal/apirouter/retry_handlers_test.go index 486b1274..6e58e5e2 100644 --- a/internal/apirouter/retry_handlers_test.go +++ b/internal/apirouter/retry_handlers_test.go @@ -65,7 +65,7 @@ func TestRetryDelivery(t *testing.T) { Delivery: delivery, } - require.NoError(t, result.logStore.InsertManyDeliveryEvent(context.Background(), []*models.DeliveryEvent{de})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) t.Run("should retry delivery successfully", func(t *testing.T) { w := httptest.NewRecorder() @@ -135,7 +135,7 @@ func TestRetryDelivery(t *testing.T) { Delivery: disabledDelivery, } - require.NoError(t, result.logStore.InsertManyDeliveryEvent(context.Background(), []*models.DeliveryEvent{disabledDE})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&disabledDE.Event}, []*models.Delivery{disabledDE.Delivery})) w := httptest.NewRecorder() req, _ := 
http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/deliveries/"+disabledDeliveryID+"/retry", nil) diff --git a/internal/deliverymq/mock_test.go b/internal/deliverymq/mock_test.go index 871800fe..ce4be80d 100644 --- a/internal/deliverymq/mock_test.go +++ b/internal/deliverymq/mock_test.go @@ -38,14 +38,13 @@ func (m *mockPublisher) PublishEvent(ctx context.Context, destination *models.De if m.current >= len(m.responses) { m.current++ return &models.Delivery{ - ID: idgen.Delivery(), - DeliveryEventID: idgen.DeliveryEvent(), - EventID: event.ID, - DestinationID: destination.ID, - Status: models.DeliveryStatusSuccess, - Code: "OK", - ResponseData: map[string]interface{}{}, - Time: time.Now(), + ID: idgen.Delivery(), + EventID: event.ID, + DestinationID: destination.ID, + Status: models.DeliveryStatusSuccess, + Code: "OK", + ResponseData: map[string]interface{}{}, + Time: time.Now(), }, nil } @@ -53,25 +52,23 @@ func (m *mockPublisher) PublishEvent(ctx context.Context, destination *models.De m.current++ if resp == nil { return &models.Delivery{ - ID: idgen.Delivery(), - DeliveryEventID: idgen.DeliveryEvent(), - EventID: event.ID, - DestinationID: destination.ID, - Status: models.DeliveryStatusSuccess, - Code: "OK", - ResponseData: map[string]interface{}{}, - Time: time.Now(), + ID: idgen.Delivery(), + EventID: event.ID, + DestinationID: destination.ID, + Status: models.DeliveryStatusSuccess, + Code: "OK", + ResponseData: map[string]interface{}{}, + Time: time.Now(), }, nil } return &models.Delivery{ - ID: idgen.Delivery(), - DeliveryEventID: idgen.DeliveryEvent(), - EventID: event.ID, - DestinationID: destination.ID, - Status: models.DeliveryStatusFailed, - Code: "ERR", - ResponseData: map[string]interface{}{}, - Time: time.Now(), + ID: idgen.Delivery(), + EventID: event.ID, + DestinationID: destination.ID, + Status: models.DeliveryStatusFailed, + Code: "ERR", + ResponseData: map[string]interface{}{}, + Time: time.Now(), }, resp } diff --git 
a/internal/logstore/chlogstore/chlogstore.go b/internal/logstore/chlogstore/chlogstore.go index 1bcdce8e..c55c0e29 100644 --- a/internal/logstore/chlogstore/chlogstore.go +++ b/internal/logstore/chlogstore/chlogstore.go @@ -99,9 +99,9 @@ func (s *logStoreImpl) ListEvent(ctx context.Context, req driver.ListEventReques }, nil } -func buildEventQuery(table string, req driver.ListEventRequest, q pagination.QueryInput) (string, []interface{}) { +func buildEventQuery(table string, req driver.ListEventRequest, q pagination.QueryInput) (string, []any) { var conditions []string - var args []interface{} + var args []any if req.TenantID != "" { conditions = append(conditions, "tenant_id = ?") @@ -201,7 +201,7 @@ func scanEvents(rows clickhouse.Rows) ([]eventWithPosition, error) { } var metadata map[string]string - var data map[string]interface{} + var data map[string]any if metadataStr != "" { if err := json.Unmarshal([]byte(metadataStr), &metadata); err != nil { @@ -236,7 +236,7 @@ func scanEvents(rows clickhouse.Rows) ([]eventWithPosition, error) { return results, nil } -func buildEventCursorCondition(compare, position string) (string, []interface{}) { +func buildEventCursorCondition(compare, position string) (string, []any) { parts := strings.SplitN(position, "::", 2) if len(parts) != 2 { return "1=1", nil // invalid cursor, return always true @@ -252,16 +252,16 @@ func buildEventCursorCondition(compare, position string) (string, []interface{}) OR (event_time = fromUnixTimestamp64Milli(?) AND event_id %s ?) )`, compare, compare) - return condition, []interface{}{eventTimeMs, eventTimeMs, eventID} + return condition, []any{eventTimeMs, eventTimeMs, eventID} } -// deliveryEventWithPosition wraps a delivery event with its cursor position data. -type deliveryEventWithPosition struct { - *models.DeliveryEvent +// deliveryRecordWithPosition wraps a delivery record with its cursor position data. 
+type deliveryRecordWithPosition struct { + *driver.DeliveryRecord deliveryTime time.Time } -func (s *logStoreImpl) ListDeliveryEvent(ctx context.Context, req driver.ListDeliveryEventRequest) (driver.ListDeliveryEventResponse, error) { +func (s *logStoreImpl) ListDelivery(ctx context.Context, req driver.ListDeliveryRequest) (driver.ListDeliveryResponse, error) { sortOrder := req.SortOrder if sortOrder != "asc" && sortOrder != "desc" { sortOrder = "desc" @@ -272,23 +272,23 @@ func (s *logStoreImpl) ListDeliveryEvent(ctx context.Context, req driver.ListDel limit = 100 } - res, err := pagination.Run(ctx, pagination.Config[deliveryEventWithPosition]{ + res, err := pagination.Run(ctx, pagination.Config[deliveryRecordWithPosition]{ Limit: limit, Order: sortOrder, Next: req.Next, Prev: req.Prev, - Fetch: func(ctx context.Context, q pagination.QueryInput) ([]deliveryEventWithPosition, error) { + Fetch: func(ctx context.Context, q pagination.QueryInput) ([]deliveryRecordWithPosition, error) { query, args := buildDeliveryQuery(s.deliveriesTable, req, q) rows, err := s.chDB.Query(ctx, query, args...) 
if err != nil { return nil, fmt.Errorf("query failed: %w", err) } defer rows.Close() - return scanDeliveryEvents(rows) + return scanDeliveryRecords(rows) }, - Cursor: pagination.Cursor[deliveryEventWithPosition]{ - Encode: func(de deliveryEventWithPosition) string { - position := fmt.Sprintf("%d::%s", de.deliveryTime.UnixMilli(), de.Delivery.ID) + Cursor: pagination.Cursor[deliveryRecordWithPosition]{ + Encode: func(dr deliveryRecordWithPosition) string { + position := fmt.Sprintf("%d::%s", dr.deliveryTime.UnixMilli(), dr.Delivery.ID) return cursor.Encode(cursorResourceDelivery, cursorVersion, position) }, Decode: func(c string) (string, error) { @@ -297,25 +297,25 @@ func (s *logStoreImpl) ListDeliveryEvent(ctx context.Context, req driver.ListDel }, }) if err != nil { - return driver.ListDeliveryEventResponse{}, err + return driver.ListDeliveryResponse{}, err } - // Extract delivery events from results - data := make([]*models.DeliveryEvent, len(res.Items)) + // Extract delivery records from results + data := make([]*driver.DeliveryRecord, len(res.Items)) for i, item := range res.Items { - data[i] = item.DeliveryEvent + data[i] = item.DeliveryRecord } - return driver.ListDeliveryEventResponse{ + return driver.ListDeliveryResponse{ Data: data, Next: res.Next, Prev: res.Prev, }, nil } -func buildDeliveryQuery(table string, req driver.ListDeliveryEventRequest, q pagination.QueryInput) (string, []interface{}) { +func buildDeliveryQuery(table string, req driver.ListDeliveryRequest, q pagination.QueryInput) (string, []any) { var conditions []string - var args []interface{} + var args []any if req.TenantID != "" { conditions = append(conditions, "tenant_id = ?") @@ -373,6 +373,7 @@ func buildDeliveryQuery(table string, req driver.ListDeliveryEventRequest, q pag orderByClause := fmt.Sprintf("ORDER BY delivery_time %s, delivery_id %s", strings.ToUpper(q.SortDir), strings.ToUpper(q.SortDir)) + // Note: delivery_event_id is read but we write empty string for backwards compat 
query := fmt.Sprintf(` SELECT event_id, @@ -384,7 +385,6 @@ func buildDeliveryQuery(table string, req driver.ListDeliveryEventRequest, q pag metadata, data, delivery_id, - delivery_event_id, status, delivery_time, code, @@ -400,8 +400,8 @@ func buildDeliveryQuery(table string, req driver.ListDeliveryEventRequest, q pag return query, args } -func scanDeliveryEvents(rows clickhouse.Rows) ([]deliveryEventWithPosition, error) { - var results []deliveryEventWithPosition +func scanDeliveryRecords(rows clickhouse.Rows) ([]deliveryRecordWithPosition, error) { + var results []deliveryRecordWithPosition for rows.Next() { var ( eventID string @@ -413,7 +413,6 @@ func scanDeliveryEvents(rows clickhouse.Rows) ([]deliveryEventWithPosition, erro metadataStr string dataStr string deliveryID string - deliveryEventID string status string deliveryTime time.Time code string @@ -432,7 +431,6 @@ func scanDeliveryEvents(rows clickhouse.Rows) ([]deliveryEventWithPosition, erro &metadataStr, &dataStr, &deliveryID, - &deliveryEventID, &status, &deliveryTime, &code, @@ -445,8 +443,8 @@ func scanDeliveryEvents(rows clickhouse.Rows) ([]deliveryEventWithPosition, erro } var metadata map[string]string - var data map[string]interface{} - var responseData map[string]interface{} + var data map[string]any + var responseData map[string]any if metadataStr != "" { if err := json.Unmarshal([]byte(metadataStr), &metadata); err != nil { @@ -464,31 +462,30 @@ func scanDeliveryEvents(rows clickhouse.Rows) ([]deliveryEventWithPosition, erro } } - results = append(results, deliveryEventWithPosition{ - DeliveryEvent: &models.DeliveryEvent{ - ID: deliveryEventID, - DestinationID: destinationID, - Manual: manual, - Attempt: int(attempt), - Event: models.Event{ - ID: eventID, - TenantID: tenantID, - DestinationID: destinationID, - Topic: topic, - EligibleForRetry: eligibleForRetry, - Time: eventTime, - Data: data, - Metadata: metadata, - }, + results = append(results, deliveryRecordWithPosition{ + DeliveryRecord: 
&driver.DeliveryRecord{ Delivery: &models.Delivery{ ID: deliveryID, + TenantID: tenantID, EventID: eventID, DestinationID: destinationID, + Attempt: int(attempt), + Manual: manual, Status: status, Time: deliveryTime, Code: code, ResponseData: responseData, }, + Event: &models.Event{ + ID: eventID, + TenantID: tenantID, + DestinationID: destinationID, + Topic: topic, + EligibleForRetry: eligibleForRetry, + Time: eventTime, + Data: data, + Metadata: metadata, + }, }, deliveryTime: deliveryTime, }) @@ -503,7 +500,7 @@ func scanDeliveryEvents(rows clickhouse.Rows) ([]deliveryEventWithPosition, erro func (s *logStoreImpl) RetrieveEvent(ctx context.Context, req driver.RetrieveEventRequest) (*models.Event, error) { var conditions []string - var args []interface{} + var args []any if req.TenantID != "" { conditions = append(conditions, "tenant_id = ?") @@ -574,9 +571,9 @@ func (s *logStoreImpl) RetrieveEvent(ctx context.Context, req driver.RetrieveEve return event, nil } -func (s *logStoreImpl) RetrieveDeliveryEvent(ctx context.Context, req driver.RetrieveDeliveryEventRequest) (*models.DeliveryEvent, error) { +func (s *logStoreImpl) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeliveryRequest) (*driver.DeliveryRecord, error) { var conditions []string - var args []interface{} + var args []any if req.TenantID != "" { conditions = append(conditions, "tenant_id = ?") @@ -599,7 +596,6 @@ func (s *logStoreImpl) RetrieveDeliveryEvent(ctx context.Context, req driver.Ret metadata, data, delivery_id, - delivery_event_id, status, delivery_time, code, @@ -630,7 +626,6 @@ func (s *logStoreImpl) RetrieveDeliveryEvent(ctx context.Context, req driver.Ret metadataStr string dataStr string deliveryID string - deliveryEventID string status string deliveryTime time.Time code string @@ -649,7 +644,6 @@ func (s *logStoreImpl) RetrieveDeliveryEvent(ctx context.Context, req driver.Ret &metadataStr, &dataStr, &deliveryID, - &deliveryEventID, &status, &deliveryTime, &code, @@ -662,8 
+656,8 @@ func (s *logStoreImpl) RetrieveDeliveryEvent(ctx context.Context, req driver.Ret } var metadata map[string]string - var data map[string]interface{} - var responseData map[string]interface{} + var data map[string]any + var responseData map[string]any if metadataStr != "" { if err := json.Unmarshal([]byte(metadataStr), &metadata); err != nil { @@ -681,140 +675,144 @@ func (s *logStoreImpl) RetrieveDeliveryEvent(ctx context.Context, req driver.Ret } } - return &models.DeliveryEvent{ - ID: deliveryEventID, - DestinationID: destinationID, - Manual: manual, - Attempt: int(attempt), - Event: models.Event{ - ID: eventID, - TenantID: tenantID, - DestinationID: destinationID, - Topic: topic, - EligibleForRetry: eligibleForRetry, - Time: eventTime, - Data: data, - Metadata: metadata, - }, + return &driver.DeliveryRecord{ Delivery: &models.Delivery{ ID: deliveryID, + TenantID: tenantID, EventID: eventID, DestinationID: destinationID, + Attempt: int(attempt), + Manual: manual, Status: status, Time: deliveryTime, Code: code, ResponseData: responseData, }, + Event: &models.Event{ + ID: eventID, + TenantID: tenantID, + DestinationID: destinationID, + Topic: topic, + EligibleForRetry: eligibleForRetry, + Time: eventTime, + Data: data, + Metadata: metadata, + }, }, nil } -func (s *logStoreImpl) InsertManyDeliveryEvent(ctx context.Context, deliveryEvents []*models.DeliveryEvent) error { - if len(deliveryEvents) == 0 { +func (s *logStoreImpl) InsertMany(ctx context.Context, events []*models.Event, deliveries []*models.Delivery) error { + if len(events) == 0 && len(deliveries) == 0 { return nil } - eventBatch, err := s.chDB.PrepareBatch(ctx, - fmt.Sprintf(`INSERT INTO %s ( - event_id, tenant_id, destination_id, topic, eligible_for_retry, event_time, metadata, data - )`, s.eventsTable), - ) - if err != nil { - return fmt.Errorf("prepare events batch failed: %w", err) - } - - for _, de := range deliveryEvents { - metadataJSON, err := json.Marshal(de.Event.Metadata) - if err != 
nil { - return fmt.Errorf("failed to marshal metadata: %w", err) - } - dataJSON, err := json.Marshal(de.Event.Data) + if len(events) > 0 { + eventBatch, err := s.chDB.PrepareBatch(ctx, + fmt.Sprintf(`INSERT INTO %s ( + event_id, tenant_id, destination_id, topic, eligible_for_retry, event_time, metadata, data + )`, s.eventsTable), + ) if err != nil { - return fmt.Errorf("failed to marshal data: %w", err) + return fmt.Errorf("prepare events batch failed: %w", err) } - if err := eventBatch.Append( - de.Event.ID, - de.Event.TenantID, - de.DestinationID, - de.Event.Topic, - de.Event.EligibleForRetry, - de.Event.Time, - string(metadataJSON), - string(dataJSON), - ); err != nil { - return fmt.Errorf("events batch append failed: %w", err) - } - } + for _, e := range events { + metadataJSON, err := json.Marshal(e.Metadata) + if err != nil { + return fmt.Errorf("failed to marshal metadata: %w", err) + } + dataJSON, err := json.Marshal(e.Data) + if err != nil { + return fmt.Errorf("failed to marshal data: %w", err) + } - if err := eventBatch.Send(); err != nil { - return fmt.Errorf("events batch send failed: %w", err) - } + if err := eventBatch.Append( + e.ID, + e.TenantID, + e.DestinationID, + e.Topic, + e.EligibleForRetry, + e.Time, + string(metadataJSON), + string(dataJSON), + ); err != nil { + return fmt.Errorf("events batch append failed: %w", err) + } + } - deliveryBatch, err := s.chDB.PrepareBatch(ctx, - fmt.Sprintf(`INSERT INTO %s ( - event_id, tenant_id, destination_id, topic, eligible_for_retry, event_time, metadata, data, - delivery_id, delivery_event_id, status, delivery_time, code, response_data, manual, attempt - )`, s.deliveriesTable), - ) - if err != nil { - return fmt.Errorf("prepare deliveries batch failed: %w", err) + if err := eventBatch.Send(); err != nil { + return fmt.Errorf("events batch send failed: %w", err) + } } - for _, de := range deliveryEvents { - metadataJSON, err := json.Marshal(de.Event.Metadata) - if err != nil { - return fmt.Errorf("failed 
to marshal metadata: %w", err) + if len(deliveries) > 0 { + // Build a map of events for looking up event data when inserting deliveries + eventMap := make(map[string]*models.Event) + for _, e := range events { + eventMap[e.ID] = e } - dataJSON, err := json.Marshal(de.Event.Data) + + deliveryBatch, err := s.chDB.PrepareBatch(ctx, + fmt.Sprintf(`INSERT INTO %s ( + event_id, tenant_id, destination_id, topic, eligible_for_retry, event_time, metadata, data, + delivery_id, delivery_event_id, status, delivery_time, code, response_data, manual, attempt + )`, s.deliveriesTable), + ) if err != nil { - return fmt.Errorf("failed to marshal data: %w", err) + return fmt.Errorf("prepare deliveries batch failed: %w", err) } - var deliveryID, status, code string - var deliveryTime time.Time - var responseDataJSON []byte + for _, d := range deliveries { + event := eventMap[d.EventID] + if event == nil { + // If event not in current batch, use delivery's data as fallback + event = &models.Event{ + ID: d.EventID, + TenantID: d.TenantID, + DestinationID: d.DestinationID, + } + } - if de.Delivery != nil { - deliveryID = de.Delivery.ID - status = de.Delivery.Status - deliveryTime = de.Delivery.Time - code = de.Delivery.Code - responseDataJSON, err = json.Marshal(de.Delivery.ResponseData) + metadataJSON, err := json.Marshal(event.Metadata) + if err != nil { + return fmt.Errorf("failed to marshal metadata: %w", err) + } + dataJSON, err := json.Marshal(event.Data) + if err != nil { + return fmt.Errorf("failed to marshal data: %w", err) + } + responseDataJSON, err := json.Marshal(d.ResponseData) if err != nil { return fmt.Errorf("failed to marshal response_data: %w", err) } - } else { - deliveryID = de.ID - status = "pending" - deliveryTime = de.Event.Time - code = "" - responseDataJSON = []byte("{}") - } - if err := deliveryBatch.Append( - de.Event.ID, - de.Event.TenantID, - de.DestinationID, - de.Event.Topic, - de.Event.EligibleForRetry, - de.Event.Time, - string(metadataJSON), - 
string(dataJSON), - deliveryID, - de.ID, - status, - deliveryTime, - code, - string(responseDataJSON), - de.Manual, - uint32(de.Attempt), - ); err != nil { - return fmt.Errorf("deliveries batch append failed: %w", err) + // Write empty string to delivery_event_id for backwards compat (column still exists) + // Use event.TenantID for tenant_id since it's denormalized from the event + if err := deliveryBatch.Append( + d.EventID, + event.TenantID, // Use event's TenantID, not delivery's + d.DestinationID, + event.Topic, + event.EligibleForRetry, + event.Time, + string(metadataJSON), + string(dataJSON), + d.ID, + "", // delivery_event_id - empty for backwards compat + d.Status, + d.Time, + d.Code, + string(responseDataJSON), + d.Manual, + uint32(d.Attempt), + ); err != nil { + return fmt.Errorf("deliveries batch append failed: %w", err) + } } - } - if err := deliveryBatch.Send(); err != nil { - return fmt.Errorf("deliveries batch send failed: %w", err) + if err := deliveryBatch.Send(); err != nil { + return fmt.Errorf("deliveries batch send failed: %w", err) + } } return nil @@ -824,7 +822,7 @@ func parseTimestampMs(s string) (int64, error) { return strconv.ParseInt(s, 10, 64) } -func buildDeliveryCursorCondition(compare, position string) (string, []interface{}) { +func buildDeliveryCursorCondition(compare, position string) (string, []any) { parts := strings.SplitN(position, "::", 2) if len(parts) != 2 { return "1=1", nil @@ -840,5 +838,5 @@ func buildDeliveryCursorCondition(compare, position string) (string, []interface OR (delivery_time = fromUnixTimestamp64Milli(?) AND delivery_id %s ?) 
)`, compare, compare) - return condition, []interface{}{deliveryTimeMs, deliveryTimeMs, deliveryID} + return condition, []any{deliveryTimeMs, deliveryTimeMs, deliveryID} } diff --git a/internal/logstore/driver/driver.go b/internal/logstore/driver/driver.go index fe7cf872..33b9be8b 100644 --- a/internal/logstore/driver/driver.go +++ b/internal/logstore/driver/driver.go @@ -18,10 +18,10 @@ type TimeFilter struct { type LogStore interface { ListEvent(context.Context, ListEventRequest) (ListEventResponse, error) - ListDeliveryEvent(context.Context, ListDeliveryEventRequest) (ListDeliveryEventResponse, error) + ListDelivery(context.Context, ListDeliveryRequest) (ListDeliveryResponse, error) RetrieveEvent(ctx context.Context, request RetrieveEventRequest) (*models.Event, error) - RetrieveDeliveryEvent(ctx context.Context, request RetrieveDeliveryEventRequest) (*models.DeliveryEvent, error) - InsertManyDeliveryEvent(context.Context, []*models.DeliveryEvent) error + RetrieveDelivery(ctx context.Context, request RetrieveDeliveryRequest) (*DeliveryRecord, error) + InsertMany(context.Context, []*models.Event, []*models.Delivery) error } type ListEventRequest struct { @@ -41,7 +41,7 @@ type ListEventResponse struct { Prev string } -type ListDeliveryEventRequest struct { +type ListDeliveryRequest struct { Next string Prev string Limit int @@ -54,8 +54,8 @@ type ListDeliveryEventRequest struct { SortOrder string // optional: "asc", "desc" (default: "desc") } -type ListDeliveryEventResponse struct { - Data []*models.DeliveryEvent +type ListDeliveryResponse struct { + Data []*DeliveryRecord Next string Prev string } @@ -66,7 +66,13 @@ type RetrieveEventRequest struct { DestinationID string // optional - if provided, scopes to that destination } -type RetrieveDeliveryEventRequest struct { +type RetrieveDeliveryRequest struct { TenantID string // optional - filter by tenant (if empty, searches all tenants) DeliveryID string // required } + +// DeliveryRecord represents a delivery 
query result with optional Event population. +type DeliveryRecord struct { + Delivery *models.Delivery + Event *models.Event // optionally populated for query results +} diff --git a/internal/logstore/drivertest/crud.go b/internal/logstore/drivertest/crud.go index 45083c55..071656ec 100644 --- a/internal/logstore/drivertest/crud.go +++ b/internal/logstore/drivertest/crud.go @@ -33,13 +33,13 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { startTime := baseTime.Add(-48 * time.Hour) // We'll populate these as we insert - var allDeliveryEvents []*models.DeliveryEvent + var allDeliveries []*models.Delivery destinationEvents := map[string][]*models.Event{} topicEvents := map[string][]*models.Event{} - statusDeliveryEvents := map[string][]*models.DeliveryEvent{} + statusDeliveries := map[string][]*models.Delivery{} t.Run("insert and verify", func(t *testing.T) { - t.Run("single delivery event", func(t *testing.T) { + t.Run("single delivery", func(t *testing.T) { destID := destinationIDs[0] topic := testutil.TestTopics[0] event := testutil.EventFactory.AnyPointer( @@ -51,29 +51,24 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { ) delivery := testutil.DeliveryFactory.AnyPointer( testutil.DeliveryFactory.WithID("single_del"), + testutil.DeliveryFactory.WithTenantID(tenantID), testutil.DeliveryFactory.WithEventID(event.ID), testutil.DeliveryFactory.WithDestinationID(destID), testutil.DeliveryFactory.WithStatus("success"), testutil.DeliveryFactory.WithTime(baseTime.Add(-30*time.Minute)), ) - de := &models.DeliveryEvent{ - ID: "single_de", - DestinationID: destID, - Event: *event, - Delivery: delivery, - } - err := logStore.InsertManyDeliveryEvent(ctx, []*models.DeliveryEvent{de}) + err := logStore.InsertMany(ctx, []*models.Event{event}, []*models.Delivery{delivery}) require.NoError(t, err) require.NoError(t, h.FlushWrites(ctx)) // Track in maps for later filter tests destinationEvents[destID] = append(destinationEvents[destID], event) topicEvents[topic] = 
append(topicEvents[topic], event) - statusDeliveryEvents["success"] = append(statusDeliveryEvents["success"], de) + statusDeliveries["success"] = append(statusDeliveries["success"], delivery) // Verify via List - response, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, EventID: event.ID, Limit: 10, @@ -94,8 +89,11 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { assert.Equal(t, event.ID, retrieved.ID) }) - t.Run("batch delivery events", func(t *testing.T) { + t.Run("batch deliveries", func(t *testing.T) { // Create 15 events spread across destinations and topics for filter testing + var events []*models.Event + var deliveries []*models.Delivery + for i := range 15 { destID := destinationIDs[i%len(destinationIDs)] topic := testutil.TestTopics[i%len(testutil.TestTopics)] @@ -114,30 +112,27 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { ) delivery := testutil.DeliveryFactory.AnyPointer( testutil.DeliveryFactory.WithID(fmt.Sprintf("batch_del_%02d", i)), + testutil.DeliveryFactory.WithTenantID(tenantID), testutil.DeliveryFactory.WithEventID(event.ID), testutil.DeliveryFactory.WithDestinationID(destID), testutil.DeliveryFactory.WithStatus(status), testutil.DeliveryFactory.WithTime(eventTime.Add(time.Millisecond)), ) - de := &models.DeliveryEvent{ - ID: fmt.Sprintf("batch_de_%02d", i), - DestinationID: destID, - Event: *event, - Delivery: delivery, - } - allDeliveryEvents = append(allDeliveryEvents, de) + events = append(events, event) + deliveries = append(deliveries, delivery) + allDeliveries = append(allDeliveries, delivery) destinationEvents[destID] = append(destinationEvents[destID], event) topicEvents[topic] = append(topicEvents[topic], event) - statusDeliveryEvents[status] = append(statusDeliveryEvents[status], de) + statusDeliveries[status] = append(statusDeliveries[status], delivery) } - err := 
logStore.InsertManyDeliveryEvent(ctx, allDeliveryEvents) + err := logStore.InsertMany(ctx, events, deliveries) require.NoError(t, err) require.NoError(t, h.FlushWrites(ctx)) // Verify all inserted - response, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Limit: 100, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -148,7 +143,7 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { }) t.Run("empty batch is no-op", func(t *testing.T) { - err := logStore.InsertManyDeliveryEvent(ctx, []*models.DeliveryEvent{}) + err := logStore.InsertMany(ctx, []*models.Event{}, []*models.Delivery{}) require.NoError(t, err) }) }) @@ -210,50 +205,50 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { } }) - t.Run("ListDeliveryEvent by destination", func(t *testing.T) { + t.Run("ListDelivery by destination", func(t *testing.T) { destID := destinationIDs[0] - response, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, DestinationIDs: []string{destID}, Limit: 100, TimeFilter: driver.TimeFilter{GTE: &startTime}, }) require.NoError(t, err) - for _, de := range response.Data { - assert.Equal(t, destID, de.DestinationID) + for _, dr := range response.Data { + assert.Equal(t, destID, dr.Delivery.DestinationID) } }) - t.Run("ListDeliveryEvent by status", func(t *testing.T) { - response, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + t.Run("ListDelivery by status", func(t *testing.T) { + response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Status: "success", Limit: 100, TimeFilter: driver.TimeFilter{GTE: &startTime}, }) require.NoError(t, err) - for _, de := range response.Data { - assert.Equal(t, "success", de.Delivery.Status) + for _, dr := range response.Data { + assert.Equal(t, 
"success", dr.Delivery.Status) } }) - t.Run("ListDeliveryEvent by topic", func(t *testing.T) { + t.Run("ListDelivery by topic", func(t *testing.T) { topic := testutil.TestTopics[0] - response, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Topics: []string{topic}, Limit: 100, TimeFilter: driver.TimeFilter{GTE: &startTime}, }) require.NoError(t, err) - for _, de := range response.Data { - assert.Equal(t, topic, de.Event.Topic) + for _, dr := range response.Data { + assert.Equal(t, topic, dr.Event.Topic) } }) - t.Run("ListDeliveryEvent by event ID", func(t *testing.T) { + t.Run("ListDelivery by event ID", func(t *testing.T) { eventID := "batch_evt_00" - response, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, EventID: eventID, Limit: 100, @@ -310,8 +305,8 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { assert.Nil(t, retrieved) }) - t.Run("RetrieveDeliveryEvent existing", func(t *testing.T) { - retrieved, err := logStore.RetrieveDeliveryEvent(ctx, driver.RetrieveDeliveryEventRequest{ + t.Run("RetrieveDelivery existing", func(t *testing.T) { + retrieved, err := logStore.RetrieveDelivery(ctx, driver.RetrieveDeliveryRequest{ TenantID: tenantID, DeliveryID: knownDeliveryID, }) @@ -320,8 +315,8 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { assert.Equal(t, knownDeliveryID, retrieved.Delivery.ID) }) - t.Run("RetrieveDeliveryEvent non-existent returns nil", func(t *testing.T) { - retrieved, err := logStore.RetrieveDeliveryEvent(ctx, driver.RetrieveDeliveryEventRequest{ + t.Run("RetrieveDelivery non-existent returns nil", func(t *testing.T) { + retrieved, err := logStore.RetrieveDelivery(ctx, driver.RetrieveDeliveryRequest{ TenantID: tenantID, DeliveryID: "non-existent-delivery", }) @@ -329,8 +324,8 @@ func testCRUD(t 
*testing.T, newHarness HarnessMaker) { assert.Nil(t, retrieved) }) - t.Run("RetrieveDeliveryEvent wrong tenant returns nil", func(t *testing.T) { - retrieved, err := logStore.RetrieveDeliveryEvent(ctx, driver.RetrieveDeliveryEventRequest{ + t.Run("RetrieveDelivery wrong tenant returns nil", func(t *testing.T) { + retrieved, err := logStore.RetrieveDelivery(ctx, driver.RetrieveDeliveryRequest{ TenantID: "wrong-tenant", DeliveryID: knownDeliveryID, }) diff --git a/internal/logstore/drivertest/misc.go b/internal/logstore/drivertest/misc.go index dc594ef6..09bcf496 100644 --- a/internal/logstore/drivertest/misc.go +++ b/internal/logstore/drivertest/misc.go @@ -77,15 +77,12 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.DeliveryFactory.WithTime(baseTime.Add(-5*time.Minute)), ) - require.NoError(t, logStore.InsertManyDeliveryEvent(ctx, []*models.DeliveryEvent{ - {ID: idgen.DeliveryEvent(), DestinationID: destinationID, Event: *event1, Delivery: delivery1}, - {ID: idgen.DeliveryEvent(), DestinationID: destinationID, Event: *event2, Delivery: delivery2}, - })) + require.NoError(t, logStore.InsertMany(ctx, []*models.Event{event1, event2}, []*models.Delivery{delivery1, delivery2})) require.NoError(t, h.FlushWrites(ctx)) t.Run("TenantIsolation", func(t *testing.T) { - t.Run("ListDeliveryEvent isolates by tenant", func(t *testing.T) { - response, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + t.Run("ListDelivery isolates by tenant", func(t *testing.T) { + response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenant1ID, Limit: 100, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -94,7 +91,7 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, require.Len(t, response.Data, 1) assert.Equal(t, "tenant1-event", response.Data[0].Event.ID) - response, err = logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + response, err = 
logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenant2ID, Limit: 100, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -140,8 +137,8 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, assert.True(t, tenantsSeen[tenant2ID]) }) - t.Run("ListDeliveryEvent returns all tenants when TenantID empty", func(t *testing.T) { - response, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + t.Run("ListDelivery returns all tenants when TenantID empty", func(t *testing.T) { + response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: "", DestinationIDs: []string{destinationID}, Limit: 100, @@ -169,8 +166,8 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, assert.Equal(t, tenant2ID, retrieved2.TenantID) }) - t.Run("RetrieveDeliveryEvent finds delivery across tenants when TenantID empty", func(t *testing.T) { - retrieved1, err := logStore.RetrieveDeliveryEvent(ctx, driver.RetrieveDeliveryEventRequest{ + t.Run("RetrieveDelivery finds delivery across tenants when TenantID empty", func(t *testing.T) { + retrieved1, err := logStore.RetrieveDelivery(ctx, driver.RetrieveDeliveryRequest{ TenantID: "", DeliveryID: "tenant1-delivery", }) @@ -178,7 +175,7 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, require.NotNil(t, retrieved1) assert.Equal(t, tenant1ID, retrieved1.Event.TenantID) - retrieved2, err := logStore.RetrieveDeliveryEvent(ctx, driver.RetrieveDeliveryEventRequest{ + retrieved2, err := logStore.RetrieveDelivery(ctx, driver.RetrieveDeliveryRequest{ TenantID: "", DeliveryID: "tenant2-delivery", }) @@ -195,7 +192,8 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, destinationID := idgen.Destination() baseTime := time.Now().Truncate(time.Second) - var deliveryEvents []*models.DeliveryEvent + var events []*models.Event + var deliveries []*models.Delivery for i := range 3 { event := 
testutil.EventFactory.AnyPointer( testutil.EventFactory.WithID(fmt.Sprintf("sort_evt_%d", i)), @@ -205,23 +203,20 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, ) delivery := testutil.DeliveryFactory.AnyPointer( testutil.DeliveryFactory.WithID(fmt.Sprintf("sort_del_%d", i)), + testutil.DeliveryFactory.WithTenantID(tenantID), testutil.DeliveryFactory.WithEventID(event.ID), testutil.DeliveryFactory.WithDestinationID(destinationID), testutil.DeliveryFactory.WithTime(baseTime.Add(-time.Duration(i)*time.Hour)), ) - deliveryEvents = append(deliveryEvents, &models.DeliveryEvent{ - ID: fmt.Sprintf("sort_de_%d", i), - DestinationID: destinationID, - Event: *event, - Delivery: delivery, - }) + events = append(events, event) + deliveries = append(deliveries, delivery) } - require.NoError(t, logStore.InsertManyDeliveryEvent(ctx, deliveryEvents)) + require.NoError(t, logStore.InsertMany(ctx, events, deliveries)) startTime := baseTime.Add(-48 * time.Hour) t.Run("invalid SortOrder uses default (desc)", func(t *testing.T) { - response, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, SortOrder: "sideways", TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -245,15 +240,14 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.EventFactory.WithTopic("test.topic"), ) delivery := testutil.DeliveryFactory.AnyPointer( + testutil.DeliveryFactory.WithTenantID(tenantID), testutil.DeliveryFactory.WithEventID(event.ID), testutil.DeliveryFactory.WithDestinationID(destinationID), ) - require.NoError(t, logStore.InsertManyDeliveryEvent(ctx, []*models.DeliveryEvent{ - {ID: idgen.DeliveryEvent(), DestinationID: destinationID, Event: *event, Delivery: delivery}, - })) + require.NoError(t, logStore.InsertMany(ctx, []*models.Event{event}, []*models.Delivery{delivery})) t.Run("nil DestinationIDs equals empty 
DestinationIDs", func(t *testing.T) { - responseNil, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + responseNil, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, DestinationIDs: nil, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -261,7 +255,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, }) require.NoError(t, err) - responseEmpty, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + responseEmpty, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, DestinationIDs: []string{}, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -303,17 +297,16 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, for _, evt := range []*models.Event{eventBefore, eventAt, eventAfter} { delivery := testutil.DeliveryFactory.AnyPointer( testutil.DeliveryFactory.WithID(fmt.Sprintf("del_%s", evt.ID)), + testutil.DeliveryFactory.WithTenantID(tenantID), testutil.DeliveryFactory.WithEventID(evt.ID), testutil.DeliveryFactory.WithDestinationID(destinationID), testutil.DeliveryFactory.WithTime(evt.Time), ) - require.NoError(t, logStore.InsertManyDeliveryEvent(ctx, []*models.DeliveryEvent{ - {ID: idgen.DeliveryEvent(), DestinationID: destinationID, Event: *evt, Delivery: delivery}, - })) + require.NoError(t, logStore.InsertMany(ctx, []*models.Event{evt}, []*models.Delivery{delivery})) } t.Run("GTE is inclusive (>=)", func(t *testing.T) { - response, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, TimeFilter: driver.TimeFilter{GTE: &boundaryTime}, Limit: 10, @@ -324,7 +317,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, t.Run("LTE is inclusive (<=)", func(t *testing.T) { farPast := boundaryTime.Add(-1 * time.Hour) - response, err := logStore.ListDeliveryEvent(ctx, 
driver.ListDeliveryEventRequest{ + response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, TimeFilter: driver.TimeFilter{GTE: &farPast, LTE: &boundaryTime}, Limit: 10, @@ -344,15 +337,14 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.EventFactory.WithDestinationID(destinationID), ) delivery := testutil.DeliveryFactory.AnyPointer( + testutil.DeliveryFactory.WithTenantID(tenantID), testutil.DeliveryFactory.WithEventID(event.ID), testutil.DeliveryFactory.WithDestinationID(destinationID), ) - require.NoError(t, logStore.InsertManyDeliveryEvent(ctx, []*models.DeliveryEvent{ - {ID: idgen.DeliveryEvent(), DestinationID: destinationID, Event: *event, Delivery: delivery}, - })) + require.NoError(t, logStore.InsertMany(ctx, []*models.Event{event}, []*models.Delivery{delivery})) - t.Run("modifying ListDeliveryEvent result doesn't affect subsequent queries", func(t *testing.T) { - response1, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + t.Run("modifying ListDelivery result doesn't affect subsequent queries", func(t *testing.T) { + response1, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Limit: 10, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -363,7 +355,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, originalID := response1.Data[0].Event.ID response1.Data[0].Event.ID = "MODIFIED" - response2, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + response2, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Limit: 10, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -387,18 +379,14 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.EventFactory.WithTime(eventTime), ) delivery := testutil.DeliveryFactory.AnyPointer( + testutil.DeliveryFactory.WithTenantID(tenantID), 
testutil.DeliveryFactory.WithEventID(event.ID), testutil.DeliveryFactory.WithDestinationID(destinationID), testutil.DeliveryFactory.WithStatus("success"), testutil.DeliveryFactory.WithTime(deliveryTime), ) - de := &models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), - DestinationID: destinationID, - Event: *event, - Delivery: delivery, - } - batch := []*models.DeliveryEvent{de} + eventBatch := []*models.Event{event} + deliveryBatch := []*models.Delivery{delivery} // Race N goroutines all inserting the same record const numGoroutines = 10 @@ -407,14 +395,14 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, wg.Add(1) go func() { defer wg.Done() - _ = logStore.InsertManyDeliveryEvent(ctx, batch) + _ = logStore.InsertMany(ctx, eventBatch, deliveryBatch) }() } wg.Wait() require.NoError(t, h.FlushWrites(ctx)) // Assert: still exactly 1 record - response, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Limit: 100, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -439,7 +427,7 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - _, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + _, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, SortOrder: "desc", Next: tc.cursor, @@ -467,18 +455,17 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log ) delivery := testutil.DeliveryFactory.AnyPointer( testutil.DeliveryFactory.WithID(fmt.Sprintf("cursor_del_%d", i)), + testutil.DeliveryFactory.WithTenantID(tenantID), testutil.DeliveryFactory.WithEventID(event.ID), testutil.DeliveryFactory.WithDestinationID(destinationID), testutil.DeliveryFactory.WithTime(baseTime.Add(time.Duration(i)*time.Second)), ) - require.NoError(t, 
logStore.InsertManyDeliveryEvent(ctx, []*models.DeliveryEvent{ - {ID: fmt.Sprintf("cursor_de_%d", i), DestinationID: destinationID, Event: *event, Delivery: delivery}, - })) + require.NoError(t, logStore.InsertMany(ctx, []*models.Event{event}, []*models.Delivery{delivery})) } require.NoError(t, h.FlushWrites(ctx)) t.Run("delivery_time desc", func(t *testing.T) { - page1, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + page1, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, SortOrder: "desc", TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -487,7 +474,7 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log require.NoError(t, err) require.NotEmpty(t, page1.Next) - page2, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + page2, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, SortOrder: "desc", Next: page1.Next, @@ -499,7 +486,7 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log }) t.Run("delivery_time asc", func(t *testing.T) { - page1, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + page1, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, SortOrder: "asc", TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -508,7 +495,7 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log require.NoError(t, err) require.NotEmpty(t, page1.Next) - page2, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + page2, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, SortOrder: "asc", Next: page1.Next, diff --git a/internal/logstore/drivertest/pagination.go b/internal/logstore/drivertest/pagination.go index 6a48877c..695dca74 100644 --- a/internal/logstore/drivertest/pagination.go +++ b/internal/logstore/drivertest/pagination.go @@ -28,11 +28,11 @@ func testPagination(t 
*testing.T, newHarness HarnessMaker) { baseTime := time.Now().Truncate(time.Second) farPast := baseTime.Add(-48 * time.Hour) - t.Run("ListDeliveryEvent", func(t *testing.T) { + t.Run("ListDelivery", func(t *testing.T) { var tenantID, destinationID, idPrefix string - suite := paginationtest.Suite[*models.DeliveryEvent]{ - Name: "ListDeliveryEvent", + suite := paginationtest.Suite[*driver.DeliveryRecord]{ + Name: "ListDelivery", Cleanup: func(ctx context.Context) error { tenantID = idgen.String() @@ -41,7 +41,7 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { return nil }, - NewItem: func(i int) *models.DeliveryEvent { + NewItem: func(i int) *driver.DeliveryRecord { eventTime := baseTime.Add(time.Duration(i) * time.Second) deliveryTime := eventTime.Add(100 * time.Millisecond) @@ -58,6 +58,7 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { delivery := &models.Delivery{ ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), + TenantID: tenantID, EventID: event.ID, DestinationID: destinationID, Status: "success", @@ -65,20 +66,24 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { Code: "200", } - return &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_de_%03d", idPrefix, i), - DestinationID: destinationID, - Event: *event, - Delivery: delivery, + return &driver.DeliveryRecord{ + Event: event, + Delivery: delivery, } }, - InsertMany: func(ctx context.Context, items []*models.DeliveryEvent) error { - return logStore.InsertManyDeliveryEvent(ctx, items) + InsertMany: func(ctx context.Context, items []*driver.DeliveryRecord) error { + events := make([]*models.Event, len(items)) + deliveries := make([]*models.Delivery, len(items)) + for i, dr := range items { + events[i] = dr.Event + deliveries[i] = dr.Delivery + } + return logStore.InsertMany(ctx, events, deliveries) }, - List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*models.DeliveryEvent], error) { - res, err := logStore.ListDeliveryEvent(ctx, 
driver.ListDeliveryEventRequest{ + List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*driver.DeliveryRecord], error) { + res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Limit: opts.Limit, SortOrder: opts.Order, @@ -87,17 +92,17 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { TimeFilter: driver.TimeFilter{GTE: &farPast}, }) if err != nil { - return paginationtest.ListResult[*models.DeliveryEvent]{}, err + return paginationtest.ListResult[*driver.DeliveryRecord]{}, err } - return paginationtest.ListResult[*models.DeliveryEvent]{ + return paginationtest.ListResult[*driver.DeliveryRecord]{ Items: res.Data, Next: res.Next, Prev: res.Prev, }, nil }, - GetID: func(de *models.DeliveryEvent) string { - return de.Delivery.ID + GetID: func(dr *driver.DeliveryRecord) string { + return dr.Delivery.ID }, AfterInsert: func(ctx context.Context) error { @@ -108,11 +113,11 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { suite.Run(t) }) - t.Run("ListDeliveryEvent_WithDestinationFilter", func(t *testing.T) { + t.Run("ListDelivery_WithDestinationFilter", func(t *testing.T) { var tenantID, targetDestID, otherDestID, idPrefix string - suite := paginationtest.Suite[*models.DeliveryEvent]{ - Name: "ListDeliveryEvent_WithDestinationFilter", + suite := paginationtest.Suite[*driver.DeliveryRecord]{ + Name: "ListDelivery_WithDestinationFilter", Cleanup: func(ctx context.Context) error { tenantID = idgen.String() @@ -122,7 +127,7 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { return nil }, - NewItem: func(i int) *models.DeliveryEvent { + NewItem: func(i int) *driver.DeliveryRecord { eventTime := baseTime.Add(time.Duration(i) * time.Second) deliveryTime := eventTime.Add(100 * time.Millisecond) @@ -144,6 +149,7 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { delivery := &models.Delivery{ ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), + TenantID: tenantID, 
EventID: event.ID, DestinationID: destID, Status: "success", @@ -151,20 +157,24 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { Code: "200", } - return &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_de_%03d", idPrefix, i), - DestinationID: destID, - Event: *event, - Delivery: delivery, + return &driver.DeliveryRecord{ + Event: event, + Delivery: delivery, } }, - InsertMany: func(ctx context.Context, items []*models.DeliveryEvent) error { - return logStore.InsertManyDeliveryEvent(ctx, items) + InsertMany: func(ctx context.Context, items []*driver.DeliveryRecord) error { + events := make([]*models.Event, len(items)) + deliveries := make([]*models.Delivery, len(items)) + for i, dr := range items { + events[i] = dr.Event + deliveries[i] = dr.Delivery + } + return logStore.InsertMany(ctx, events, deliveries) }, - List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*models.DeliveryEvent], error) { - res, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*driver.DeliveryRecord], error) { + res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, DestinationIDs: []string{targetDestID}, Limit: opts.Limit, @@ -174,21 +184,21 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { TimeFilter: driver.TimeFilter{GTE: &farPast}, }) if err != nil { - return paginationtest.ListResult[*models.DeliveryEvent]{}, err + return paginationtest.ListResult[*driver.DeliveryRecord]{}, err } - return paginationtest.ListResult[*models.DeliveryEvent]{ + return paginationtest.ListResult[*driver.DeliveryRecord]{ Items: res.Data, Next: res.Next, Prev: res.Prev, }, nil }, - GetID: func(de *models.DeliveryEvent) string { - return de.Delivery.ID + GetID: func(dr *driver.DeliveryRecord) string { + return dr.Delivery.ID }, - Matches: func(de *models.DeliveryEvent) bool { - return de.DestinationID == 
targetDestID + Matches: func(dr *driver.DeliveryRecord) bool { + return dr.Delivery.DestinationID == targetDestID }, AfterInsert: func(ctx context.Context) error { @@ -228,24 +238,20 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { }, InsertMany: func(ctx context.Context, items []*models.Event) error { - des := make([]*models.DeliveryEvent, len(items)) + deliveries := make([]*models.Delivery, len(items)) for i, evt := range items { deliveryTime := evt.Time.Add(100 * time.Millisecond) - des[i] = &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_de_%03d", idPrefix, i), + deliveries[i] = &models.Delivery{ + ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), + TenantID: evt.TenantID, + EventID: evt.ID, DestinationID: evt.DestinationID, - Event: *evt, - Delivery: &models.Delivery{ - ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), - EventID: evt.ID, - DestinationID: evt.DestinationID, - Status: "success", - Time: deliveryTime, - Code: "200", - }, + Status: "success", + Time: deliveryTime, + Code: "200", } } - return logStore.InsertManyDeliveryEvent(ctx, des) + return logStore.InsertMany(ctx, items, deliveries) }, List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*models.Event], error) { @@ -314,24 +320,20 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { }, InsertMany: func(ctx context.Context, items []*models.Event) error { - des := make([]*models.DeliveryEvent, len(items)) + deliveries := make([]*models.Delivery, len(items)) for i, evt := range items { deliveryTime := evt.Time.Add(100 * time.Millisecond) - des[i] = &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_de_%03d", idPrefix, i), + deliveries[i] = &models.Delivery{ + ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), + TenantID: evt.TenantID, + EventID: evt.ID, DestinationID: evt.DestinationID, - Event: *evt, - Delivery: &models.Delivery{ - ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), - EventID: evt.ID, - DestinationID: evt.DestinationID, - Status: "success", - Time: 
deliveryTime, - Code: "200", - }, + Status: "success", + Time: deliveryTime, + Code: "200", } } - return logStore.InsertManyDeliveryEvent(ctx, des) + return logStore.InsertMany(ctx, items, deliveries) }, List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*models.Event], error) { @@ -375,7 +377,7 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { // time-based filters (GTE, LTE, GT, LT), which is critical for // "paginate within a time window" use cases. // - // IMPORTANT: ListDeliveryEvent filters by DELIVERY time, ListEvent filters by EVENT time. + // IMPORTANT: ListDelivery filters by DELIVERY time, ListEvent filters by EVENT time. // In this test, delivery_time = event_time + 100ms. t.Run("TimeFilterWithCursor", func(t *testing.T) { tenantID := idgen.String() @@ -396,7 +398,9 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { deliveryWindowStart := eventWindowStart.Add(time.Second) deliveryWindowEnd := eventWindowEnd.Add(time.Second) - var allEvents []*models.DeliveryEvent + var allRecords []*driver.DeliveryRecord + var allEvents []*models.Event + var allDeliveries []*models.Delivery for i := range 20 { var eventTime time.Time switch { @@ -426,33 +430,34 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { } delivery := &models.Delivery{ ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), + TenantID: tenantID, EventID: event.ID, DestinationID: destinationID, Status: "success", Time: deliveryTime, Code: "200", } - allEvents = append(allEvents, &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_de_%03d", idPrefix, i), - DestinationID: destinationID, - Event: *event, - Delivery: delivery, + allRecords = append(allRecords, &driver.DeliveryRecord{ + Event: event, + Delivery: delivery, }) + allEvents = append(allEvents, event) + allDeliveries = append(allDeliveries, delivery) } - require.NoError(t, logStore.InsertManyDeliveryEvent(ctx, allEvents)) + require.NoError(t, logStore.InsertMany(ctx, allEvents, 
allDeliveries)) require.NoError(t, h.FlushWrites(ctx)) t.Run("paginate within time-bounded window", func(t *testing.T) { // Paginate through deliveries within the window with limit=3 - // ListDeliveryEvent filters by DELIVERY time, not event time. + // ListDelivery filters by DELIVERY time, not event time. // Should only see deliveries 5-14 (10 total), not 0-4 or 15-19 var collectedIDs []string var nextCursor string pageCount := 0 for { - res, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Limit: 3, SortOrder: "asc", @@ -461,8 +466,8 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { }) require.NoError(t, err) - for _, de := range res.Data { - collectedIDs = append(collectedIDs, de.Event.ID) + for _, dr := range res.Data { + collectedIDs = append(collectedIDs, dr.Event.ID) } pageCount++ @@ -488,7 +493,7 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { t.Run("cursor excludes deliveries outside time filter", func(t *testing.T) { // First page with no time filter gets all deliveries - resAll, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + resAll, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Limit: 5, SortOrder: "asc", @@ -500,7 +505,7 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { // Use the cursor but add a time filter that excludes some results // The cursor points to position after delivery 4 (far past deliveries) // But with deliveryWindowStart filter, we should start from delivery 5 - res, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Limit: 5, SortOrder: "asc", @@ -510,23 +515,23 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { require.NoError(t, err) // Results should respect the time filter (on delivery time) - 
for _, de := range res.Data { - require.True(t, !de.Delivery.Time.Before(deliveryWindowStart), "delivery time should be >= deliveryWindowStart") - require.True(t, !de.Delivery.Time.After(deliveryWindowEnd), "delivery time should be <= deliveryWindowEnd") + for _, dr := range res.Data { + require.True(t, !dr.Delivery.Time.Before(deliveryWindowStart), "delivery time should be >= deliveryWindowStart") + require.True(t, !dr.Delivery.Time.After(deliveryWindowEnd), "delivery time should be <= deliveryWindowEnd") } }) t.Run("delivery time filter with GT/LT operators", func(t *testing.T) { // Test exclusive bounds (GT/LT instead of GTE/LTE) on delivery time // Use delivery times slightly after delivery 5 and slightly before delivery 14 - gtTime := allEvents[5].Delivery.Time.Add(time.Second) // After delivery 5, before delivery 6 - ltTime := allEvents[14].Delivery.Time.Add(-time.Second) // Before delivery 14, after delivery 13 + gtTime := allRecords[5].Delivery.Time.Add(time.Second) // After delivery 5, before delivery 6 + ltTime := allRecords[14].Delivery.Time.Add(-time.Second) // Before delivery 14, after delivery 13 var collectedIDs []string var nextCursor string for { - res, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Limit: 3, SortOrder: "asc", @@ -535,8 +540,8 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { }) require.NoError(t, err) - for _, de := range res.Data { - collectedIDs = append(collectedIDs, de.Event.ID) + for _, dr := range res.Data { + collectedIDs = append(collectedIDs, dr.Event.ID) } if res.Next == "" { @@ -561,10 +566,10 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { // comparison across databases with different timestamp precision // (PostgreSQL microseconds, ClickHouse DateTime64, etc.). // - // Important: ListDeliveryEvent filters by DELIVERY time, not event time. 
+ // Important: ListDelivery filters by DELIVERY time, not event time. // First, retrieve all deliveries to find delivery 10's time - res, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Limit: 100, SortOrder: "asc", @@ -577,16 +582,16 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { // Find delivery 10's stored delivery time, truncated to seconds var storedDelivery10Time time.Time - for _, de := range res.Data { - if de.Event.ID == allEvents[10].Event.ID { - storedDelivery10Time = de.Delivery.Time.Truncate(time.Second) + for _, dr := range res.Data { + if dr.Event.ID == allRecords[10].Event.ID { + storedDelivery10Time = dr.Delivery.Time.Truncate(time.Second) break } } require.False(t, storedDelivery10Time.IsZero(), "should find delivery 10") // GT with exact time should exclude delivery 10 - resGT, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + resGT, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Limit: 100, SortOrder: "asc", @@ -594,15 +599,15 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { }) require.NoError(t, err) - for _, de := range resGT.Data { - deTimeTrunc := de.Delivery.Time.Truncate(time.Second) - require.True(t, deTimeTrunc.After(storedDelivery10Time), + for _, dr := range resGT.Data { + drTimeTrunc := dr.Delivery.Time.Truncate(time.Second) + require.True(t, drTimeTrunc.After(storedDelivery10Time), "GT filter should exclude delivery with exact timestamp, got delivery %s with time %v (filter time: %v)", - de.Delivery.ID, deTimeTrunc, storedDelivery10Time) + dr.Delivery.ID, drTimeTrunc, storedDelivery10Time) } // LT with exact time should exclude delivery 10 - resLT, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + resLT, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Limit: 100, SortOrder: 
"asc", @@ -610,15 +615,15 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { }) require.NoError(t, err) - for _, de := range resLT.Data { - deTimeTrunc := de.Delivery.Time.Truncate(time.Second) - require.True(t, deTimeTrunc.Before(storedDelivery10Time), + for _, dr := range resLT.Data { + drTimeTrunc := dr.Delivery.Time.Truncate(time.Second) + require.True(t, drTimeTrunc.Before(storedDelivery10Time), "LT filter should exclude delivery with exact timestamp, got delivery %s with time %v (filter time: %v)", - de.Delivery.ID, deTimeTrunc, storedDelivery10Time) + dr.Delivery.ID, drTimeTrunc, storedDelivery10Time) } // Verify delivery 10 is included with GTE/LTE (inclusive bounds) - resGTE, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + resGTE, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Limit: 100, SortOrder: "asc", @@ -629,8 +634,8 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { }) t.Run("prev cursor respects time filter", func(t *testing.T) { - // Get first page (ListDeliveryEvent filters by delivery time) - res1, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + // Get first page (ListDelivery filters by delivery time) + res1, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Limit: 3, SortOrder: "asc", @@ -640,7 +645,7 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { require.NotEmpty(t, res1.Next) // Get second page - res2, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + res2, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ TenantID: tenantID, Limit: 3, SortOrder: "asc", @@ -651,7 +656,7 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { require.NotEmpty(t, res2.Prev) // Go back to first page using prev cursor - resPrev, err := logStore.ListDeliveryEvent(ctx, driver.ListDeliveryEventRequest{ + resPrev, err := logStore.ListDelivery(ctx, 
driver.ListDeliveryRequest{ TenantID: tenantID, Limit: 3, SortOrder: "asc", @@ -731,7 +736,7 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { // Find event 10's stored event time, truncated to seconds var storedEvent10Time time.Time for _, e := range res.Data { - if e.ID == allEvents[10].Event.ID { + if e.ID == allRecords[10].Event.ID { storedEvent10Time = e.Time.Truncate(time.Second) break } diff --git a/internal/logstore/logstore.go b/internal/logstore/logstore.go index 84a45314..6990cb31 100644 --- a/internal/logstore/logstore.go +++ b/internal/logstore/logstore.go @@ -16,17 +16,18 @@ import ( type TimeFilter = driver.TimeFilter type ListEventRequest = driver.ListEventRequest type ListEventResponse = driver.ListEventResponse -type ListDeliveryEventRequest = driver.ListDeliveryEventRequest -type ListDeliveryEventResponse = driver.ListDeliveryEventResponse +type ListDeliveryRequest = driver.ListDeliveryRequest +type ListDeliveryResponse = driver.ListDeliveryResponse type RetrieveEventRequest = driver.RetrieveEventRequest -type RetrieveDeliveryEventRequest = driver.RetrieveDeliveryEventRequest +type RetrieveDeliveryRequest = driver.RetrieveDeliveryRequest +type DeliveryRecord = driver.DeliveryRecord type LogStore interface { ListEvent(context.Context, ListEventRequest) (ListEventResponse, error) - ListDeliveryEvent(context.Context, ListDeliveryEventRequest) (ListDeliveryEventResponse, error) + ListDelivery(context.Context, ListDeliveryRequest) (ListDeliveryResponse, error) RetrieveEvent(ctx context.Context, request RetrieveEventRequest) (*models.Event, error) - RetrieveDeliveryEvent(ctx context.Context, request RetrieveDeliveryEventRequest) (*models.DeliveryEvent, error) - InsertManyDeliveryEvent(context.Context, []*models.DeliveryEvent) error + RetrieveDelivery(ctx context.Context, request RetrieveDeliveryRequest) (*DeliveryRecord, error) + InsertMany(context.Context, []*models.Event, []*models.Delivery) error } type DriverOpts struct { diff --git 
a/internal/logstore/memlogstore/memlogstore.go b/internal/logstore/memlogstore/memlogstore.go index fc6c8014..11d88a98 100644 --- a/internal/logstore/memlogstore/memlogstore.go +++ b/internal/logstore/memlogstore/memlogstore.go @@ -22,15 +22,17 @@ const ( // memLogStore is an in-memory implementation of driver.LogStore. // It serves as a reference implementation and is useful for testing. type memLogStore struct { - mu sync.RWMutex - deliveryEvents []*models.DeliveryEvent + mu sync.RWMutex + events map[string]*models.Event // keyed by event ID + deliveries []*models.Delivery // list of all deliveries } var _ driver.LogStore = (*memLogStore)(nil) func NewLogStore() driver.LogStore { return &memLogStore{ - deliveryEvents: make([]*models.DeliveryEvent, 0), + events: make(map[string]*models.Event), + deliveries: make([]*models.Delivery, 0), } } @@ -48,20 +50,13 @@ func (s *memLogStore) ListEvent(ctx context.Context, req driver.ListEventRequest limit = 100 } - // Dedupe by event ID and filter - eventMap := make(map[string]*models.Event) - for _, de := range s.deliveryEvents { - if !s.matchesEventFilter(&de.Event, req) { + // Filter events + var allEvents []*models.Event + for _, event := range s.events { + if !s.matchesEventFilter(event, req) { continue } - if _, exists := eventMap[de.Event.ID]; !exists { - eventMap[de.Event.ID] = copyEvent(&de.Event) - } - } - - var allEvents []*models.Event - for _, event := range eventMap { - allEvents = append(allEvents, event) + allEvents = append(allEvents, copyEvent(event)) } // eventWithTimeID pairs an event with its sortable time ID for cursor operations. 
@@ -191,38 +186,36 @@ func (s *memLogStore) matchesEventFilter(event *models.Event, req driver.ListEve return true } -func (s *memLogStore) InsertManyDeliveryEvent(ctx context.Context, deliveryEvents []*models.DeliveryEvent) error { +func (s *memLogStore) InsertMany(ctx context.Context, events []*models.Event, deliveries []*models.Delivery) error { s.mu.Lock() defer s.mu.Unlock() - for _, de := range deliveryEvents { - copied := &models.DeliveryEvent{ - ID: de.ID, - Attempt: de.Attempt, - DestinationID: de.DestinationID, - Event: de.Event, - Delivery: de.Delivery, - Manual: de.Manual, - } + // Insert events (dedupe by ID) + for _, event := range events { + s.events[event.ID] = copyEvent(event) + } + + // Insert deliveries (idempotent upsert: match on event_id + delivery_id) + for _, d := range deliveries { + copied := copyDelivery(d) - // Idempotent upsert: match on event_id + delivery_id found := false - for i, existing := range s.deliveryEvents { - if existing.Event.ID == de.Event.ID && existing.Delivery != nil && de.Delivery != nil && existing.Delivery.ID == de.Delivery.ID { - s.deliveryEvents[i] = copied + for i, existing := range s.deliveries { + if existing.EventID == d.EventID && existing.ID == d.ID { + s.deliveries[i] = copied found = true break } } if !found { - s.deliveryEvents = append(s.deliveryEvents, copied) + s.deliveries = append(s.deliveries, copied) } } return nil } -func (s *memLogStore) ListDeliveryEvent(ctx context.Context, req driver.ListDeliveryEventRequest) (driver.ListDeliveryEventResponse, error) { +func (s *memLogStore) ListDelivery(ctx context.Context, req driver.ListDeliveryRequest) (driver.ListDeliveryResponse, error) { s.mu.RLock() defer s.mu.RUnlock() @@ -236,52 +229,59 @@ func (s *memLogStore) ListDeliveryEvent(ctx context.Context, req driver.ListDeli limit = 100 } - // Filter delivery events - var allDeliveryEvents []*models.DeliveryEvent - for _, de := range s.deliveryEvents { - if !s.matchesFilter(de, req) { + // Filter 
deliveries and build records with events + var allRecords []*driver.DeliveryRecord + for _, d := range s.deliveries { + event := s.events[d.EventID] + if event == nil { + continue // skip orphan deliveries + } + if !s.matchesDeliveryFilter(d, event, req) { continue } - allDeliveryEvents = append(allDeliveryEvents, de) + allRecords = append(allRecords, &driver.DeliveryRecord{ + Delivery: copyDelivery(d), + Event: copyEvent(event), + }) } - // deliveryEventWithTimeID pairs a delivery event with its sortable time ID. - type deliveryEventWithTimeID struct { - de *models.DeliveryEvent + // deliveryRecordWithTimeID pairs a delivery record with its sortable time ID. + type deliveryRecordWithTimeID struct { + record *driver.DeliveryRecord timeID string } // Build list with time IDs (using delivery time) - deliveryEventsWithTimeID := make([]deliveryEventWithTimeID, len(allDeliveryEvents)) - for i, de := range allDeliveryEvents { - deliveryEventsWithTimeID[i] = deliveryEventWithTimeID{ - de: de, - timeID: makeTimeID(de.Delivery.Time, de.Delivery.ID), + recordsWithTimeID := make([]deliveryRecordWithTimeID, len(allRecords)) + for i, r := range allRecords { + recordsWithTimeID[i] = deliveryRecordWithTimeID{ + record: r, + timeID: makeTimeID(r.Delivery.Time, r.Delivery.ID), } } - res, err := pagination.Run(ctx, pagination.Config[deliveryEventWithTimeID]{ + res, err := pagination.Run(ctx, pagination.Config[deliveryRecordWithTimeID]{ Limit: limit, Order: sortOrder, Next: req.Next, Prev: req.Prev, - Fetch: func(_ context.Context, q pagination.QueryInput) ([]deliveryEventWithTimeID, error) { + Fetch: func(_ context.Context, q pagination.QueryInput) ([]deliveryRecordWithTimeID, error) { // Sort based on query direction isDesc := q.SortDir == "desc" - sort.Slice(deliveryEventsWithTimeID, func(i, j int) bool { + sort.Slice(recordsWithTimeID, func(i, j int) bool { if isDesc { - return deliveryEventsWithTimeID[i].timeID > deliveryEventsWithTimeID[j].timeID + return 
recordsWithTimeID[i].timeID > recordsWithTimeID[j].timeID } - return deliveryEventsWithTimeID[i].timeID < deliveryEventsWithTimeID[j].timeID + return recordsWithTimeID[i].timeID < recordsWithTimeID[j].timeID }) // Filter using q.Compare (like SQL WHERE clause) - var filtered []deliveryEventWithTimeID - for _, de := range deliveryEventsWithTimeID { + var filtered []deliveryRecordWithTimeID + for _, r := range recordsWithTimeID { // If no cursor, include all items // If cursor exists, filter using Compare operator - if q.CursorPos == "" || compareTimeID(de.timeID, q.Compare, q.CursorPos) { - filtered = append(filtered, de) + if q.CursorPos == "" || compareTimeID(r.timeID, q.Compare, q.CursorPos) { + filtered = append(filtered, r) } } @@ -290,18 +290,21 @@ func (s *memLogStore) ListDeliveryEvent(ctx context.Context, req driver.ListDeli filtered = filtered[:q.Limit] } - result := make([]deliveryEventWithTimeID, len(filtered)) - for i, de := range filtered { - result[i] = deliveryEventWithTimeID{ - de: copyDeliveryEvent(de.de), - timeID: de.timeID, + result := make([]deliveryRecordWithTimeID, len(filtered)) + for i, r := range filtered { + result[i] = deliveryRecordWithTimeID{ + record: &driver.DeliveryRecord{ + Delivery: copyDelivery(r.record.Delivery), + Event: copyEvent(r.record.Event), + }, + timeID: r.timeID, } } return result, nil }, - Cursor: pagination.Cursor[deliveryEventWithTimeID]{ - Encode: func(de deliveryEventWithTimeID) string { - return cursor.Encode(cursorResourceDelivery, cursorVersion, de.timeID) + Cursor: pagination.Cursor[deliveryRecordWithTimeID]{ + Encode: func(r deliveryRecordWithTimeID) string { + return cursor.Encode(cursorResourceDelivery, cursorVersion, r.timeID) }, Decode: func(c string) (string, error) { return cursor.Decode(c, cursorResourceDelivery, cursorVersion) @@ -309,16 +312,16 @@ func (s *memLogStore) ListDeliveryEvent(ctx context.Context, req driver.ListDeli }, }) if err != nil { - return driver.ListDeliveryEventResponse{}, err + 
return driver.ListDeliveryResponse{}, err } - // Extract delivery events from results - data := make([]*models.DeliveryEvent, len(res.Items)) + // Extract records from results + data := make([]*driver.DeliveryRecord, len(res.Items)) for i, item := range res.Items { - data[i] = item.de + data[i] = item.record } - return driver.ListDeliveryEventResponse{ + return driver.ListDeliveryResponse{ Data: data, Next: res.Next, Prev: res.Prev, @@ -329,48 +332,56 @@ func (s *memLogStore) RetrieveEvent(ctx context.Context, req driver.RetrieveEven s.mu.RLock() defer s.mu.RUnlock() - for _, de := range s.deliveryEvents { - if de.Event.ID == req.EventID { - if req.TenantID != "" && de.Event.TenantID != req.TenantID { - continue - } - if req.DestinationID != "" && de.Event.DestinationID != req.DestinationID { - continue - } - return copyEvent(&de.Event), nil - } + event := s.events[req.EventID] + if event == nil { + return nil, nil } - return nil, nil + + if req.TenantID != "" && event.TenantID != req.TenantID { + return nil, nil + } + if req.DestinationID != "" && event.DestinationID != req.DestinationID { + return nil, nil + } + return copyEvent(event), nil } -func (s *memLogStore) RetrieveDeliveryEvent(ctx context.Context, req driver.RetrieveDeliveryEventRequest) (*models.DeliveryEvent, error) { +func (s *memLogStore) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeliveryRequest) (*driver.DeliveryRecord, error) { s.mu.RLock() defer s.mu.RUnlock() - for _, de := range s.deliveryEvents { - if de.Delivery != nil && de.Delivery.ID == req.DeliveryID { - if req.TenantID != "" && de.Event.TenantID != req.TenantID { + for _, d := range s.deliveries { + if d.ID == req.DeliveryID { + event := s.events[d.EventID] + if event == nil { continue } - return copyDeliveryEvent(de), nil + if req.TenantID != "" && event.TenantID != req.TenantID { + continue + } + return &driver.DeliveryRecord{ + Delivery: copyDelivery(d), + Event: copyEvent(event), + }, nil } } return nil, nil } -func (s 
*memLogStore) matchesFilter(de *models.DeliveryEvent, req driver.ListDeliveryEventRequest) bool { - if req.TenantID != "" && de.Event.TenantID != req.TenantID { +func (s *memLogStore) matchesDeliveryFilter(d *models.Delivery, event *models.Event, req driver.ListDeliveryRequest) bool { + // Filter by event's tenant ID since deliveries don't have tenant_id in the database + if req.TenantID != "" && event.TenantID != req.TenantID { return false } - if req.EventID != "" && de.Event.ID != req.EventID { + if req.EventID != "" && d.EventID != req.EventID { return false } if len(req.DestinationIDs) > 0 { found := false for _, destID := range req.DestinationIDs { - if de.DestinationID == destID { + if d.DestinationID == destID { found = true break } @@ -380,14 +391,14 @@ func (s *memLogStore) matchesFilter(de *models.DeliveryEvent, req driver.ListDel } } - if req.Status != "" && de.Delivery.Status != req.Status { + if req.Status != "" && d.Status != req.Status { return false } if len(req.Topics) > 0 { found := false for _, topic := range req.Topics { - if de.Event.Topic == topic { + if event.Topic == topic { found = true break } @@ -397,33 +408,22 @@ func (s *memLogStore) matchesFilter(de *models.DeliveryEvent, req driver.ListDel } } - if req.TimeFilter.GTE != nil && de.Delivery.Time.Before(*req.TimeFilter.GTE) { + if req.TimeFilter.GTE != nil && d.Time.Before(*req.TimeFilter.GTE) { return false } - if req.TimeFilter.LTE != nil && de.Delivery.Time.After(*req.TimeFilter.LTE) { + if req.TimeFilter.LTE != nil && d.Time.After(*req.TimeFilter.LTE) { return false } - if req.TimeFilter.GT != nil && !de.Delivery.Time.After(*req.TimeFilter.GT) { + if req.TimeFilter.GT != nil && !d.Time.After(*req.TimeFilter.GT) { return false } - if req.TimeFilter.LT != nil && !de.Delivery.Time.Before(*req.TimeFilter.LT) { + if req.TimeFilter.LT != nil && !d.Time.Before(*req.TimeFilter.LT) { return false } return true } -func copyDeliveryEvent(de *models.DeliveryEvent) *models.DeliveryEvent { - 
return &models.DeliveryEvent{ - ID: de.ID, - Attempt: de.Attempt, - DestinationID: de.DestinationID, - Event: *copyEvent(&de.Event), - Delivery: copyDelivery(de.Delivery), - Manual: de.Manual, - } -} - func copyEvent(e *models.Event) *models.Event { copied := &models.Event{ ID: e.ID, @@ -441,7 +441,7 @@ func copyEvent(e *models.Event) *models.Event { } } if e.Data != nil { - copied.Data = make(map[string]interface{}, len(e.Data)) + copied.Data = make(map[string]any, len(e.Data)) for k, v := range e.Data { copied.Data[k] = v } @@ -456,15 +456,18 @@ func copyDelivery(d *models.Delivery) *models.Delivery { } copied := &models.Delivery{ ID: d.ID, + TenantID: d.TenantID, EventID: d.EventID, DestinationID: d.DestinationID, + Attempt: d.Attempt, + Manual: d.Manual, Status: d.Status, Time: d.Time, Code: d.Code, } if d.ResponseData != nil { - copied.ResponseData = make(map[string]interface{}, len(d.ResponseData)) + copied.ResponseData = make(map[string]any, len(d.ResponseData)) for k, v := range d.ResponseData { copied.ResponseData[k] = v } diff --git a/internal/logstore/pglogstore/pglogstore.go b/internal/logstore/pglogstore/pglogstore.go index 90f7319f..e97fc517 100644 --- a/internal/logstore/pglogstore/pglogstore.go +++ b/internal/logstore/pglogstore/pglogstore.go @@ -140,7 +140,7 @@ func scanEvents(rows pgx.Rows) ([]eventWithTimeID, error) { eventTime time.Time topic string eligibleForRetry bool - data map[string]interface{} + data map[string]any metadata map[string]string timeID string ) @@ -181,13 +181,13 @@ func scanEvents(rows pgx.Rows) ([]eventWithTimeID, error) { return results, nil } -// deliveryEventWithTimeID wraps a delivery event with its time_delivery_id for cursor encoding. -type deliveryEventWithTimeID struct { - *models.DeliveryEvent +// deliveryRecordWithTimeID wraps a delivery record with its time_delivery_id for cursor encoding. 
+type deliveryRecordWithTimeID struct { + *driver.DeliveryRecord TimeDeliveryID string } -func (s *logStore) ListDeliveryEvent(ctx context.Context, req driver.ListDeliveryEventRequest) (driver.ListDeliveryEventResponse, error) { +func (s *logStore) ListDelivery(ctx context.Context, req driver.ListDeliveryRequest) (driver.ListDeliveryResponse, error) { sortOrder := req.SortOrder if sortOrder != "asc" && sortOrder != "desc" { sortOrder = "desc" @@ -198,23 +198,23 @@ func (s *logStore) ListDeliveryEvent(ctx context.Context, req driver.ListDeliver limit = 100 } - res, err := pagination.Run(ctx, pagination.Config[deliveryEventWithTimeID]{ + res, err := pagination.Run(ctx, pagination.Config[deliveryRecordWithTimeID]{ Limit: limit, Order: sortOrder, Next: req.Next, Prev: req.Prev, - Fetch: func(ctx context.Context, q pagination.QueryInput) ([]deliveryEventWithTimeID, error) { + Fetch: func(ctx context.Context, q pagination.QueryInput) ([]deliveryRecordWithTimeID, error) { query, args := buildDeliveryQuery(req, q) rows, err := s.db.Query(ctx, query, args...) 
if err != nil { return nil, fmt.Errorf("query failed: %w", err) } defer rows.Close() - return scanDeliveryEvents(rows) + return scanDeliveryRecords(rows) }, - Cursor: pagination.Cursor[deliveryEventWithTimeID]{ - Encode: func(de deliveryEventWithTimeID) string { - return cursor.Encode(cursorResourceDelivery, cursorVersion, de.TimeDeliveryID) + Cursor: pagination.Cursor[deliveryRecordWithTimeID]{ + Encode: func(dr deliveryRecordWithTimeID) string { + return cursor.Encode(cursorResourceDelivery, cursorVersion, dr.TimeDeliveryID) }, Decode: func(c string) (string, error) { return cursor.Decode(c, cursorResourceDelivery, cursorVersion) @@ -222,23 +222,23 @@ func (s *logStore) ListDeliveryEvent(ctx context.Context, req driver.ListDeliver }, }) if err != nil { - return driver.ListDeliveryEventResponse{}, err + return driver.ListDeliveryResponse{}, err } - // Extract delivery events from results - data := make([]*models.DeliveryEvent, len(res.Items)) + // Extract delivery records from results + data := make([]*driver.DeliveryRecord, len(res.Items)) for i, item := range res.Items { - data[i] = item.DeliveryEvent + data[i] = item.DeliveryRecord } - return driver.ListDeliveryEventResponse{ + return driver.ListDeliveryResponse{ Data: data, Next: res.Next, Prev: res.Prev, }, nil } -func buildDeliveryQuery(req driver.ListDeliveryEventRequest, q pagination.QueryInput) (string, []any) { +func buildDeliveryQuery(req driver.ListDeliveryRequest, q pagination.QueryInput) (string, []any) { cursorCondition := fmt.Sprintf("AND ($10::text = '' OR idx.time_delivery_id %s $10::text)", q.Compare) orderByClause := fmt.Sprintf("idx.delivery_time %s, idx.delivery_id %s", strings.ToUpper(q.SortDir), strings.ToUpper(q.SortDir)) @@ -294,8 +294,8 @@ func buildDeliveryQuery(req driver.ListDeliveryEventRequest, q pagination.QueryI return query, args } -func scanDeliveryEvents(rows pgx.Rows) ([]deliveryEventWithTimeID, error) { - var results []deliveryEventWithTimeID +func scanDeliveryRecords(rows 
pgx.Rows) ([]deliveryRecordWithTimeID, error) { + var results []deliveryRecordWithTimeID for rows.Next() { var ( eventID string @@ -308,10 +308,10 @@ func scanDeliveryEvents(rows pgx.Rows) ([]deliveryEventWithTimeID, error) { timeDeliveryID string tenantID string eligibleForRetry bool - data map[string]interface{} + data map[string]any metadata map[string]string code string - responseData map[string]interface{} + responseData map[string]any manual bool attempt int ) @@ -337,31 +337,30 @@ func scanDeliveryEvents(rows pgx.Rows) ([]deliveryEventWithTimeID, error) { return nil, fmt.Errorf("scan failed: %w", err) } - results = append(results, deliveryEventWithTimeID{ - DeliveryEvent: &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_%s", eventID, deliveryID), - DestinationID: destinationID, - Manual: manual, - Attempt: attempt, - Event: models.Event{ - ID: eventID, - TenantID: tenantID, - DestinationID: destinationID, - Topic: topic, - EligibleForRetry: eligibleForRetry, - Time: eventTime, - Data: data, - Metadata: metadata, - }, + results = append(results, deliveryRecordWithTimeID{ + DeliveryRecord: &driver.DeliveryRecord{ Delivery: &models.Delivery{ ID: deliveryID, + TenantID: tenantID, EventID: eventID, DestinationID: destinationID, + Attempt: attempt, + Manual: manual, Status: status, Time: deliveryTime, Code: code, ResponseData: responseData, }, + Event: &models.Event{ + ID: eventID, + TenantID: tenantID, + DestinationID: destinationID, + Topic: topic, + EligibleForRetry: eligibleForRetry, + Time: eventTime, + Data: data, + Metadata: metadata, + }, }, TimeDeliveryID: timeDeliveryID, }) @@ -376,7 +375,7 @@ func scanDeliveryEvents(rows pgx.Rows) ([]deliveryEventWithTimeID, error) { func (s *logStore) RetrieveEvent(ctx context.Context, req driver.RetrieveEventRequest) (*models.Event, error) { var query string - var args []interface{} + var args []any if req.DestinationID != "" { query = ` @@ -395,7 +394,7 @@ func (s *logStore) RetrieveEvent(ctx context.Context, req 
driver.RetrieveEventRe SELECT 1 FROM event_delivery_index idx WHERE ($1::text = '' OR idx.tenant_id = $1) AND idx.event_id = $2 AND idx.destination_id = $3 )` - args = []interface{}{req.TenantID, req.EventID, req.DestinationID} + args = []any{req.TenantID, req.EventID, req.DestinationID} } else { query = ` SELECT @@ -409,7 +408,7 @@ func (s *logStore) RetrieveEvent(ctx context.Context, req driver.RetrieveEventRe e.metadata FROM events e WHERE ($1::text = '' OR e.tenant_id = $1) AND e.id = $2` - args = []interface{}{req.TenantID, req.EventID} + args = []any{req.TenantID, req.EventID} } row := s.db.QueryRow(ctx, query, args...) @@ -435,7 +434,7 @@ func (s *logStore) RetrieveEvent(ctx context.Context, req driver.RetrieveEventRe return event, nil } -func (s *logStore) RetrieveDeliveryEvent(ctx context.Context, req driver.RetrieveDeliveryEventRequest) (*models.DeliveryEvent, error) { +func (s *logStore) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeliveryRequest) (*driver.DeliveryRecord, error) { query := ` SELECT idx.event_id, @@ -471,10 +470,10 @@ func (s *logStore) RetrieveDeliveryEvent(ctx context.Context, req driver.Retriev status string tenantID string eligibleForRetry bool - data map[string]interface{} + data map[string]any metadata map[string]string code string - responseData map[string]interface{} + responseData map[string]any manual bool attempt int ) @@ -503,35 +502,34 @@ func (s *logStore) RetrieveDeliveryEvent(ctx context.Context, req driver.Retriev return nil, fmt.Errorf("scan failed: %w", err) } - return &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_%s", eventID, deliveryID), - DestinationID: destinationID, - Manual: manual, - Attempt: attempt, - Event: models.Event{ - ID: eventID, - TenantID: tenantID, - DestinationID: destinationID, - Topic: topic, - EligibleForRetry: eligibleForRetry, - Time: eventTime, - Data: data, - Metadata: metadata, - }, + return &driver.DeliveryRecord{ Delivery: &models.Delivery{ ID: deliveryID, + TenantID: tenantID, 
EventID: eventID, DestinationID: destinationID, + Attempt: attempt, + Manual: manual, Status: status, Time: deliveryTime, Code: code, ResponseData: responseData, }, + Event: &models.Event{ + ID: eventID, + TenantID: tenantID, + DestinationID: destinationID, + Topic: topic, + EligibleForRetry: eligibleForRetry, + Time: eventTime, + Data: data, + Metadata: metadata, + }, }, nil } -func (s *logStore) InsertManyDeliveryEvent(ctx context.Context, deliveryEvents []*models.DeliveryEvent) error { - if len(deliveryEvents) == 0 { +func (s *logStore) InsertMany(ctx context.Context, events []*models.Event, deliveries []*models.Delivery) error { + if len(events) == 0 && len(deliveries) == 0 { return nil } @@ -541,122 +539,68 @@ func (s *logStore) InsertManyDeliveryEvent(ctx context.Context, deliveryEvents [ } defer tx.Rollback(ctx) - events := make([]*models.Event, len(deliveryEvents)) - for i, de := range deliveryEvents { - events[i] = &de.Event - } - _, err = tx.Exec(ctx, ` - INSERT INTO events (id, tenant_id, destination_id, time, topic, eligible_for_retry, data, metadata) - SELECT * FROM unnest($1::text[], $2::text[], $3::text[], $4::timestamptz[], $5::text[], $6::boolean[], $7::jsonb[], $8::jsonb[]) - ON CONFLICT (time, id) DO NOTHING - `, eventArrays(events)...) 
- if err != nil { - return err - } - - deliveries := make([]*models.Delivery, len(deliveryEvents)) - for i, de := range deliveryEvents { - if de.Delivery == nil { - // Create a pending delivery if none exists - deliveries[i] = &models.Delivery{ - ID: de.ID, - EventID: de.Event.ID, - DestinationID: de.DestinationID, - Status: "pending", - Time: time.Now(), - } - } else { - deliveries[i] = de.Delivery + if len(events) > 0 { + _, err = tx.Exec(ctx, ` + INSERT INTO events (id, tenant_id, destination_id, time, topic, eligible_for_retry, data, metadata) + SELECT * FROM unnest($1::text[], $2::text[], $3::text[], $4::timestamptz[], $5::text[], $6::boolean[], $7::jsonb[], $8::jsonb[]) + ON CONFLICT (time, id) DO NOTHING + `, eventArrays(events)...) + if err != nil { + return err } } - _, err = tx.Exec(ctx, ` - INSERT INTO deliveries (id, event_id, destination_id, status, time, code, response_data, manual, attempt) - SELECT * FROM unnest($1::text[], $2::text[], $3::text[], $4::text[], $5::timestamptz[], $6::text[], $7::jsonb[], $8::boolean[], $9::integer[]) - ON CONFLICT (time, id) DO UPDATE SET - status = EXCLUDED.status, - code = EXCLUDED.code, - response_data = EXCLUDED.response_data - `, deliveryArrays(deliveries, deliveryEvents)...) - if err != nil { - return err - } - - _, err = tx.Exec(ctx, ` - INSERT INTO event_delivery_index ( - event_id, delivery_id, tenant_id, destination_id, - event_time, delivery_time, topic, status, manual, attempt - ) - SELECT * FROM unnest( - $1::text[], $2::text[], $3::text[], $4::text[], - $5::timestamptz[], $6::timestamptz[], $7::text[], $8::text[], - $9::boolean[], $10::integer[] - ) - ON CONFLICT (delivery_time, event_id, delivery_id) DO UPDATE SET - status = EXCLUDED.status - `, eventDeliveryIndexArrays(deliveryEvents)...) 
- if err != nil { - return err - } - return tx.Commit(ctx) -} - -func eventDeliveryIndexArrays(deliveryEvents []*models.DeliveryEvent) []interface{} { - eventIDs := make([]string, len(deliveryEvents)) - deliveryIDs := make([]string, len(deliveryEvents)) - tenantIDs := make([]string, len(deliveryEvents)) - destinationIDs := make([]string, len(deliveryEvents)) - eventTimes := make([]time.Time, len(deliveryEvents)) - deliveryTimes := make([]time.Time, len(deliveryEvents)) - topics := make([]string, len(deliveryEvents)) - statuses := make([]string, len(deliveryEvents)) - manuals := make([]bool, len(deliveryEvents)) - attempts := make([]int, len(deliveryEvents)) - - for i, de := range deliveryEvents { - eventIDs[i] = de.Event.ID - if de.Delivery != nil { - deliveryIDs[i] = de.Delivery.ID - } else { - deliveryIDs[i] = de.ID + if len(deliveries) > 0 { + _, err = tx.Exec(ctx, ` + INSERT INTO deliveries (id, event_id, destination_id, status, time, code, response_data, manual, attempt) + SELECT * FROM unnest($1::text[], $2::text[], $3::text[], $4::text[], $5::timestamptz[], $6::text[], $7::jsonb[], $8::boolean[], $9::integer[]) + ON CONFLICT (time, id) DO UPDATE SET + status = EXCLUDED.status, + code = EXCLUDED.code, + response_data = EXCLUDED.response_data + `, deliveryArrays(deliveries)...) 
+ if err != nil { + return err } - tenantIDs[i] = de.Event.TenantID - destinationIDs[i] = de.DestinationID - eventTimes[i] = de.Event.Time - if de.Delivery != nil { - deliveryTimes[i] = de.Delivery.Time - statuses[i] = de.Delivery.Status - } else { - deliveryTimes[i] = time.Now() - statuses[i] = "pending" + + _, err = tx.Exec(ctx, ` + INSERT INTO event_delivery_index ( + event_id, delivery_id, tenant_id, destination_id, + event_time, delivery_time, topic, status, manual, attempt + ) + SELECT + d.event_id, + d.id, + e.tenant_id, + d.destination_id, + e.time, + d.time, + e.topic, + d.status, + d.manual, + d.attempt + FROM unnest($1::text[], $2::text[], $3::text[], $4::text[], $5::timestamptz[], $6::text[], $7::jsonb[], $8::boolean[], $9::integer[]) + AS d(id, event_id, destination_id, status, time, code, response_data, manual, attempt) + JOIN events e ON e.id = d.event_id + ON CONFLICT (delivery_time, event_id, delivery_id) DO UPDATE SET + status = EXCLUDED.status + `, deliveryArrays(deliveries)...) 
+ if err != nil { + return err } - topics[i] = de.Event.Topic - manuals[i] = de.Manual - attempts[i] = de.Attempt } - return []interface{}{ - eventIDs, - deliveryIDs, - tenantIDs, - destinationIDs, - eventTimes, - deliveryTimes, - topics, - statuses, - manuals, - attempts, - } + return tx.Commit(ctx) } -func eventArrays(events []*models.Event) []interface{} { +func eventArrays(events []*models.Event) []any { ids := make([]string, len(events)) tenantIDs := make([]string, len(events)) destinationIDs := make([]string, len(events)) times := make([]time.Time, len(events)) topics := make([]string, len(events)) eligibleForRetries := make([]bool, len(events)) - datas := make([]map[string]interface{}, len(events)) + datas := make([]map[string]any, len(events)) metadatas := make([]map[string]string, len(events)) for i, e := range events { @@ -670,7 +614,7 @@ func eventArrays(events []*models.Event) []interface{} { metadatas[i] = e.Metadata } - return []interface{}{ + return []any{ ids, tenantIDs, destinationIDs, @@ -682,14 +626,14 @@ func eventArrays(events []*models.Event) []interface{} { } } -func deliveryArrays(deliveries []*models.Delivery, deliveryEvents []*models.DeliveryEvent) []interface{} { +func deliveryArrays(deliveries []*models.Delivery) []any { ids := make([]string, len(deliveries)) eventIDs := make([]string, len(deliveries)) destinationIDs := make([]string, len(deliveries)) statuses := make([]string, len(deliveries)) times := make([]time.Time, len(deliveries)) codes := make([]string, len(deliveries)) - responseDatas := make([]map[string]interface{}, len(deliveries)) + responseDatas := make([]map[string]any, len(deliveries)) manuals := make([]bool, len(deliveries)) attempts := make([]int, len(deliveries)) @@ -701,11 +645,11 @@ func deliveryArrays(deliveries []*models.Delivery, deliveryEvents []*models.Deli times[i] = d.Time codes[i] = d.Code responseDatas[i] = d.ResponseData - manuals[i] = deliveryEvents[i].Manual - attempts[i] = deliveryEvents[i].Attempt + 
manuals[i] = d.Manual + attempts[i] = d.Attempt } - return []interface{}{ + return []any{ ids, eventIDs, destinationIDs, diff --git a/internal/models/event.go b/internal/models/event.go index 73c0ea85..f6a554d4 100644 --- a/internal/models/event.go +++ b/internal/models/event.go @@ -130,12 +130,14 @@ const ( ) type Delivery struct { - ID string `json:"id"` - DeliveryEventID string `json:"delivery_event_id"` - EventID string `json:"event_id"` - DestinationID string `json:"destination_id"` - Status string `json:"status"` - Time time.Time `json:"time"` - Code string `json:"code"` - ResponseData map[string]interface{} `json:"response_data"` + ID string `json:"id"` + TenantID string `json:"tenant_id"` + EventID string `json:"event_id"` + DestinationID string `json:"destination_id"` + Attempt int `json:"attempt"` + Manual bool `json:"manual"` + Status string `json:"status"` + Time time.Time `json:"time"` + Code string `json:"code"` + ResponseData map[string]interface{} `json:"response_data"` } diff --git a/internal/services/builder.go b/internal/services/builder.go index 8b3809c2..c415f4e3 100644 --- a/internal/services/builder.go +++ b/internal/services/builder.go @@ -448,7 +448,8 @@ func (b *ServiceBuilder) makeBatcher(logStore logstore.LogStore, itemCountThresh } } - deliveryEvents := make([]*models.DeliveryEvent, 0, len(msgs)) + events := make([]*models.Event, 0, len(msgs)) + deliveries := make([]*models.Delivery, 0, len(msgs)) for _, msg := range msgs { deliveryEvent := models.DeliveryEvent{} if err := deliveryEvent.FromMessage(msg); err != nil { @@ -458,13 +459,17 @@ func (b *ServiceBuilder) makeBatcher(logStore logstore.LogStore, itemCountThresh nackAll() return } - deliveryEvents = append(deliveryEvents, &deliveryEvent) + events = append(events, &deliveryEvent.Event) + if deliveryEvent.Delivery != nil { + deliveries = append(deliveries, deliveryEvent.Delivery) + } } - if err := logStore.InsertManyDeliveryEvent(b.ctx, deliveryEvents); err != nil { - 
logger.Error("failed to insert delivery events", + if err := logStore.InsertMany(b.ctx, events, deliveries); err != nil { + logger.Error("failed to insert events/deliveries", zap.Error(err), - zap.Int("count", len(deliveryEvents))) + zap.Int("event_count", len(events)), + zap.Int("delivery_count", len(deliveries))) nackAll() return } diff --git a/internal/util/testutil/event.go b/internal/util/testutil/event.go index 4c69cb56..b882f903 100644 --- a/internal/util/testutil/event.go +++ b/internal/util/testutil/event.go @@ -105,12 +105,14 @@ type mockDeliveryFactory struct { func (f *mockDeliveryFactory) Any(opts ...func(*models.Delivery)) models.Delivery { delivery := models.Delivery{ - ID: idgen.Delivery(), - DeliveryEventID: idgen.DeliveryEvent(), - EventID: idgen.Event(), - DestinationID: idgen.Destination(), - Status: "success", - Time: time.Now(), + ID: idgen.Delivery(), + TenantID: "test-tenant", + EventID: idgen.Event(), + DestinationID: idgen.Destination(), + Attempt: 1, + Manual: false, + Status: "success", + Time: time.Now(), } for _, opt := range opts { @@ -131,9 +133,21 @@ func (f *mockDeliveryFactory) WithID(id string) func(*models.Delivery) { } } -func (f *mockDeliveryFactory) WithDeliveryEventID(deliveryEventID string) func(*models.Delivery) { +func (f *mockDeliveryFactory) WithTenantID(tenantID string) func(*models.Delivery) { return func(delivery *models.Delivery) { - delivery.DeliveryEventID = deliveryEventID + delivery.TenantID = tenantID + } +} + +func (f *mockDeliveryFactory) WithAttempt(attempt int) func(*models.Delivery) { + return func(delivery *models.Delivery) { + delivery.Attempt = attempt + } +} + +func (f *mockDeliveryFactory) WithManual(manual bool) func(*models.Delivery) { + return func(delivery *models.Delivery) { + delivery.Manual = manual } } From 11e73de2f576f8ebdd194a349c8a8e3c24726d93 Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Thu, 22 Jan 2026 04:06:21 +0700 Subject: [PATCH 02/23] refactor: introduce LogEntry message type for 
logmq --- internal/deliverymq/messagehandler.go | 8 ++++-- internal/deliverymq/messagehandler_test.go | 32 +++++++++++----------- internal/deliverymq/mock_test.go | 12 ++++---- internal/logmq/logmq.go | 4 +-- internal/models/event.go | 24 ++++++++++++++++ internal/services/builder.go | 20 ++++++++++---- 6 files changed, 68 insertions(+), 32 deletions(-) diff --git a/internal/deliverymq/messagehandler.go b/internal/deliverymq/messagehandler.go index f9625bd9..1bd6f4d1 100644 --- a/internal/deliverymq/messagehandler.go +++ b/internal/deliverymq/messagehandler.go @@ -84,7 +84,7 @@ type Publisher interface { } type LogPublisher interface { - Publish(ctx context.Context, deliveryEvent models.DeliveryEvent) error + Publish(ctx context.Context, entry models.LogEntry) error } type RetryScheduler interface { @@ -262,7 +262,11 @@ func (h *messageHandler) logDeliveryResult(ctx context.Context, deliveryEvent *m zap.Bool("manual", deliveryEvent.Manual)) // Publish delivery log - if logErr := h.logMQ.Publish(ctx, *deliveryEvent); logErr != nil { + logEntry := models.LogEntry{ + Event: &deliveryEvent.Event, + Delivery: delivery, + } + if logErr := h.logMQ.Publish(ctx, logEntry); logErr != nil { logger.Error("failed to publish delivery log", zap.Error(logErr), zap.String("delivery_event_id", deliveryEvent.ID), diff --git a/internal/deliverymq/messagehandler_test.go b/internal/deliverymq/messagehandler_test.go index 9f64c86e..857be951 100644 --- a/internal/deliverymq/messagehandler_test.go +++ b/internal/deliverymq/messagehandler_test.go @@ -139,7 +139,7 @@ func TestMessageHandler_DestinationNotFound(t *testing.T) { assert.True(t, mockMsg.nacked, "message should be nacked when destination not found") assert.False(t, mockMsg.acked, "message should not be acked when destination not found") assert.Empty(t, retryScheduler.schedules, "no retry should be scheduled") - assert.Empty(t, logPublisher.deliveries, "should not log delivery for pre-delivery error") + assert.Empty(t, 
logPublisher.entries, "should not log delivery for pre-delivery error") alertMonitor.AssertNotCalled(t, "HandleAttempt", mock.Anything, mock.Anything) } @@ -199,7 +199,7 @@ func TestMessageHandler_DestinationDeleted(t *testing.T) { assert.False(t, mockMsg.nacked, "message should not be nacked when destination is deleted") assert.True(t, mockMsg.acked, "message should be acked when destination is deleted") assert.Empty(t, retryScheduler.schedules, "no retry should be scheduled") - assert.Empty(t, logPublisher.deliveries, "should not log delivery for pre-delivery error") + assert.Empty(t, logPublisher.entries, "should not log delivery for pre-delivery error") alertMonitor.AssertNotCalled(t, "HandleAttempt", mock.Anything, mock.Anything) } @@ -271,8 +271,8 @@ func TestMessageHandler_PublishError_EligibleForRetry(t *testing.T) { assert.Len(t, retryScheduler.schedules, 1, "retry should be scheduled") assert.Equal(t, deliveryEvent.GetRetryID(), retryScheduler.taskIDs[0], "should use GetRetryID for task ID") - require.Len(t, logPublisher.deliveries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusFailed, logPublisher.deliveries[0].Delivery.Status, "delivery status should be Failed") + require.Len(t, logPublisher.entries, 1, "should have one delivery") + assert.Equal(t, models.DeliveryStatusFailed, logPublisher.entries[0].Delivery.Status, "delivery status should be Failed") assertAlertMonitor(t, alertMonitor, false, &destination, publishErr.Data) } @@ -343,8 +343,8 @@ func TestMessageHandler_PublishError_NotEligible(t *testing.T) { assert.True(t, mockMsg.acked, "message should be acked for ineligible retry") assert.Empty(t, retryScheduler.schedules, "no retry should be scheduled") assert.Equal(t, 1, publisher.current, "should only attempt once") - require.Len(t, logPublisher.deliveries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusFailed, logPublisher.deliveries[0].Delivery.Status, "delivery status should be Failed") + 
require.Len(t, logPublisher.entries, 1, "should have one delivery") + assert.Equal(t, models.DeliveryStatusFailed, logPublisher.entries[0].Delivery.Status, "delivery status should be Failed") assertAlertMonitor(t, alertMonitor, false, &destination, publishErr.Data) } @@ -477,8 +477,8 @@ func TestMessageHandler_RetryFlow(t *testing.T) { assert.Empty(t, retryScheduler.schedules, "no retry should be scheduled") assert.Equal(t, 1, publisher.current, "publish should succeed once") assert.Equal(t, event.ID, eventGetter.lastRetrievedID, "event getter should be called with correct ID") - require.Len(t, logPublisher.deliveries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusSuccess, logPublisher.deliveries[0].Delivery.Status, "delivery status should be OK") + require.Len(t, logPublisher.entries, 1, "should have one delivery") + assert.Equal(t, models.DeliveryStatusSuccess, logPublisher.entries[0].Delivery.Status, "delivery status should be OK") } func TestMessageHandler_Idempotency(t *testing.T) { @@ -683,7 +683,7 @@ func TestMessageHandler_DestinationDisabled(t *testing.T) { assert.Equal(t, 0, publisher.current, "should not attempt to publish to disabled destination") assert.Empty(t, retryScheduler.schedules, "should not schedule retry") assert.Empty(t, retryScheduler.canceled, "should not attempt to cancel retries") - assert.Empty(t, logPublisher.deliveries, "should not log delivery for pre-delivery error") + assert.Empty(t, logPublisher.entries, "should not log delivery for pre-delivery error") alertMonitor.AssertNotCalled(t, "HandleAttempt", mock.Anything, mock.Anything) } @@ -957,8 +957,8 @@ func TestManualDelivery_PublishError(t *testing.T) { assert.False(t, mockMsg.nacked, "message should not be nacked") assert.Equal(t, 1, publisher.current, "should attempt publish once") assert.Empty(t, retryScheduler.schedules, "should not schedule retry for manual delivery") - require.Len(t, logPublisher.deliveries, 1, "should have one delivery") - 
assert.Equal(t, models.DeliveryStatusFailed, logPublisher.deliveries[0].Delivery.Status, "delivery status should be Failed") + require.Len(t, logPublisher.entries, 1, "should have one delivery") + assert.Equal(t, models.DeliveryStatusFailed, logPublisher.entries[0].Delivery.Status, "delivery status should be Failed") assertAlertMonitor(t, alertMonitor, false, &destination, publishErr.Data) } @@ -1024,8 +1024,8 @@ func TestManualDelivery_CancelError(t *testing.T) { assert.Equal(t, 1, publisher.current, "should publish once") assert.Len(t, retryScheduler.canceled, 1, "should attempt to cancel retry") assert.Equal(t, deliveryEvent.GetRetryID(), retryScheduler.canceled[0], "should cancel with correct retry ID") - require.Len(t, logPublisher.deliveries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusSuccess, logPublisher.deliveries[0].Delivery.Status, "delivery status should be OK despite cancel error") + require.Len(t, logPublisher.entries, 1, "should have one delivery") + assert.Equal(t, models.DeliveryStatusSuccess, logPublisher.entries[0].Delivery.Status, "delivery status should be OK despite cancel error") assertAlertMonitor(t, alertMonitor, true, &destination, nil) } @@ -1090,7 +1090,7 @@ func TestManualDelivery_DestinationDisabled(t *testing.T) { assert.Equal(t, 0, publisher.current, "should not attempt to publish to disabled destination") assert.Empty(t, retryScheduler.schedules, "should not schedule retry") assert.Empty(t, retryScheduler.canceled, "should not attempt to cancel retries") - assert.Empty(t, logPublisher.deliveries, "should not log delivery for pre-delivery error") + assert.Empty(t, logPublisher.entries, "should not log delivery for pre-delivery error") alertMonitor.AssertNotCalled(t, "HandleAttempt", mock.Anything, mock.Anything) } @@ -1219,8 +1219,8 @@ func TestMessageHandler_AlertMonitorError(t *testing.T) { assert.True(t, mockMsg.acked, "message should be acked despite alert monitor error") assert.False(t, mockMsg.nacked, 
"message should not be nacked despite alert monitor error") assert.Equal(t, 1, publisher.current, "should publish once") - require.Len(t, logPublisher.deliveries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusSuccess, logPublisher.deliveries[0].Delivery.Status, "delivery status should be OK") + require.Len(t, logPublisher.entries, 1, "should have one delivery") + assert.Equal(t, models.DeliveryStatusSuccess, logPublisher.entries[0].Delivery.Status, "delivery status should be OK") // Verify alert monitor was called but error was ignored // Wait for the HandleAttempt call to be made diff --git a/internal/deliverymq/mock_test.go b/internal/deliverymq/mock_test.go index ce4be80d..624d66e2 100644 --- a/internal/deliverymq/mock_test.go +++ b/internal/deliverymq/mock_test.go @@ -145,19 +145,19 @@ func (m *mockEventGetter) RetrieveEvent(ctx context.Context, req logstore.Retrie } type mockLogPublisher struct { - err error - deliveries []models.DeliveryEvent + err error + entries []models.LogEntry } func newMockLogPublisher(err error) *mockLogPublisher { return &mockLogPublisher{ - err: err, - deliveries: make([]models.DeliveryEvent, 0), + err: err, + entries: make([]models.LogEntry, 0), } } -func (m *mockLogPublisher) Publish(ctx context.Context, deliveryEvent models.DeliveryEvent) error { - m.deliveries = append(m.deliveries, deliveryEvent) +func (m *mockLogPublisher) Publish(ctx context.Context, entry models.LogEntry) error { + m.entries = append(m.entries, entry) return m.err } diff --git a/internal/logmq/logmq.go b/internal/logmq/logmq.go index 395824b2..540873a5 100644 --- a/internal/logmq/logmq.go +++ b/internal/logmq/logmq.go @@ -44,8 +44,8 @@ func (q *LogMQ) Init(ctx context.Context) (func(), error) { return q.queue.Init(ctx) } -func (q *LogMQ) Publish(ctx context.Context, event models.DeliveryEvent) error { - return q.queue.Publish(ctx, &event) +func (q *LogMQ) Publish(ctx context.Context, entry models.LogEntry) error { + return 
q.queue.Publish(ctx, &entry) } func (q *LogMQ) Subscribe(ctx context.Context) (mqs.Subscription, error) { diff --git a/internal/models/event.go b/internal/models/event.go index f6a554d4..b9f913b1 100644 --- a/internal/models/event.go +++ b/internal/models/event.go @@ -129,6 +129,30 @@ const ( DeliveryStatusFailed = "failed" ) +// LogEntry represents a message for the log queue. +// +// IMPORTANT: Both Event and Delivery are REQUIRED. The logstore requires both +// to exist for proper data consistency. The logmq consumer validates this +// requirement and rejects entries missing either field. +type LogEntry struct { + Event *Event `json:"event"` + Delivery *Delivery `json:"delivery"` +} + +var _ mqs.IncomingMessage = &LogEntry{} + +func (e *LogEntry) FromMessage(msg *mqs.Message) error { + return json.Unmarshal(msg.Body, e) +} + +func (e *LogEntry) ToMessage() (*mqs.Message, error) { + data, err := json.Marshal(e) + if err != nil { + return nil, err + } + return &mqs.Message{Body: data}, nil +} + type Delivery struct { ID string `json:"id"` TenantID string `json:"tenant_id"` diff --git a/internal/services/builder.go b/internal/services/builder.go index c415f4e3..beb717a3 100644 --- a/internal/services/builder.go +++ b/internal/services/builder.go @@ -451,18 +451,26 @@ func (b *ServiceBuilder) makeBatcher(logStore logstore.LogStore, itemCountThresh events := make([]*models.Event, 0, len(msgs)) deliveries := make([]*models.Delivery, 0, len(msgs)) for _, msg := range msgs { - deliveryEvent := models.DeliveryEvent{} - if err := deliveryEvent.FromMessage(msg); err != nil { - logger.Error("failed to parse delivery event", + entry := models.LogEntry{} + if err := entry.FromMessage(msg); err != nil { + logger.Error("failed to parse log entry", zap.Error(err), zap.String("message_id", msg.LoggableID)) nackAll() return } - events = append(events, &deliveryEvent.Event) - if deliveryEvent.Delivery != nil { - deliveries = append(deliveries, deliveryEvent.Delivery) + // Validate 
that both Event and Delivery are present. + // The logstore requires both for data consistency. + if entry.Event == nil || entry.Delivery == nil { + logger.Error("invalid log entry: both event and delivery are required", + zap.Bool("has_event", entry.Event != nil), + zap.Bool("has_delivery", entry.Delivery != nil), + zap.String("message_id", msg.LoggableID)) + msg.Nack() + continue } + events = append(events, entry.Event) + deliveries = append(deliveries, entry.Delivery) } if err := logStore.InsertMany(b.ctx, events, deliveries); err != nil { From 5165c205fc61bb1c10f7b0a886199241be13ee26 Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Thu, 22 Jan 2026 04:15:42 +0700 Subject: [PATCH 03/23] refactor: move batch processor from builder to logmq package --- internal/logmq/batchprocessor.go | 128 +++++++++++++ internal/logmq/batchprocessor_test.go | 252 ++++++++++++++++++++++++++ internal/logmq/messagehandler.go | 15 +- internal/services/builder.go | 86 +-------- 4 files changed, 394 insertions(+), 87 deletions(-) create mode 100644 internal/logmq/batchprocessor.go create mode 100644 internal/logmq/batchprocessor_test.go diff --git a/internal/logmq/batchprocessor.go b/internal/logmq/batchprocessor.go new file mode 100644 index 00000000..9463cea9 --- /dev/null +++ b/internal/logmq/batchprocessor.go @@ -0,0 +1,128 @@ +package logmq + +import ( + "context" + "errors" + "time" + + "github.com/hookdeck/outpost/internal/logging" + "github.com/hookdeck/outpost/internal/models" + "github.com/hookdeck/outpost/internal/mqs" + "github.com/mikestefanello/batcher" + "go.uber.org/zap" +) + +// ErrInvalidLogEntry is returned when a LogEntry is missing required fields. +var ErrInvalidLogEntry = errors.New("invalid log entry: both event and delivery are required") + +// LogStore defines the interface for persisting log entries. +// This is a consumer-defined interface containing only what logmq needs. 
+type LogStore interface { + InsertMany(ctx context.Context, events []*models.Event, deliveries []*models.Delivery) error +} + +// BatchProcessorConfig configures the batch processor. +type BatchProcessorConfig struct { + ItemCountThreshold int + DelayThreshold time.Duration +} + +// BatchProcessor batches log entries and writes them to the log store. +type BatchProcessor struct { + ctx context.Context + logger *logging.Logger + logStore LogStore + batcher *batcher.Batcher[*mqs.Message] +} + +// NewBatchProcessor creates a new batch processor for log entries. +func NewBatchProcessor(ctx context.Context, logger *logging.Logger, logStore LogStore, cfg BatchProcessorConfig) (*BatchProcessor, error) { + bp := &BatchProcessor{ + ctx: ctx, + logger: logger, + logStore: logStore, + } + + b, err := batcher.NewBatcher(batcher.Config[*mqs.Message]{ + GroupCountThreshold: 2, + ItemCountThreshold: cfg.ItemCountThreshold, + DelayThreshold: cfg.DelayThreshold, + NumGoroutines: 1, + Processor: bp.processBatch, + }) + if err != nil { + return nil, err + } + + bp.batcher = b + return bp, nil +} + +// Add adds a message to the batch. +func (bp *BatchProcessor) Add(ctx context.Context, msg *mqs.Message) error { + bp.batcher.Add("", msg) + return nil +} + +// Shutdown gracefully shuts down the batch processor. +func (bp *BatchProcessor) Shutdown() { + bp.batcher.Shutdown() +} + +// processBatch processes a batch of messages. 
+func (bp *BatchProcessor) processBatch(_ string, msgs []*mqs.Message) { + logger := bp.logger.Ctx(bp.ctx) + logger.Info("processing batch", zap.Int("message_count", len(msgs))) + + events := make([]*models.Event, 0, len(msgs)) + deliveries := make([]*models.Delivery, 0, len(msgs)) + validMsgs := make([]*mqs.Message, 0, len(msgs)) + + for _, msg := range msgs { + entry := models.LogEntry{} + if err := entry.FromMessage(msg); err != nil { + logger.Error("failed to parse log entry", + zap.Error(err), + zap.String("message_id", msg.LoggableID)) + msg.Nack() + continue + } + + // Validate that both Event and Delivery are present. + // The logstore requires both for data consistency. + if entry.Event == nil || entry.Delivery == nil { + logger.Error("invalid log entry: both event and delivery are required", + zap.Bool("has_event", entry.Event != nil), + zap.Bool("has_delivery", entry.Delivery != nil), + zap.String("message_id", msg.LoggableID)) + msg.Nack() + continue + } + + events = append(events, entry.Event) + deliveries = append(deliveries, entry.Delivery) + validMsgs = append(validMsgs, msg) + } + + // Nothing valid to insert + if len(events) == 0 { + return + } + + if err := bp.logStore.InsertMany(bp.ctx, events, deliveries); err != nil { + logger.Error("failed to insert events/deliveries", + zap.Error(err), + zap.Int("event_count", len(events)), + zap.Int("delivery_count", len(deliveries))) + for _, msg := range validMsgs { + msg.Nack() + } + return + } + + logger.Info("batch processed successfully", zap.Int("count", len(validMsgs))) + + for _, msg := range validMsgs { + msg.Ack() + } +} diff --git a/internal/logmq/batchprocessor_test.go b/internal/logmq/batchprocessor_test.go new file mode 100644 index 00000000..59df42de --- /dev/null +++ b/internal/logmq/batchprocessor_test.go @@ -0,0 +1,252 @@ +package logmq_test + +import ( + "context" + "encoding/json" + "sync" + "testing" + "time" + + "github.com/hookdeck/outpost/internal/logmq" + 
"github.com/hookdeck/outpost/internal/models" + "github.com/hookdeck/outpost/internal/mqs" + "github.com/hookdeck/outpost/internal/util/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type mockLogStore struct { + mu sync.Mutex + events []*models.Event + deliveries []*models.Delivery + err error +} + +func (m *mockLogStore) InsertMany(ctx context.Context, events []*models.Event, deliveries []*models.Delivery) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.err != nil { + return m.err + } + m.events = append(m.events, events...) + m.deliveries = append(m.deliveries, deliveries...) + return nil +} + +func (m *mockLogStore) getInserted() (events []*models.Event, deliveries []*models.Delivery) { + m.mu.Lock() + defer m.mu.Unlock() + return m.events, m.deliveries +} + +// mockQueueMessage implements mqs.QueueMessage for testing. +type mockQueueMessage struct { + acked bool + nacked bool +} + +func (m *mockQueueMessage) Ack() { m.acked = true } +func (m *mockQueueMessage) Nack() { m.nacked = true } + +func newMockMessage(entry models.LogEntry) (*mockQueueMessage, *mqs.Message) { + body, _ := json.Marshal(entry) + mock := &mockQueueMessage{} + msg := &mqs.Message{ + QueueMessage: mock, + Body: body, + LoggableID: "test-msg", + } + return mock, msg +} + +func newMockMessageFromBytes(body []byte) (*mockQueueMessage, *mqs.Message) { + mock := &mockQueueMessage{} + msg := &mqs.Message{ + QueueMessage: mock, + Body: body, + LoggableID: "test-msg", + } + return mock, msg +} + +func TestBatchProcessor_ValidEntry(t *testing.T) { + ctx := context.Background() + logger := testutil.CreateTestLogger(t) + logStore := &mockLogStore{} + + bp, err := logmq.NewBatchProcessor(ctx, logger, logStore, logmq.BatchProcessorConfig{ + ItemCountThreshold: 1, + DelayThreshold: 10 * time.Millisecond, + }) + require.NoError(t, err) + defer bp.Shutdown() + + event := testutil.EventFactory.Any() + delivery := testutil.DeliveryFactory.Any() + entry := 
models.LogEntry{ + Event: &event, + Delivery: &delivery, + } + + mock, msg := newMockMessage(entry) + err = bp.Add(ctx, msg) + require.NoError(t, err) + + // Wait for batch to process + time.Sleep(50 * time.Millisecond) + + assert.True(t, mock.acked, "valid message should be acked") + assert.False(t, mock.nacked, "valid message should not be nacked") + + events, deliveries := logStore.getInserted() + assert.Len(t, events, 1) + assert.Len(t, deliveries, 1) +} + +func TestBatchProcessor_InvalidEntry_MissingEvent(t *testing.T) { + ctx := context.Background() + logger := testutil.CreateTestLogger(t) + logStore := &mockLogStore{} + + bp, err := logmq.NewBatchProcessor(ctx, logger, logStore, logmq.BatchProcessorConfig{ + ItemCountThreshold: 1, + DelayThreshold: 10 * time.Millisecond, + }) + require.NoError(t, err) + defer bp.Shutdown() + + delivery := testutil.DeliveryFactory.Any() + entry := models.LogEntry{ + Event: nil, // Missing event + Delivery: &delivery, + } + + mock, msg := newMockMessage(entry) + err = bp.Add(ctx, msg) + require.NoError(t, err) + + // Wait for batch to process + time.Sleep(50 * time.Millisecond) + + assert.False(t, mock.acked, "invalid message should not be acked") + assert.True(t, mock.nacked, "invalid message should be nacked") + + events, deliveries := logStore.getInserted() + assert.Empty(t, events, "no events should be inserted for invalid entry") + assert.Empty(t, deliveries, "no deliveries should be inserted for invalid entry") +} + +func TestBatchProcessor_InvalidEntry_MissingDelivery(t *testing.T) { + ctx := context.Background() + logger := testutil.CreateTestLogger(t) + logStore := &mockLogStore{} + + bp, err := logmq.NewBatchProcessor(ctx, logger, logStore, logmq.BatchProcessorConfig{ + ItemCountThreshold: 1, + DelayThreshold: 10 * time.Millisecond, + }) + require.NoError(t, err) + defer bp.Shutdown() + + event := testutil.EventFactory.Any() + entry := models.LogEntry{ + Event: &event, + Delivery: nil, // Missing delivery + } + + 
mock, msg := newMockMessage(entry) + err = bp.Add(ctx, msg) + require.NoError(t, err) + + // Wait for batch to process + time.Sleep(50 * time.Millisecond) + + assert.False(t, mock.acked, "invalid message should not be acked") + assert.True(t, mock.nacked, "invalid message should be nacked") + + events, deliveries := logStore.getInserted() + assert.Empty(t, events, "no events should be inserted for invalid entry") + assert.Empty(t, deliveries, "no deliveries should be inserted for invalid entry") +} + +func TestBatchProcessor_InvalidEntry_DoesNotBlockBatch(t *testing.T) { + ctx := context.Background() + logger := testutil.CreateTestLogger(t) + logStore := &mockLogStore{} + + bp, err := logmq.NewBatchProcessor(ctx, logger, logStore, logmq.BatchProcessorConfig{ + ItemCountThreshold: 3, // Wait for 3 messages before processing + DelayThreshold: 10 * time.Millisecond, + }) + require.NoError(t, err) + defer bp.Shutdown() + + // Create valid entry 1 + event1 := testutil.EventFactory.Any() + delivery1 := testutil.DeliveryFactory.Any() + validEntry1 := models.LogEntry{Event: &event1, Delivery: &delivery1} + mock1, msg1 := newMockMessage(validEntry1) + + // Create invalid entry (missing event) + delivery2 := testutil.DeliveryFactory.Any() + invalidEntry := models.LogEntry{Event: nil, Delivery: &delivery2} + mock2, msg2 := newMockMessage(invalidEntry) + + // Create valid entry 2 + event3 := testutil.EventFactory.Any() + delivery3 := testutil.DeliveryFactory.Any() + validEntry2 := models.LogEntry{Event: &event3, Delivery: &delivery3} + mock3, msg3 := newMockMessage(validEntry2) + + // Add all messages + require.NoError(t, bp.Add(ctx, msg1)) + require.NoError(t, bp.Add(ctx, msg2)) + require.NoError(t, bp.Add(ctx, msg3)) + + // Wait for batch to process + time.Sleep(50 * time.Millisecond) + + // Valid messages should be acked + assert.True(t, mock1.acked, "valid message 1 should be acked") + assert.False(t, mock1.nacked, "valid message 1 should not be nacked") + + // Invalid 
message should be nacked + assert.False(t, mock2.acked, "invalid message should not be acked") + assert.True(t, mock2.nacked, "invalid message should be nacked") + + // Valid message 2 should be acked (not blocked by invalid message) + assert.True(t, mock3.acked, "valid message 2 should be acked") + assert.False(t, mock3.nacked, "valid message 2 should not be nacked") + + // Only valid entries should be inserted + events, deliveries := logStore.getInserted() + assert.Len(t, events, 2, "only 2 valid events should be inserted") + assert.Len(t, deliveries, 2, "only 2 valid deliveries should be inserted") +} + +func TestBatchProcessor_MalformedJSON(t *testing.T) { + ctx := context.Background() + logger := testutil.CreateTestLogger(t) + logStore := &mockLogStore{} + + bp, err := logmq.NewBatchProcessor(ctx, logger, logStore, logmq.BatchProcessorConfig{ + ItemCountThreshold: 1, + DelayThreshold: 10 * time.Millisecond, + }) + require.NoError(t, err) + defer bp.Shutdown() + + mock, msg := newMockMessageFromBytes([]byte("not valid json")) + err = bp.Add(ctx, msg) + require.NoError(t, err) + + // Wait for batch to process + time.Sleep(50 * time.Millisecond) + + assert.False(t, mock.acked, "malformed message should not be acked") + assert.True(t, mock.nacked, "malformed message should be nacked") + + events, deliveries := logStore.getInserted() + assert.Empty(t, events) + assert.Empty(t, deliveries) +} diff --git a/internal/logmq/messagehandler.go b/internal/logmq/messagehandler.go index 6bee0e4f..024e71b0 100644 --- a/internal/logmq/messagehandler.go +++ b/internal/logmq/messagehandler.go @@ -8,27 +8,28 @@ import ( "github.com/hookdeck/outpost/internal/mqs" ) -type batcher interface { +// BatchAdder is the interface for adding messages to a batch processor. 
+type BatchAdder interface { Add(ctx context.Context, msg *mqs.Message) error } type messageHandler struct { - logger *logging.Logger - batcher batcher + logger *logging.Logger + batchAdder BatchAdder } var _ consumer.MessageHandler = (*messageHandler)(nil) -func NewMessageHandler(logger *logging.Logger, batcher batcher) consumer.MessageHandler { +func NewMessageHandler(logger *logging.Logger, batchAdder BatchAdder) consumer.MessageHandler { return &messageHandler{ - logger: logger, - batcher: batcher, + logger: logger, + batchAdder: batchAdder, } } func (h *messageHandler) Handle(ctx context.Context, msg *mqs.Message) error { logger := h.logger.Ctx(ctx) logger.Info("logmq handler") - h.batcher.Add(ctx, msg) + h.batchAdder.Add(ctx, msg) return nil } diff --git a/internal/services/builder.go b/internal/services/builder.go index beb717a3..ce01dae3 100644 --- a/internal/services/builder.go +++ b/internal/services/builder.go @@ -19,13 +19,11 @@ import ( "github.com/hookdeck/outpost/internal/logmq" "github.com/hookdeck/outpost/internal/logstore" "github.com/hookdeck/outpost/internal/models" - "github.com/hookdeck/outpost/internal/mqs" "github.com/hookdeck/outpost/internal/publishmq" "github.com/hookdeck/outpost/internal/redis" "github.com/hookdeck/outpost/internal/scheduler" "github.com/hookdeck/outpost/internal/telemetry" "github.com/hookdeck/outpost/internal/worker" - "github.com/mikestefanello/batcher" "go.uber.org/zap" ) @@ -368,17 +366,20 @@ func (b *ServiceBuilder) BuildLogWorker(baseRouter *gin.Engine) error { } b.logger.Debug("creating log batcher") - batcher, err := b.makeBatcher(svc.logStore, batcherCfg.ItemCountThreshold, batcherCfg.DelayThreshold) + batchProcessor, err := logmq.NewBatchProcessor(b.ctx, b.logger, svc.logStore, logmq.BatchProcessorConfig{ + ItemCountThreshold: batcherCfg.ItemCountThreshold, + DelayThreshold: batcherCfg.DelayThreshold, + }) if err != nil { b.logger.Error("failed to create batcher", zap.Error(err)) return err } svc.cleanupFuncs 
= append(svc.cleanupFuncs, func(ctx context.Context, logger *logging.LoggerWithCtx) { - batcher.Shutdown() + batchProcessor.Shutdown() }) // Create log handler with batcher - handler := logmq.NewMessageHandler(b.logger, &handlerBatcherImpl{batcher: batcher}) + handler := logmq.NewMessageHandler(b.logger, batchProcessor) // Initialize LogMQ b.logger.Debug("configuring log message queue") @@ -431,81 +432,6 @@ func (d *destinationDisabler) DisableDestination(ctx context.Context, tenantID, return d.entityStore.UpsertDestination(ctx, *destination) } -// makeBatcher creates a batcher for batching log writes -func (b *ServiceBuilder) makeBatcher(logStore logstore.LogStore, itemCountThreshold int, delayThreshold time.Duration) (*batcher.Batcher[*mqs.Message], error) { - batchr, err := batcher.NewBatcher(batcher.Config[*mqs.Message]{ - GroupCountThreshold: 2, - ItemCountThreshold: itemCountThreshold, - DelayThreshold: delayThreshold, - NumGoroutines: 1, - Processor: func(_ string, msgs []*mqs.Message) { - logger := b.logger.Ctx(b.ctx) - logger.Info("processing batch", zap.Int("message_count", len(msgs))) - - nackAll := func() { - for _, msg := range msgs { - msg.Nack() - } - } - - events := make([]*models.Event, 0, len(msgs)) - deliveries := make([]*models.Delivery, 0, len(msgs)) - for _, msg := range msgs { - entry := models.LogEntry{} - if err := entry.FromMessage(msg); err != nil { - logger.Error("failed to parse log entry", - zap.Error(err), - zap.String("message_id", msg.LoggableID)) - nackAll() - return - } - // Validate that both Event and Delivery are present. - // The logstore requires both for data consistency. 
- if entry.Event == nil || entry.Delivery == nil { - logger.Error("invalid log entry: both event and delivery are required", - zap.Bool("has_event", entry.Event != nil), - zap.Bool("has_delivery", entry.Delivery != nil), - zap.String("message_id", msg.LoggableID)) - msg.Nack() - continue - } - events = append(events, entry.Event) - deliveries = append(deliveries, entry.Delivery) - } - - if err := logStore.InsertMany(b.ctx, events, deliveries); err != nil { - logger.Error("failed to insert events/deliveries", - zap.Error(err), - zap.Int("event_count", len(events)), - zap.Int("delivery_count", len(deliveries))) - nackAll() - return - } - - logger.Info("batch processed successfully", zap.Int("count", len(msgs))) - - for _, msg := range msgs { - msg.Ack() - } - }, - }) - if err != nil { - b.logger.Ctx(b.ctx).Error("failed to create batcher", zap.Error(err)) - return nil, err - } - return batchr, nil -} - -// handlerBatcherImpl implements the batcher interface expected by logmq.MessageHandler -type handlerBatcherImpl struct { - batcher *batcher.Batcher[*mqs.Message] -} - -func (hb *handlerBatcherImpl) Add(ctx context.Context, msg *mqs.Message) error { - hb.batcher.Add("", msg) - return nil -} - // Helper methods for serviceInstance to initialize common dependencies func (s *serviceInstance) initRedis(ctx context.Context, cfg *config.Config, logger *logging.Logger) error { From 9ed6ff0cfd9f942cb876e0429825015fa4c88138 Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Thu, 22 Jan 2026 04:22:18 +0700 Subject: [PATCH 04/23] refactor: rename RetryMessage to RetryTask Co-Authored-By: Claude Opus 4.5 --- internal/deliverymq/messagehandler.go | 6 +++--- internal/deliverymq/retry.go | 18 +++++++++--------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/internal/deliverymq/messagehandler.go b/internal/deliverymq/messagehandler.go index 1bd6f4d1..e823f21c 100644 --- a/internal/deliverymq/messagehandler.go +++ b/internal/deliverymq/messagehandler.go @@ -422,13 
+422,13 @@ func (h *messageHandler) shouldNackDeliveryError(err error) bool { func (h *messageHandler) scheduleRetry(ctx context.Context, deliveryEvent models.DeliveryEvent) error { backoffDuration := h.retryBackoff.Duration(deliveryEvent.Attempt) - retryMessage := RetryMessageFromDeliveryEvent(deliveryEvent) - retryMessageStr, err := retryMessage.ToString() + retryTask := RetryTaskFromDeliveryEvent(deliveryEvent) + retryTaskStr, err := retryTask.ToString() if err != nil { return err } - if err := h.retryScheduler.Schedule(ctx, retryMessageStr, backoffDuration, scheduler.WithTaskID(deliveryEvent.GetRetryID())); err != nil { + if err := h.retryScheduler.Schedule(ctx, retryTaskStr, backoffDuration, scheduler.WithTaskID(deliveryEvent.GetRetryID())); err != nil { h.logger.Ctx(ctx).Error("failed to schedule retry", zap.Error(err), zap.String("delivery_event_id", deliveryEvent.ID), diff --git a/internal/deliverymq/retry.go b/internal/deliverymq/retry.go index e4a5b086..1cec61e7 100644 --- a/internal/deliverymq/retry.go +++ b/internal/deliverymq/retry.go @@ -42,11 +42,11 @@ func NewRetryScheduler(deliverymq *DeliveryMQ, redisConfig *redis.RedisConfig, d // Define execution function exec := func(ctx context.Context, msg string) error { - retryMessage := RetryMessage{} - if err := retryMessage.FromString(msg); err != nil { + retryTask := RetryTask{} + if err := retryTask.FromString(msg); err != nil { return err } - deliveryEvent := retryMessage.ToDeliveryEvent() + deliveryEvent := retryTask.ToDeliveryEvent() if err := deliverymq.Publish(ctx, deliveryEvent); err != nil { return err } @@ -56,7 +56,7 @@ func NewRetryScheduler(deliverymq *DeliveryMQ, redisConfig *redis.RedisConfig, d return scheduler.New("deliverymq-retry", rsmqClient, exec, scheduler.WithPollBackoff(pollBackoff)), nil } -type RetryMessage struct { +type RetryTask struct { DeliveryEventID string EventID string TenantID string @@ -65,7 +65,7 @@ type RetryMessage struct { Telemetry *models.DeliveryEventTelemetry 
} -func (m *RetryMessage) ToString() (string, error) { +func (m *RetryTask) ToString() (string, error) { json, err := json.Marshal(m) if err != nil { return "", err @@ -73,11 +73,11 @@ func (m *RetryMessage) ToString() (string, error) { return string(json), nil } -func (m *RetryMessage) FromString(str string) error { +func (m *RetryTask) FromString(str string) error { return json.Unmarshal([]byte(str), &m) } -func (m *RetryMessage) ToDeliveryEvent() models.DeliveryEvent { +func (m *RetryTask) ToDeliveryEvent() models.DeliveryEvent { return models.DeliveryEvent{ ID: m.DeliveryEventID, Attempt: m.Attempt, @@ -87,8 +87,8 @@ func (m *RetryMessage) ToDeliveryEvent() models.DeliveryEvent { } } -func RetryMessageFromDeliveryEvent(deliveryEvent models.DeliveryEvent) RetryMessage { - return RetryMessage{ +func RetryTaskFromDeliveryEvent(deliveryEvent models.DeliveryEvent) RetryTask { + return RetryTask{ DeliveryEventID: deliveryEvent.ID, EventID: deliveryEvent.Event.ID, TenantID: deliveryEvent.Event.TenantID, From 4bcec3a3b50fb31a5f8b6ee70594be96468f685d Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Thu, 22 Jan 2026 05:02:47 +0700 Subject: [PATCH 05/23] refactor: introduce DeliveryTask and update message queue flow - Add DeliveryTask struct with IdempotencyKey() and RetryID() methods - Update deliverymq to publish/consume DeliveryTask instead of DeliveryEvent - Update publishmq to create and enqueue DeliveryTask - Update RetryTask to convert to DeliveryTask - Update API handlers, eventtracer, alert, emetrics to use DeliveryTask - Fix Delivery fields (TenantID, Attempt, Manual) not being set before logging - Add :manual suffix to idempotency key for manual retries Co-Authored-By: Claude Opus 4.5 --- internal/alert/monitor.go | 16 +- internal/alert/monitor_test.go | 24 +-- internal/apirouter/legacy_handlers.go | 6 +- internal/apirouter/retry_handlers.go | 6 +- internal/deliverymq/deliverymq.go | 4 +- internal/deliverymq/messagehandler.go | 191 ++++++++++----------- 
internal/deliverymq/messagehandler_test.go | 130 ++++++-------- internal/deliverymq/mock_test.go | 6 +- internal/deliverymq/retry.go | 35 ++-- internal/deliverymq/retry_test.go | 20 +-- internal/emetrics/emetrics.go | 4 +- internal/eventtracer/eventtracer.go | 42 ++--- internal/eventtracer/noop.go | 4 +- internal/models/event.go | 56 ++++++ internal/publishmq/eventhandler.go | 26 ++- internal/publishmq/eventhandler_test.go | 36 ++-- internal/util/testutil/eventracer.go | 16 +- 17 files changed, 316 insertions(+), 306 deletions(-) diff --git a/internal/alert/monitor.go b/internal/alert/monitor.go index 6694e06c..8e4b5934 100644 --- a/internal/alert/monitor.go +++ b/internal/alert/monitor.go @@ -83,7 +83,7 @@ func WithDeploymentID(deploymentID string) AlertOption { // DeliveryAttempt represents a single delivery attempt type DeliveryAttempt struct { Success bool - DeliveryEvent *models.DeliveryEvent + DeliveryTask *models.DeliveryTask Destination *AlertDestination Timestamp time.Time DeliveryResponse map[string]interface{} @@ -156,10 +156,10 @@ func (m *alertMonitor) HandleAttempt(ctx context.Context, attempt DeliveryAttemp alert := NewConsecutiveFailureAlert(ConsecutiveFailureData{ Event: AlertedEvent{ - ID: attempt.DeliveryEvent.Event.ID, - Topic: attempt.DeliveryEvent.Event.Topic, - Metadata: attempt.DeliveryEvent.Event.Metadata, - Data: attempt.DeliveryEvent.Event.Data, + ID: attempt.DeliveryTask.Event.ID, + Topic: attempt.DeliveryTask.Event.Topic, + Metadata: attempt.DeliveryTask.Event.Metadata, + Data: attempt.DeliveryTask.Event.Data, }, MaxConsecutiveFailures: m.autoDisableFailureCount, ConsecutiveFailures: count, @@ -175,7 +175,7 @@ func (m *alertMonitor) HandleAttempt(ctx context.Context, attempt DeliveryAttemp } m.logger.Ctx(ctx).Audit("destination disabled", - zap.String("event_id", attempt.DeliveryEvent.Event.ID), + zap.String("event_id", attempt.DeliveryTask.Event.ID), zap.String("tenant_id", attempt.Destination.TenantID), zap.String("destination_id", 
attempt.Destination.ID), zap.String("destination_type", attempt.Destination.Type), @@ -187,7 +187,7 @@ func (m *alertMonitor) HandleAttempt(ctx context.Context, attempt DeliveryAttemp if err := m.notifier.Notify(ctx, alert); err != nil { m.logger.Ctx(ctx).Error("failed to send alert", zap.Error(err), - zap.String("event_id", attempt.DeliveryEvent.Event.ID), + zap.String("event_id", attempt.DeliveryTask.Event.ID), zap.String("tenant_id", attempt.Destination.TenantID), zap.String("destination_id", attempt.Destination.ID), zap.String("destination_type", attempt.Destination.Type), @@ -196,7 +196,7 @@ func (m *alertMonitor) HandleAttempt(ctx context.Context, attempt DeliveryAttemp } m.logger.Ctx(ctx).Audit("alert sent", - zap.String("event_id", attempt.DeliveryEvent.Event.ID), + zap.String("event_id", attempt.DeliveryTask.Event.ID), zap.String("tenant_id", attempt.Destination.TenantID), zap.String("destination_id", attempt.Destination.ID), zap.String("destination_type", attempt.Destination.Type), diff --git a/internal/alert/monitor_test.go b/internal/alert/monitor_test.go index 10f6003e..16372475 100644 --- a/internal/alert/monitor_test.go +++ b/internal/alert/monitor_test.go @@ -53,11 +53,11 @@ func TestAlertMonitor_ConsecutiveFailures_MaxFailures(t *testing.T) { dest := &alert.AlertDestination{ID: "dest_1", TenantID: "tenant_1"} event := &models.Event{Topic: "test.event"} - deliveryEvent := &models.DeliveryEvent{Event: *event} + task := &models.DeliveryTask{Event: *event} attempt := alert.DeliveryAttempt{ - Success: false, - DeliveryEvent: deliveryEvent, - Destination: dest, + Success: false, + DeliveryTask: task, + Destination: dest, DeliveryResponse: map[string]interface{}{ "status": "500", "data": map[string]any{"error": "test error"}, @@ -120,11 +120,11 @@ func TestAlertMonitor_ConsecutiveFailures_Reset(t *testing.T) { dest := &alert.AlertDestination{ID: "dest_1", TenantID: "tenant_1"} event := &models.Event{Topic: "test.event"} - deliveryEvent := 
&models.DeliveryEvent{Event: *event} + task := &models.DeliveryTask{Event: *event} failedAttempt := alert.DeliveryAttempt{ - Success: false, - DeliveryEvent: deliveryEvent, - Destination: dest, + Success: false, + DeliveryTask: task, + Destination: dest, DeliveryResponse: map[string]interface{}{ "status": "500", "data": map[string]any{"error": "test error"}, @@ -193,11 +193,11 @@ func TestAlertMonitor_ConsecutiveFailures_AboveThreshold(t *testing.T) { dest := &alert.AlertDestination{ID: "dest_above", TenantID: "tenant_above"} event := &models.Event{Topic: "test.event"} - deliveryEvent := &models.DeliveryEvent{Event: *event} + task := &models.DeliveryTask{Event: *event} attempt := alert.DeliveryAttempt{ - Success: false, - DeliveryEvent: deliveryEvent, - Destination: dest, + Success: false, + DeliveryTask: task, + Destination: dest, DeliveryResponse: map[string]interface{}{ "status": "500", }, diff --git a/internal/apirouter/legacy_handlers.go b/internal/apirouter/legacy_handlers.go index c94f9bcb..7e250c3f 100644 --- a/internal/apirouter/legacy_handlers.go +++ b/internal/apirouter/legacy_handlers.go @@ -90,10 +90,10 @@ func (h *LegacyHandlers) RetryByEventDestination(c *gin.Context) { return } - // 3. Create and publish retry delivery event - deliveryEvent := models.NewManualDeliveryEvent(*event, destination.ID) + // 3. Create and publish retry delivery task + task := models.NewManualDeliveryTask(*event, destination.ID) - if err := h.deliveryMQ.Publish(c.Request.Context(), deliveryEvent); err != nil { + if err := h.deliveryMQ.Publish(c.Request.Context(), task); err != nil { AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) return } diff --git a/internal/apirouter/retry_handlers.go b/internal/apirouter/retry_handlers.go index 302854d8..ec2844c6 100644 --- a/internal/apirouter/retry_handlers.go +++ b/internal/apirouter/retry_handlers.go @@ -78,10 +78,10 @@ func (h *RetryHandlers) RetryDelivery(c *gin.Context) { return } - // 3. 
Create and publish retry delivery event - retryDeliveryEvent := models.NewManualDeliveryEvent(*deliveryRecord.Event, deliveryRecord.Delivery.DestinationID) + // 3. Create and publish manual delivery task + task := models.NewManualDeliveryTask(*deliveryRecord.Event, deliveryRecord.Delivery.DestinationID) - if err := h.deliveryMQ.Publish(c.Request.Context(), retryDeliveryEvent); err != nil { + if err := h.deliveryMQ.Publish(c.Request.Context(), task); err != nil { AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) return } diff --git a/internal/deliverymq/deliverymq.go b/internal/deliverymq/deliverymq.go index eb3b2a7a..57dbb805 100644 --- a/internal/deliverymq/deliverymq.go +++ b/internal/deliverymq/deliverymq.go @@ -44,8 +44,8 @@ func (q *DeliveryMQ) Init(ctx context.Context) (func(), error) { return q.queue.Init(ctx) } -func (q *DeliveryMQ) Publish(ctx context.Context, event models.DeliveryEvent) error { - return q.queue.Publish(ctx, &event) +func (q *DeliveryMQ) Publish(ctx context.Context, task models.DeliveryTask) error { + return q.queue.Publish(ctx, &task) } func (q *DeliveryMQ) Subscribe(ctx context.Context) (mqs.Subscription, error) { diff --git a/internal/deliverymq/messagehandler.go b/internal/deliverymq/messagehandler.go index e823f21c..e56635eb 100644 --- a/internal/deliverymq/messagehandler.go +++ b/internal/deliverymq/messagehandler.go @@ -20,8 +20,8 @@ import ( "go.uber.org/zap" ) -func idempotencyKeyFromDeliveryEvent(deliveryEvent models.DeliveryEvent) string { - return "idempotency:deliverymq:" + deliveryEvent.ID +func idempotencyKeyFromDeliveryTask(task models.DeliveryTask) string { + return "idempotency:deliverymq:" + task.IdempotencyKey() } var ( @@ -101,7 +101,7 @@ type EventGetter interface { } type DeliveryTracer interface { - Deliver(ctx context.Context, deliveryEvent *models.DeliveryEvent, destination *models.Destination) (context.Context, trace.Span) + Deliver(ctx context.Context, task *models.DeliveryTask, 
destination *models.Destination) (context.Context, trace.Span) } type AlertMonitor interface { @@ -137,34 +137,33 @@ func NewMessageHandler( } func (h *messageHandler) Handle(ctx context.Context, msg *mqs.Message) error { - deliveryEvent := models.DeliveryEvent{} + task := models.DeliveryTask{} // Parse message - if err := deliveryEvent.FromMessage(msg); err != nil { + if err := task.FromMessage(msg); err != nil { return h.handleError(msg, &PreDeliveryError{err: err}) } - h.logger.Ctx(ctx).Info("processing delivery event", - zap.String("delivery_event_id", deliveryEvent.ID), - zap.String("event_id", deliveryEvent.Event.ID), - zap.String("tenant_id", deliveryEvent.Event.TenantID), - zap.String("destination_id", deliveryEvent.DestinationID), - zap.Int("attempt", deliveryEvent.Attempt)) + h.logger.Ctx(ctx).Info("processing delivery task", + zap.String("event_id", task.Event.ID), + zap.String("tenant_id", task.Event.TenantID), + zap.String("destination_id", task.DestinationID), + zap.Int("attempt", task.Attempt)) // Ensure event data - if err := h.ensureDeliveryEvent(ctx, &deliveryEvent); err != nil { + if err := h.ensureDeliveryTask(ctx, &task); err != nil { return h.handleError(msg, &PreDeliveryError{err: err}) } // Get destination - destination, err := h.ensurePublishableDestination(ctx, deliveryEvent) + destination, err := h.ensurePublishableDestination(ctx, task) if err != nil { return h.handleError(msg, &PreDeliveryError{err: err}) } // Handle delivery - err = h.idempotence.Exec(ctx, idempotencyKeyFromDeliveryEvent(deliveryEvent), func(ctx context.Context) error { - return h.doHandle(ctx, deliveryEvent, destination) + err = h.idempotence.Exec(ctx, idempotencyKeyFromDeliveryTask(task), func(ctx context.Context) error { + return h.doHandle(ctx, task, destination) }) return h.handleError(msg, err) } @@ -187,11 +186,11 @@ func (h *messageHandler) handleError(msg *mqs.Message, err error) error { return err } -func (h *messageHandler) doHandle(ctx context.Context, 
deliveryEvent models.DeliveryEvent, destination *models.Destination) error { - _, span := h.eventTracer.Deliver(ctx, &deliveryEvent, destination) +func (h *messageHandler) doHandle(ctx context.Context, task models.DeliveryTask, destination *models.Destination) error { + _, span := h.eventTracer.Deliver(ctx, &task, destination) defer span.End() - delivery, err := h.publisher.PublishEvent(ctx, destination, &deliveryEvent.Event) + delivery, err := h.publisher.PublishEvent(ctx, destination, &task.Event) if err != nil { // If delivery is nil, it means no delivery was made. // This is an unexpected error and considered a pre-delivery error. @@ -201,78 +200,75 @@ func (h *messageHandler) doHandle(ctx context.Context, deliveryEvent models.Deli h.logger.Ctx(ctx).Error("failed to publish event", zap.Error(err), - zap.String("delivery_event_id", deliveryEvent.ID), zap.String("delivery_id", delivery.ID), - zap.String("event_id", deliveryEvent.Event.ID), - zap.String("tenant_id", deliveryEvent.Event.TenantID), + zap.String("event_id", task.Event.ID), + zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), zap.String("destination_type", destination.Type)) deliveryErr := &DeliveryError{err: err} - if h.shouldScheduleRetry(deliveryEvent, err) { - if retryErr := h.scheduleRetry(ctx, deliveryEvent); retryErr != nil { - return h.logDeliveryResult(ctx, &deliveryEvent, destination, delivery, errors.Join(err, retryErr)) + if h.shouldScheduleRetry(task, err) { + if retryErr := h.scheduleRetry(ctx, task); retryErr != nil { + return h.logDeliveryResult(ctx, &task, destination, delivery, errors.Join(err, retryErr)) } } - return h.logDeliveryResult(ctx, &deliveryEvent, destination, delivery, deliveryErr) + return h.logDeliveryResult(ctx, &task, destination, delivery, deliveryErr) } // Handle successful delivery - if deliveryEvent.Manual { + if task.Manual { logger := h.logger.Ctx(ctx) - if err := h.retryScheduler.Cancel(ctx, deliveryEvent.GetRetryID()); 
err != nil { + if err := h.retryScheduler.Cancel(ctx, models.RetryID(task.Event.ID, task.DestinationID)); err != nil { h.logger.Ctx(ctx).Error("failed to cancel scheduled retry", zap.Error(err), - zap.String("delivery_event_id", deliveryEvent.ID), zap.String("delivery_id", delivery.ID), - zap.String("event_id", deliveryEvent.Event.ID), - zap.String("tenant_id", deliveryEvent.Event.TenantID), + zap.String("event_id", task.Event.ID), + zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), zap.String("destination_type", destination.Type), - zap.String("retry_id", deliveryEvent.GetRetryID())) - return h.logDeliveryResult(ctx, &deliveryEvent, destination, delivery, err) + zap.String("retry_id", models.RetryID(task.Event.ID, task.DestinationID))) + return h.logDeliveryResult(ctx, &task, destination, delivery, err) } logger.Audit("scheduled retry canceled", - zap.String("delivery_event_id", deliveryEvent.ID), zap.String("delivery_id", delivery.ID), - zap.String("event_id", deliveryEvent.Event.ID), - zap.String("tenant_id", deliveryEvent.Event.TenantID), + zap.String("event_id", task.Event.ID), + zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), zap.String("destination_type", destination.Type), - zap.String("retry_id", deliveryEvent.GetRetryID())) + zap.String("retry_id", models.RetryID(task.Event.ID, task.DestinationID))) } - return h.logDeliveryResult(ctx, &deliveryEvent, destination, delivery, nil) + return h.logDeliveryResult(ctx, &task, destination, delivery, nil) } -func (h *messageHandler) logDeliveryResult(ctx context.Context, deliveryEvent *models.DeliveryEvent, destination *models.Destination, delivery *models.Delivery, err error) error { +func (h *messageHandler) logDeliveryResult(ctx context.Context, task *models.DeliveryTask, destination *models.Destination, delivery *models.Delivery, err error) error { logger := h.logger.Ctx(ctx) - // Set up delivery record - 
deliveryEvent.Delivery = delivery + // Set delivery fields from task + delivery.TenantID = task.Event.TenantID + delivery.Attempt = task.Attempt + delivery.Manual = task.Manual logger.Audit("event delivered", - zap.String("delivery_event_id", deliveryEvent.ID), - zap.String("delivery_id", deliveryEvent.Delivery.ID), - zap.String("event_id", deliveryEvent.Event.ID), - zap.String("tenant_id", deliveryEvent.Event.TenantID), + zap.String("delivery_id", delivery.ID), + zap.String("event_id", task.Event.ID), + zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), zap.String("destination_type", destination.Type), - zap.String("delivery_status", deliveryEvent.Delivery.Status), - zap.Int("attempt", deliveryEvent.Attempt), - zap.Bool("manual", deliveryEvent.Manual)) + zap.String("delivery_status", delivery.Status), + zap.Int("attempt", task.Attempt), + zap.Bool("manual", task.Manual)) // Publish delivery log logEntry := models.LogEntry{ - Event: &deliveryEvent.Event, + Event: &task.Event, Delivery: delivery, } if logErr := h.logMQ.Publish(ctx, logEntry); logErr != nil { logger.Error("failed to publish delivery log", zap.Error(logErr), - zap.String("delivery_event_id", deliveryEvent.ID), - zap.String("delivery_id", deliveryEvent.Delivery.ID), - zap.String("event_id", deliveryEvent.Event.ID), - zap.String("tenant_id", deliveryEvent.Event.TenantID), + zap.String("delivery_id", delivery.ID), + zap.String("event_id", task.Event.ID), + zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), zap.String("destination_type", destination.Type)) if err != nil { @@ -282,7 +278,7 @@ func (h *messageHandler) logDeliveryResult(ctx context.Context, deliveryEvent *m } // Call alert monitor in goroutine - go h.handleAlertAttempt(ctx, deliveryEvent, destination, err) + go h.handleAlertAttempt(ctx, task, destination, delivery, err) // If we have a DeliveryError, return it as is var delErr *DeliveryError @@ -304,10 +300,10 
@@ func (h *messageHandler) logDeliveryResult(ctx context.Context, deliveryEvent *m return nil } -func (h *messageHandler) handleAlertAttempt(ctx context.Context, deliveryEvent *models.DeliveryEvent, destination *models.Destination, err error) { +func (h *messageHandler) handleAlertAttempt(ctx context.Context, task *models.DeliveryTask, destination *models.Destination, delivery *models.Delivery, err error) { attempt := alert.DeliveryAttempt{ - Success: deliveryEvent.Delivery.Status == models.DeliveryStatusSuccess, - DeliveryEvent: deliveryEvent, + Success: delivery.Status == models.DeliveryStatusSuccess, + DeliveryTask: task, Destination: &alert.AlertDestination{ ID: destination.ID, TenantID: destination.TenantID, @@ -317,7 +313,7 @@ func (h *messageHandler) handleAlertAttempt(ctx context.Context, deliveryEvent * CreatedAt: destination.CreatedAt, DisabledAt: destination.DisabledAt, }, - Timestamp: deliveryEvent.Delivery.Time, + Timestamp: delivery.Time, } if !attempt.Success && err != nil { @@ -343,9 +339,8 @@ func (h *messageHandler) handleAlertAttempt(ctx context.Context, deliveryEvent * if monitorErr := h.alertMonitor.HandleAttempt(ctx, attempt); monitorErr != nil { h.logger.Ctx(ctx).Error("failed to handle alert attempt", zap.Error(monitorErr), - zap.String("delivery_event_id", deliveryEvent.ID), - zap.String("delivery_id", deliveryEvent.Delivery.ID), - zap.String("event_id", deliveryEvent.Event.ID), + zap.String("delivery_id", delivery.ID), + zap.String("event_id", task.Event.ID), zap.String("tenant_id", destination.TenantID), zap.String("destination_id", destination.ID), zap.String("destination_type", destination.Type)) @@ -353,26 +348,25 @@ func (h *messageHandler) handleAlertAttempt(ctx context.Context, deliveryEvent * } h.logger.Ctx(ctx).Info("alert attempt handled", - zap.String("delivery_event_id", deliveryEvent.ID), - zap.String("delivery_id", deliveryEvent.Delivery.ID), - zap.String("event_id", deliveryEvent.Event.ID), + zap.String("delivery_id", 
delivery.ID), + zap.String("event_id", task.Event.ID), zap.String("tenant_id", destination.TenantID), zap.String("destination_id", destination.ID), zap.String("destination_type", destination.Type)) } -func (h *messageHandler) shouldScheduleRetry(deliveryEvent models.DeliveryEvent, err error) bool { - if deliveryEvent.Manual { +func (h *messageHandler) shouldScheduleRetry(task models.DeliveryTask, err error) bool { + if task.Manual { return false } - if !deliveryEvent.Event.EligibleForRetry { + if !task.Event.EligibleForRetry { return false } if _, ok := err.(*destregistry.ErrDestinationPublishAttempt); !ok { return false } // Attempt starts at 0 for initial attempt, so we can compare directly - return deliveryEvent.Attempt < h.retryMaxLimit + return task.Attempt < h.retryMaxLimit } func (h *messageHandler) shouldNackError(err error) bool { @@ -419,49 +413,47 @@ func (h *messageHandler) shouldNackDeliveryError(err error) bool { return true // Nack other delivery errors } -func (h *messageHandler) scheduleRetry(ctx context.Context, deliveryEvent models.DeliveryEvent) error { - backoffDuration := h.retryBackoff.Duration(deliveryEvent.Attempt) +func (h *messageHandler) scheduleRetry(ctx context.Context, task models.DeliveryTask) error { + backoffDuration := h.retryBackoff.Duration(task.Attempt) - retryTask := RetryTaskFromDeliveryEvent(deliveryEvent) + retryTask := RetryTaskFromDeliveryTask(task) retryTaskStr, err := retryTask.ToString() if err != nil { return err } - if err := h.retryScheduler.Schedule(ctx, retryTaskStr, backoffDuration, scheduler.WithTaskID(deliveryEvent.GetRetryID())); err != nil { + if err := h.retryScheduler.Schedule(ctx, retryTaskStr, backoffDuration, scheduler.WithTaskID(models.RetryID(task.Event.ID, task.DestinationID))); err != nil { h.logger.Ctx(ctx).Error("failed to schedule retry", zap.Error(err), - zap.String("delivery_event_id", deliveryEvent.ID), - zap.String("event_id", deliveryEvent.Event.ID), - zap.String("tenant_id", 
deliveryEvent.Event.TenantID), - zap.String("destination_id", deliveryEvent.DestinationID), - zap.Int("attempt", deliveryEvent.Attempt), + zap.String("event_id", task.Event.ID), + zap.String("tenant_id", task.Event.TenantID), + zap.String("destination_id", task.DestinationID), + zap.Int("attempt", task.Attempt), zap.Duration("backoff", backoffDuration)) return err } h.logger.Ctx(ctx).Audit("retry scheduled", - zap.String("delivery_event_id", deliveryEvent.ID), - zap.String("event_id", deliveryEvent.Event.ID), - zap.String("tenant_id", deliveryEvent.Event.TenantID), - zap.String("destination_id", deliveryEvent.DestinationID), - zap.Int("attempt", deliveryEvent.Attempt), + zap.String("event_id", task.Event.ID), + zap.String("tenant_id", task.Event.TenantID), + zap.String("destination_id", task.DestinationID), + zap.Int("attempt", task.Attempt), zap.Duration("backoff", backoffDuration)) return nil } -// ensureDeliveryEvent ensures that the delivery event struct has full data. -// In retry scenarios, the delivery event only has its ID and we'll need to query the full data. -func (h *messageHandler) ensureDeliveryEvent(ctx context.Context, deliveryEvent *models.DeliveryEvent) error { +// ensureDeliveryTask ensures that the delivery task has full event data. +// In retry scenarios, the task only has event ID and we'll need to query the full data. +func (h *messageHandler) ensureDeliveryTask(ctx context.Context, task *models.DeliveryTask) error { // TODO: consider a more deliberate way to check for retry scenario? 
- if !deliveryEvent.Event.Time.IsZero() { + if !task.Event.Time.IsZero() { return nil } event, err := h.logStore.RetrieveEvent(ctx, logstore.RetrieveEventRequest{ - TenantID: deliveryEvent.Event.TenantID, - EventID: deliveryEvent.Event.ID, + TenantID: task.Event.TenantID, + EventID: task.Event.ID, }) if err != nil { return err @@ -469,7 +461,7 @@ func (h *messageHandler) ensureDeliveryEvent(ctx context.Context, deliveryEvent if event == nil { return errors.New("event not found") } - deliveryEvent.Event = *event + task.Event = *event return nil } @@ -477,16 +469,15 @@ func (h *messageHandler) ensureDeliveryEvent(ctx context.Context, deliveryEvent // ensurePublishableDestination ensures that the destination exists and is in a publishable state. // Returns an error if the destination is not found, deleted, disabled, or any other state that // would prevent publishing. -func (h *messageHandler) ensurePublishableDestination(ctx context.Context, deliveryEvent models.DeliveryEvent) (*models.Destination, error) { - destination, err := h.entityStore.RetrieveDestination(ctx, deliveryEvent.Event.TenantID, deliveryEvent.DestinationID) +func (h *messageHandler) ensurePublishableDestination(ctx context.Context, task models.DeliveryTask) (*models.Destination, error) { + destination, err := h.entityStore.RetrieveDestination(ctx, task.Event.TenantID, task.DestinationID) if err != nil { logger := h.logger.Ctx(ctx) fields := []zap.Field{ zap.Error(err), - zap.String("delivery_event_id", deliveryEvent.ID), - zap.String("event_id", deliveryEvent.Event.ID), - zap.String("tenant_id", deliveryEvent.Event.TenantID), - zap.String("destination_id", deliveryEvent.DestinationID), + zap.String("event_id", task.Event.ID), + zap.String("tenant_id", task.Event.TenantID), + zap.String("destination_id", task.DestinationID), } if errors.Is(err, models.ErrDestinationDeleted) { @@ -499,17 +490,15 @@ func (h *messageHandler) ensurePublishableDestination(ctx context.Context, deliv } if destination == nil 
{ h.logger.Ctx(ctx).Info("destination not found", - zap.String("delivery_event_id", deliveryEvent.ID), - zap.String("event_id", deliveryEvent.Event.ID), - zap.String("tenant_id", deliveryEvent.Event.TenantID), - zap.String("destination_id", deliveryEvent.DestinationID)) + zap.String("event_id", task.Event.ID), + zap.String("tenant_id", task.Event.TenantID), + zap.String("destination_id", task.DestinationID)) return nil, models.ErrDestinationNotFound } if destination.DisabledAt != nil { h.logger.Ctx(ctx).Info("skipping disabled destination", - zap.String("delivery_event_id", deliveryEvent.ID), - zap.String("event_id", deliveryEvent.Event.ID), - zap.String("tenant_id", deliveryEvent.Event.TenantID), + zap.String("event_id", task.Event.ID), + zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), zap.String("destination_type", destination.Type), zap.Time("disabled_at", *destination.DisabledAt)) diff --git a/internal/deliverymq/messagehandler_test.go b/internal/deliverymq/messagehandler_test.go index 857be951..23097f24 100644 --- a/internal/deliverymq/messagehandler_test.go +++ b/internal/deliverymq/messagehandler_test.go @@ -61,12 +61,11 @@ func TestMessageHandler_DestinationGetterError(t *testing.T) { ) // Create and handle message - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, } - mockMsg, msg := newDeliveryMockMessage(deliveryEvent) + mockMsg, msg := newDeliveryMockMessage(task) // Handle message err := handler.Handle(context.Background(), msg) @@ -124,12 +123,11 @@ func TestMessageHandler_DestinationNotFound(t *testing.T) { ) // Create and handle message - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, } - mockMsg, msg := newDeliveryMockMessage(deliveryEvent) + mockMsg, msg := newDeliveryMockMessage(task) // Handle message err := 
handler.Handle(context.Background(), msg) @@ -184,12 +182,11 @@ func TestMessageHandler_DestinationDeleted(t *testing.T) { ) // Create and handle message - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, } - mockMsg, msg := newDeliveryMockMessage(deliveryEvent) + mockMsg, msg := newDeliveryMockMessage(task) // Handle message err := handler.Handle(context.Background(), msg) @@ -254,12 +251,11 @@ func TestMessageHandler_PublishError_EligibleForRetry(t *testing.T) { ) // Create and handle message - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, } - mockMsg, msg := newDeliveryMockMessage(deliveryEvent) + mockMsg, msg := newDeliveryMockMessage(task) // Handle message err := handler.Handle(context.Background(), msg) @@ -269,7 +265,7 @@ func TestMessageHandler_PublishError_EligibleForRetry(t *testing.T) { assert.False(t, mockMsg.nacked, "message should not be nacked when scheduling retry") assert.True(t, mockMsg.acked, "message should be acked when scheduling retry") assert.Len(t, retryScheduler.schedules, 1, "retry should be scheduled") - assert.Equal(t, deliveryEvent.GetRetryID(), retryScheduler.taskIDs[0], + assert.Equal(t, models.RetryID(task.Event.ID, task.DestinationID), retryScheduler.taskIDs[0], "should use GetRetryID for task ID") require.Len(t, logPublisher.entries, 1, "should have one delivery") assert.Equal(t, models.DeliveryStatusFailed, logPublisher.entries[0].Delivery.Status, "delivery status should be Failed") @@ -327,12 +323,11 @@ func TestMessageHandler_PublishError_NotEligible(t *testing.T) { ) // Create and handle message - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, } - mockMsg, msg := newDeliveryMockMessage(deliveryEvent) + mockMsg, msg := 
newDeliveryMockMessage(task) // Handle message err := handler.Handle(context.Background(), msg) @@ -390,8 +385,7 @@ func TestMessageHandler_EventGetterError(t *testing.T) { ) // Create and handle message simulating a retry - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Attempt: 2, // Retry attempt DestinationID: destination.ID, Event: models.Event{ @@ -400,7 +394,7 @@ func TestMessageHandler_EventGetterError(t *testing.T) { // Minimal event data as it would be in a retry }, } - mockMsg, msg := newDeliveryMockMessage(deliveryEvent) + mockMsg, msg := newDeliveryMockMessage(task) // Handle message err := handler.Handle(context.Background(), msg) @@ -455,8 +449,7 @@ func TestMessageHandler_RetryFlow(t *testing.T) { ) // Create and handle message simulating a retry - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Attempt: 2, // Retry attempt DestinationID: destination.ID, Event: models.Event{ @@ -465,7 +458,7 @@ func TestMessageHandler_RetryFlow(t *testing.T) { // Minimal event data as it would be in a retry }, } - mockMsg, msg := newDeliveryMockMessage(deliveryEvent) + mockMsg, msg := newDeliveryMockMessage(task) // Handle message err := handler.Handle(context.Background(), msg) @@ -522,24 +515,22 @@ func TestMessageHandler_Idempotency(t *testing.T) { idempotence.New(redis, idempotence.WithSuccessfulTTL(24*time.Hour)), ) - // Create message with fixed ID for idempotency check - messageID := idgen.DeliveryEvent() - deliveryEvent := models.DeliveryEvent{ - ID: messageID, + // Create message for idempotency check + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, } // First attempt - mockMsg1, msg1 := newDeliveryMockMessage(deliveryEvent) + mockMsg1, msg1 := newDeliveryMockMessage(task) err := handler.Handle(context.Background(), msg1) require.NoError(t, err) assert.True(t, mockMsg1.acked, "first attempt should be acked") assert.False(t, 
mockMsg1.nacked, "first attempt should not be nacked") assert.Equal(t, 1, publisher.current, "first attempt should publish") - // Second attempt with same message ID - mockMsg2, msg2 := newDeliveryMockMessage(deliveryEvent) + // Second attempt with same task + mockMsg2, msg2 := newDeliveryMockMessage(task) err = handler.Handle(context.Background(), msg2) require.NoError(t, err) assert.True(t, mockMsg2.acked, "duplicate should be acked") @@ -590,8 +581,7 @@ func TestMessageHandler_IdempotencyWithSystemError(t *testing.T) { ) // Create retry message - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Attempt: 2, DestinationID: destination.ID, Event: models.Event{ @@ -601,7 +591,7 @@ func TestMessageHandler_IdempotencyWithSystemError(t *testing.T) { } // First attempt - should fail with system error - mockMsg1, msg1 := newDeliveryMockMessage(deliveryEvent) + mockMsg1, msg1 := newDeliveryMockMessage(task) err := handler.Handle(context.Background(), msg1) require.Error(t, err) assert.Contains(t, err.Error(), "failed to get event") @@ -612,8 +602,8 @@ func TestMessageHandler_IdempotencyWithSystemError(t *testing.T) { // Clear the error for second attempt eventGetter.clearError() - // Second attempt with same message ID - should succeed - mockMsg2, msg2 := newDeliveryMockMessage(deliveryEvent) + // Second attempt with same task - should succeed + mockMsg2, msg2 := newDeliveryMockMessage(task) err = handler.Handle(context.Background(), msg2) require.NoError(t, err) assert.True(t, mockMsg2.acked, "second attempt should be acked") @@ -666,12 +656,11 @@ func TestMessageHandler_DestinationDisabled(t *testing.T) { ) // Create and handle message - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, } - mockMsg, msg := newDeliveryMockMessage(deliveryEvent) + mockMsg, msg := newDeliveryMockMessage(task) // Handle message err := 
handler.Handle(context.Background(), msg) @@ -728,12 +717,11 @@ func TestMessageHandler_LogPublisherError(t *testing.T) { ) // Create and handle message - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, } - mockMsg, msg := newDeliveryMockMessage(deliveryEvent) + mockMsg, msg := newDeliveryMockMessage(task) // Handle message err := handler.Handle(context.Background(), msg) @@ -789,12 +777,11 @@ func TestMessageHandler_PublishAndLogError(t *testing.T) { ) // Create and handle message - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, } - mockMsg, msg := newDeliveryMockMessage(deliveryEvent) + mockMsg, msg := newDeliveryMockMessage(task) // Handle message err := handler.Handle(context.Background(), msg) @@ -856,26 +843,24 @@ func TestManualDelivery_Success(t *testing.T) { ) // Step 1: Automatic delivery fails and schedules retry - autoDeliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + autoTask := models.DeliveryTask{ Event: event, DestinationID: destination.ID, Manual: false, } - _, autoMsg := newDeliveryMockMessage(autoDeliveryEvent) + _, autoMsg := newDeliveryMockMessage(autoTask) _ = handler.Handle(context.Background(), autoMsg) require.Len(t, retryScheduler.taskIDs, 1, "should schedule one retry") scheduledRetryID := retryScheduler.taskIDs[0] // Step 2: Manual retry succeeds and cancels pending retry - manualDeliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), // New delivery event ID - Event: event, // Same event - DestinationID: destination.ID, // Same destination + manualTask := models.DeliveryTask{ + Event: event, // Same event + DestinationID: destination.ID, // Same destination Manual: true, } - mockMsg, manualMsg := newDeliveryMockMessage(manualDeliveryEvent) + mockMsg, manualMsg := newDeliveryMockMessage(manualTask) err := 
handler.Handle(context.Background(), manualMsg) require.NoError(t, err) @@ -940,13 +925,12 @@ func TestManualDelivery_PublishError(t *testing.T) { ) // Create and handle message - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, Manual: true, // Manual delivery } - mockMsg, msg := newDeliveryMockMessage(deliveryEvent) + mockMsg, msg := newDeliveryMockMessage(task) // Handle message err := handler.Handle(context.Background(), msg) @@ -1005,13 +989,12 @@ func TestManualDelivery_CancelError(t *testing.T) { ) // Create and handle message - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, Manual: true, // Manual delivery } - mockMsg, msg := newDeliveryMockMessage(deliveryEvent) + mockMsg, msg := newDeliveryMockMessage(task) // Handle message err := handler.Handle(context.Background(), msg) @@ -1023,7 +1006,7 @@ func TestManualDelivery_CancelError(t *testing.T) { assert.False(t, mockMsg.acked, "message should not be acked on retry cancel error") assert.Equal(t, 1, publisher.current, "should publish once") assert.Len(t, retryScheduler.canceled, 1, "should attempt to cancel retry") - assert.Equal(t, deliveryEvent.GetRetryID(), retryScheduler.canceled[0], "should cancel with correct retry ID") + assert.Equal(t, models.RetryID(task.Event.ID, task.DestinationID), retryScheduler.canceled[0], "should cancel with correct retry ID") require.Len(t, logPublisher.entries, 1, "should have one delivery") assert.Equal(t, models.DeliveryStatusSuccess, logPublisher.entries[0].Delivery.Status, "delivery status should be OK despite cancel error") assertAlertMonitor(t, alertMonitor, true, &destination, nil) @@ -1072,13 +1055,12 @@ func TestManualDelivery_DestinationDisabled(t *testing.T) { ) // Create and handle message - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := 
models.DeliveryTask{ Event: event, DestinationID: destination.ID, Manual: true, // Manual delivery } - mockMsg, msg := newDeliveryMockMessage(deliveryEvent) + mockMsg, msg := newDeliveryMockMessage(task) // Handle message err := handler.Handle(context.Background(), msg) @@ -1124,7 +1106,7 @@ func TestMessageHandler_PublishSuccess(t *testing.T) { alertMonitor.On("HandleAttempt", mock.Anything, mock.MatchedBy(func(attempt alert.DeliveryAttempt) bool { return attempt.Success && // Should be a successful attempt attempt.Destination.ID == destination.ID && // Should have correct destination - attempt.DeliveryEvent != nil && // Should have delivery event + attempt.DeliveryTask != nil && // Should have delivery task attempt.DeliveryResponse == nil // No error data for success })).Return(nil) @@ -1144,12 +1126,11 @@ func TestMessageHandler_PublishSuccess(t *testing.T) { ) // Create and handle message - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, } - mockMsg, msg := newDeliveryMockMessage(deliveryEvent) + mockMsg, msg := newDeliveryMockMessage(task) // Handle message err := handler.Handle(context.Background(), msg) @@ -1204,12 +1185,11 @@ func TestMessageHandler_AlertMonitorError(t *testing.T) { ) // Create and handle message - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, } - mockMsg, msg := newDeliveryMockMessage(deliveryEvent) + mockMsg, msg := newDeliveryMockMessage(task) // Handle message err := handler.Handle(context.Background(), msg) @@ -1251,7 +1231,7 @@ func assertAlertMonitor(t *testing.T, m *mockAlertMonitor, success bool, destina assert.Equal(t, success, attempt.Success, "alert attempt success should match") assert.Equal(t, destination.ID, attempt.Destination.ID, "alert attempt destination should match") - assert.NotNil(t, attempt.DeliveryEvent, "alert attempt should 
have delivery event") + assert.NotNil(t, attempt.DeliveryTask, "alert attempt should have delivery task") if expectedData != nil { assert.Equal(t, expectedData, attempt.DeliveryResponse, "alert attempt data should match") @@ -1316,21 +1296,19 @@ func TestMessageHandler_RetryID_MultipleDestinations(t *testing.T) { idempotence.New(testutil.CreateTestRedisClient(t), idempotence.WithSuccessfulTTL(24*time.Hour)), ) - // Create delivery events for SAME event to DIFFERENT destinations - deliveryEvent1 := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + // Create delivery tasks for SAME event to DIFFERENT destinations + task1 := models.DeliveryTask{ Event: event, DestinationID: destination1.ID, } - deliveryEvent2 := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task2 := models.DeliveryTask{ Event: event, DestinationID: destination2.ID, } // Handle both messages - _, msg1 := newDeliveryMockMessage(deliveryEvent1) - _, msg2 := newDeliveryMockMessage(deliveryEvent2) + _, msg1 := newDeliveryMockMessage(task1) + _, msg2 := newDeliveryMockMessage(task2) _ = handler.Handle(context.Background(), msg1) _ = handler.Handle(context.Background(), msg2) diff --git a/internal/deliverymq/mock_test.go b/internal/deliverymq/mock_test.go index 624d66e2..49696d39 100644 --- a/internal/deliverymq/mock_test.go +++ b/internal/deliverymq/mock_test.go @@ -217,9 +217,9 @@ type mockMessage struct { nacked bool } -func newDeliveryMockMessage(deliveryEvent models.DeliveryEvent) (*mockMessage, *mqs.Message) { - mock := &mockMessage{id: deliveryEvent.ID} - body, err := json.Marshal(deliveryEvent) +func newDeliveryMockMessage(task models.DeliveryTask) (*mockMessage, *mqs.Message) { + mock := &mockMessage{id: task.IdempotencyKey()} + body, err := json.Marshal(task) if err != nil { panic(err) } diff --git a/internal/deliverymq/retry.go b/internal/deliverymq/retry.go index 1cec61e7..da699c0e 100644 --- a/internal/deliverymq/retry.go +++ b/internal/deliverymq/retry.go @@ -46,8 +46,8 @@ func 
NewRetryScheduler(deliverymq *DeliveryMQ, redisConfig *redis.RedisConfig, d if err := retryTask.FromString(msg); err != nil { return err } - deliveryEvent := retryTask.ToDeliveryEvent() - if err := deliverymq.Publish(ctx, deliveryEvent); err != nil { + deliveryTask := retryTask.ToDeliveryTask() + if err := deliverymq.Publish(ctx, deliveryTask); err != nil { return err } return nil @@ -56,13 +56,14 @@ func NewRetryScheduler(deliverymq *DeliveryMQ, redisConfig *redis.RedisConfig, d return scheduler.New("deliverymq-retry", rsmqClient, exec, scheduler.WithPollBackoff(pollBackoff)), nil } +// RetryTask contains the minimal info needed to retry a delivery. +// The full Event data will be fetched from logstore when the retry executes. type RetryTask struct { - DeliveryEventID string - EventID string - TenantID string - DestinationID string - Attempt int - Telemetry *models.DeliveryEventTelemetry + EventID string + TenantID string + DestinationID string + Attempt int + Telemetry *models.DeliveryEventTelemetry } func (m *RetryTask) ToString() (string, error) { @@ -77,9 +78,8 @@ func (m *RetryTask) FromString(str string) error { return json.Unmarshal([]byte(str), &m) } -func (m *RetryTask) ToDeliveryEvent() models.DeliveryEvent { - return models.DeliveryEvent{ - ID: m.DeliveryEventID, +func (m *RetryTask) ToDeliveryTask() models.DeliveryTask { + return models.DeliveryTask{ Attempt: m.Attempt, DestinationID: m.DestinationID, Event: models.Event{ID: m.EventID, TenantID: m.TenantID}, @@ -87,13 +87,12 @@ func (m *RetryTask) ToDeliveryEvent() models.DeliveryEvent { } } -func RetryTaskFromDeliveryEvent(deliveryEvent models.DeliveryEvent) RetryTask { +func RetryTaskFromDeliveryTask(task models.DeliveryTask) RetryTask { return RetryTask{ - DeliveryEventID: deliveryEvent.ID, - EventID: deliveryEvent.Event.ID, - TenantID: deliveryEvent.Event.TenantID, - DestinationID: deliveryEvent.DestinationID, - Attempt: deliveryEvent.Attempt + 1, - Telemetry: deliveryEvent.Telemetry, + EventID: 
task.Event.ID, + TenantID: task.Event.TenantID, + DestinationID: task.DestinationID, + Attempt: task.Attempt + 1, + Telemetry: task.Telemetry, } } diff --git a/internal/deliverymq/retry_test.go b/internal/deliverymq/retry_test.go index 10a1b012..1fd6b7c4 100644 --- a/internal/deliverymq/retry_test.go +++ b/internal/deliverymq/retry_test.go @@ -150,12 +150,11 @@ func TestDeliveryMQRetry_EligibleForRetryFalse(t *testing.T) { suite.SetupTest(t) defer suite.TeardownTest(t) - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, } - require.NoError(t, suite.deliveryMQ.Publish(ctx, deliveryEvent)) + require.NoError(t, suite.deliveryMQ.Publish(ctx, task)) <-ctx.Done() assert.Equal(t, 1, publisher.Current(), "should only attempt once when retry is not eligible") @@ -213,12 +212,11 @@ func TestDeliveryMQRetry_EligibleForRetryTrue(t *testing.T) { suite.SetupTest(t) defer suite.TeardownTest(t) - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, } - require.NoError(t, suite.deliveryMQ.Publish(ctx, deliveryEvent)) + require.NoError(t, suite.deliveryMQ.Publish(ctx, task)) // Wait for all attempts to complete // Note: 50ms backoff + 10ms poll interval = fast, deterministic retries @@ -271,12 +269,11 @@ func TestDeliveryMQRetry_SystemError(t *testing.T) { suite.SetupTest(t) defer suite.TeardownTest(t) - deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, } - require.NoError(t, suite.deliveryMQ.Publish(ctx, deliveryEvent)) + require.NoError(t, suite.deliveryMQ.Publish(ctx, task)) <-ctx.Done() assert.Greater(t, destGetter.current, 1, "handler should execute multiple times on system error") @@ -341,12 +338,11 @@ func TestDeliveryMQRetry_RetryMaxCount(t *testing.T) { suite.SetupTest(t) defer suite.TeardownTest(t) 
- deliveryEvent := models.DeliveryEvent{ - ID: idgen.DeliveryEvent(), + task := models.DeliveryTask{ Event: event, DestinationID: destination.ID, } - require.NoError(t, suite.deliveryMQ.Publish(ctx, deliveryEvent)) + require.NoError(t, suite.deliveryMQ.Publish(ctx, task)) // Poll until we get 3 attempts or timeout // With 50ms backoff + 10ms poll: initial + 60ms + retry + 60ms + retry = ~150ms minimum diff --git a/internal/emetrics/emetrics.go b/internal/emetrics/emetrics.go index c7b2dd93..26c0e3cb 100644 --- a/internal/emetrics/emetrics.go +++ b/internal/emetrics/emetrics.go @@ -12,7 +12,7 @@ import ( type OutpostMetrics interface { DeliveryLatency(ctx context.Context, latency time.Duration, opts DeliveryLatencyOpts) - EventDelivered(ctx context.Context, deliveryEvent *models.DeliveryEvent, ok bool, destinationType string) + EventDelivered(ctx context.Context, ok bool, destinationType string) EventPublished(ctx context.Context, event *models.Event) EventEligbible(ctx context.Context, event *models.Event) APIResponseLatency(ctx context.Context, latency time.Duration, opts APIResponseLatencyOpts) @@ -99,7 +99,7 @@ func (e *emetricsImpl) DeliveryLatency(ctx context.Context, latency time.Duratio e.deliveryLatency.Record(ctx, latency.Milliseconds(), metric.WithAttributes(attribute.String("type", opts.Type))) } -func (e *emetricsImpl) EventDelivered(ctx context.Context, deliveryEvent *models.DeliveryEvent, ok bool, destinationType string) { +func (e *emetricsImpl) EventDelivered(ctx context.Context, ok bool, destinationType string) { var status string if ok { status = models.DeliveryStatusSuccess diff --git a/internal/eventtracer/eventtracer.go b/internal/eventtracer/eventtracer.go index 52a1d820..f907a593 100644 --- a/internal/eventtracer/eventtracer.go +++ b/internal/eventtracer/eventtracer.go @@ -12,8 +12,8 @@ import ( type EventTracer interface { Receive(context.Context, *models.Event) (context.Context, trace.Span) - StartDelivery(context.Context, 
*models.DeliveryEvent) (context.Context, trace.Span) - Deliver(context.Context, *models.DeliveryEvent, *models.Destination) (context.Context, trace.Span) + StartDelivery(context.Context, *models.DeliveryTask) (context.Context, trace.Span) + Deliver(context.Context, *models.DeliveryTask, *models.Destination) (context.Context, trace.Span) } type eventTracerImpl struct { @@ -47,10 +47,10 @@ func (t *eventTracerImpl) Receive(ctx context.Context, event *models.Event) (con return ctx, span } -func (t *eventTracerImpl) StartDelivery(_ context.Context, deliveryEvent *models.DeliveryEvent) (context.Context, trace.Span) { - ctx, span := t.tracer.Start(t.getRemoteEventSpanContext(&deliveryEvent.Event), "EventTracer.StartDelivery") +func (t *eventTracerImpl) StartDelivery(_ context.Context, task *models.DeliveryTask) (context.Context, trace.Span) { + ctx, span := t.tracer.Start(t.getRemoteEventSpanContext(&task.Event), "EventTracer.StartDelivery") - deliveryEvent.Telemetry = &models.DeliveryEventTelemetry{ + task.Telemetry = &models.DeliveryEventTelemetry{ TraceID: span.SpanContext().TraceID().String(), SpanID: span.SpanContext().SpanID().String(), } @@ -60,10 +60,10 @@ func (t *eventTracerImpl) StartDelivery(_ context.Context, deliveryEvent *models type DeliverSpan struct { trace.Span - emeter emetrics.OutpostMetrics - deliveryEvent *models.DeliveryEvent - destination *models.Destination - err error + emeter emetrics.OutpostMetrics + task *models.DeliveryTask + destination *models.Destination + err error } func (d *DeliverSpan) RecordError(err error, options ...trace.EventOption) { @@ -72,17 +72,12 @@ func (d *DeliverSpan) RecordError(err error, options ...trace.EventOption) { } func (d *DeliverSpan) End(options ...trace.SpanEndOption) { - if d.deliveryEvent.Event.Telemetry == nil { - d.Span.End(options...) - return - } - if d.deliveryEvent.Delivery == nil { + if d.task.Event.Telemetry == nil { d.Span.End(options...) 
return } - ok := d.deliveryEvent.Delivery.Status == models.DeliveryStatusSuccess - startTime, err := time.Parse(time.RFC3339Nano, d.deliveryEvent.Event.Telemetry.ReceivedTime) + startTime, err := time.Parse(time.RFC3339Nano, d.task.Event.Telemetry.ReceivedTime) if err != nil { // TODO: handle error? d.Span.End(options...) @@ -92,14 +87,13 @@ func (d *DeliverSpan) End(options ...trace.SpanEndOption) { d.emeter.DeliveryLatency(context.Background(), time.Since(startTime), emetrics.DeliveryLatencyOpts{Type: d.destination.Type}) - d.emeter.EventDelivered(context.Background(), d.deliveryEvent, ok, d.destination.Type) d.Span.End(options...) } -func (t *eventTracerImpl) Deliver(_ context.Context, deliveryEvent *models.DeliveryEvent, destination *models.Destination) (context.Context, trace.Span) { - ctx, span := t.tracer.Start(t.getRemoteDeliveryEventSpanContext(deliveryEvent), "EventTracer.Deliver") - deliverySpan := &DeliverSpan{Span: span, emeter: t.emeter, deliveryEvent: deliveryEvent, destination: destination} +func (t *eventTracerImpl) Deliver(_ context.Context, task *models.DeliveryTask, destination *models.Destination) (context.Context, trace.Span) { + ctx, span := t.tracer.Start(t.getRemoteDeliveryTaskSpanContext(task), "EventTracer.Deliver") + deliverySpan := &DeliverSpan{Span: span, emeter: t.emeter, task: task, destination: destination} return ctx, deliverySpan } @@ -128,17 +122,17 @@ func (t *eventTracerImpl) getRemoteEventSpanContext(event *models.Event) context return trace.ContextWithRemoteSpanContext(context.Background(), remoteCtx) } -func (t *eventTracerImpl) getRemoteDeliveryEventSpanContext(deliveryEvent *models.DeliveryEvent) context.Context { - if deliveryEvent.Telemetry == nil { +func (t *eventTracerImpl) getRemoteDeliveryTaskSpanContext(task *models.DeliveryTask) context.Context { + if task.Telemetry == nil { return context.Background() } - traceID, err := trace.TraceIDFromHex(deliveryEvent.Telemetry.TraceID) + traceID, err := 
trace.TraceIDFromHex(task.Telemetry.TraceID) if err != nil { // TODO: handle error return context.Background() } - spanID, err := trace.SpanIDFromHex(deliveryEvent.Telemetry.SpanID) + spanID, err := trace.SpanIDFromHex(task.Telemetry.SpanID) if err != nil { // TODO: handle error return context.Background() diff --git a/internal/eventtracer/noop.go b/internal/eventtracer/noop.go index 67eeb691..ef051813 100644 --- a/internal/eventtracer/noop.go +++ b/internal/eventtracer/noop.go @@ -24,12 +24,12 @@ func (t *noopEventTracer) Receive(ctx context.Context, _ *models.Event) (context return ctx, span } -func (t *noopEventTracer) StartDelivery(ctx context.Context, deliveryEvent *models.DeliveryEvent) (context.Context, trace.Span) { +func (t *noopEventTracer) StartDelivery(ctx context.Context, task *models.DeliveryTask) (context.Context, trace.Span) { _, span := t.tracer.Start(ctx, "EventTracer.StartDelivery") return ctx, span } -func (t *noopEventTracer) Deliver(ctx context.Context, deliveryEvent *models.DeliveryEvent, destination *models.Destination) (context.Context, trace.Span) { +func (t *noopEventTracer) Deliver(ctx context.Context, task *models.DeliveryTask, destination *models.Destination) (context.Context, trace.Span) { _, span := t.tracer.Start(ctx, "EventTracer.Deliver") return ctx, span } diff --git a/internal/models/event.go b/internal/models/event.go index b9f913b1..5b950c2e 100644 --- a/internal/models/event.go +++ b/internal/models/event.go @@ -72,6 +72,62 @@ type DeliveryEventTelemetry struct { SpanID string } +// DeliveryTask represents a task to deliver an event to a destination. 
+// This is a message type (no ID) used by: publishmq -> deliverymq, retry -> deliverymq +type DeliveryTask struct { + Event Event `json:"event"` + DestinationID string `json:"destination_id"` + Attempt int `json:"attempt"` + Manual bool `json:"manual"` + Telemetry *DeliveryEventTelemetry `json:"telemetry,omitempty"` +} + +var _ mqs.IncomingMessage = &DeliveryTask{} + +func (t *DeliveryTask) FromMessage(msg *mqs.Message) error { + return json.Unmarshal(msg.Body, t) +} + +func (t *DeliveryTask) ToMessage() (*mqs.Message, error) { + data, err := json.Marshal(t) + if err != nil { + return nil, err + } + return &mqs.Message{Body: data}, nil +} + +// IdempotencyKey returns the key used for idempotency checks. +// Uses Event.ID + DestinationID + Manual flag. +// Manual retries get a different key so they can bypass idempotency of failed automatic deliveries. +func (t *DeliveryTask) IdempotencyKey() string { + if t.Manual { + return t.Event.ID + ":" + t.DestinationID + ":manual" + } + return t.Event.ID + ":" + t.DestinationID +} + +// RetryID returns the ID used for scheduling and canceling retries. +// Uses event_id:destination_id to allow manual retries to cancel pending automatic retries. +func RetryID(eventID, destinationID string) string { + return eventID + ":" + destinationID +} + +// NewDeliveryTask creates a new DeliveryTask for an event and destination. +func NewDeliveryTask(event Event, destinationID string) DeliveryTask { + return DeliveryTask{ + Event: event, + DestinationID: destinationID, + Attempt: 0, + } +} + +// NewManualDeliveryTask creates a new DeliveryTask for a manual retry. 
+func NewManualDeliveryTask(event Event, destinationID string) DeliveryTask { + task := NewDeliveryTask(event, destinationID) + task.Manual = true + return task +} + type DeliveryEvent struct { ID string Attempt int diff --git a/internal/publishmq/eventhandler.go b/internal/publishmq/eventhandler.go index a7bdac40..740dbb3a 100644 --- a/internal/publishmq/eventhandler.go +++ b/internal/publishmq/eventhandler.go @@ -142,7 +142,7 @@ func (h *eventHandler) doPublish(ctx context.Context, event *models.Event, match for _, destinationSummary := range matchedDestinations { destID := destinationSummary.ID g.Go(func() error { - return h.enqueueDeliveryEvent(ctx, models.NewDeliveryEvent(*event, destID)) + return h.enqueueDeliveryTask(ctx, models.NewDeliveryTask(*event, destID)) }) } if err := g.Wait(); err != nil { @@ -176,25 +176,23 @@ func (h *eventHandler) matchSpecificDestination(ctx context.Context, event *mode return []models.DestinationSummary{*destination.ToSummary()}, nil } -func (h *eventHandler) enqueueDeliveryEvent(ctx context.Context, deliveryEvent models.DeliveryEvent) error { - _, deliverySpan := h.eventTracer.StartDelivery(ctx, &deliveryEvent) - if err := h.deliveryMQ.Publish(ctx, deliveryEvent); err != nil { - h.logger.Ctx(ctx).Error("failed to enqueue delivery event", +func (h *eventHandler) enqueueDeliveryTask(ctx context.Context, task models.DeliveryTask) error { + _, deliverySpan := h.eventTracer.StartDelivery(ctx, &task) + if err := h.deliveryMQ.Publish(ctx, task); err != nil { + h.logger.Ctx(ctx).Error("failed to enqueue delivery task", zap.Error(err), - zap.String("delivery_event_id", deliveryEvent.ID), - zap.String("event_id", deliveryEvent.Event.ID), - zap.String("tenant_id", deliveryEvent.Event.TenantID), - zap.String("destination_id", deliveryEvent.DestinationID)) + zap.String("event_id", task.Event.ID), + zap.String("tenant_id", task.Event.TenantID), + zap.String("destination_id", task.DestinationID)) deliverySpan.RecordError(err) 
deliverySpan.End() return err } - h.logger.Ctx(ctx).Audit("delivery event enqueued", - zap.String("delivery_event_id", deliveryEvent.ID), - zap.String("event_id", deliveryEvent.Event.ID), - zap.String("tenant_id", deliveryEvent.Event.TenantID), - zap.String("destination_id", deliveryEvent.DestinationID)) + h.logger.Ctx(ctx).Audit("delivery task enqueued", + zap.String("event_id", task.Event.ID), + zap.String("tenant_id", task.Event.TenantID), + zap.String("destination_id", task.DestinationID)) deliverySpan.End() return nil } diff --git a/internal/publishmq/eventhandler_test.go b/internal/publishmq/eventhandler_test.go index cb6d9a4f..74b147fe 100644 --- a/internal/publishmq/eventhandler_test.go +++ b/internal/publishmq/eventhandler_test.go @@ -176,22 +176,22 @@ func TestEventHandler_WildcardTopic(t *testing.T) { msg, err := subscription.Receive(receiveCtx) require.NoError(t, err) - var deliveryEvent models.DeliveryEvent - err = deliveryEvent.FromMessage(msg) + var task models.DeliveryTask + err = task.FromMessage(msg) require.NoError(t, err) // Verify this is a destination we expect - _, exists := destinationIDs[deliveryEvent.DestinationID] - require.True(t, exists, "delivery to unexpected destination: %s", deliveryEvent.DestinationID) - destinationIDs[deliveryEvent.DestinationID] = true + _, exists := destinationIDs[task.DestinationID] + require.True(t, exists, "delivery to unexpected destination: %s", task.DestinationID) + destinationIDs[task.DestinationID] = true // Verify this is not the disabled destination - require.NotEqual(t, disabledDest.ID, deliveryEvent.DestinationID, "disabled destination should not receive events") + require.NotEqual(t, disabledDest.ID, task.DestinationID, "disabled destination should not receive events") // Verify event data is correct - require.Equal(t, event.ID, deliveryEvent.Event.ID) - require.Equal(t, event.Topic, deliveryEvent.Event.Topic) - require.Equal(t, event.TenantID, deliveryEvent.Event.TenantID) + require.Equal(t, 
event.ID, task.Event.ID) + require.Equal(t, event.Topic, task.Event.Topic) + require.Equal(t, event.TenantID, task.Event.TenantID) // Acknowledge the message msg.Ack() @@ -454,9 +454,9 @@ func TestEventHandler_Filter(t *testing.T) { require.NoError(t, err) require.NotNil(t, msg) - var deliveryEvent models.DeliveryEvent - require.NoError(t, deliveryEvent.FromMessage(msg)) - require.Equal(t, dest.ID, deliveryEvent.DestinationID) + var task models.DeliveryTask + require.NoError(t, task.FromMessage(msg)) + require.Equal(t, dest.ID, task.DestinationID) msg.Ack() }) @@ -522,9 +522,9 @@ func TestEventHandler_Filter(t *testing.T) { require.NoError(t, err) require.NotNil(t, msg) - var deliveryEvent models.DeliveryEvent - require.NoError(t, deliveryEvent.FromMessage(msg)) - require.Equal(t, dest.ID, deliveryEvent.DestinationID) + var task models.DeliveryTask + require.NoError(t, task.FromMessage(msg)) + require.Equal(t, dest.ID, task.DestinationID) msg.Ack() }) @@ -586,9 +586,9 @@ func TestEventHandler_Filter(t *testing.T) { require.NoError(t, err) require.NotNil(t, msg) - var deliveryEvent models.DeliveryEvent - require.NoError(t, deliveryEvent.FromMessage(msg)) - require.Equal(t, dest.ID, deliveryEvent.DestinationID) + var task models.DeliveryTask + require.NoError(t, task.FromMessage(msg)) + require.Equal(t, dest.ID, task.DestinationID) msg.Ack() }) } diff --git a/internal/util/testutil/eventracer.go b/internal/util/testutil/eventracer.go index 13a7c5e3..82a0e8a5 100644 --- a/internal/util/testutil/eventracer.go +++ b/internal/util/testutil/eventracer.go @@ -12,8 +12,8 @@ import ( type mockEventTracerImpl struct { tracer trace.Tracer receive func(context.Context, *models.Event) (context.Context, trace.Span) - startDelivery func(context.Context, *models.DeliveryEvent) (context.Context, trace.Span) - deliver func(context.Context, *models.DeliveryEvent, *models.Destination) (context.Context, trace.Span) + startDelivery func(context.Context, *models.DeliveryTask) 
(context.Context, trace.Span) + deliver func(context.Context, *models.DeliveryTask, *models.Destination) (context.Context, trace.Span) } var _ eventtracer.EventTracer = (*mockEventTracerImpl)(nil) @@ -22,12 +22,12 @@ func (m *mockEventTracerImpl) Receive(ctx context.Context, event *models.Event) return m.receive(ctx, event) } -func (m *mockEventTracerImpl) StartDelivery(ctx context.Context, deliveryEvent *models.DeliveryEvent) (context.Context, trace.Span) { - return m.startDelivery(ctx, deliveryEvent) +func (m *mockEventTracerImpl) StartDelivery(ctx context.Context, task *models.DeliveryTask) (context.Context, trace.Span) { + return m.startDelivery(ctx, task) } -func (m *mockEventTracerImpl) Deliver(ctx context.Context, deliveryEvent *models.DeliveryEvent, destination *models.Destination) (context.Context, trace.Span) { - return m.deliver(ctx, deliveryEvent, destination) +func (m *mockEventTracerImpl) Deliver(ctx context.Context, task *models.DeliveryTask, destination *models.Destination) (context.Context, trace.Span) { + return m.deliver(ctx, task, destination) } func NewMockEventTracer(exporter traceSDK.SpanExporter) *mockEventTracerImpl { @@ -39,10 +39,10 @@ func NewMockEventTracer(exporter traceSDK.SpanExporter) *mockEventTracerImpl { mockEventTracer.receive = func(ctx context.Context, event *models.Event) (context.Context, trace.Span) { return mockEventTracer.tracer.Start(ctx, "Receive") } - mockEventTracer.startDelivery = func(ctx context.Context, deliveryEvent *models.DeliveryEvent) (context.Context, trace.Span) { + mockEventTracer.startDelivery = func(ctx context.Context, task *models.DeliveryTask) (context.Context, trace.Span) { return mockEventTracer.tracer.Start(ctx, "StartDelivery") } - mockEventTracer.deliver = func(ctx context.Context, deliveryEvent *models.DeliveryEvent, destination *models.Destination) (context.Context, trace.Span) { + mockEventTracer.deliver = func(ctx context.Context, task *models.DeliveryTask, destination *models.Destination) 
(context.Context, trace.Span) { return mockEventTracer.tracer.Start(ctx, "Deliver") } From 3e97fbde2b30bd2295cfa9d324dee39a1296db33 Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Thu, 22 Jan 2026 05:13:19 +0700 Subject: [PATCH 06/23] refactor: remove DeliveryEvent type and legacy API handlers Co-Authored-By: Claude Opus 4.5 --- cmd/e2e/log_test.go | 287 ----------------- internal/apirouter/legacy_handlers.go | 265 ---------------- internal/apirouter/legacy_handlers_test.go | 348 --------------------- internal/apirouter/log_handlers_test.go | 46 +-- internal/apirouter/retry_handlers_test.go | 19 +- internal/apirouter/router.go | 46 --- internal/app/app.go | 12 +- internal/config/id_gen.go | 9 +- internal/idgen/idgen.go | 44 +-- internal/models/event.go | 53 ---- 10 files changed, 32 insertions(+), 1097 deletions(-) delete mode 100644 internal/apirouter/legacy_handlers.go delete mode 100644 internal/apirouter/legacy_handlers_test.go diff --git a/cmd/e2e/log_test.go b/cmd/e2e/log_test.go index c6c1729a..5dc72343 100644 --- a/cmd/e2e/log_test.go +++ b/cmd/e2e/log_test.go @@ -776,293 +776,6 @@ func (suite *basicSuite) TestRetryAPI() { suite.RunAPITests(suite.T(), cleanupTests) } -// TestLegacyLogAPI tests the deprecated legacy endpoints for backward compatibility. -// All legacy endpoints return "Deprecation: true" header to signal migration. -// -// Setup: -// 1. Create a tenant -// 2. Configure mock webhook server to accept deliveries -// 3. Create a destination pointing to the mock server -// 4. 
Publish an event and wait for delivery to complete -// -// Test Cases: -// - GET /:tenantID/destinations/:destID/events - Legacy list events (returns {data, count}) -// - GET /:tenantID/destinations/:destID/events/:eventID - Legacy retrieve event -// - GET /:tenantID/events/:eventID/deliveries - Legacy list deliveries (returns bare array, not {data}) -// - POST /:tenantID/destinations/:destID/events/:eventID/retry - Legacy retry endpoint -// -// All responses include: -// - Deprecation: true header -// - X-Deprecated-Message header with migration guidance -func (suite *basicSuite) TestLegacyLogAPI() { - tenantID := idgen.String() - destinationID := idgen.Destination() - eventID := idgen.Event() - - // Setup - setupTests := []APITest{ - { - Name: "PUT /:tenantID - create tenant", - Request: suite.AuthRequest(httpclient.Request{ - Method: httpclient.MethodPUT, - Path: "/tenants/" + tenantID, - }), - Expected: APITestExpectation{ - Match: &httpclient.Response{ - StatusCode: http.StatusCreated, - }, - }, - }, - { - Name: "PUT mockserver/destinations - setup mock", - Request: httpclient.Request{ - Method: httpclient.MethodPUT, - BaseURL: suite.mockServerBaseURL, - Path: "/destinations", - Body: map[string]interface{}{ - "id": destinationID, - "type": "webhook", - "config": map[string]interface{}{ - "url": fmt.Sprintf("%s/webhook/%s", suite.mockServerBaseURL, destinationID), - }, - }, - }, - Expected: APITestExpectation{ - Match: &httpclient.Response{ - StatusCode: http.StatusOK, - }, - }, - }, - { - Name: "POST /:tenantID/destinations - create destination", - Request: suite.AuthRequest(httpclient.Request{ - Method: httpclient.MethodPOST, - Path: "/tenants/" + tenantID + "/destinations", - Body: map[string]interface{}{ - "id": destinationID, - "type": "webhook", - "topics": "*", - "config": map[string]interface{}{ - "url": fmt.Sprintf("%s/webhook/%s", suite.mockServerBaseURL, destinationID), - }, - }, - }), - Expected: APITestExpectation{ - Match: &httpclient.Response{ - 
StatusCode: http.StatusCreated, - }, - }, - }, - { - Name: "POST /publish - publish event", - Request: suite.AuthRequest(httpclient.Request{ - Method: httpclient.MethodPOST, - Path: "/publish", - Body: map[string]interface{}{ - "id": eventID, - "tenant_id": tenantID, - "topic": "user.created", - "eligible_for_retry": true, - "data": map[string]interface{}{ - "user_id": "789", - }, - }, - }), - Expected: APITestExpectation{ - Match: &httpclient.Response{ - StatusCode: http.StatusAccepted, - }, - }, - }, - } - suite.RunAPITests(suite.T(), setupTests) - - // Wait for delivery - suite.waitForDeliveries(suite.T(), "/tenants/"+tenantID+"/deliveries", 1, 5*time.Second) - - // Test legacy endpoints - all should return deprecation headers - legacyTests := []APITest{ - // GET /:tenantID/destinations/:destinationID/events - legacy list events by destination - { - Name: "GET /:tenantID/destinations/:destinationID/events - legacy endpoint", - Request: suite.AuthRequest(httpclient.Request{ - Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/destinations/" + destinationID + "/events", - }), - Expected: APITestExpectation{ - Validate: map[string]interface{}{ - "type": "object", - "properties": map[string]interface{}{ - "statusCode": map[string]interface{}{"const": 200}, - "headers": map[string]interface{}{ - "type": "object", - "properties": map[string]interface{}{ - "Deprecation": map[string]interface{}{ - "type": "array", - "items": map[string]interface{}{ - "const": "true", - }, - }, - }, - }, - "body": map[string]interface{}{ - "type": "object", - "required": []interface{}{"data", "count"}, - "properties": map[string]interface{}{ - "data": map[string]interface{}{ - "type": "array", - "minItems": 1, - }, - "count": map[string]interface{}{"type": "number"}, - }, - }, - }, - }, - }, - }, - // GET /:tenantID/destinations/:destinationID/events/:eventID - legacy retrieve event - { - Name: "GET /:tenantID/destinations/:destinationID/events/:eventID - legacy endpoint", 
- Request: suite.AuthRequest(httpclient.Request{ - Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/destinations/" + destinationID + "/events/" + eventID, - }), - Expected: APITestExpectation{ - Validate: map[string]interface{}{ - "type": "object", - "properties": map[string]interface{}{ - "statusCode": map[string]interface{}{"const": 200}, - "headers": map[string]interface{}{ - "type": "object", - "properties": map[string]interface{}{ - "Deprecation": map[string]interface{}{ - "type": "array", - "items": map[string]interface{}{ - "const": "true", - }, - }, - }, - }, - "body": map[string]interface{}{ - "type": "object", - "required": []interface{}{"id", "topic"}, - "properties": map[string]interface{}{ - "id": map[string]interface{}{"const": eventID}, - "topic": map[string]interface{}{"const": "user.created"}, - }, - }, - }, - }, - }, - }, - // GET /:tenantID/events/:eventID/deliveries - legacy list deliveries by event - { - Name: "GET /:tenantID/events/:eventID/deliveries - legacy endpoint (returns bare array)", - Request: suite.AuthRequest(httpclient.Request{ - Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/events/" + eventID + "/deliveries", - }), - Expected: APITestExpectation{ - Validate: map[string]interface{}{ - "type": "object", - "properties": map[string]interface{}{ - "statusCode": map[string]interface{}{"const": 200}, - "headers": map[string]interface{}{ - "type": "object", - "properties": map[string]interface{}{ - "Deprecation": map[string]interface{}{ - "type": "array", - "items": map[string]interface{}{ - "const": "true", - }, - }, - }, - }, - // Legacy endpoint returns bare array, not {data: [...]} - "body": map[string]interface{}{ - "type": "array", - "minItems": 1, - "items": map[string]interface{}{ - "type": "object", - "required": []interface{}{"id", "status", "delivered_at"}, - "properties": map[string]interface{}{ - "id": map[string]interface{}{"type": "string"}, - "status": map[string]interface{}{"type": 
"string"}, - "delivered_at": map[string]interface{}{"type": "string"}, - }, - }, - }, - }, - }, - }, - }, - // POST /:tenantID/destinations/:destinationID/events/:eventID/retry - legacy retry - { - Name: "POST /:tenantID/destinations/:destinationID/events/:eventID/retry - legacy endpoint", - Request: suite.AuthRequest(httpclient.Request{ - Method: httpclient.MethodPOST, - Path: "/tenants/" + tenantID + "/destinations/" + destinationID + "/events/" + eventID + "/retry", - }), - Expected: APITestExpectation{ - Validate: map[string]interface{}{ - "type": "object", - "properties": map[string]interface{}{ - "statusCode": map[string]interface{}{"const": 202}, - "headers": map[string]interface{}{ - "type": "object", - "properties": map[string]interface{}{ - "Deprecation": map[string]interface{}{ - "type": "array", - "items": map[string]interface{}{ - "const": "true", - }, - }, - }, - }, - "body": map[string]interface{}{ - "type": "object", - "properties": map[string]interface{}{ - "success": map[string]interface{}{"const": true}, - }, - }, - }, - }, - }, - }, - } - suite.RunAPITests(suite.T(), legacyTests) - - // Cleanup - cleanupTests := []APITest{ - { - Name: "DELETE mockserver/destinations/:destinationID", - Request: httpclient.Request{ - Method: httpclient.MethodDELETE, - BaseURL: suite.mockServerBaseURL, - Path: "/destinations/" + destinationID, - }, - Expected: APITestExpectation{ - Match: &httpclient.Response{ - StatusCode: http.StatusOK, - }, - }, - }, - { - Name: "DELETE /:tenantID", - Request: suite.AuthRequest(httpclient.Request{ - Method: httpclient.MethodDELETE, - Path: "/tenants/" + tenantID, - }), - Expected: APITestExpectation{ - Match: &httpclient.Response{ - StatusCode: http.StatusOK, - }, - }, - }, - } - suite.RunAPITests(suite.T(), cleanupTests) -} - // TestAdminLogEndpoints tests the admin-only /events and /deliveries endpoints. // // These endpoints allow cross-tenant queries with optional tenant_id filter. 
diff --git a/internal/apirouter/legacy_handlers.go b/internal/apirouter/legacy_handlers.go deleted file mode 100644 index 7e250c3f..00000000 --- a/internal/apirouter/legacy_handlers.go +++ /dev/null @@ -1,265 +0,0 @@ -package apirouter - -import ( - "errors" - "net/http" - "strconv" - - "github.com/gin-gonic/gin" - "github.com/hookdeck/outpost/internal/deliverymq" - "github.com/hookdeck/outpost/internal/logging" - "github.com/hookdeck/outpost/internal/logstore" - "github.com/hookdeck/outpost/internal/models" - "go.uber.org/zap" -) - -var ( - ErrDestinationDisabled = errors.New("destination is disabled") -) - -// LegacyHandlers provides backward-compatible endpoints for the old API. -// These handlers are deprecated and will be removed in a future version. -type LegacyHandlers struct { - logger *logging.Logger - entityStore models.EntityStore - logStore logstore.LogStore - deliveryMQ *deliverymq.DeliveryMQ -} - -func NewLegacyHandlers( - logger *logging.Logger, - entityStore models.EntityStore, - logStore logstore.LogStore, - deliveryMQ *deliverymq.DeliveryMQ, -) *LegacyHandlers { - return &LegacyHandlers{ - logger: logger, - entityStore: entityStore, - logStore: logStore, - deliveryMQ: deliveryMQ, - } -} - -// setDeprecationHeader adds deprecation warning headers to the response. -func setDeprecationHeader(c *gin.Context, newEndpoint string) { - c.Header("Deprecation", "true") - c.Header("X-Deprecated-Message", "This endpoint is deprecated. Use "+newEndpoint+" instead.") -} - -// RetryByEventDestination handles the legacy retry endpoint: -// POST /:tenantID/destinations/:destinationID/events/:eventID/retry -// -// This shim finds the latest delivery for the event+destination pair and retries it. -// Deprecated: Use POST /:tenantID/deliveries/:deliveryID/retry instead. 
-func (h *LegacyHandlers) RetryByEventDestination(c *gin.Context) { - setDeprecationHeader(c, "POST /:tenantID/deliveries/:deliveryID/retry") - - tenant := mustTenantFromContext(c) - if tenant == nil { - return - } - destinationID := c.Param("destinationID") - eventID := c.Param("eventID") - - // 1. Check destination exists and is enabled - destination, err := h.entityStore.RetrieveDestination(c.Request.Context(), tenant.ID, destinationID) - if err != nil { - AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) - return - } - if destination == nil { - AbortWithError(c, http.StatusNotFound, NewErrNotFound("destination")) - return - } - if destination.DisabledAt != nil { - AbortWithError(c, http.StatusBadRequest, NewErrBadRequest(ErrDestinationDisabled)) - return - } - - // 2. Retrieve event - event, err := h.logStore.RetrieveEvent(c.Request.Context(), logstore.RetrieveEventRequest{ - TenantID: tenant.ID, - EventID: eventID, - }) - if err != nil { - AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) - return - } - if event == nil { - AbortWithError(c, http.StatusNotFound, NewErrNotFound("event")) - return - } - - // 3. Create and publish retry delivery task - task := models.NewManualDeliveryTask(*event, destination.ID) - - if err := h.deliveryMQ.Publish(c.Request.Context(), task); err != nil { - AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) - return - } - - h.logger.Ctx(c.Request.Context()).Audit("manual retry initiated (legacy)", - zap.String("event_id", event.ID), - zap.String("destination_id", destination.ID)) - - c.JSON(http.StatusAccepted, gin.H{ - "success": true, - }) -} - -// ListEventsByDestination handles the legacy endpoint: -// GET /:tenantID/destinations/:destinationID/events -// -// This shim queries deliveries filtered by destination and returns unique events. -// Deprecated: Use GET /:tenantID/deliveries?destination_id=X&include=event instead. 
-func (h *LegacyHandlers) ListEventsByDestination(c *gin.Context) { - setDeprecationHeader(c, "GET /:tenantID/deliveries?destination_id=X&include=event") - - tenant := mustTenantFromContext(c) - if tenant == nil { - return - } - destinationID := c.Param("destinationID") - - // Parse and validate cursors (next/prev are mutually exclusive) - cursors, errResp := ParseCursors(c) - if errResp != nil { - AbortWithError(c, errResp.Code, *errResp) - return - } - - // Parse pagination params - limit := 100 - if limitStr := c.Query("limit"); limitStr != "" { - if parsed, err := strconv.Atoi(limitStr); err == nil && parsed > 0 { - limit = parsed - } - } - - // Query deliveries for this destination with pagination - response, err := h.logStore.ListDelivery(c.Request.Context(), logstore.ListDeliveryRequest{ - TenantID: tenant.ID, - DestinationIDs: []string{destinationID}, - Limit: limit, - Next: cursors.Next, - Prev: cursors.Prev, - SortOrder: "desc", - }) - if err != nil { - AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) - return - } - - // Extract unique events (by event ID, keep first occurrence) - seen := make(map[string]bool) - events := []models.Event{} - for _, dr := range response.Data { - if !seen[dr.Event.ID] { - seen[dr.Event.ID] = true - events = append(events, *dr.Event) - } - } - - // Return empty array (not null) if no events - if len(events) == 0 { - c.JSON(http.StatusOK, gin.H{ - "data": []models.Event{}, - "next": "", - "prev": "", - "count": 0, - }) - return - } - - c.JSON(http.StatusOK, gin.H{ - "data": events, - "next": response.Next, - "prev": response.Prev, - "count": len(events), - }) -} - -// RetrieveEventByDestination handles the legacy endpoint: -// GET /:tenantID/destinations/:destinationID/events/:eventID -// -// Deprecated: Use GET /:tenantID/events/:eventID instead. 
-func (h *LegacyHandlers) RetrieveEventByDestination(c *gin.Context) { - setDeprecationHeader(c, "GET /:tenantID/events/:eventID") - - tenant := mustTenantFromContext(c) - if tenant == nil { - return - } - eventID := c.Param("eventID") - // destinationID is available but not strictly needed for retrieval - - event, err := h.logStore.RetrieveEvent(c.Request.Context(), logstore.RetrieveEventRequest{ - TenantID: tenant.ID, - EventID: eventID, - }) - if err != nil { - AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) - return - } - if event == nil { - AbortWithError(c, http.StatusNotFound, NewErrNotFound("event")) - return - } - - c.JSON(http.StatusOK, event) -} - -// LegacyDeliveryResponse matches the old delivery response format. -type LegacyDeliveryResponse struct { - ID string `json:"id"` - DeliveredAt string `json:"delivered_at"` - Status string `json:"status"` - Code string `json:"code"` - ResponseData map[string]interface{} `json:"response_data"` -} - -// ListDeliveriesByEvent handles the legacy endpoint: -// GET /:tenantID/events/:eventID/deliveries -// -// Deprecated: Use GET /:tenantID/deliveries?event_id=X instead. 
-func (h *LegacyHandlers) ListDeliveriesByEvent(c *gin.Context) { - setDeprecationHeader(c, "GET /:tenantID/deliveries?event_id=X") - - tenant := mustTenantFromContext(c) - if tenant == nil { - return - } - eventID := c.Param("eventID") - - // Query deliveries for this event - response, err := h.logStore.ListDelivery(c.Request.Context(), logstore.ListDeliveryRequest{ - TenantID: tenant.ID, - EventID: eventID, - Limit: 100, - SortOrder: "desc", - }) - if err != nil { - AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) - return - } - - // Return empty array (not null) if no deliveries - if len(response.Data) == 0 { - c.JSON(http.StatusOK, []LegacyDeliveryResponse{}) - return - } - - // Transform to legacy delivery response format (bare array) - deliveries := make([]LegacyDeliveryResponse, len(response.Data)) - for i, dr := range response.Data { - deliveries[i] = LegacyDeliveryResponse{ - ID: dr.Delivery.ID, - DeliveredAt: dr.Delivery.Time.UTC().Format("2006-01-02T15:04:05Z07:00"), - Status: dr.Delivery.Status, - Code: dr.Delivery.Code, - ResponseData: dr.Delivery.ResponseData, - } - } - - c.JSON(http.StatusOK, deliveries) -} diff --git a/internal/apirouter/legacy_handlers_test.go b/internal/apirouter/legacy_handlers_test.go deleted file mode 100644 index 98822806..00000000 --- a/internal/apirouter/legacy_handlers_test.go +++ /dev/null @@ -1,348 +0,0 @@ -package apirouter_test - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/hookdeck/outpost/internal/idgen" - "github.com/hookdeck/outpost/internal/models" - "github.com/hookdeck/outpost/internal/util/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestLegacyRetryByEventDestination(t *testing.T) { - t.Parallel() - - result := setupTestRouterFull(t, "", "") - - // Create a tenant and destination - tenantID := idgen.String() - destinationID := idgen.Destination() - 
require.NoError(t, result.entityStore.UpsertTenant(context.Background(), models.Tenant{ - ID: tenantID, - CreatedAt: time.Now(), - })) - require.NoError(t, result.entityStore.UpsertDestination(context.Background(), models.Destination{ - ID: destinationID, - TenantID: tenantID, - Type: "webhook", - Topics: []string{"*"}, - CreatedAt: time.Now(), - })) - - // Seed a delivery event - eventID := idgen.Event() - deliveryID := idgen.Delivery() - eventTime := time.Now().Add(-1 * time.Hour).Truncate(time.Millisecond) - deliveryTime := eventTime.Add(100 * time.Millisecond) - - event := testutil.EventFactory.AnyPointer( - testutil.EventFactory.WithID(eventID), - testutil.EventFactory.WithTenantID(tenantID), - testutil.EventFactory.WithDestinationID(destinationID), - testutil.EventFactory.WithTopic("order.created"), - testutil.EventFactory.WithTime(eventTime), - ) - - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("failed"), - testutil.DeliveryFactory.WithTime(deliveryTime), - ) - - de := &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_%s", eventID, deliveryID), - DestinationID: destinationID, - Event: *event, - Delivery: delivery, - } - - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) - - t.Run("should retry via legacy endpoint and return deprecation header", func(t *testing.T) { - w := httptest.NewRecorder() - req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/destinations/"+destinationID+"/events/"+eventID+"/retry", nil) - result.router.ServeHTTP(w, req) - - assert.Equal(t, http.StatusAccepted, w.Code) - assert.Equal(t, "true", w.Header().Get("Deprecation")) - assert.Contains(t, w.Header().Get("X-Deprecated-Message"), "POST /:tenantID/deliveries/:deliveryID/retry") - - var response 
map[string]interface{} - require.NoError(t, json.Unmarshal(w.Body.Bytes(), &response)) - assert.Equal(t, true, response["success"]) - }) - - t.Run("should return 404 for non-existent event", func(t *testing.T) { - w := httptest.NewRecorder() - req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/destinations/"+destinationID+"/events/nonexistent/retry", nil) - result.router.ServeHTTP(w, req) - - assert.Equal(t, http.StatusNotFound, w.Code) - }) - - t.Run("should return 404 for non-existent destination", func(t *testing.T) { - w := httptest.NewRecorder() - req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/destinations/nonexistent/events/"+eventID+"/retry", nil) - result.router.ServeHTTP(w, req) - - assert.Equal(t, http.StatusNotFound, w.Code) - }) - - t.Run("should return 400 when destination is disabled", func(t *testing.T) { - // Create a disabled destination - disabledDestinationID := idgen.Destination() - disabledAt := time.Now() - require.NoError(t, result.entityStore.UpsertDestination(context.Background(), models.Destination{ - ID: disabledDestinationID, - TenantID: tenantID, - Type: "webhook", - Topics: []string{"*"}, - CreatedAt: time.Now(), - DisabledAt: &disabledAt, - })) - - w := httptest.NewRecorder() - req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/destinations/"+disabledDestinationID+"/events/"+eventID+"/retry", nil) - result.router.ServeHTTP(w, req) - - assert.Equal(t, http.StatusBadRequest, w.Code) - }) -} - -func TestLegacyListEventsByDestination(t *testing.T) { - t.Parallel() - - result := setupTestRouterFull(t, "", "") - - // Create a tenant and destination - tenantID := idgen.String() - destinationID := idgen.Destination() - require.NoError(t, result.entityStore.UpsertTenant(context.Background(), models.Tenant{ - ID: tenantID, - CreatedAt: time.Now(), - })) - require.NoError(t, result.entityStore.UpsertDestination(context.Background(), models.Destination{ - ID: destinationID, - TenantID: 
tenantID, - Type: "webhook", - Topics: []string{"*"}, - CreatedAt: time.Now(), - })) - - // Seed delivery events - eventID := idgen.Event() - deliveryID := idgen.Delivery() - eventTime := time.Now().Add(-1 * time.Hour).Truncate(time.Millisecond) - deliveryTime := eventTime.Add(100 * time.Millisecond) - - event := testutil.EventFactory.AnyPointer( - testutil.EventFactory.WithID(eventID), - testutil.EventFactory.WithTenantID(tenantID), - testutil.EventFactory.WithDestinationID(destinationID), - testutil.EventFactory.WithTopic("order.created"), - testutil.EventFactory.WithTime(eventTime), - ) - - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(deliveryTime), - ) - - de := &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_%s", eventID, deliveryID), - DestinationID: destinationID, - Event: *event, - Delivery: delivery, - } - - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) - - t.Run("should list events for destination with deprecation header", func(t *testing.T) { - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/destinations/"+destinationID+"/events", nil) - result.router.ServeHTTP(w, req) - - assert.Equal(t, http.StatusOK, w.Code) - assert.Equal(t, "true", w.Header().Get("Deprecation")) - - var response map[string]interface{} - require.NoError(t, json.Unmarshal(w.Body.Bytes(), &response)) - - events := response["data"].([]interface{}) - assert.Len(t, events, 1) - - firstEvent := events[0].(map[string]interface{}) - assert.Equal(t, eventID, firstEvent["id"]) - assert.Equal(t, "order.created", firstEvent["topic"]) - }) -} - -func TestLegacyRetrieveEventByDestination(t *testing.T) { - t.Parallel() - - 
result := setupTestRouterFull(t, "", "") - - // Create a tenant and destination - tenantID := idgen.String() - destinationID := idgen.Destination() - require.NoError(t, result.entityStore.UpsertTenant(context.Background(), models.Tenant{ - ID: tenantID, - CreatedAt: time.Now(), - })) - require.NoError(t, result.entityStore.UpsertDestination(context.Background(), models.Destination{ - ID: destinationID, - TenantID: tenantID, - Type: "webhook", - Topics: []string{"*"}, - CreatedAt: time.Now(), - })) - - // Seed a delivery event - eventID := idgen.Event() - deliveryID := idgen.Delivery() - eventTime := time.Now().Add(-1 * time.Hour).Truncate(time.Millisecond) - deliveryTime := eventTime.Add(100 * time.Millisecond) - - event := testutil.EventFactory.AnyPointer( - testutil.EventFactory.WithID(eventID), - testutil.EventFactory.WithTenantID(tenantID), - testutil.EventFactory.WithDestinationID(destinationID), - testutil.EventFactory.WithTopic("order.created"), - testutil.EventFactory.WithTime(eventTime), - ) - - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(deliveryTime), - ) - - de := &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_%s", eventID, deliveryID), - DestinationID: destinationID, - Event: *event, - Delivery: delivery, - } - - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) - - t.Run("should retrieve event by destination with deprecation header", func(t *testing.T) { - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/destinations/"+destinationID+"/events/"+eventID, nil) - result.router.ServeHTTP(w, req) - - assert.Equal(t, http.StatusOK, w.Code) - assert.Equal(t, "true", 
w.Header().Get("Deprecation")) - - var response map[string]interface{} - require.NoError(t, json.Unmarshal(w.Body.Bytes(), &response)) - - assert.Equal(t, eventID, response["id"]) - assert.Equal(t, "order.created", response["topic"]) - }) - - t.Run("should return 404 for non-existent event", func(t *testing.T) { - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/destinations/"+destinationID+"/events/nonexistent", nil) - result.router.ServeHTTP(w, req) - - assert.Equal(t, http.StatusNotFound, w.Code) - }) -} - -func TestLegacyListDeliveriesByEvent(t *testing.T) { - t.Parallel() - - result := setupTestRouterFull(t, "", "") - - // Create a tenant and destination - tenantID := idgen.String() - destinationID := idgen.Destination() - require.NoError(t, result.entityStore.UpsertTenant(context.Background(), models.Tenant{ - ID: tenantID, - CreatedAt: time.Now(), - })) - require.NoError(t, result.entityStore.UpsertDestination(context.Background(), models.Destination{ - ID: destinationID, - TenantID: tenantID, - Type: "webhook", - Topics: []string{"*"}, - CreatedAt: time.Now(), - })) - - // Seed a delivery event - eventID := idgen.Event() - deliveryID := idgen.Delivery() - eventTime := time.Now().Add(-1 * time.Hour).Truncate(time.Millisecond) - deliveryTime := eventTime.Add(100 * time.Millisecond) - - event := testutil.EventFactory.AnyPointer( - testutil.EventFactory.WithID(eventID), - testutil.EventFactory.WithTenantID(tenantID), - testutil.EventFactory.WithDestinationID(destinationID), - testutil.EventFactory.WithTopic("order.created"), - testutil.EventFactory.WithTime(eventTime), - ) - - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(deliveryTime), - ) - - de := 
&models.DeliveryEvent{ - ID: fmt.Sprintf("%s_%s", eventID, deliveryID), - DestinationID: destinationID, - Event: *event, - Delivery: delivery, - } - - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) - - t.Run("should list deliveries for event with deprecation header", func(t *testing.T) { - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/events/"+eventID+"/deliveries", nil) - result.router.ServeHTTP(w, req) - - assert.Equal(t, http.StatusOK, w.Code) - assert.Equal(t, "true", w.Header().Get("Deprecation")) - - // Old format returns bare array, not {data: [...]} - var deliveries []map[string]interface{} - require.NoError(t, json.Unmarshal(w.Body.Bytes(), &deliveries)) - - assert.Len(t, deliveries, 1) - assert.Equal(t, deliveryID, deliveries[0]["id"]) - assert.Equal(t, "success", deliveries[0]["status"]) - }) - - t.Run("should return empty list for non-existent event", func(t *testing.T) { - w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/events/nonexistent/deliveries", nil) - result.router.ServeHTTP(w, req) - - assert.Equal(t, http.StatusOK, w.Code) - - // Old format returns bare array - var deliveries []map[string]interface{} - require.NoError(t, json.Unmarshal(w.Body.Bytes(), &deliveries)) - - assert.Len(t, deliveries, 0) - }) -} diff --git a/internal/apirouter/log_handlers_test.go b/internal/apirouter/log_handlers_test.go index 54196fd5..ad101e7e 100644 --- a/internal/apirouter/log_handlers_test.go +++ b/internal/apirouter/log_handlers_test.go @@ -3,7 +3,6 @@ package apirouter_test import ( "context" "encoding/json" - "fmt" "net/http" "net/http/httptest" "testing" @@ -73,14 +72,7 @@ func TestListDeliveries(t *testing.T) { testutil.DeliveryFactory.WithTime(deliveryTime), ) - de := &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_%s", eventID, deliveryID), - DestinationID: destinationID, - 
Event: *event, - Delivery: delivery, - } - - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{event}, []*models.Delivery{delivery})) w := httptest.NewRecorder() req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries", nil) @@ -221,14 +213,7 @@ func TestListDeliveries(t *testing.T) { "status": float64(200), } - de := &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_%s", eventID, deliveryID), - DestinationID: destinationID, - Event: *event, - Delivery: delivery, - } - - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{event}, []*models.Delivery{delivery})) w := httptest.NewRecorder() req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?include=response_data", nil) @@ -344,14 +329,7 @@ func TestRetrieveDelivery(t *testing.T) { testutil.DeliveryFactory.WithTime(deliveryTime), ) - de := &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_%s", eventID, deliveryID), - DestinationID: destinationID, - Event: *event, - Delivery: delivery, - } - - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{event}, []*models.Delivery{delivery})) t.Run("should retrieve delivery by ID", func(t *testing.T) { w := httptest.NewRecorder() @@ -466,14 +444,7 @@ func TestRetrieveEvent(t *testing.T) { testutil.DeliveryFactory.WithTime(deliveryTime), ) - de := &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_%s", eventID, deliveryID), - DestinationID: destinationID, - Event: *event, - Delivery: delivery, - } - - require.NoError(t, 
result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{event}, []*models.Delivery{delivery})) t.Run("should retrieve event by ID", func(t *testing.T) { w := httptest.NewRecorder() @@ -570,14 +541,7 @@ func TestListEvents(t *testing.T) { testutil.DeliveryFactory.WithTime(deliveryTime), ) - de := &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_%s", eventID, deliveryID), - DestinationID: destinationID, - Event: *event, - Delivery: delivery, - } - - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{event}, []*models.Delivery{delivery})) w := httptest.NewRecorder() req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/events", nil) diff --git a/internal/apirouter/retry_handlers_test.go b/internal/apirouter/retry_handlers_test.go index 6e58e5e2..4cf7668e 100644 --- a/internal/apirouter/retry_handlers_test.go +++ b/internal/apirouter/retry_handlers_test.go @@ -3,7 +3,6 @@ package apirouter_test import ( "context" "encoding/json" - "fmt" "net/http" "net/http/httptest" "testing" @@ -58,14 +57,7 @@ func TestRetryDelivery(t *testing.T) { testutil.DeliveryFactory.WithTime(deliveryTime), ) - de := &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_%s", eventID, deliveryID), - DestinationID: destinationID, - Event: *event, - Delivery: delivery, - } - - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&de.Event}, []*models.Delivery{de.Delivery})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{event}, []*models.Delivery{delivery})) t.Run("should retry delivery successfully", func(t *testing.T) { w := httptest.NewRecorder() @@ -128,14 +120,7 @@ func TestRetryDelivery(t *testing.T) { 
testutil.DeliveryFactory.WithTime(deliveryTime), ) - disabledDE := &models.DeliveryEvent{ - ID: fmt.Sprintf("%s_%s", disabledEventID, disabledDeliveryID), - DestinationID: disabledDestinationID, - Event: *disabledEvent, - Delivery: disabledDelivery, - } - - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{&disabledDE.Event}, []*models.Delivery{disabledDE.Delivery})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{disabledEvent}, []*models.Delivery{disabledDelivery})) w := httptest.NewRecorder() req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/deliveries/"+disabledDeliveryID+"/retry", nil) diff --git a/internal/apirouter/router.go b/internal/apirouter/router.go index 070760ca..8d42539c 100644 --- a/internal/apirouter/router.go +++ b/internal/apirouter/router.go @@ -144,7 +144,6 @@ func NewRouter( logHandlers := NewLogHandlers(logger, logStore) retryHandlers := NewRetryHandlers(logger, entityStore, logStore, deliveryMQ) topicHandlers := NewTopicHandlers(logger, cfg.Topics) - legacyHandlers := NewLegacyHandlers(logger, entityStore, logStore, deliveryMQ) // Non-tenant routes (no :tenantID in path) nonTenantRoutes := []RouteDefinition{ @@ -353,16 +352,6 @@ func NewRouter( RequireTenantMiddleware(entityStore), }, }, - { - Method: http.MethodGet, - Path: "/:tenantID/events/:eventID/deliveries", - Handler: legacyHandlers.ListDeliveriesByEvent, - AuthScope: AuthScopeAdminOrTenant, - Mode: RouteModeAlways, - Middlewares: []gin.HandlerFunc{ - RequireTenantMiddleware(entityStore), - }, - }, // Delivery routes { @@ -397,40 +386,6 @@ func NewRouter( }, } - // Legacy routes (deprecated, for backward compatibility) - legacyRoutes := []RouteDefinition{ - { - Method: http.MethodGet, - Path: "/:tenantID/destinations/:destinationID/events", - Handler: legacyHandlers.ListEventsByDestination, - AuthScope: AuthScopeAdminOrTenant, - Mode: RouteModeAlways, - Middlewares: []gin.HandlerFunc{ - 
RequireTenantMiddleware(entityStore), - }, - }, - { - Method: http.MethodGet, - Path: "/:tenantID/destinations/:destinationID/events/:eventID", - Handler: legacyHandlers.RetrieveEventByDestination, - AuthScope: AuthScopeAdminOrTenant, - Mode: RouteModeAlways, - Middlewares: []gin.HandlerFunc{ - RequireTenantMiddleware(entityStore), - }, - }, - { - Method: http.MethodPost, - Path: "/:tenantID/destinations/:destinationID/events/:eventID/retry", - Handler: legacyHandlers.RetryByEventDestination, - AuthScope: AuthScopeAdminOrTenant, - Mode: RouteModeAlways, - Middlewares: []gin.HandlerFunc{ - RequireTenantMiddleware(entityStore), - }, - }, - } - // Register non-tenant routes at root registerRoutes(apiRouter, cfg, nonTenantRoutes) @@ -440,7 +395,6 @@ func NewRouter( tenantScopedRoutes = append(tenantScopedRoutes, portalRoutes...) tenantScopedRoutes = append(tenantScopedRoutes, tenantAgnosticRoutes...) tenantScopedRoutes = append(tenantScopedRoutes, tenantSpecificRoutes...) - tenantScopedRoutes = append(tenantScopedRoutes, legacyRoutes...) 
// Register tenant-scoped routes under /tenants prefix tenantsGroup := apiRouter.Group("/tenants") diff --git a/internal/app/app.go b/internal/app/app.go index 7d0ced4f..3dd86e61 100644 --- a/internal/app/app.go +++ b/internal/app/app.go @@ -175,15 +175,13 @@ func (a *App) configureIDGenerators() error { zap.String("type", a.config.IDGen.Type), zap.String("event_prefix", a.config.IDGen.EventPrefix), zap.String("destination_prefix", a.config.IDGen.DestinationPrefix), - zap.String("delivery_prefix", a.config.IDGen.DeliveryPrefix), - zap.String("delivery_event_prefix", a.config.IDGen.DeliveryEventPrefix)) + zap.String("delivery_prefix", a.config.IDGen.DeliveryPrefix)) if err := idgen.Configure(idgen.IDGenConfig{ - Type: a.config.IDGen.Type, - EventPrefix: a.config.IDGen.EventPrefix, - DestinationPrefix: a.config.IDGen.DestinationPrefix, - DeliveryPrefix: a.config.IDGen.DeliveryPrefix, - DeliveryEventPrefix: a.config.IDGen.DeliveryEventPrefix, + Type: a.config.IDGen.Type, + EventPrefix: a.config.IDGen.EventPrefix, + DestinationPrefix: a.config.IDGen.DestinationPrefix, + DeliveryPrefix: a.config.IDGen.DeliveryPrefix, }); err != nil { a.logger.Error("failed to configure ID generators", zap.Error(err)) return err diff --git a/internal/config/id_gen.go b/internal/config/id_gen.go index 790b4c02..ec556b75 100644 --- a/internal/config/id_gen.go +++ b/internal/config/id_gen.go @@ -2,9 +2,8 @@ package config // IDGenConfig is the configuration for ID generation type IDGenConfig struct { - Type string `yaml:"type" env:"IDGEN_TYPE" desc:"ID generation type for all entities: uuidv4, uuidv7, nanoid. Default: uuidv4" required:"N"` - EventPrefix string `yaml:"event_prefix" env:"IDGEN_EVENT_PREFIX" desc:"Prefix for event IDs, prepended with underscore (e.g., 'evt_123'). Default: empty (no prefix)" required:"N"` - DestinationPrefix string `yaml:"destination_prefix" env:"IDGEN_DESTINATION_PREFIX" desc:"Prefix for destination IDs, prepended with underscore (e.g., 'dst_123'). 
Default: empty (no prefix)" required:"N"` - DeliveryPrefix string `yaml:"delivery_prefix" env:"IDGEN_DELIVERY_PREFIX" desc:"Prefix for delivery IDs, prepended with underscore (e.g., 'dlv_123'). Default: empty (no prefix)" required:"N"` - DeliveryEventPrefix string `yaml:"delivery_event_prefix" env:"IDGEN_DELIVERY_EVENT_PREFIX" desc:"Prefix for delivery event IDs, prepended with underscore (e.g., 'dev_123'). Default: empty (no prefix)" required:"N"` + Type string `yaml:"type" env:"IDGEN_TYPE" desc:"ID generation type for all entities: uuidv4, uuidv7, nanoid. Default: uuidv4" required:"N"` + EventPrefix string `yaml:"event_prefix" env:"IDGEN_EVENT_PREFIX" desc:"Prefix for event IDs, prepended with underscore (e.g., 'evt_123'). Default: empty (no prefix)" required:"N"` + DestinationPrefix string `yaml:"destination_prefix" env:"IDGEN_DESTINATION_PREFIX" desc:"Prefix for destination IDs, prepended with underscore (e.g., 'dst_123'). Default: empty (no prefix)" required:"N"` + DeliveryPrefix string `yaml:"delivery_prefix" env:"IDGEN_DELIVERY_PREFIX" desc:"Prefix for delivery IDs, prepended with underscore (e.g., 'dlv_123'). 
Default: empty (no prefix)" required:"N"` } diff --git a/internal/idgen/idgen.go b/internal/idgen/idgen.go index 894466f5..1fff29d0 100644 --- a/internal/idgen/idgen.go +++ b/internal/idgen/idgen.go @@ -14,11 +14,10 @@ var ( func init() { // Initialize with default UUID v4 generator globalGenerator = &IDGenerator{ - generator: &uuidv4Generator{}, - eventPrefix: "", - destinationPrefix: "", - deliveryPrefix: "", - deliveryEventPrefix: "", + generator: &uuidv4Generator{}, + eventPrefix: "", + destinationPrefix: "", + deliveryPrefix: "", } } @@ -27,11 +26,10 @@ type idGenerator interface { } type IDGenerator struct { - generator idGenerator - eventPrefix string - destinationPrefix string - deliveryPrefix string - deliveryEventPrefix string + generator idGenerator + eventPrefix string + destinationPrefix string + deliveryPrefix string } func (g *IDGenerator) Event() string { @@ -46,10 +44,6 @@ func (g *IDGenerator) Delivery() string { return g.generate(g.deliveryPrefix) } -func (g *IDGenerator) DeliveryEvent() string { - return g.generate(g.deliveryEventPrefix) -} - func (g *IDGenerator) Installation() string { return g.generate("") } @@ -113,11 +107,10 @@ func (g *nanoidGenerator) generate() string { } type IDGenConfig struct { - Type string - EventPrefix string - DestinationPrefix string - DeliveryPrefix string - DeliveryEventPrefix string + Type string + EventPrefix string + DestinationPrefix string + DeliveryPrefix string } func Configure(cfg IDGenConfig) error { @@ -127,11 +120,10 @@ func Configure(cfg IDGenConfig) error { } globalGenerator = &IDGenerator{ - generator: gen, - eventPrefix: cfg.EventPrefix, - destinationPrefix: cfg.DestinationPrefix, - deliveryPrefix: cfg.DeliveryPrefix, - deliveryEventPrefix: cfg.DeliveryEventPrefix, + generator: gen, + eventPrefix: cfg.EventPrefix, + destinationPrefix: cfg.DestinationPrefix, + deliveryPrefix: cfg.DeliveryPrefix, } return nil @@ -149,10 +141,6 @@ func Delivery() string { return globalGenerator.Delivery() } -func 
DeliveryEvent() string { - return globalGenerator.DeliveryEvent() -} - func Installation() string { return globalGenerator.Installation() } diff --git a/internal/models/event.go b/internal/models/event.go index 5b950c2e..6a607075 100644 --- a/internal/models/event.go +++ b/internal/models/event.go @@ -6,7 +6,6 @@ import ( "fmt" "time" - "github.com/hookdeck/outpost/internal/idgen" "github.com/hookdeck/outpost/internal/mqs" ) @@ -128,58 +127,6 @@ func NewManualDeliveryTask(event Event, destinationID string) DeliveryTask { return task } -type DeliveryEvent struct { - ID string - Attempt int - DestinationID string - Event Event - Delivery *Delivery - Telemetry *DeliveryEventTelemetry - Manual bool // Indicates if this is a manual retry -} - -var _ mqs.IncomingMessage = &DeliveryEvent{} - -func (e *DeliveryEvent) FromMessage(msg *mqs.Message) error { - return json.Unmarshal(msg.Body, e) -} - -func (e *DeliveryEvent) ToMessage() (*mqs.Message, error) { - data, err := json.Marshal(e) - if err != nil { - return nil, err - } - return &mqs.Message{Body: data}, nil -} - -// GetRetryID returns the ID used for scheduling retries. -// -// We use Event.ID + DestinationID (not DeliveryEvent.ID) because: -// 1. Multiple destinations: The same event can be delivered to multiple destinations. -// Each needs its own retry, so we include DestinationID to avoid collisions. -// 2. Manual retry cancellation: When a manual retry succeeds, it must cancel any -// pending automatic retry. Manual retries create a NEW DeliveryEvent with a NEW ID, -// but share the same Event.ID + DestinationID. This allows Cancel() to find and -// remove the pending automatic retry. 
-func (e *DeliveryEvent) GetRetryID() string { - return e.Event.ID + ":" + e.DestinationID -} - -func NewDeliveryEvent(event Event, destinationID string) DeliveryEvent { - return DeliveryEvent{ - ID: idgen.DeliveryEvent(), - DestinationID: destinationID, - Event: event, - Attempt: 0, - } -} - -func NewManualDeliveryEvent(event Event, destinationID string) DeliveryEvent { - deliveryEvent := NewDeliveryEvent(event, destinationID) - deliveryEvent.Manual = true - return deliveryEvent -} - const ( DeliveryStatusSuccess = "success" DeliveryStatusFailed = "failed" From 45af90017263e6e7a4736db2f97b3ec86b0c086e Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Thu, 22 Jan 2026 05:15:23 +0700 Subject: [PATCH 07/23] docs: update README and comments to reflect DeliveryEvent removal - Update chlogstore/README.md method names and SQL examples - Update pglogstore/README.md method names - Update tracer_test.go comment to reference DeliveryTask Co-Authored-By: Claude Opus 4.5 --- internal/deliverymq/retry.go | 2 +- internal/deliverymq/tracer_test.go | 2 +- internal/eventtracer/eventtracer.go | 2 +- internal/logstore/chlogstore/README.md | 16 ++++++++-------- internal/logstore/pglogstore/README.md | 4 ++-- internal/models/event.go | 12 ++++++------ 6 files changed, 19 insertions(+), 19 deletions(-) diff --git a/internal/deliverymq/retry.go b/internal/deliverymq/retry.go index da699c0e..9ea1a84c 100644 --- a/internal/deliverymq/retry.go +++ b/internal/deliverymq/retry.go @@ -63,7 +63,7 @@ type RetryTask struct { TenantID string DestinationID string Attempt int - Telemetry *models.DeliveryEventTelemetry + Telemetry *models.DeliveryTelemetry } func (m *RetryTask) ToString() (string, error) { diff --git a/internal/deliverymq/tracer_test.go b/internal/deliverymq/tracer_test.go index 307ef4c3..788d473f 100644 --- a/internal/deliverymq/tracer_test.go +++ b/internal/deliverymq/tracer_test.go @@ -8,7 +8,7 @@ and metrics functionality. 
These are recommendations and can be adapted based on Potential Test Scenarios: 1. Successful Delivery - - Verify eventTracer.Deliver() is called with correct DeliveryEvent + - Verify eventTracer.Deliver() is called with correct DeliveryTask - Verify span is created and ended properly - Verify metrics are recorded: * Delivery latency diff --git a/internal/eventtracer/eventtracer.go b/internal/eventtracer/eventtracer.go index f907a593..de669245 100644 --- a/internal/eventtracer/eventtracer.go +++ b/internal/eventtracer/eventtracer.go @@ -50,7 +50,7 @@ func (t *eventTracerImpl) Receive(ctx context.Context, event *models.Event) (con func (t *eventTracerImpl) StartDelivery(_ context.Context, task *models.DeliveryTask) (context.Context, trace.Span) { ctx, span := t.tracer.Start(t.getRemoteEventSpanContext(&task.Event), "EventTracer.StartDelivery") - task.Telemetry = &models.DeliveryEventTelemetry{ + task.Telemetry = &models.DeliveryTelemetry{ TraceID: span.SpanContext().TraceID().String(), SpanID: span.SpanContext().SpanID().String(), } diff --git a/internal/logstore/chlogstore/README.md b/internal/logstore/chlogstore/README.md index 360452ce..8c89605a 100644 --- a/internal/logstore/chlogstore/README.md +++ b/internal/logstore/chlogstore/README.md @@ -37,7 +37,7 @@ For most use cases (log viewing), brief duplicates are acceptable. ## Operations -### ListDeliveryEvent +### ListDelivery Direct index scan with cursor-based pagination. @@ -45,7 +45,7 @@ Direct index scan with cursor-based pagination. SELECT event_id, tenant_id, destination_id, topic, eligible_for_retry, event_time, metadata, data, - delivery_id, delivery_event_id, status, delivery_time, code, response_data + delivery_id, status, delivery_time, code, response_data FROM event_log WHERE tenant_id = ? AND [optional filters: destination_id, status, topic, time ranges] @@ -80,7 +80,7 @@ LIMIT 1 With destination filter, adds `AND destination_id = ?`. 
-### InsertManyDeliveryEvent +### InsertMany Batch insert using ClickHouse's native batch API. @@ -89,11 +89,11 @@ batch, _ := conn.PrepareBatch(ctx, ` INSERT INTO event_log ( event_id, tenant_id, destination_id, topic, eligible_for_retry, event_time, metadata, data, - delivery_id, delivery_event_id, status, delivery_time, code, response_data + delivery_id, status, delivery_time, code, response_data ) `) -for _, de := range deliveryEvents { - batch.Append(...) +for i := range events { + batch.Append(events[i], deliveries[i], ...) } batch.Send() ``` @@ -104,6 +104,6 @@ batch.Send() | Operation | Complexity | Notes | |-----------|------------|-------| -| ListDeliveryEvent | O(limit) | Index scan, stops at LIMIT | +| ListDelivery | O(limit) | Index scan, stops at LIMIT | | RetrieveEvent | O(1) | Single row lookup via bloom filter | -| InsertManyDeliveryEvent | O(batch) | Batch insert, async dedup | +| InsertMany | O(batch) | Batch insert, async dedup | diff --git a/internal/logstore/pglogstore/README.md b/internal/logstore/pglogstore/README.md index 7d76df6d..58c05e81 100644 --- a/internal/logstore/pglogstore/README.md +++ b/internal/logstore/pglogstore/README.md @@ -14,7 +14,7 @@ All tables are partitioned by time. ## Operations -### ListDeliveryEvent +### ListDelivery Query pattern: **Index → Hydrate** @@ -49,7 +49,7 @@ WHERE tenant_id = $1 AND id = $2 AND EXISTS (SELECT 1 FROM event_delivery_index WHERE event_id = $2 AND destination_id = $3) ``` -### InsertManyDeliveryEvent +### InsertMany Batch insert using `unnest()` arrays in a single transaction across all 3 tables. 
diff --git a/internal/models/event.go b/internal/models/event.go index 6a607075..4d7a68ff 100644 --- a/internal/models/event.go +++ b/internal/models/event.go @@ -66,7 +66,7 @@ func (e *Event) ToMessage() (*mqs.Message, error) { return &mqs.Message{Body: data}, nil } -type DeliveryEventTelemetry struct { +type DeliveryTelemetry struct { TraceID string SpanID string } @@ -74,11 +74,11 @@ type DeliveryEventTelemetry struct { // DeliveryTask represents a task to deliver an event to a destination. // This is a message type (no ID) used by: publishmq -> deliverymq, retry -> deliverymq type DeliveryTask struct { - Event Event `json:"event"` - DestinationID string `json:"destination_id"` - Attempt int `json:"attempt"` - Manual bool `json:"manual"` - Telemetry *DeliveryEventTelemetry `json:"telemetry,omitempty"` + Event Event `json:"event"` + DestinationID string `json:"destination_id"` + Attempt int `json:"attempt"` + Manual bool `json:"manual"` + Telemetry *DeliveryTelemetry `json:"telemetry,omitempty"` } var _ mqs.IncomingMessage = &DeliveryTask{} From 7a0972acab09b92aa4f5cb1fe489883832762c54 Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Thu, 22 Jan 2026 05:25:40 +0700 Subject: [PATCH 08/23] refactor: remove delivery_event_id column and legacy API docs Co-Authored-By: Claude Opus 4.5 --- docs/apis/openapi.yaml | 202 ------------------ internal/logstore/chlogstore/chlogstore.go | 5 +- .../migrations/clickhouse/000001_init.up.sql | 1 - 3 files changed, 1 insertion(+), 207 deletions(-) diff --git a/docs/apis/openapi.yaml b/docs/apis/openapi.yaml index 39760b65..66b2165f 100644 --- a/docs/apis/openapi.yaml +++ b/docs/apis/openapi.yaml @@ -3790,208 +3790,6 @@ paths: "404": description: Tenant or Event not found. - /tenants/{tenant_id}/destinations/{destination_id}/events: - parameters: - - name: tenant_id - in: path - required: true - schema: - type: string - description: The ID of the tenant. Required when using AdminApiKey authentication. 
- - name: destination_id - in: path - required: true - schema: - type: string - description: The ID of the destination. - get: - tags: [Events] - summary: List Events by Destination - deprecated: true - description: | - **Deprecated**: Use `GET /tenants/{tenant_id}/deliveries?destination_id={destination_id}` instead. - - Retrieves events associated with a specific destination for the tenant. - operationId: listTenantEventsByDestination - parameters: - - name: status - in: query - required: false - schema: - type: string - enum: [success, failed] - description: Filter events by delivery status. - - name: next - in: query - required: false - schema: - type: string - description: Cursor for next page of results. - - name: prev - in: query - required: false - schema: - type: string - description: Cursor for previous page of results. - - name: limit - in: query - required: false - schema: - type: integer - default: 100 - minimum: 1 - maximum: 1000 - description: Number of items per page (default 100, max 1000). - - name: time[gte] - in: query - required: false - schema: - type: string - format: date-time - description: Filter events with time >= value (RFC3339 or YYYY-MM-DD format). - - name: time[lte] - in: query - required: false - schema: - type: string - format: date-time - description: Filter events with time <= value (RFC3339 or YYYY-MM-DD format). - - name: order_by - in: query - required: false - schema: - type: string - enum: [time] - default: time - description: Field to sort by. - - name: dir - in: query - required: false - schema: - type: string - enum: [asc, desc] - default: desc - description: Sort direction. - responses: - "200": - description: A paginated list of events for the destination. 
- content: - application/json: - schema: - $ref: "#/components/schemas/EventPaginatedResult" - examples: - EventsListExample: - value: - models: - - id: "evt_123" - destination_id: "des_456" - topic: "user.created" - time: "2024-01-01T00:00:00Z" - successful_at: "2024-01-01T00:00:05Z" - metadata: { "source": "crm" } - data: { "user_id": "userid", "status": "active" } - - id: "evt_789" - destination_id: "des_456" - topic: "order.shipped" - time: "2024-01-02T10:00:00Z" - successful_at: null - metadata: { "source": "oms" } - data: { "order_id": "orderid", "tracking": "1Z..." } - pagination: - order_by: "time" - dir: "desc" - limit: 100 - next: null - prev: null - "404": - description: Tenant or Destination not found. - - /tenants/{tenant_id}/destinations/{destination_id}/events/{event_id}: - parameters: - - name: tenant_id - in: path - required: true - schema: - type: string - description: The ID of the tenant. Required when using AdminApiKey authentication. - - name: destination_id - in: path - required: true - schema: - type: string - description: The ID of the destination. - - name: event_id - in: path - required: true - schema: - type: string - description: The ID of the event. - get: - tags: [Events] - summary: Get Event by Destination - deprecated: true - description: | - **Deprecated**: Use `GET /tenants/{tenant_id}/deliveries/{delivery_id}?include=event.data` instead. - - Retrieves a specific event associated with a specific destination for the tenant. - operationId: getTenantEventByDestination - responses: - "200": - description: Event details. 
- content: - application/json: - schema: - $ref: "#/components/schemas/Event" - examples: - EventExample: # Same as /{tenant_id}/events/{event_id} example - value: - id: "evt_123" - destination_id: "des_456" - topic: "user.created" - time: "2024-01-01T00:00:00Z" - successful_at: "2024-01-01T00:00:05Z" - metadata: { "source": "crm" } - data: { "user_id": "userid", "status": "active" } - "404": - description: Tenant, Destination or Event not found. - - /tenants/{tenant_id}/destinations/{destination_id}/events/{event_id}/retry: - parameters: - - name: tenant_id - in: path - required: true - schema: - type: string - description: The ID of the tenant. Required when using AdminApiKey authentication. - - name: destination_id - in: path - required: true - schema: - type: string - description: The ID of the destination. - - name: event_id - in: path - required: true - schema: - type: string - description: The ID of the event to retry. - post: - tags: [Events] - summary: Retry Event Delivery - deprecated: true - description: | - **Deprecated**: Use `POST /tenants/{tenant_id}/deliveries/{delivery_id}/retry` instead. - - Triggers a retry for a failed event delivery. - operationId: retryTenantEvent - responses: - "202": - description: Retry accepted for processing. - "404": - description: Tenant, Destination or Event not found. - "409": # Conflict might be appropriate if event is not retryable - description: Event not eligible for retry. 
- # Tenant Agnostic Routes (JWT Auth Only) - Mirroring tenant-specific routes where AllowTenantFromJWT=true # Note: Portal routes (/portal, /token) still require AdminApiKey even when tenant is inferred from JWT, diff --git a/internal/logstore/chlogstore/chlogstore.go b/internal/logstore/chlogstore/chlogstore.go index c55c0e29..df650210 100644 --- a/internal/logstore/chlogstore/chlogstore.go +++ b/internal/logstore/chlogstore/chlogstore.go @@ -373,7 +373,6 @@ func buildDeliveryQuery(table string, req driver.ListDeliveryRequest, q paginati orderByClause := fmt.Sprintf("ORDER BY delivery_time %s, delivery_id %s", strings.ToUpper(q.SortDir), strings.ToUpper(q.SortDir)) - // Note: delivery_event_id is read but we write empty string for backwards compat query := fmt.Sprintf(` SELECT event_id, @@ -755,7 +754,7 @@ func (s *logStoreImpl) InsertMany(ctx context.Context, events []*models.Event, d deliveryBatch, err := s.chDB.PrepareBatch(ctx, fmt.Sprintf(`INSERT INTO %s ( event_id, tenant_id, destination_id, topic, eligible_for_retry, event_time, metadata, data, - delivery_id, delivery_event_id, status, delivery_time, code, response_data, manual, attempt + delivery_id, status, delivery_time, code, response_data, manual, attempt )`, s.deliveriesTable), ) if err != nil { @@ -786,7 +785,6 @@ func (s *logStoreImpl) InsertMany(ctx context.Context, events []*models.Event, d return fmt.Errorf("failed to marshal response_data: %w", err) } - // Write empty string to delivery_event_id for backwards compat (column still exists) // Use event.TenantID for tenant_id since it's denormalized from the event if err := deliveryBatch.Append( d.EventID, @@ -798,7 +796,6 @@ func (s *logStoreImpl) InsertMany(ctx context.Context, events []*models.Event, d string(metadataJSON), string(dataJSON), d.ID, - "", // delivery_event_id - empty for backwards compat d.Status, d.Time, d.Code, diff --git a/internal/migrator/migrations/clickhouse/000001_init.up.sql 
b/internal/migrator/migrations/clickhouse/000001_init.up.sql index 13f18332..1e44f64f 100644 --- a/internal/migrator/migrations/clickhouse/000001_init.up.sql +++ b/internal/migrator/migrations/clickhouse/000001_init.up.sql @@ -38,7 +38,6 @@ CREATE TABLE IF NOT EXISTS {deployment_prefix}deliveries ( -- Delivery fields delivery_id String, - delivery_event_id String, status String, -- 'success', 'failed' delivery_time DateTime64(3), code String, From 7a370fa7b44495ea69ac4a218c2944f761930bc6 Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Thu, 22 Jan 2026 05:35:26 +0700 Subject: [PATCH 09/23] docs: generate config --- docs/pages/references/configuration.mdx | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docs/pages/references/configuration.mdx b/docs/pages/references/configuration.mdx index 0003569f..d081cf40 100644 --- a/docs/pages/references/configuration.mdx +++ b/docs/pages/references/configuration.mdx @@ -74,7 +74,6 @@ Global configurations are provided through env variables or a YAML file. ConfigM | `GCP_PUBSUB_SERVICE_ACCOUNT_CREDENTIALS` | JSON string or path to a file containing GCP service account credentials for Pub/Sub. Required if GCP Pub/Sub is the chosen MQ provider and not running in an environment with implicit credentials (e.g., GCE, GKE). | `nil` | Conditional | | `GIN_MODE` | Sets the Gin framework mode (e.g., 'debug', 'release', 'test'). See Gin documentation for details. | `release` | No | | `HTTP_USER_AGENT` | Custom HTTP User-Agent string for outgoing webhook deliveries. If unset, a default (OrganizationName/Version) is used. | `nil` | No | -| `IDGEN_DELIVERY_EVENT_PREFIX` | Prefix for delivery event IDs, prepended with underscore (e.g., 'dev_123'). Default: empty (no prefix) | `nil` | No | | `IDGEN_DELIVERY_PREFIX` | Prefix for delivery IDs, prepended with underscore (e.g., 'dlv_123'). Default: empty (no prefix) | `nil` | No | | `IDGEN_DESTINATION_PREFIX` | Prefix for destination IDs, prepended with underscore (e.g., 'dst_123'). 
Default: empty (no prefix) | `nil` | No | | `IDGEN_EVENT_PREFIX` | Prefix for event IDs, prepended with underscore (e.g., 'evt_123'). Default: empty (no prefix) | `nil` | No | @@ -270,9 +269,6 @@ gin_mode: "release" http_user_agent: "" idgen: - # Prefix for delivery event IDs, prepended with underscore (e.g., 'dev_123'). Default: empty (no prefix) - delivery_event_prefix: "" - # Prefix for delivery IDs, prepended with underscore (e.g., 'dlv_123'). Default: empty (no prefix) delivery_prefix: "" From 9277a3f407cfaf9332a5094350b866dbc9a29674 Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Thu, 22 Jan 2026 06:01:41 +0700 Subject: [PATCH 10/23] refactor: change InsertMany to accept []*LogEntry Preserves Event-Delivery pairing through the insert flow, eliminating the need for eventMap reconstruction in ClickHouse implementation. Co-Authored-By: Claude Opus 4.5 --- internal/apirouter/log_handlers_test.go | 10 +- internal/apirouter/retry_handlers_test.go | 4 +- internal/logmq/batchprocessor.go | 19 ++- internal/logmq/batchprocessor_test.go | 18 +-- internal/logstore/chlogstore/chlogstore.go | 119 +++++++++---------- internal/logstore/driver/driver.go | 2 +- internal/logstore/drivertest/crud.go | 12 +- internal/logstore/drivertest/misc.go | 26 ++-- internal/logstore/drivertest/pagination.go | 68 ++++++----- internal/logstore/logstore.go | 3 +- internal/logstore/memlogstore/memlogstore.go | 13 +- internal/logstore/pglogstore/pglogstore.go | 20 +++- 12 files changed, 162 insertions(+), 152 deletions(-) diff --git a/internal/apirouter/log_handlers_test.go b/internal/apirouter/log_handlers_test.go index ad101e7e..693d04e2 100644 --- a/internal/apirouter/log_handlers_test.go +++ b/internal/apirouter/log_handlers_test.go @@ -72,7 +72,7 @@ func TestListDeliveries(t *testing.T) { testutil.DeliveryFactory.WithTime(deliveryTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{event}, []*models.Delivery{delivery})) + require.NoError(t, 
result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) w := httptest.NewRecorder() req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries", nil) @@ -213,7 +213,7 @@ func TestListDeliveries(t *testing.T) { "status": float64(200), } - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{event}, []*models.Delivery{delivery})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) w := httptest.NewRecorder() req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?include=response_data", nil) @@ -329,7 +329,7 @@ func TestRetrieveDelivery(t *testing.T) { testutil.DeliveryFactory.WithTime(deliveryTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{event}, []*models.Delivery{delivery})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) t.Run("should retrieve delivery by ID", func(t *testing.T) { w := httptest.NewRecorder() @@ -444,7 +444,7 @@ func TestRetrieveEvent(t *testing.T) { testutil.DeliveryFactory.WithTime(deliveryTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{event}, []*models.Delivery{delivery})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) t.Run("should retrieve event by ID", func(t *testing.T) { w := httptest.NewRecorder() @@ -541,7 +541,7 @@ func TestListEvents(t *testing.T) { testutil.DeliveryFactory.WithTime(deliveryTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{event}, []*models.Delivery{delivery})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) w := httptest.NewRecorder() req, _ := 
http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/events", nil) diff --git a/internal/apirouter/retry_handlers_test.go b/internal/apirouter/retry_handlers_test.go index 4cf7668e..f5cc52d7 100644 --- a/internal/apirouter/retry_handlers_test.go +++ b/internal/apirouter/retry_handlers_test.go @@ -57,7 +57,7 @@ func TestRetryDelivery(t *testing.T) { testutil.DeliveryFactory.WithTime(deliveryTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{event}, []*models.Delivery{delivery})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) t.Run("should retry delivery successfully", func(t *testing.T) { w := httptest.NewRecorder() @@ -120,7 +120,7 @@ func TestRetryDelivery(t *testing.T) { testutil.DeliveryFactory.WithTime(deliveryTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.Event{disabledEvent}, []*models.Delivery{disabledDelivery})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: disabledEvent, Delivery: disabledDelivery}})) w := httptest.NewRecorder() req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/deliveries/"+disabledDeliveryID+"/retry", nil) diff --git a/internal/logmq/batchprocessor.go b/internal/logmq/batchprocessor.go index 9463cea9..472931a8 100644 --- a/internal/logmq/batchprocessor.go +++ b/internal/logmq/batchprocessor.go @@ -18,7 +18,7 @@ var ErrInvalidLogEntry = errors.New("invalid log entry: both event and delivery // LogStore defines the interface for persisting log entries. // This is a consumer-defined interface containing only what logmq needs. type LogStore interface { - InsertMany(ctx context.Context, events []*models.Event, deliveries []*models.Delivery) error + InsertMany(ctx context.Context, entries []*models.LogEntry) error } // BatchProcessorConfig configures the batch processor. 
@@ -74,12 +74,11 @@ func (bp *BatchProcessor) processBatch(_ string, msgs []*mqs.Message) { logger := bp.logger.Ctx(bp.ctx) logger.Info("processing batch", zap.Int("message_count", len(msgs))) - events := make([]*models.Event, 0, len(msgs)) - deliveries := make([]*models.Delivery, 0, len(msgs)) + entries := make([]*models.LogEntry, 0, len(msgs)) validMsgs := make([]*mqs.Message, 0, len(msgs)) for _, msg := range msgs { - entry := models.LogEntry{} + entry := &models.LogEntry{} if err := entry.FromMessage(msg); err != nil { logger.Error("failed to parse log entry", zap.Error(err), @@ -99,21 +98,19 @@ func (bp *BatchProcessor) processBatch(_ string, msgs []*mqs.Message) { continue } - events = append(events, entry.Event) - deliveries = append(deliveries, entry.Delivery) + entries = append(entries, entry) validMsgs = append(validMsgs, msg) } // Nothing valid to insert - if len(events) == 0 { + if len(entries) == 0 { return } - if err := bp.logStore.InsertMany(bp.ctx, events, deliveries); err != nil { - logger.Error("failed to insert events/deliveries", + if err := bp.logStore.InsertMany(bp.ctx, entries); err != nil { + logger.Error("failed to insert log entries", zap.Error(err), - zap.Int("event_count", len(events)), - zap.Int("delivery_count", len(deliveries))) + zap.Int("entry_count", len(entries))) for _, msg := range validMsgs { msg.Nack() } diff --git a/internal/logmq/batchprocessor_test.go b/internal/logmq/batchprocessor_test.go index 59df42de..7d3c08c6 100644 --- a/internal/logmq/batchprocessor_test.go +++ b/internal/logmq/batchprocessor_test.go @@ -16,27 +16,29 @@ import ( ) type mockLogStore struct { - mu sync.Mutex - events []*models.Event - deliveries []*models.Delivery - err error + mu sync.Mutex + entries []*models.LogEntry + err error } -func (m *mockLogStore) InsertMany(ctx context.Context, events []*models.Event, deliveries []*models.Delivery) error { +func (m *mockLogStore) InsertMany(ctx context.Context, entries []*models.LogEntry) error { 
m.mu.Lock() defer m.mu.Unlock() if m.err != nil { return m.err } - m.events = append(m.events, events...) - m.deliveries = append(m.deliveries, deliveries...) + m.entries = append(m.entries, entries...) return nil } func (m *mockLogStore) getInserted() (events []*models.Event, deliveries []*models.Delivery) { m.mu.Lock() defer m.mu.Unlock() - return m.events, m.deliveries + for _, entry := range m.entries { + events = append(events, entry.Event) + deliveries = append(deliveries, entry.Delivery) + } + return events, deliveries } // mockQueueMessage implements mqs.QueueMessage for testing. diff --git a/internal/logstore/chlogstore/chlogstore.go b/internal/logstore/chlogstore/chlogstore.go index df650210..0f42117f 100644 --- a/internal/logstore/chlogstore/chlogstore.go +++ b/internal/logstore/chlogstore/chlogstore.go @@ -700,12 +700,18 @@ func (s *logStoreImpl) RetrieveDelivery(ctx context.Context, req driver.Retrieve }, nil } -func (s *logStoreImpl) InsertMany(ctx context.Context, events []*models.Event, deliveries []*models.Delivery) error { - if len(events) == 0 && len(deliveries) == 0 { +func (s *logStoreImpl) InsertMany(ctx context.Context, entries []*models.LogEntry) error { + if len(entries) == 0 { return nil } - if len(events) > 0 { + // Extract and dedupe events by ID + eventMap := make(map[string]*models.Event) + for _, entry := range entries { + eventMap[entry.Event.ID] = entry.Event + } + + if len(eventMap) > 0 { eventBatch, err := s.chDB.PrepareBatch(ctx, fmt.Sprintf(`INSERT INTO %s ( event_id, tenant_id, destination_id, topic, eligible_for_retry, event_time, metadata, data @@ -715,7 +721,7 @@ func (s *logStoreImpl) InsertMany(ctx context.Context, events []*models.Event, d return fmt.Errorf("prepare events batch failed: %w", err) } - for _, e := range events { + for _, e := range eventMap { metadataJSON, err := json.Marshal(e.Metadata) if err != nil { return fmt.Errorf("failed to marshal metadata: %w", err) @@ -744,74 +750,59 @@ func (s *logStoreImpl) 
InsertMany(ctx context.Context, events []*models.Event, d } } - if len(deliveries) > 0 { - // Build a map of events for looking up event data when inserting deliveries - eventMap := make(map[string]*models.Event) - for _, e := range events { - eventMap[e.ID] = e - } + // Insert deliveries with their paired event data + deliveryBatch, err := s.chDB.PrepareBatch(ctx, + fmt.Sprintf(`INSERT INTO %s ( + event_id, tenant_id, destination_id, topic, eligible_for_retry, event_time, metadata, data, + delivery_id, status, delivery_time, code, response_data, manual, attempt + )`, s.deliveriesTable), + ) + if err != nil { + return fmt.Errorf("prepare deliveries batch failed: %w", err) + } - deliveryBatch, err := s.chDB.PrepareBatch(ctx, - fmt.Sprintf(`INSERT INTO %s ( - event_id, tenant_id, destination_id, topic, eligible_for_retry, event_time, metadata, data, - delivery_id, status, delivery_time, code, response_data, manual, attempt - )`, s.deliveriesTable), - ) + for _, entry := range entries { + event := entry.Event + d := entry.Delivery + + metadataJSON, err := json.Marshal(event.Metadata) if err != nil { - return fmt.Errorf("prepare deliveries batch failed: %w", err) + return fmt.Errorf("failed to marshal metadata: %w", err) } - - for _, d := range deliveries { - event := eventMap[d.EventID] - if event == nil { - // If event not in current batch, use delivery's data as fallback - event = &models.Event{ - ID: d.EventID, - TenantID: d.TenantID, - DestinationID: d.DestinationID, - } - } - - metadataJSON, err := json.Marshal(event.Metadata) - if err != nil { - return fmt.Errorf("failed to marshal metadata: %w", err) - } - dataJSON, err := json.Marshal(event.Data) - if err != nil { - return fmt.Errorf("failed to marshal data: %w", err) - } - responseDataJSON, err := json.Marshal(d.ResponseData) - if err != nil { - return fmt.Errorf("failed to marshal response_data: %w", err) - } - - // Use event.TenantID for tenant_id since it's denormalized from the event - if err := 
deliveryBatch.Append( - d.EventID, - event.TenantID, // Use event's TenantID, not delivery's - d.DestinationID, - event.Topic, - event.EligibleForRetry, - event.Time, - string(metadataJSON), - string(dataJSON), - d.ID, - d.Status, - d.Time, - d.Code, - string(responseDataJSON), - d.Manual, - uint32(d.Attempt), - ); err != nil { - return fmt.Errorf("deliveries batch append failed: %w", err) - } + dataJSON, err := json.Marshal(event.Data) + if err != nil { + return fmt.Errorf("failed to marshal data: %w", err) + } + responseDataJSON, err := json.Marshal(d.ResponseData) + if err != nil { + return fmt.Errorf("failed to marshal response_data: %w", err) } - if err := deliveryBatch.Send(); err != nil { - return fmt.Errorf("deliveries batch send failed: %w", err) + if err := deliveryBatch.Append( + d.EventID, + event.TenantID, + d.DestinationID, + event.Topic, + event.EligibleForRetry, + event.Time, + string(metadataJSON), + string(dataJSON), + d.ID, + d.Status, + d.Time, + d.Code, + string(responseDataJSON), + d.Manual, + uint32(d.Attempt), + ); err != nil { + return fmt.Errorf("deliveries batch append failed: %w", err) } } + if err := deliveryBatch.Send(); err != nil { + return fmt.Errorf("deliveries batch send failed: %w", err) + } + return nil } diff --git a/internal/logstore/driver/driver.go b/internal/logstore/driver/driver.go index 33b9be8b..b2ae46c4 100644 --- a/internal/logstore/driver/driver.go +++ b/internal/logstore/driver/driver.go @@ -21,7 +21,7 @@ type LogStore interface { ListDelivery(context.Context, ListDeliveryRequest) (ListDeliveryResponse, error) RetrieveEvent(ctx context.Context, request RetrieveEventRequest) (*models.Event, error) RetrieveDelivery(ctx context.Context, request RetrieveDeliveryRequest) (*DeliveryRecord, error) - InsertMany(context.Context, []*models.Event, []*models.Delivery) error + InsertMany(context.Context, []*models.LogEntry) error } type ListEventRequest struct { diff --git a/internal/logstore/drivertest/crud.go 
b/internal/logstore/drivertest/crud.go index 071656ec..09c3e063 100644 --- a/internal/logstore/drivertest/crud.go +++ b/internal/logstore/drivertest/crud.go @@ -58,7 +58,7 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { testutil.DeliveryFactory.WithTime(baseTime.Add(-30*time.Minute)), ) - err := logStore.InsertMany(ctx, []*models.Event{event}, []*models.Delivery{delivery}) + err := logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Delivery: delivery}}) require.NoError(t, err) require.NoError(t, h.FlushWrites(ctx)) @@ -91,8 +91,7 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { t.Run("batch deliveries", func(t *testing.T) { // Create 15 events spread across destinations and topics for filter testing - var events []*models.Event - var deliveries []*models.Delivery + var entries []*models.LogEntry for i := range 15 { destID := destinationIDs[i%len(destinationIDs)] @@ -119,15 +118,14 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { testutil.DeliveryFactory.WithTime(eventTime.Add(time.Millisecond)), ) - events = append(events, event) - deliveries = append(deliveries, delivery) + entries = append(entries, &models.LogEntry{Event: event, Delivery: delivery}) allDeliveries = append(allDeliveries, delivery) destinationEvents[destID] = append(destinationEvents[destID], event) topicEvents[topic] = append(topicEvents[topic], event) statusDeliveries[status] = append(statusDeliveries[status], delivery) } - err := logStore.InsertMany(ctx, events, deliveries) + err := logStore.InsertMany(ctx, entries) require.NoError(t, err) require.NoError(t, h.FlushWrites(ctx)) @@ -143,7 +141,7 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { }) t.Run("empty batch is no-op", func(t *testing.T) { - err := logStore.InsertMany(ctx, []*models.Event{}, []*models.Delivery{}) + err := logStore.InsertMany(ctx, []*models.LogEntry{}) require.NoError(t, err) }) }) diff --git a/internal/logstore/drivertest/misc.go b/internal/logstore/drivertest/misc.go index 
09bcf496..022b481e 100644 --- a/internal/logstore/drivertest/misc.go +++ b/internal/logstore/drivertest/misc.go @@ -77,7 +77,10 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.DeliveryFactory.WithTime(baseTime.Add(-5*time.Minute)), ) - require.NoError(t, logStore.InsertMany(ctx, []*models.Event{event1, event2}, []*models.Delivery{delivery1, delivery2})) + require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{ + {Event: event1, Delivery: delivery1}, + {Event: event2, Delivery: delivery2}, + })) require.NoError(t, h.FlushWrites(ctx)) t.Run("TenantIsolation", func(t *testing.T) { @@ -192,8 +195,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, destinationID := idgen.Destination() baseTime := time.Now().Truncate(time.Second) - var events []*models.Event - var deliveries []*models.Delivery + var entries []*models.LogEntry for i := range 3 { event := testutil.EventFactory.AnyPointer( testutil.EventFactory.WithID(fmt.Sprintf("sort_evt_%d", i)), @@ -208,10 +210,9 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.DeliveryFactory.WithDestinationID(destinationID), testutil.DeliveryFactory.WithTime(baseTime.Add(-time.Duration(i)*time.Hour)), ) - events = append(events, event) - deliveries = append(deliveries, delivery) + entries = append(entries, &models.LogEntry{Event: event, Delivery: delivery}) } - require.NoError(t, logStore.InsertMany(ctx, events, deliveries)) + require.NoError(t, logStore.InsertMany(ctx, entries)) startTime := baseTime.Add(-48 * time.Hour) @@ -244,7 +245,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.DeliveryFactory.WithEventID(event.ID), testutil.DeliveryFactory.WithDestinationID(destinationID), ) - require.NoError(t, logStore.InsertMany(ctx, []*models.Event{event}, []*models.Delivery{delivery})) + require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, 
Delivery: delivery}})) t.Run("nil DestinationIDs equals empty DestinationIDs", func(t *testing.T) { responseNil, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ @@ -302,7 +303,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.DeliveryFactory.WithDestinationID(destinationID), testutil.DeliveryFactory.WithTime(evt.Time), ) - require.NoError(t, logStore.InsertMany(ctx, []*models.Event{evt}, []*models.Delivery{delivery})) + require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: evt, Delivery: delivery}})) } t.Run("GTE is inclusive (>=)", func(t *testing.T) { @@ -341,7 +342,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.DeliveryFactory.WithEventID(event.ID), testutil.DeliveryFactory.WithDestinationID(destinationID), ) - require.NoError(t, logStore.InsertMany(ctx, []*models.Event{event}, []*models.Delivery{delivery})) + require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Delivery: delivery}})) t.Run("modifying ListDelivery result doesn't affect subsequent queries", func(t *testing.T) { response1, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ @@ -385,8 +386,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.DeliveryFactory.WithStatus("success"), testutil.DeliveryFactory.WithTime(deliveryTime), ) - eventBatch := []*models.Event{event} - deliveryBatch := []*models.Delivery{delivery} + entries := []*models.LogEntry{{Event: event, Delivery: delivery}} // Race N goroutines all inserting the same record const numGoroutines = 10 @@ -395,7 +395,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, wg.Add(1) go func() { defer wg.Done() - _ = logStore.InsertMany(ctx, eventBatch, deliveryBatch) + _ = logStore.InsertMany(ctx, entries) }() } wg.Wait() @@ -460,7 +460,7 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log 
testutil.DeliveryFactory.WithDestinationID(destinationID), testutil.DeliveryFactory.WithTime(baseTime.Add(time.Duration(i)*time.Second)), ) - require.NoError(t, logStore.InsertMany(ctx, []*models.Event{event}, []*models.Delivery{delivery})) + require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Delivery: delivery}})) } require.NoError(t, h.FlushWrites(ctx)) diff --git a/internal/logstore/drivertest/pagination.go b/internal/logstore/drivertest/pagination.go index 695dca74..d1bc7f2e 100644 --- a/internal/logstore/drivertest/pagination.go +++ b/internal/logstore/drivertest/pagination.go @@ -73,13 +73,11 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { }, InsertMany: func(ctx context.Context, items []*driver.DeliveryRecord) error { - events := make([]*models.Event, len(items)) - deliveries := make([]*models.Delivery, len(items)) + entries := make([]*models.LogEntry, len(items)) for i, dr := range items { - events[i] = dr.Event - deliveries[i] = dr.Delivery + entries[i] = &models.LogEntry{Event: dr.Event, Delivery: dr.Delivery} } - return logStore.InsertMany(ctx, events, deliveries) + return logStore.InsertMany(ctx, entries) }, List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*driver.DeliveryRecord], error) { @@ -164,13 +162,11 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { }, InsertMany: func(ctx context.Context, items []*driver.DeliveryRecord) error { - events := make([]*models.Event, len(items)) - deliveries := make([]*models.Delivery, len(items)) + entries := make([]*models.LogEntry, len(items)) for i, dr := range items { - events[i] = dr.Event - deliveries[i] = dr.Delivery + entries[i] = &models.LogEntry{Event: dr.Event, Delivery: dr.Delivery} } - return logStore.InsertMany(ctx, events, deliveries) + return logStore.InsertMany(ctx, entries) }, List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*driver.DeliveryRecord], error) { @@ 
-238,20 +234,23 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { }, InsertMany: func(ctx context.Context, items []*models.Event) error { - deliveries := make([]*models.Delivery, len(items)) + entries := make([]*models.LogEntry, len(items)) for i, evt := range items { deliveryTime := evt.Time.Add(100 * time.Millisecond) - deliveries[i] = &models.Delivery{ - ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), - TenantID: evt.TenantID, - EventID: evt.ID, - DestinationID: evt.DestinationID, - Status: "success", - Time: deliveryTime, - Code: "200", + entries[i] = &models.LogEntry{ + Event: evt, + Delivery: &models.Delivery{ + ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), + TenantID: evt.TenantID, + EventID: evt.ID, + DestinationID: evt.DestinationID, + Status: "success", + Time: deliveryTime, + Code: "200", + }, } } - return logStore.InsertMany(ctx, items, deliveries) + return logStore.InsertMany(ctx, entries) }, List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*models.Event], error) { @@ -320,20 +319,23 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { }, InsertMany: func(ctx context.Context, items []*models.Event) error { - deliveries := make([]*models.Delivery, len(items)) + entries := make([]*models.LogEntry, len(items)) for i, evt := range items { deliveryTime := evt.Time.Add(100 * time.Millisecond) - deliveries[i] = &models.Delivery{ - ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), - TenantID: evt.TenantID, - EventID: evt.ID, - DestinationID: evt.DestinationID, - Status: "success", - Time: deliveryTime, - Code: "200", + entries[i] = &models.LogEntry{ + Event: evt, + Delivery: &models.Delivery{ + ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), + TenantID: evt.TenantID, + EventID: evt.ID, + DestinationID: evt.DestinationID, + Status: "success", + Time: deliveryTime, + Code: "200", + }, } } - return logStore.InsertMany(ctx, items, deliveries) + return logStore.InsertMany(ctx, entries) }, List: func(ctx 
context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*models.Event], error) { @@ -445,7 +447,11 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { allDeliveries = append(allDeliveries, delivery) } - require.NoError(t, logStore.InsertMany(ctx, allEvents, allDeliveries)) + entries := make([]*models.LogEntry, len(allEvents)) + for i := range allEvents { + entries[i] = &models.LogEntry{Event: allEvents[i], Delivery: allDeliveries[i]} + } + require.NoError(t, logStore.InsertMany(ctx, entries)) require.NoError(t, h.FlushWrites(ctx)) t.Run("paginate within time-bounded window", func(t *testing.T) { diff --git a/internal/logstore/logstore.go b/internal/logstore/logstore.go index 6990cb31..e88f6e22 100644 --- a/internal/logstore/logstore.go +++ b/internal/logstore/logstore.go @@ -21,13 +21,14 @@ type ListDeliveryResponse = driver.ListDeliveryResponse type RetrieveEventRequest = driver.RetrieveEventRequest type RetrieveDeliveryRequest = driver.RetrieveDeliveryRequest type DeliveryRecord = driver.DeliveryRecord +type LogEntry = models.LogEntry type LogStore interface { ListEvent(context.Context, ListEventRequest) (ListEventResponse, error) ListDelivery(context.Context, ListDeliveryRequest) (ListDeliveryResponse, error) RetrieveEvent(ctx context.Context, request RetrieveEventRequest) (*models.Event, error) RetrieveDelivery(ctx context.Context, request RetrieveDeliveryRequest) (*DeliveryRecord, error) - InsertMany(context.Context, []*models.Event, []*models.Delivery) error + InsertMany(context.Context, []*models.LogEntry) error } type DriverOpts struct { diff --git a/internal/logstore/memlogstore/memlogstore.go b/internal/logstore/memlogstore/memlogstore.go index 11d88a98..99d1a5d0 100644 --- a/internal/logstore/memlogstore/memlogstore.go +++ b/internal/logstore/memlogstore/memlogstore.go @@ -186,17 +186,16 @@ func (s *memLogStore) matchesEventFilter(event *models.Event, req driver.ListEve return true } -func (s *memLogStore) InsertMany(ctx 
context.Context, events []*models.Event, deliveries []*models.Delivery) error { +func (s *memLogStore) InsertMany(ctx context.Context, entries []*models.LogEntry) error { s.mu.Lock() defer s.mu.Unlock() - // Insert events (dedupe by ID) - for _, event := range events { - s.events[event.ID] = copyEvent(event) - } + for _, entry := range entries { + // Insert event (dedupe by ID) + s.events[entry.Event.ID] = copyEvent(entry.Event) - // Insert deliveries (idempotent upsert: match on event_id + delivery_id) - for _, d := range deliveries { + // Insert delivery (idempotent upsert: match on event_id + delivery_id) + d := entry.Delivery copied := copyDelivery(d) found := false diff --git a/internal/logstore/pglogstore/pglogstore.go b/internal/logstore/pglogstore/pglogstore.go index e97fc517..4c6fb28f 100644 --- a/internal/logstore/pglogstore/pglogstore.go +++ b/internal/logstore/pglogstore/pglogstore.go @@ -528,11 +528,27 @@ func (s *logStore) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeli }, nil } -func (s *logStore) InsertMany(ctx context.Context, events []*models.Event, deliveries []*models.Delivery) error { - if len(events) == 0 && len(deliveries) == 0 { +func (s *logStore) InsertMany(ctx context.Context, entries []*models.LogEntry) error { + if len(entries) == 0 { return nil } + // Extract and dedupe events by ID + eventMap := make(map[string]*models.Event) + for _, entry := range entries { + eventMap[entry.Event.ID] = entry.Event + } + events := make([]*models.Event, 0, len(eventMap)) + for _, e := range eventMap { + events = append(events, e) + } + + // Extract deliveries + deliveries := make([]*models.Delivery, 0, len(entries)) + for _, entry := range entries { + deliveries = append(deliveries, entry.Delivery) + } + tx, err := s.db.Begin(ctx) if err != nil { return err From da4bd25dfb556f5d5c2ff2f5bfa5dd794de87812 Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Tue, 27 Jan 2026 00:38:38 +0700 Subject: [PATCH 11/23] refactor: move retry event data 
query into retry flow (#654) * refactor: add event fetching to retry scheduler Co-Authored-By: Claude Opus 4.5 * fix: align mock eventGetter with logstore behavior Co-Authored-By: Claude Opus 4.5 * refactor: remove logStore from messagehandler Co-Authored-By: Claude Opus 4.5 * chore: remove dead eventGetter code from messagehandler tests Co-Authored-By: Claude Opus 4.5 * test: verify manual retry publishes full event data Extends TestRetryDelivery to verify that the manual retry API publishes a DeliveryTask with complete event data to deliveryMQ. This ensures the deliverymq handler receives full event data for manual retries, consistent with the scheduled retry flow which fetches event data in the retry scheduler. Co-Authored-By: Claude Opus 4.5 * test: add failing tests for retry race condition Add unit and e2e tests verifying that retries are not lost when the retry scheduler queries logstore before the event has been persisted. Test scenario: 1. Initial delivery fails, retry is scheduled 2. Retry scheduler queries logstore for event data 3. Event is not yet persisted (logmq batching delay) 4. Retry should remain in queue and be reprocessed later Tests added: - TestRetryScheduler_RaceCondition_EventNotYetPersisted (unit test) - TestE2E_Regression_RetryRaceCondition (e2e test) Also adds: - RetryVisibilityTimeoutSeconds config option - WithRetryVisibilityTimeout scheduler option - mockDelayedEventGetter for simulating delayed persistence Co-Authored-By: Claude Opus 4.5 * fix: return error when event not found in logstore during retry Return error instead of nil so the retry message stays in queue and will be reprocessed after the visibility timeout. 
Co-Authored-By: Claude Opus 4.5 * test: improve flaky tests * chore: dev yaml * chore: `make test` skip cmd/e2e by default --------- Co-authored-by: Claude Opus 4.5 --- .outpost.yaml.dev | 1 - Makefile | 2 +- cmd/e2e/suites_test.go | 163 ++++++++++ internal/apirouter/retry_handlers_test.go | 28 +- internal/apirouter/router_test.go | 2 + internal/config/config.go | 4 +- internal/deliverymq/messagehandler.go | 36 --- internal/deliverymq/messagehandler_test.go | 150 +-------- internal/deliverymq/mock_test.go | 47 +-- internal/deliverymq/retry.go | 72 ++++- internal/deliverymq/retry_test.go | 336 ++++++++++++++++++++- internal/idempotence/idempotence_test.go | 22 +- internal/logmq/batchprocessor_test.go | 20 +- internal/services/builder.go | 10 +- 14 files changed, 666 insertions(+), 227 deletions(-) diff --git a/.outpost.yaml.dev b/.outpost.yaml.dev index 087498f4..9e30a388 100644 --- a/.outpost.yaml.dev +++ b/.outpost.yaml.dev @@ -73,7 +73,6 @@ idgen: event_prefix: "evt" destination_prefix: "des" delivery_prefix: "dlv" - delivery_event_prefix: "dev" # Concurrency publish_max_concurrency: 1 diff --git a/Makefile b/Makefile index a8638aad..cbb890fd 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -TEST?=./... +TEST?=./internal/... RUN?= # Build targets diff --git a/cmd/e2e/suites_test.go b/cmd/e2e/suites_test.go index baf5bc3a..48321bf4 100644 --- a/cmd/e2e/suites_test.go +++ b/cmd/e2e/suites_test.go @@ -544,3 +544,166 @@ func TestE2E_Regression_AutoDisableWithoutCallbackURL(t *testing.T) { // Cleanup mock server _ = mockServerInfra } + +// TestE2E_Regression_RetryRaceCondition verifies that retries are not lost when +// the retry scheduler queries logstore before the event has been persisted.
+// +// Test configuration creates a timing window where retry fires before log persistence: +// - LogBatchThresholdSeconds = 5 (slow persistence) +// - RetryIntervalSeconds = 1 (fast retry) +// - RetryVisibilityTimeoutSeconds = 2 (quick reprocessing when event not found) +// +// Expected behavior: retry remains in queue until event is available, then succeeds. +func TestE2E_Regression_RetryRaceCondition(t *testing.T) { + t.Parallel() + if testing.Short() { + t.Skip("skipping e2e test") + } + + // Setup infrastructure + testinfraCleanup := testinfra.Start(t) + defer testinfraCleanup() + gin.SetMode(gin.TestMode) + mockServerBaseURL := testinfra.GetMockServer(t) + + // Configure with slow log persistence and fast retry + cfg := configs.Basic(t, configs.BasicOpts{ + LogStorage: configs.LogStorageTypeClickHouse, + }) + + // SLOW log persistence: batch won't flush for 5 seconds + cfg.LogBatchThresholdSeconds = 5 + cfg.LogBatchSize = 10000 // High batch size to prevent early flush + + // FAST retry: retry fires after ~1 second + cfg.RetryIntervalSeconds = 1 + cfg.RetryPollBackoffMs = 50 + cfg.RetryMaxLimit = 5 + cfg.RetryVisibilityTimeoutSeconds = 2 // Short VT so retry happens quickly after event not found + + require.NoError(t, cfg.Validate(config.Flags{})) + + // Start application + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + appDone := make(chan struct{}) + go func() { + defer close(appDone) + application := app.New(&cfg) + if err := application.Run(ctx); err != nil { + log.Println("Application stopped:", err) + } + }() + defer func() { + cancel() + <-appDone + }() + + // Wait for services to start + waitForHealthy(t, cfg.APIPort, 5*time.Second) + + // Setup test client + client := httpclient.New(fmt.Sprintf("http://localhost:%d/api/v1", cfg.APIPort), cfg.APIKey) + mockServerInfra := testinfra.NewMockServerInfra(mockServerBaseURL) + + // Test data + tenantID := fmt.Sprintf("tenant_race_%d", time.Now().UnixNano()) + destinationID := 
fmt.Sprintf("dest_race_%d", time.Now().UnixNano()) + secret := "testsecret1234567890abcdefghijklmnop" + + // Create tenant + resp, err := client.Do(httpclient.Request{ + Method: httpclient.MethodPUT, + Path: "/tenants/" + tenantID, + Headers: map[string]string{"Authorization": "Bearer " + cfg.APIKey}, + }) + require.NoError(t, err) + require.Equal(t, 201, resp.StatusCode, "failed to create tenant") + + // Configure mock server destination + resp, err = client.Do(httpclient.Request{ + Method: httpclient.MethodPUT, + BaseURL: mockServerBaseURL, + Path: "/destinations", + Body: map[string]interface{}{ + "id": destinationID, + "type": "webhook", + "config": map[string]interface{}{ + "url": fmt.Sprintf("%s/webhook/%s", mockServerBaseURL, destinationID), + }, + "credentials": map[string]interface{}{ + "secret": secret, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, 200, resp.StatusCode, "failed to configure mock server") + + // Create destination + resp, err = client.Do(httpclient.Request{ + Method: httpclient.MethodPOST, + Path: "/tenants/" + tenantID + "/destinations", + Headers: map[string]string{"Authorization": "Bearer " + cfg.APIKey}, + Body: map[string]interface{}{ + "id": destinationID, + "type": "webhook", + "topics": "*", + "config": map[string]interface{}{ + "url": fmt.Sprintf("%s/webhook/%s", mockServerBaseURL, destinationID), + }, + "credentials": map[string]interface{}{ + "secret": secret, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, 201, resp.StatusCode, "failed to create destination") + + // Publish event that will always fail (should_err: true) + // We want to verify that retries happen (mock server is hit multiple times) + resp, err = client.Do(httpclient.Request{ + Method: httpclient.MethodPOST, + Path: "/publish", + Headers: map[string]string{"Authorization": "Bearer " + cfg.APIKey}, + Body: map[string]interface{}{ + "tenant_id": tenantID, + "topic": "user.created", + "eligible_for_retry": true, + "metadata": 
map[string]interface{}{ + "should_err": "true", // All deliveries fail + }, + "data": map[string]interface{}{ + "test": "race-condition-test", + }, + }, + }) + require.NoError(t, err) + require.Equal(t, 202, resp.StatusCode, "failed to publish event") + + // Wait for retries to complete + // - t=0: Event published, first delivery fails + // - t=1s: Retry fires, event not in logstore yet, message returns to queue + // - t=3s: Message visible again after 2s VT, retry fires again + // - t=5s: Log batch flushes, event now in logstore + // - t=5s+: Retry finds event, delivery succeeds + time.Sleep(10 * time.Second) + + // Verify mock server received multiple delivery attempts + resp, err = client.Do(httpclient.Request{ + Method: httpclient.MethodGET, + BaseURL: mockServerBaseURL, + Path: "/destinations/" + destinationID + "/events", + }) + require.NoError(t, err) + require.Equal(t, 200, resp.StatusCode) + + events, ok := resp.Body.([]interface{}) + require.True(t, ok, "expected events array") + + // Should have at least 2 attempts: initial failure + successful retry + require.GreaterOrEqual(t, len(events), 2, + "expected multiple delivery attempts (initial + retry after event persisted)") + + _ = mockServerInfra +} diff --git a/internal/apirouter/retry_handlers_test.go b/internal/apirouter/retry_handlers_test.go index f5cc52d7..170b147c 100644 --- a/internal/apirouter/retry_handlers_test.go +++ b/internal/apirouter/retry_handlers_test.go @@ -59,7 +59,15 @@ func TestRetryDelivery(t *testing.T) { require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) - t.Run("should retry delivery successfully", func(t *testing.T) { + t.Run("should retry delivery successfully with full event data", func(t *testing.T) { + // Subscribe to deliveryMQ to capture published task + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + subscription, err := result.deliveryMQ.Subscribe(ctx) 
+ require.NoError(t, err) + + // Trigger manual retry w := httptest.NewRecorder() req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/deliveries/"+deliveryID+"/retry", nil) result.router.ServeHTTP(w, req) @@ -69,6 +77,24 @@ func TestRetryDelivery(t *testing.T) { var response map[string]interface{} require.NoError(t, json.Unmarshal(w.Body.Bytes(), &response)) assert.Equal(t, true, response["success"]) + + // Verify published task has full event data + msg, err := subscription.Receive(ctx) + require.NoError(t, err) + + var task models.DeliveryTask + require.NoError(t, json.Unmarshal(msg.Body, &task)) + + assert.Equal(t, eventID, task.Event.ID) + assert.Equal(t, tenantID, task.Event.TenantID) + assert.Equal(t, destinationID, task.Event.DestinationID) + assert.Equal(t, "order.created", task.Event.Topic) + assert.False(t, task.Event.Time.IsZero(), "event time should be set") + assert.Equal(t, eventTime.UTC(), task.Event.Time.UTC()) + assert.Equal(t, event.Data, task.Event.Data, "event data should match original") + assert.True(t, task.Manual, "should be marked as manual retry") + + msg.Ack() }) t.Run("should return 404 for non-existent delivery", func(t *testing.T) { diff --git a/internal/apirouter/router_test.go b/internal/apirouter/router_test.go index 7b05249d..c782685e 100644 --- a/internal/apirouter/router_test.go +++ b/internal/apirouter/router_test.go @@ -34,6 +34,7 @@ type testRouterResult struct { redisClient redis.Client entityStore models.EntityStore logStore logstore.LogStore + deliveryMQ *deliverymq.DeliveryMQ } func setupTestRouter(t *testing.T, apiKey, jwtSecret string, funcs ...func(t *testing.T) clickhouse.DB) (http.Handler, *logging.Logger, redis.Client) { @@ -73,6 +74,7 @@ func setupTestRouterFull(t *testing.T, apiKey, jwtSecret string, funcs ...func(t redisClient: redisClient, entityStore: entityStore, logStore: logStore, + deliveryMQ: deliveryMQ, } } diff --git a/internal/config/config.go b/internal/config/config.go index 
48371ba1..035c20c4 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -79,7 +79,8 @@ type Config struct { RetrySchedule []int `yaml:"retry_schedule" env:"RETRY_SCHEDULE" envSeparator:"," desc:"Comma-separated list of retry delays in seconds. If provided, overrides retry_interval_seconds and retry_max_limit. Schedule length defines the max number of retries. Example: '5,60,600,3600,7200' for 5 retries at 5s, 1m, 10m, 1h, 2h." required:"N"` RetryIntervalSeconds int `yaml:"retry_interval_seconds" env:"RETRY_INTERVAL_SECONDS" desc:"Interval in seconds for exponential backoff retry strategy (base 2). Ignored if retry_schedule is provided." required:"N"` RetryMaxLimit int `yaml:"retry_max_limit" env:"MAX_RETRY_LIMIT" desc:"Maximum number of retry attempts for a single event delivery before giving up. Ignored if retry_schedule is provided." required:"N"` - RetryPollBackoffMs int `yaml:"retry_poll_backoff_ms" env:"RETRY_POLL_BACKOFF_MS" desc:"Backoff time in milliseconds when the retry monitor finds no messages to process. When a retry message is found, the monitor immediately polls for the next message without delay. Lower values provide faster retry processing but increase Redis load. For serverless Redis providers (Upstash, ElastiCache Serverless), consider increasing to 5000-10000ms to reduce costs. Default: 100" required:"N"` + RetryPollBackoffMs int `yaml:"retry_poll_backoff_ms" env:"RETRY_POLL_BACKOFF_MS" desc:"Backoff time in milliseconds when the retry monitor finds no messages to process. When a retry message is found, the monitor immediately polls for the next message without delay. Lower values provide faster retry processing but increase Redis load. For serverless Redis providers (Upstash, ElastiCache Serverless), consider increasing to 5000-10000ms to reduce costs. 
Default: 100" required:"N"` + RetryVisibilityTimeoutSeconds int `yaml:"retry_visibility_timeout_seconds" env:"RETRY_VISIBILITY_TIMEOUT_SECONDS" desc:"Time in seconds a retry message is hidden after being received before becoming visible again for reprocessing. This applies when event data is temporarily unavailable (e.g., race condition with log persistence). Default: 30" required:"N"` // Event Delivery MaxDestinationsPerTenant int `yaml:"max_destinations_per_tenant" env:"MAX_DESTINATIONS_PER_TENANT" desc:"Maximum number of destinations allowed per tenant/organization." required:"N"` @@ -165,6 +166,7 @@ func (c *Config) InitDefaults() { c.RetryIntervalSeconds = 30 c.RetryMaxLimit = 10 c.RetryPollBackoffMs = 100 + c.RetryVisibilityTimeoutSeconds = 30 c.MaxDestinationsPerTenant = 20 c.DeliveryTimeoutSeconds = 5 c.PublishIdempotencyKeyTTL = 3600 // 1 hour diff --git a/internal/deliverymq/messagehandler.go b/internal/deliverymq/messagehandler.go index e56635eb..f005e047 100644 --- a/internal/deliverymq/messagehandler.go +++ b/internal/deliverymq/messagehandler.go @@ -12,7 +12,6 @@ import ( "github.com/hookdeck/outpost/internal/destregistry" "github.com/hookdeck/outpost/internal/idempotence" "github.com/hookdeck/outpost/internal/logging" - "github.com/hookdeck/outpost/internal/logstore" "github.com/hookdeck/outpost/internal/models" "github.com/hookdeck/outpost/internal/mqs" "github.com/hookdeck/outpost/internal/scheduler" @@ -70,7 +69,6 @@ type messageHandler struct { logger *logging.Logger logMQ LogPublisher entityStore DestinationGetter - logStore EventGetter retryScheduler RetryScheduler retryBackoff backoff.Backoff retryMaxLimit int @@ -96,10 +94,6 @@ type DestinationGetter interface { RetrieveDestination(ctx context.Context, tenantID, destID string) (*models.Destination, error) } -type EventGetter interface { - RetrieveEvent(ctx context.Context, request logstore.RetrieveEventRequest) (*models.Event, error) -} - type DeliveryTracer interface { Deliver(ctx 
context.Context, task *models.DeliveryTask, destination *models.Destination) (context.Context, trace.Span) } @@ -112,7 +106,6 @@ func NewMessageHandler( logger *logging.Logger, logMQ LogPublisher, entityStore DestinationGetter, - logStore EventGetter, publisher Publisher, eventTracer DeliveryTracer, retryScheduler RetryScheduler, @@ -126,7 +119,6 @@ func NewMessageHandler( logger: logger, logMQ: logMQ, entityStore: entityStore, - logStore: logStore, publisher: publisher, retryScheduler: retryScheduler, retryBackoff: retryBackoff, @@ -150,11 +142,6 @@ func (h *messageHandler) Handle(ctx context.Context, msg *mqs.Message) error { zap.String("destination_id", task.DestinationID), zap.Int("attempt", task.Attempt)) - // Ensure event data - if err := h.ensureDeliveryTask(ctx, &task); err != nil { - return h.handleError(msg, &PreDeliveryError{err: err}) - } - // Get destination destination, err := h.ensurePublishableDestination(ctx, task) if err != nil { @@ -443,29 +430,6 @@ func (h *messageHandler) scheduleRetry(ctx context.Context, task models.Delivery return nil } -// ensureDeliveryTask ensures that the delivery task has full event data. -// In retry scenarios, the task only has event ID and we'll need to query the full data. -func (h *messageHandler) ensureDeliveryTask(ctx context.Context, task *models.DeliveryTask) error { - // TODO: consider a more deliberate way to check for retry scenario? - if !task.Event.Time.IsZero() { - return nil - } - - event, err := h.logStore.RetrieveEvent(ctx, logstore.RetrieveEventRequest{ - TenantID: task.Event.TenantID, - EventID: task.Event.ID, - }) - if err != nil { - return err - } - if event == nil { - return errors.New("event not found") - } - task.Event = *event - - return nil -} - // ensurePublishableDestination ensures that the destination exists and is in a publishable state. // Returns an error if the destination is not found, deleted, disabled, or any other state that // would prevent publishing. 
diff --git a/internal/deliverymq/messagehandler_test.go b/internal/deliverymq/messagehandler_test.go index 23097f24..8ccf8b1f 100644 --- a/internal/deliverymq/messagehandler_test.go +++ b/internal/deliverymq/messagehandler_test.go @@ -40,8 +40,6 @@ func TestMessageHandler_DestinationGetterError(t *testing.T) { // Setup mocks destGetter := &mockDestinationGetter{err: errors.New("destination lookup failed")} - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() alertMonitor := newMockAlertMonitor() @@ -50,7 +48,6 @@ func TestMessageHandler_DestinationGetterError(t *testing.T) { testutil.CreateTestLogger(t), newMockLogPublisher(nil), destGetter, - eventGetter, newMockPublisher(nil), // won't be called due to early error testutil.NewMockEventTracer(nil), retryScheduler, @@ -101,8 +98,6 @@ func TestMessageHandler_DestinationNotFound(t *testing.T) { // Setup mocks destGetter := &mockDestinationGetter{dest: nil, err: nil} // destination not found - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() logPublisher := newMockLogPublisher(nil) alertMonitor := newMockAlertMonitor() @@ -112,7 +107,6 @@ func TestMessageHandler_DestinationNotFound(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, newMockPublisher(nil), // won't be called testutil.NewMockEventTracer(nil), retryScheduler, @@ -160,8 +154,6 @@ func TestMessageHandler_DestinationDeleted(t *testing.T) { // Setup mocks destGetter := &mockDestinationGetter{err: models.ErrDestinationDeleted} - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() logPublisher := newMockLogPublisher(nil) alertMonitor := newMockAlertMonitor() @@ -171,7 +163,6 @@ func TestMessageHandler_DestinationDeleted(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, newMockPublisher(nil), // won't be 
called testutil.NewMockEventTracer(nil), retryScheduler, @@ -220,8 +211,6 @@ func TestMessageHandler_PublishError_EligibleForRetry(t *testing.T) { // Setup mocks destGetter := &mockDestinationGetter{dest: &destination} - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() publishErr := &destregistry.ErrDestinationPublishAttempt{ Err: errors.New("webhook returned 429"), @@ -240,7 +229,6 @@ func TestMessageHandler_PublishError_EligibleForRetry(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, publisher, testutil.NewMockEventTracer(nil), retryScheduler, @@ -292,8 +280,6 @@ func TestMessageHandler_PublishError_NotEligible(t *testing.T) { // Setup mocks destGetter := &mockDestinationGetter{dest: &destination} - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() publishErr := &destregistry.ErrDestinationPublishAttempt{ Err: errors.New("webhook returned 400"), @@ -312,7 +298,6 @@ func TestMessageHandler_PublishError_NotEligible(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, publisher, testutil.NewMockEventTracer(nil), retryScheduler, @@ -343,75 +328,10 @@ func TestMessageHandler_PublishError_NotEligible(t *testing.T) { assertAlertMonitor(t, alertMonitor, false, &destination, publishErr.Data) } -func TestMessageHandler_EventGetterError(t *testing.T) { - // Test scenario: - // - Event getter fails to retrieve event during retry - // - Should be treated as system error - // - Should nack for retry - - // Setup test data - tenant := models.Tenant{ID: idgen.String()} - destination := testutil.DestinationFactory.Any( - testutil.DestinationFactory.WithType("webhook"), - testutil.DestinationFactory.WithTenantID(tenant.ID), - ) - event := testutil.EventFactory.Any( - testutil.EventFactory.WithTenantID(tenant.ID), - testutil.EventFactory.WithDestinationID(destination.ID), - ) - - // 
Setup mocks - destGetter := &mockDestinationGetter{dest: &destination} - eventGetter := newMockEventGetter() - eventGetter.err = errors.New("failed to get event") - retryScheduler := newMockRetryScheduler() - publisher := newMockPublisher([]error{nil}) - logPublisher := newMockLogPublisher(nil) - alertMonitor := newMockAlertMonitor() - - // Setup message handler - handler := deliverymq.NewMessageHandler( - testutil.CreateTestLogger(t), - logPublisher, - destGetter, - eventGetter, - publisher, - testutil.NewMockEventTracer(nil), - retryScheduler, - &backoff.ConstantBackoff{Interval: 1 * time.Second}, - 10, - alertMonitor, - idempotence.New(testutil.CreateTestRedisClient(t), idempotence.WithSuccessfulTTL(24*time.Hour)), - ) - - // Create and handle message simulating a retry - task := models.DeliveryTask{ - Attempt: 2, // Retry attempt - DestinationID: destination.ID, - Event: models.Event{ - ID: event.ID, - TenantID: event.TenantID, - // Minimal event data as it would be in a retry - }, - } - mockMsg, msg := newDeliveryMockMessage(task) - - // Handle message - err := handler.Handle(context.Background(), msg) - require.Error(t, err) - assert.Contains(t, err.Error(), "failed to get event") - - // Assert behavior - assert.True(t, mockMsg.nacked, "message should be nacked on event getter error") - assert.False(t, mockMsg.acked, "message should not be acked on event getter error") - assert.Empty(t, retryScheduler.schedules, "no retry should be scheduled for system error") - assert.Equal(t, 0, publisher.current, "publish should not be attempted") -} - func TestMessageHandler_RetryFlow(t *testing.T) { // Test scenario: // - Message is a retry attempt (Attempt > 1) - // - Event getter successfully retrieves full event data + // - DeliveryTask contains full event data (populated by retry scheduler from logstore) // - Message is processed normally // Setup test data @@ -427,8 +347,6 @@ func TestMessageHandler_RetryFlow(t *testing.T) { // Setup mocks destGetter := 
&mockDestinationGetter{dest: &destination} - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() publisher := newMockPublisher([]error{nil}) // Successful publish logPublisher := newMockLogPublisher(nil) @@ -438,7 +356,6 @@ func TestMessageHandler_RetryFlow(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, publisher, testutil.NewMockEventTracer(nil), retryScheduler, @@ -449,14 +366,11 @@ func TestMessageHandler_RetryFlow(t *testing.T) { ) // Create and handle message simulating a retry + // Full event data is now populated by retry scheduler before publishing to deliverymq task := models.DeliveryTask{ Attempt: 2, // Retry attempt DestinationID: destination.ID, - Event: models.Event{ - ID: event.ID, - TenantID: event.TenantID, - // Minimal event data as it would be in a retry - }, + Event: event, // Full event data (populated by retry scheduler) } mockMsg, msg := newDeliveryMockMessage(task) @@ -469,7 +383,6 @@ func TestMessageHandler_RetryFlow(t *testing.T) { assert.False(t, mockMsg.nacked, "message should not be nacked on successful retry") assert.Empty(t, retryScheduler.schedules, "no retry should be scheduled") assert.Equal(t, 1, publisher.current, "publish should succeed once") - assert.Equal(t, event.ID, eventGetter.lastRetrievedID, "event getter should be called with correct ID") require.Len(t, logPublisher.entries, 1, "should have one delivery") assert.Equal(t, models.DeliveryStatusSuccess, logPublisher.entries[0].Delivery.Status, "delivery status should be OK") } @@ -493,8 +406,6 @@ func TestMessageHandler_Idempotency(t *testing.T) { // Setup mocks destGetter := &mockDestinationGetter{dest: &destination} - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() publisher := newMockPublisher([]error{nil}) logPublisher := newMockLogPublisher(nil) @@ -505,7 +416,6 @@ func TestMessageHandler_Idempotency(t 
*testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, publisher, testutil.NewMockEventTracer(nil), retryScheduler, @@ -540,7 +450,7 @@ func TestMessageHandler_Idempotency(t *testing.T) { func TestMessageHandler_IdempotencyWithSystemError(t *testing.T) { // Test scenario: - // - First attempt fails with system error (event getter error) + // - First attempt fails with system error (destination getter error) // - Second attempt with same message ID succeeds after error is cleared // - Should demonstrate that system errors don't affect idempotency @@ -555,11 +465,10 @@ func TestMessageHandler_IdempotencyWithSystemError(t *testing.T) { testutil.EventFactory.WithDestinationID(destination.ID), ) - // Setup mocks - destGetter := &mockDestinationGetter{dest: &destination} - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) - eventGetter.err = errors.New("failed to get event") // Will fail first attempt + // Setup mocks - destGetter will fail first, then succeed + destGetter := newMockMultiDestinationGetter() + destGetter.registerDestination(&destination) + destGetter.err = errors.New("failed to get destination") // Will fail first attempt retryScheduler := newMockRetryScheduler() publisher := newMockPublisher([]error{nil}) logPublisher := newMockLogPublisher(nil) @@ -570,7 +479,6 @@ func TestMessageHandler_IdempotencyWithSystemError(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, publisher, testutil.NewMockEventTracer(nil), retryScheduler, @@ -580,27 +488,24 @@ func TestMessageHandler_IdempotencyWithSystemError(t *testing.T) { idempotence.New(redis, idempotence.WithSuccessfulTTL(24*time.Hour)), ) - // Create retry message + // Create retry message with full event data (populated by retry scheduler) task := models.DeliveryTask{ Attempt: 2, DestinationID: destination.ID, - Event: models.Event{ - ID: event.ID, - TenantID: event.TenantID, - }, + Event: event, } // First attempt - should 
fail with system error mockMsg1, msg1 := newDeliveryMockMessage(task) err := handler.Handle(context.Background(), msg1) require.Error(t, err) - assert.Contains(t, err.Error(), "failed to get event") + assert.Contains(t, err.Error(), "failed to get destination") assert.True(t, mockMsg1.nacked, "first attempt should be nacked") assert.False(t, mockMsg1.acked, "first attempt should not be acked") assert.Equal(t, 0, publisher.current, "publish should not be attempted") // Clear the error for second attempt - eventGetter.clearError() + destGetter.err = nil // Second attempt with same task - should succeed mockMsg2, msg2 := newDeliveryMockMessage(task) @@ -609,7 +514,6 @@ func TestMessageHandler_IdempotencyWithSystemError(t *testing.T) { assert.True(t, mockMsg2.acked, "second attempt should be acked") assert.False(t, mockMsg2.nacked, "second attempt should not be nacked") assert.Equal(t, 1, publisher.current, "publish should succeed once") - assert.Equal(t, event.ID, eventGetter.lastRetrievedID, "event getter should be called with correct ID") } func TestMessageHandler_DestinationDisabled(t *testing.T) { @@ -633,8 +537,6 @@ func TestMessageHandler_DestinationDisabled(t *testing.T) { // Setup mocks destGetter := &mockDestinationGetter{dest: &destination} - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() publisher := newMockPublisher([]error{nil}) // won't be called logPublisher := newMockLogPublisher(nil) @@ -645,7 +547,6 @@ func TestMessageHandler_DestinationDisabled(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, publisher, testutil.NewMockEventTracer(nil), retryScheduler, @@ -695,8 +596,6 @@ func TestMessageHandler_LogPublisherError(t *testing.T) { // Setup mocks destGetter := &mockDestinationGetter{dest: &destination} - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() publisher := 
newMockPublisher([]error{nil}) // publish succeeds logPublisher := newMockLogPublisher(errors.New("log publish failed")) @@ -706,7 +605,6 @@ func TestMessageHandler_LogPublisherError(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, publisher, testutil.NewMockEventTracer(nil), retryScheduler, @@ -755,8 +653,6 @@ func TestMessageHandler_PublishAndLogError(t *testing.T) { // Setup mocks destGetter := &mockDestinationGetter{dest: &destination} - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() publisher := newMockPublisher([]error{errors.New("publish failed")}) logPublisher := newMockLogPublisher(errors.New("log publish failed")) @@ -766,7 +662,6 @@ func TestMessageHandler_PublishAndLogError(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, publisher, testutil.NewMockEventTracer(nil), retryScheduler, @@ -815,8 +710,6 @@ func TestManualDelivery_Success(t *testing.T) { // Setup mocks destGetter := &mockDestinationGetter{dest: &destination} - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() publishErr := &destregistry.ErrDestinationPublishAttempt{ Err: errors.New("webhook returned 500"), @@ -832,7 +725,6 @@ func TestManualDelivery_Success(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, publisher, testutil.NewMockEventTracer(nil), retryScheduler, @@ -894,8 +786,6 @@ func TestManualDelivery_PublishError(t *testing.T) { // Setup mocks destGetter := &mockDestinationGetter{dest: &destination} - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() publishErr := &destregistry.ErrDestinationPublishAttempt{ Err: errors.New("webhook returned 429"), @@ -914,7 +804,6 @@ func TestManualDelivery_PublishError(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - 
eventGetter, publisher, testutil.NewMockEventTracer(nil), retryScheduler, @@ -965,8 +854,6 @@ func TestManualDelivery_CancelError(t *testing.T) { // Setup mocks destGetter := &mockDestinationGetter{dest: &destination} - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() retryScheduler.cancelResp = []error{errors.New("failed to cancel retry")} publisher := newMockPublisher([]error{nil}) // successful publish @@ -978,7 +865,6 @@ func TestManualDelivery_CancelError(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, publisher, testutil.NewMockEventTracer(nil), retryScheduler, @@ -1032,8 +918,6 @@ func TestManualDelivery_DestinationDisabled(t *testing.T) { // Setup mocks destGetter := &mockDestinationGetter{dest: &destination} - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() publisher := newMockPublisher([]error{nil}) // won't be called logPublisher := newMockLogPublisher(nil) @@ -1044,7 +928,6 @@ func TestManualDelivery_DestinationDisabled(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, publisher, testutil.NewMockEventTracer(nil), retryScheduler, @@ -1094,8 +977,6 @@ func TestMessageHandler_PublishSuccess(t *testing.T) { // Setup mocks destGetter := &mockDestinationGetter{dest: &destination} - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() publisher := newMockPublisher([]error{nil}) // Successful publish logPublisher := newMockLogPublisher(nil) @@ -1115,7 +996,6 @@ func TestMessageHandler_PublishSuccess(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, publisher, testutil.NewMockEventTracer(nil), retryScheduler, @@ -1161,8 +1041,6 @@ func TestMessageHandler_AlertMonitorError(t *testing.T) { // Setup mocks destGetter := &mockDestinationGetter{dest: &destination} - 
eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() publisher := newMockPublisher([]error{nil}) // Successful publish logPublisher := newMockLogPublisher(nil) @@ -1174,7 +1052,6 @@ func TestMessageHandler_AlertMonitorError(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, publisher, testutil.NewMockEventTracer(nil), retryScheduler, @@ -1269,8 +1146,6 @@ func TestMessageHandler_RetryID_MultipleDestinations(t *testing.T) { destGetter := newMockMultiDestinationGetter() destGetter.registerDestination(&destination1) destGetter.registerDestination(&destination2) - eventGetter := newMockEventGetter() - eventGetter.registerEvent(&event) retryScheduler := newMockRetryScheduler() publishErr := &destregistry.ErrDestinationPublishAttempt{ Err: errors.New("webhook returned 500"), @@ -1286,7 +1161,6 @@ func TestMessageHandler_RetryID_MultipleDestinations(t *testing.T) { testutil.CreateTestLogger(t), logPublisher, destGetter, - eventGetter, publisher, testutil.NewMockEventTracer(nil), retryScheduler, diff --git a/internal/deliverymq/mock_test.go b/internal/deliverymq/mock_test.go index 49696d39..b96e4eb3 100644 --- a/internal/deliverymq/mock_test.go +++ b/internal/deliverymq/mock_test.go @@ -3,7 +3,6 @@ package deliverymq_test import ( "context" "encoding/json" - "errors" "sync" "time" @@ -16,11 +15,6 @@ import ( "github.com/stretchr/testify/mock" ) -// scheduleOptions mirrors the private type in scheduler package -type scheduleOptions struct { - id string -} - type mockPublisher struct { responses []error current int @@ -137,11 +131,36 @@ func (m *mockEventGetter) RetrieveEvent(ctx context.Context, req logstore.Retrie return nil, m.err } m.lastRetrievedID = req.EventID - event, ok := m.events[req.EventID] - if !ok { - return nil, errors.New("event not found") + // Match actual logstore behavior: return (nil, nil) when event not found + return m.events[req.EventID], nil +} + +// 
mockDelayedEventGetter simulates the race condition where event is not yet +// persisted to logstore when retry scheduler first queries it. +// Returns (nil, nil) for the first N calls, then returns the event. +type mockDelayedEventGetter struct { + event *models.Event + callCount int + returnAfterCall int // Return event after this many calls + mu sync.Mutex +} + +func newMockDelayedEventGetter(event *models.Event, returnAfterCall int) *mockDelayedEventGetter { + return &mockDelayedEventGetter{ + event: event, + returnAfterCall: returnAfterCall, } - return event, nil +} + +func (m *mockDelayedEventGetter) RetrieveEvent(ctx context.Context, req logstore.RetrieveEventRequest) (*models.Event, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.callCount++ + if m.callCount <= m.returnAfterCall { + // Simulate event not yet persisted + return nil, nil + } + return m.event, nil } type mockLogPublisher struct { @@ -229,14 +248,6 @@ func newDeliveryMockMessage(task models.DeliveryTask) (*mockMessage, *mqs.Messag } } -func newMockMessage(id string) *mqs.Message { - mock := &mockMessage{id: id} - return &mqs.Message{ - QueueMessage: mock, - Body: nil, - } -} - func (m *mockMessage) ID() string { return m.id } diff --git a/internal/deliverymq/retry.go b/internal/deliverymq/retry.go index 9ea1a84c..cfb3985c 100644 --- a/internal/deliverymq/retry.go +++ b/internal/deliverymq/retry.go @@ -7,13 +7,42 @@ import ( "time" "github.com/hookdeck/outpost/internal/logging" + "github.com/hookdeck/outpost/internal/logstore" "github.com/hookdeck/outpost/internal/models" "github.com/hookdeck/outpost/internal/redis" "github.com/hookdeck/outpost/internal/rsmq" "github.com/hookdeck/outpost/internal/scheduler" + "go.uber.org/zap" ) -func NewRetryScheduler(deliverymq *DeliveryMQ, redisConfig *redis.RedisConfig, deploymentID string, pollBackoff time.Duration, logger *logging.Logger) (scheduler.Scheduler, error) { +// RetryEventGetter is the interface for fetching events from logstore. 
+// This is defined separately from EventGetter in messagehandler.go to avoid circular dependencies. +type RetryEventGetter interface { + RetrieveEvent(ctx context.Context, request logstore.RetrieveEventRequest) (*models.Event, error) +} + +// RetrySchedulerOption is a functional option for configuring the retry scheduler. +type RetrySchedulerOption func(*retrySchedulerConfig) + +type retrySchedulerConfig struct { + visibilityTimeout uint +} + +// WithRetryVisibilityTimeout sets the visibility timeout for the retry scheduler queue. +// This controls how long a message is hidden after being received before it becomes +// visible again (for retry if the executor returned an error). +func WithRetryVisibilityTimeout(vt uint) RetrySchedulerOption { + return func(c *retrySchedulerConfig) { + c.visibilityTimeout = vt + } +} + +func NewRetryScheduler(deliverymq *DeliveryMQ, redisConfig *redis.RedisConfig, deploymentID string, pollBackoff time.Duration, logger *logging.Logger, eventGetter RetryEventGetter, opts ...RetrySchedulerOption) (scheduler.Scheduler, error) { + // Apply options + cfg := &retrySchedulerConfig{} + for _, opt := range opts { + opt(cfg) + } // Create Redis client for RSMQ ctx := context.Background() redisClient, err := redis.New(ctx, redisConfig) @@ -46,13 +75,48 @@ func NewRetryScheduler(deliverymq *DeliveryMQ, redisConfig *redis.RedisConfig, d if err := retryTask.FromString(msg); err != nil { return err } - deliveryTask := retryTask.ToDeliveryTask() + + // Fetch full event data from logstore + event, err := eventGetter.RetrieveEvent(ctx, logstore.RetrieveEventRequest{ + TenantID: retryTask.TenantID, + EventID: retryTask.EventID, + }) + if err != nil { + // Transient error (DB connection issue, etc) - return error so scheduler retries + if logger != nil { + logger.Ctx(ctx).Error("failed to fetch event for retry", + zap.Error(err), + zap.String("event_id", retryTask.EventID), + zap.String("tenant_id", retryTask.TenantID), + zap.String("destination_id", 
retryTask.DestinationID)) + } + return err + } + if event == nil { + // Event not found - may be race condition with logmq batching delay. + // Return error so scheduler retries later. + if logger != nil { + logger.Ctx(ctx).Warn("event not found in logstore, will retry", + zap.String("event_id", retryTask.EventID), + zap.String("tenant_id", retryTask.TenantID), + zap.String("destination_id", retryTask.DestinationID)) + } + return fmt.Errorf("event not found in logstore") + } + + deliveryTask := retryTask.ToDeliveryTask(*event) if err := deliverymq.Publish(ctx, deliveryTask); err != nil { return err } return nil } + // Build scheduler options - pass visibility timeout if configured + if cfg.visibilityTimeout > 0 { + return scheduler.New("deliverymq-retry", rsmqClient, exec, + scheduler.WithPollBackoff(pollBackoff), + scheduler.WithVisibilityTimeout(cfg.visibilityTimeout)), nil + } return scheduler.New("deliverymq-retry", rsmqClient, exec, scheduler.WithPollBackoff(pollBackoff)), nil } @@ -78,11 +142,11 @@ func (m *RetryTask) FromString(str string) error { return json.Unmarshal([]byte(str), &m) } -func (m *RetryTask) ToDeliveryTask() models.DeliveryTask { +func (m *RetryTask) ToDeliveryTask(event models.Event) models.DeliveryTask { return models.DeliveryTask{ Attempt: m.Attempt, DestinationID: m.DestinationID, - Event: models.Event{ID: m.EventID, TenantID: m.TenantID}, + Event: event, Telemetry: m.Telemetry, } } diff --git a/internal/deliverymq/retry_test.go b/internal/deliverymq/retry_test.go index 1fd6b7c4..86257a96 100644 --- a/internal/deliverymq/retry_test.go +++ b/internal/deliverymq/retry_test.go @@ -25,7 +25,7 @@ type RetryDeliveryMQSuite struct { retryBackoff backoff.Backoff schedulerPollBackoff time.Duration publisher deliverymq.Publisher - eventGetter deliverymq.EventGetter + eventGetter deliverymq.RetryEventGetter logPublisher deliverymq.LogPublisher destGetter deliverymq.DestinationGetter alertMonitor deliverymq.AlertMonitor @@ -53,7 +53,7 @@ func (s 
*RetryDeliveryMQSuite) SetupTest(t *testing.T) { if pollBackoff == 0 { pollBackoff = 100 * time.Millisecond } - retryScheduler, err := deliverymq.NewRetryScheduler(s.deliveryMQ, testutil.CreateTestRedisConfig(t), "", pollBackoff, testutil.CreateTestLogger(t)) + retryScheduler, err := deliverymq.NewRetryScheduler(s.deliveryMQ, testutil.CreateTestRedisConfig(t), "", pollBackoff, testutil.CreateTestLogger(t), s.eventGetter) require.NoError(t, err) require.NoError(t, retryScheduler.Init(s.ctx)) go retryScheduler.Monitor(s.ctx) @@ -68,7 +68,6 @@ func (s *RetryDeliveryMQSuite) SetupTest(t *testing.T) { testutil.CreateTestLogger(t), s.logPublisher, s.destGetter, - s.eventGetter, s.publisher, testutil.NewMockEventTracer(nil), retryScheduler, @@ -352,3 +351,334 @@ func TestDeliveryMQRetry_RetryMaxCount(t *testing.T) { assert.Equal(t, 3, publisher.Current(), "should stop after max retries (1 initial + 2 retries = 3 total attempts)") } + +func TestRetryScheduler_EventNotFound(t *testing.T) { + // Test scenario: + // - Initial delivery fails and schedules a retry + // - Before retry executes, the event is deleted from logstore + // - Retry scheduler should skip publishing (not error) when event returns (nil, nil) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Setup test data + tenant := models.Tenant{ID: idgen.String()} + destination := testutil.DestinationFactory.Any( + testutil.DestinationFactory.WithType("webhook"), + testutil.DestinationFactory.WithTenantID(tenant.ID), + ) + event := testutil.EventFactory.Any( + testutil.EventFactory.WithTenantID(tenant.ID), + testutil.EventFactory.WithDestinationID(destination.ID), + testutil.EventFactory.WithEligibleForRetry(true), + ) + + // Setup mocks - publisher fails on first attempt + publisher := newMockPublisher([]error{ + &destregistry.ErrDestinationPublishAttempt{ + Err: errors.New("webhook returned 503"), + Provider: "webhook", + }, + }) + + // Event getter does NOT have the 
event registered + // This simulates event being deleted from logstore before retry + eventGetter := newMockEventGetter() + // Intentionally NOT calling: eventGetter.registerEvent(&event) + + suite := &RetryDeliveryMQSuite{ + ctx: ctx, + mqConfig: &mqs.QueueConfig{InMemory: &mqs.InMemoryConfig{Name: testutil.RandomString(5)}}, + publisher: publisher, + eventGetter: eventGetter, + logPublisher: newMockLogPublisher(nil), + destGetter: &mockDestinationGetter{dest: &destination}, + alertMonitor: newMockAlertMonitor(), + retryMaxCount: 10, + retryBackoff: &backoff.ConstantBackoff{Interval: 50 * time.Millisecond}, + schedulerPollBackoff: 10 * time.Millisecond, + } + suite.SetupTest(t) + defer suite.TeardownTest(t) + + // Publish task with full event data (simulates initial delivery) + task := models.DeliveryTask{ + Event: event, + DestinationID: destination.ID, + } + require.NoError(t, suite.deliveryMQ.Publish(ctx, task)) + + // Wait for initial delivery attempt and retry scheduling + require.Eventually(t, func() bool { + return publisher.Current() >= 1 + }, 2*time.Second, 10*time.Millisecond, "should complete initial delivery attempt") + + // Wait enough time for retry to be processed (if it were to happen) + // 50ms backoff + 10ms poll = 60ms minimum for retry + time.Sleep(200 * time.Millisecond) + + // Should only have 1 attempt - the retry was skipped because event not found + assert.Equal(t, 1, publisher.Current(), "should skip retry when event not found in logstore (returns nil, nil)") +} + +func TestRetryScheduler_EventFetchError(t *testing.T) { + // Test scenario: + // - Initial delivery fails and schedules a retry + // - When retry scheduler tries to fetch event, it gets a transient error + // - Retry scheduler should return error (which means message is not deleted) + // - The message stays in queue for retry after visibility timeout + // - Delivery should NOT proceed when event fetch fails + + ctx, cancel := context.WithTimeout(context.Background(), 
3*time.Second) + defer cancel() + + // Setup test data + tenant := models.Tenant{ID: idgen.String()} + destination := testutil.DestinationFactory.Any( + testutil.DestinationFactory.WithType("webhook"), + testutil.DestinationFactory.WithTenantID(tenant.ID), + ) + event := testutil.EventFactory.Any( + testutil.EventFactory.WithTenantID(tenant.ID), + testutil.EventFactory.WithDestinationID(destination.ID), + testutil.EventFactory.WithEligibleForRetry(true), + ) + + // Setup mocks - publisher fails on first attempt + publisher := newMockPublisher([]error{ + &destregistry.ErrDestinationPublishAttempt{ + Err: errors.New("webhook returned 503"), + Provider: "webhook", + }, + nil, // Second attempt would succeed if it were reached + }) + + // Event getter returns error (simulating transient DB error) + eventGetter := newMockEventGetter() + eventGetter.registerEvent(&event) + eventGetter.err = errors.New("database connection error") + + suite := &RetryDeliveryMQSuite{ + ctx: ctx, + mqConfig: &mqs.QueueConfig{InMemory: &mqs.InMemoryConfig{Name: testutil.RandomString(5)}}, + publisher: publisher, + eventGetter: eventGetter, + logPublisher: newMockLogPublisher(nil), + destGetter: &mockDestinationGetter{dest: &destination}, + alertMonitor: newMockAlertMonitor(), + retryMaxCount: 10, + retryBackoff: &backoff.ConstantBackoff{Interval: 50 * time.Millisecond}, + schedulerPollBackoff: 10 * time.Millisecond, + } + suite.SetupTest(t) + defer suite.TeardownTest(t) + + // Publish task with full event data (simulates initial delivery) + task := models.DeliveryTask{ + Event: event, + DestinationID: destination.ID, + } + require.NoError(t, suite.deliveryMQ.Publish(ctx, task)) + + // Wait for initial delivery attempt + require.Eventually(t, func() bool { + return publisher.Current() >= 1 + }, 2*time.Second, 10*time.Millisecond, "should complete initial delivery attempt") + + // Wait enough time for retry to be attempted (but it should fail with event fetch error) + // 50ms backoff + 10ms 
poll = 60ms minimum for retry attempt + time.Sleep(200 * time.Millisecond) + + // Delivery should still be at 1 because event fetch error prevented retry delivery + // Note: The retry message is NOT deleted, it will be retried after visibility timeout (30s) + assert.Equal(t, 1, publisher.Current(), "retry delivery should not proceed when event fetch fails") +} + +func TestRetryScheduler_EventFetchSuccess(t *testing.T) { + // Test scenario: + // - Initial delivery fails and schedules a retry + // - Retry scheduler successfully fetches event from logstore + // - DeliveryTask published to deliverymq should have full event data (non-zero Time) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Setup test data + tenant := models.Tenant{ID: idgen.String()} + destination := testutil.DestinationFactory.Any( + testutil.DestinationFactory.WithType("webhook"), + testutil.DestinationFactory.WithTenantID(tenant.ID), + ) + event := testutil.EventFactory.Any( + testutil.EventFactory.WithTenantID(tenant.ID), + testutil.EventFactory.WithDestinationID(destination.ID), + testutil.EventFactory.WithEligibleForRetry(true), + ) + + // Setup mocks - publisher fails on first attempt, succeeds on second + publisher := newMockPublisher([]error{ + &destregistry.ErrDestinationPublishAttempt{ + Err: errors.New("webhook returned 503"), + Provider: "webhook", + }, + nil, // Second attempt succeeds + }) + + // Event getter has the event registered + eventGetter := newMockEventGetter() + eventGetter.registerEvent(&event) + + logPublisher := newMockLogPublisher(nil) + + suite := &RetryDeliveryMQSuite{ + ctx: ctx, + mqConfig: &mqs.QueueConfig{InMemory: &mqs.InMemoryConfig{Name: testutil.RandomString(5)}}, + publisher: publisher, + eventGetter: eventGetter, + logPublisher: logPublisher, + destGetter: &mockDestinationGetter{dest: &destination}, + alertMonitor: newMockAlertMonitor(), + retryMaxCount: 10, + retryBackoff: &backoff.ConstantBackoff{Interval: 
50 * time.Millisecond}, + schedulerPollBackoff: 10 * time.Millisecond, + } + suite.SetupTest(t) + defer suite.TeardownTest(t) + + // Publish task with full event data (simulates initial delivery) + task := models.DeliveryTask{ + Event: event, + DestinationID: destination.ID, + } + require.NoError(t, suite.deliveryMQ.Publish(ctx, task)) + + // Wait for both delivery attempts to complete + require.Eventually(t, func() bool { + return publisher.Current() >= 2 + }, 3*time.Second, 10*time.Millisecond, "should complete 2 delivery attempts") + + assert.Equal(t, 2, publisher.Current(), "should complete 2 delivery attempts (initial failure + successful retry)") + + // Verify that the retry delivery had full event data by checking log entries + require.Len(t, logPublisher.entries, 2, "should have 2 delivery log entries") + + // Both log entries should have non-zero event Time (full event data) + assert.False(t, logPublisher.entries[0].Event.Time.IsZero(), "first delivery should have full event data") + assert.False(t, logPublisher.entries[1].Event.Time.IsZero(), "retry delivery should have full event data (fetched from logstore)") +} + +// TestRetryScheduler_RaceCondition_EventNotYetPersisted verifies that retries are not +// lost when the retry scheduler queries logstore before the event has been persisted. +// +// Scenario: +// 1. Initial delivery fails, retry is scheduled +// 2. Retry scheduler runs and queries logstore for event data +// 3. Event is not yet persisted (logmq batching delay) +// 4. Retry should remain in queue and be reprocessed later +// 5. 
Once event is available, retry succeeds +func TestRetryScheduler_RaceCondition_EventNotYetPersisted(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Setup test data + tenant := models.Tenant{ID: idgen.String()} + destination := testutil.DestinationFactory.Any( + testutil.DestinationFactory.WithType("webhook"), + testutil.DestinationFactory.WithTenantID(tenant.ID), + ) + event := testutil.EventFactory.Any( + testutil.EventFactory.WithTenantID(tenant.ID), + testutil.EventFactory.WithDestinationID(destination.ID), + testutil.EventFactory.WithEligibleForRetry(true), + ) + + // Publisher: fails first attempt, succeeds after + publisher := newMockPublisher([]error{ + &destregistry.ErrDestinationPublishAttempt{ + Err: errors.New("webhook returned 503"), + Provider: "webhook", + }, + }) + logPublisher := newMockLogPublisher(nil) + destGetter := &mockDestinationGetter{dest: &destination} + alertMonitor := newMockAlertMonitor() + + // Event getter returns (nil, nil) on first call, then returns event + // This simulates: logmq hasn't persisted the event yet when retry first runs + eventGetter := newMockDelayedEventGetter(&event, 1) // Return nil for first call + + // Setup deliveryMQ + mqConfig := &mqs.QueueConfig{InMemory: &mqs.InMemoryConfig{Name: testutil.RandomString(5)}} + deliveryMQ := deliverymq.New(deliverymq.WithQueue(mqConfig)) + cleanup, err := deliveryMQ.Init(ctx) + require.NoError(t, err) + defer cleanup() + + // Setup retry scheduler with short visibility timeout for faster test + // When event is not found, the message will be retried after 1 second + retryScheduler, err := deliverymq.NewRetryScheduler( + deliveryMQ, + testutil.CreateTestRedisConfig(t), + "", + 10*time.Millisecond, // Fast polling + testutil.CreateTestLogger(t), + eventGetter, + deliverymq.WithRetryVisibilityTimeout(1), // 1 second visibility timeout + ) + require.NoError(t, err) + require.NoError(t, retryScheduler.Init(ctx)) + 
go retryScheduler.Monitor(ctx) + defer retryScheduler.Shutdown() + + // Setup message handler with short retry backoff + handler := deliverymq.NewMessageHandler( + testutil.CreateTestLogger(t), + logPublisher, + destGetter, + publisher, + testutil.NewMockEventTracer(nil), + retryScheduler, + &backoff.ConstantBackoff{Interval: 50 * time.Millisecond}, // Short backoff + 10, + alertMonitor, + idempotence.New(testutil.CreateTestRedisClient(t), idempotence.WithSuccessfulTTL(24*time.Hour)), + ) + + // Setup message consumer + mq := mqs.NewQueue(mqConfig) + subscription, err := mq.Subscribe(ctx) + require.NoError(t, err) + defer subscription.Shutdown(ctx) + + go func() { + for { + msg, err := subscription.Receive(ctx) + if err != nil { + return + } + handler.Handle(ctx, msg) + } + }() + + // Publish task with full event data (simulates initial delivery) + task := models.DeliveryTask{ + Event: event, + DestinationID: destination.ID, + } + require.NoError(t, deliveryMQ.Publish(ctx, task)) + + // Wait for initial delivery to fail and retry to be scheduled + require.Eventually(t, func() bool { + return publisher.Current() >= 1 + }, 2*time.Second, 10*time.Millisecond, "initial delivery should complete") + + // Wait for retry to be processed: + // - First retry attempt: event not found, message returns to queue + // - After 1s visibility timeout: message becomes visible again + // - Second retry attempt: event now available, delivery succeeds + time.Sleep(2 * time.Second) + + // Should have 2 publish attempts: initial failure + successful retry + assert.Equal(t, 2, publisher.Current(), + "expected 2 delivery attempts (initial + retry after event becomes available)") +} diff --git a/internal/idempotence/idempotence_test.go b/internal/idempotence/idempotence_test.go index 9bc81c05..269bcb36 100644 --- a/internal/idempotence/idempotence_test.go +++ b/internal/idempotence/idempotence_test.go @@ -450,17 +450,6 @@ func TestIntegrationIdempotence_WithConcurrentHandlerAndSuccess(t 
*testing.T) { go consumerFn("1") go consumerFn("2") - errs := []error{} - go func() { - for { - select { - case err := <-errchan: - errs = append(errs, err) - case <-ctx.Done(): - return - } - } - }() id := idgen.String() err = mq.Publish(ctx, &MockMsg{ID: id}) @@ -468,7 +457,16 @@ func TestIntegrationIdempotence_WithConcurrentHandlerAndSuccess(t *testing.T) { err = mq.Publish(ctx, &MockMsg{ID: id}) require.Nil(t, err) - <-ctx.Done() + // Collect exactly 2 errors (one per published message) + errs := make([]error, 0, 2) + for i := 0; i < 2; i++ { + select { + case err := <-errchan: + errs = append(errs, err) + case <-ctx.Done(): + require.Fail(t, "timeout waiting for consumer results") + } + } assert.Len(t, execTimestamps, 1) require.Len(t, errs, 2, "should have 2 errors") diff --git a/internal/logmq/batchprocessor_test.go b/internal/logmq/batchprocessor_test.go index 7d3c08c6..12dc5d63 100644 --- a/internal/logmq/batchprocessor_test.go +++ b/internal/logmq/batchprocessor_test.go @@ -78,7 +78,7 @@ func TestBatchProcessor_ValidEntry(t *testing.T) { bp, err := logmq.NewBatchProcessor(ctx, logger, logStore, logmq.BatchProcessorConfig{ ItemCountThreshold: 1, - DelayThreshold: 10 * time.Millisecond, + DelayThreshold: 1 * time.Second, }) require.NoError(t, err) defer bp.Shutdown() @@ -95,7 +95,7 @@ func TestBatchProcessor_ValidEntry(t *testing.T) { require.NoError(t, err) // Wait for batch to process - time.Sleep(50 * time.Millisecond) + time.Sleep(200 * time.Millisecond) assert.True(t, mock.acked, "valid message should be acked") assert.False(t, mock.nacked, "valid message should not be nacked") @@ -112,7 +112,7 @@ func TestBatchProcessor_InvalidEntry_MissingEvent(t *testing.T) { bp, err := logmq.NewBatchProcessor(ctx, logger, logStore, logmq.BatchProcessorConfig{ ItemCountThreshold: 1, - DelayThreshold: 10 * time.Millisecond, + DelayThreshold: 1 * time.Second, }) require.NoError(t, err) defer bp.Shutdown() @@ -128,7 +128,7 @@ func 
TestBatchProcessor_InvalidEntry_MissingEvent(t *testing.T) { require.NoError(t, err) // Wait for batch to process - time.Sleep(50 * time.Millisecond) + time.Sleep(200 * time.Millisecond) assert.False(t, mock.acked, "invalid message should not be acked") assert.True(t, mock.nacked, "invalid message should be nacked") @@ -145,7 +145,7 @@ func TestBatchProcessor_InvalidEntry_MissingDelivery(t *testing.T) { bp, err := logmq.NewBatchProcessor(ctx, logger, logStore, logmq.BatchProcessorConfig{ ItemCountThreshold: 1, - DelayThreshold: 10 * time.Millisecond, + DelayThreshold: 1 * time.Second, }) require.NoError(t, err) defer bp.Shutdown() @@ -161,7 +161,7 @@ func TestBatchProcessor_InvalidEntry_MissingDelivery(t *testing.T) { require.NoError(t, err) // Wait for batch to process - time.Sleep(50 * time.Millisecond) + time.Sleep(200 * time.Millisecond) assert.False(t, mock.acked, "invalid message should not be acked") assert.True(t, mock.nacked, "invalid message should be nacked") @@ -178,7 +178,7 @@ func TestBatchProcessor_InvalidEntry_DoesNotBlockBatch(t *testing.T) { bp, err := logmq.NewBatchProcessor(ctx, logger, logStore, logmq.BatchProcessorConfig{ ItemCountThreshold: 3, // Wait for 3 messages before processing - DelayThreshold: 10 * time.Millisecond, + DelayThreshold: 1 * time.Second, }) require.NoError(t, err) defer bp.Shutdown() @@ -206,7 +206,7 @@ func TestBatchProcessor_InvalidEntry_DoesNotBlockBatch(t *testing.T) { require.NoError(t, bp.Add(ctx, msg3)) // Wait for batch to process - time.Sleep(50 * time.Millisecond) + time.Sleep(200 * time.Millisecond) // Valid messages should be acked assert.True(t, mock1.acked, "valid message 1 should be acked") @@ -233,7 +233,7 @@ func TestBatchProcessor_MalformedJSON(t *testing.T) { bp, err := logmq.NewBatchProcessor(ctx, logger, logStore, logmq.BatchProcessorConfig{ ItemCountThreshold: 1, - DelayThreshold: 10 * time.Millisecond, + DelayThreshold: 1 * time.Second, }) require.NoError(t, err) defer bp.Shutdown() @@ -243,7 +243,7 
@@ func TestBatchProcessor_MalformedJSON(t *testing.T) { require.NoError(t, err) // Wait for batch to process - time.Sleep(50 * time.Millisecond) + time.Sleep(200 * time.Millisecond) assert.False(t, mock.acked, "malformed message should not be acked") assert.True(t, mock.nacked, "malformed message should be nacked") diff --git a/internal/services/builder.go b/internal/services/builder.go index ce01dae3..3a67eb82 100644 --- a/internal/services/builder.go +++ b/internal/services/builder.go @@ -308,7 +308,6 @@ func (b *ServiceBuilder) BuildDeliveryWorker(baseRouter *gin.Engine) error { b.logger, svc.logMQ, svc.entityStore, - svc.logStore, svc.destRegistry, svc.eventTracer, svc.retryScheduler, @@ -555,9 +554,16 @@ func (s *serviceInstance) initRetryScheduler(ctx context.Context, cfg *config.Co if s.deliveryMQ == nil { return fmt.Errorf("delivery MQ must be initialized before retry scheduler") } + if s.logStore == nil { + return fmt.Errorf("log store must be initialized before retry scheduler") + } logger.Debug("creating delivery MQ retry scheduler", zap.String("service", s.name)) pollBackoff := time.Duration(cfg.RetryPollBackoffMs) * time.Millisecond - retryScheduler, err := deliverymq.NewRetryScheduler(s.deliveryMQ, cfg.Redis.ToConfig(), cfg.DeploymentID, pollBackoff, logger) + var retrySchedulerOpts []deliverymq.RetrySchedulerOption + if cfg.RetryVisibilityTimeoutSeconds > 0 { + retrySchedulerOpts = append(retrySchedulerOpts, deliverymq.WithRetryVisibilityTimeout(uint(cfg.RetryVisibilityTimeoutSeconds))) + } + retryScheduler, err := deliverymq.NewRetryScheduler(s.deliveryMQ, cfg.Redis.ToConfig(), cfg.DeploymentID, pollBackoff, logger, s.logStore, retrySchedulerOpts...) 
if err != nil { logger.Error("failed to create delivery MQ retry scheduler", zap.String("service", s.name), zap.Error(err)) return err From 63c7c067872173801c4720659c72c20c19c5aea9 Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Tue, 27 Jan 2026 01:28:52 +0700 Subject: [PATCH 12/23] test: redis testcontainer flakiness --- go.mod | 1 - go.sum | 4 ---- internal/util/testinfra/redis.go | 33 +++++++++++++++++++++++--------- 3 files changed, 24 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index c2de85c1..48572390 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,6 @@ require ( github.com/testcontainers/testcontainers-go/modules/localstack v0.36.0 github.com/testcontainers/testcontainers-go/modules/postgres v0.36.0 github.com/testcontainers/testcontainers-go/modules/rabbitmq v0.36.0 - github.com/testcontainers/testcontainers-go/modules/redis v0.36.0 github.com/uptrace/opentelemetry-go-extra/otelzap v0.3.1 github.com/urfave/cli/v3 v3.4.1 go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.53.0 diff --git a/go.sum b/go.sum index 6c635117..af9e1f8a 100644 --- a/go.sum +++ b/go.sum @@ -906,8 +906,6 @@ github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4 github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= -github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= @@ 
-1288,8 +1286,6 @@ github.com/testcontainers/testcontainers-go/modules/postgres v0.36.0 h1:xTGNNsOD github.com/testcontainers/testcontainers-go/modules/postgres v0.36.0/go.mod h1:WKS3MGq1lzbVibIRnL08TOaf5bKWPxJe5frzyQfV4oY= github.com/testcontainers/testcontainers-go/modules/rabbitmq v0.36.0 h1:gobSVNvTsiJTcGTlVJMpeUfAcz85HAMMwo8xEVQZItE= github.com/testcontainers/testcontainers-go/modules/rabbitmq v0.36.0/go.mod h1:rLtFlrLEWcU/Ud52FiGk57QvUqoAHvR380hZo+tkBaI= -github.com/testcontainers/testcontainers-go/modules/redis v0.36.0 h1:Z+6APQ0DjQP8Kj5Fu+lkAlH2v7f5QkAQyyjnf1Kq8sw= -github.com/testcontainers/testcontainers-go/modules/redis v0.36.0/go.mod h1:LV66RJhSMikZrxJRc6O0nKcRqykmjQSyX82S93haE2w= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= diff --git a/internal/util/testinfra/redis.go b/internal/util/testinfra/redis.go index 0ad9933a..ea5094bc 100644 --- a/internal/util/testinfra/redis.go +++ b/internal/util/testinfra/redis.go @@ -11,7 +11,6 @@ import ( "github.com/hookdeck/outpost/internal/redis" "github.com/testcontainers/testcontainers-go" - rediscontainer "github.com/testcontainers/testcontainers-go/modules/redis" "github.com/testcontainers/testcontainers-go/wait" ) @@ -54,14 +53,23 @@ func NewRedisConfig(t *testing.T) *redis.RedisConfig { func NewRedisStackConfig(t *testing.T) *redis.RedisConfig { ctx := context.Background() - container, err := rediscontainer.Run(ctx, - "redis/redis-stack-server:latest", - ) + // Use generic container with explicit port exposure and wait strategy to avoid + // race conditions when multiple tests spin up containers in parallel. 
+ req := testcontainers.ContainerRequest{ + Image: "redis/redis-stack-server:latest", + ExposedPorts: []string{"6379/tcp"}, + WaitingFor: wait.ForListeningPort("6379/tcp"), + } + + container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: req, + Started: true, + }) if err != nil { t.Fatalf("failed to start redis-stack container: %v", err) } - endpoint, err := container.PortEndpoint(ctx, "6379/tcp", "") + endpoint, err := container.Endpoint(ctx, "") if err != nil { t.Fatalf("failed to get redis-stack endpoint: %v", err) } @@ -227,14 +235,21 @@ func EnsureDragonfly() string { func startRedisTestContainer(cfg *Config) { ctx := context.Background() - redisContainer, err := rediscontainer.Run(ctx, - "redis/redis-stack-server:latest", - ) + req := testcontainers.ContainerRequest{ + Image: "redis/redis-stack-server:latest", + ExposedPorts: []string{"6379/tcp"}, + WaitingFor: wait.ForListeningPort("6379/tcp"), + } + + redisContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: req, + Started: true, + }) if err != nil { panic(err) } - endpoint, err := redisContainer.PortEndpoint(ctx, "6379/tcp", "") + endpoint, err := redisContainer.Endpoint(ctx, "") if err != nil { panic(err) } From 622f3a7ae5938864b884dc9de46f66a185151f2f Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Tue, 27 Jan 2026 02:15:09 +0700 Subject: [PATCH 13/23] refactor: rename Delivery to Attempt in core + API layer Co-Authored-By: Claude Opus 4.5 --- .outpost.yaml.dev | 1 - internal/apirouter/log_handlers.go | 118 +++++----- internal/apirouter/retry_handlers.go | 30 +-- internal/apirouter/router.go | 18 +- internal/app/app.go | 4 +- internal/config/config.go | 10 +- internal/config/id_gen.go | 2 +- internal/deliverymq/messagehandler.go | 106 ++++----- internal/destregistry/registry.go | 38 +-- internal/emetrics/emetrics.go | 4 +- internal/idgen/idgen.go | 16 +- internal/logmq/batchprocessor.go 
| 10 +- internal/logstore/chlogstore/chlogstore.go | 194 +++++++-------- internal/logstore/driver/driver.go | 26 +-- internal/logstore/drivertest/crud.go | 88 +++---- internal/logstore/logstore.go | 12 +- internal/logstore/memlogstore/memlogstore.go | 160 ++++++------- internal/logstore/pglogstore/pglogstore.go | 234 +++++++++---------- internal/models/event.go | 14 +- internal/util/testutil/event.go | 70 +++--- 20 files changed, 577 insertions(+), 578 deletions(-) diff --git a/.outpost.yaml.dev b/.outpost.yaml.dev index 9e30a388..b0378910 100644 --- a/.outpost.yaml.dev +++ b/.outpost.yaml.dev @@ -73,7 +73,6 @@ idgen: event_prefix: "evt" destination_prefix: "des" delivery_prefix: "dlv" -" # Concurrency publish_max_concurrency: 1 diff --git a/internal/apirouter/log_handlers.go b/internal/apirouter/log_handlers.go index 478a4460..a50a16fc 100644 --- a/internal/apirouter/log_handlers.go +++ b/internal/apirouter/log_handlers.go @@ -86,8 +86,8 @@ func parseIncludeOptions(c *gin.Context) IncludeOptions { // API Response types -// APIDelivery is the API response for a delivery -type APIDelivery struct { +// APIAttempt is the API response for an attempt +type APIAttempt struct { ID string `json:"id"` Status string `json:"status"` DeliveredAt time.Time `json:"delivered_at"` @@ -130,9 +130,9 @@ type APIEvent struct { Data map[string]interface{} `json:"data,omitempty"` } -// DeliveryPaginatedResult is the paginated response for listing deliveries. -type DeliveryPaginatedResult struct { - Models []APIDelivery `json:"models"` +// AttemptPaginatedResult is the paginated response for listing attempts. 
+type AttemptPaginatedResult struct { + Models []APIAttempt `json:"models"` Pagination SeekPagination `json:"pagination"` } @@ -142,47 +142,47 @@ type EventPaginatedResult struct { Pagination SeekPagination `json:"pagination"` } -// toAPIDelivery converts a DeliveryRecord to APIDelivery with expand options -func toAPIDelivery(dr *logstore.DeliveryRecord, opts IncludeOptions) APIDelivery { - api := APIDelivery{ - Attempt: dr.Delivery.Attempt, - Manual: dr.Delivery.Manual, - Destination: dr.Delivery.DestinationID, +// toAPIAttempt converts an AttemptRecord to APIAttempt with expand options +func toAPIAttempt(ar *logstore.AttemptRecord, opts IncludeOptions) APIAttempt { + api := APIAttempt{ + Attempt: ar.Attempt.AttemptNumber, + Manual: ar.Attempt.Manual, + Destination: ar.Attempt.DestinationID, } - if dr.Delivery != nil { - api.ID = dr.Delivery.ID - api.Status = dr.Delivery.Status - api.DeliveredAt = dr.Delivery.Time - api.Code = dr.Delivery.Code + if ar.Attempt != nil { + api.ID = ar.Attempt.ID + api.Status = ar.Attempt.Status + api.DeliveredAt = ar.Attempt.Time + api.Code = ar.Attempt.Code if opts.ResponseData { - api.ResponseData = dr.Delivery.ResponseData + api.ResponseData = ar.Attempt.ResponseData } } - if dr.Event != nil { + if ar.Event != nil { if opts.EventData { api.Event = APIEventFull{ - ID: dr.Event.ID, - Topic: dr.Event.Topic, - Time: dr.Event.Time, - EligibleForRetry: dr.Event.EligibleForRetry, - Metadata: dr.Event.Metadata, - Data: dr.Event.Data, + ID: ar.Event.ID, + Topic: ar.Event.Topic, + Time: ar.Event.Time, + EligibleForRetry: ar.Event.EligibleForRetry, + Metadata: ar.Event.Metadata, + Data: ar.Event.Data, } } else if opts.Event { api.Event = APIEventSummary{ - ID: dr.Event.ID, - Topic: dr.Event.Topic, - Time: dr.Event.Time, - EligibleForRetry: dr.Event.EligibleForRetry, - Metadata: dr.Event.Metadata, + ID: ar.Event.ID, + Topic: ar.Event.Topic, + Time: ar.Event.Time, + EligibleForRetry: ar.Event.EligibleForRetry, + Metadata: ar.Event.Metadata, } 
} else { - api.Event = dr.Event.ID + api.Event = ar.Event.ID } } else { - api.Event = dr.Delivery.EventID + api.Event = ar.Attempt.EventID } // TODO: Handle destination expansion @@ -193,17 +193,17 @@ func toAPIDelivery(dr *logstore.DeliveryRecord, opts IncludeOptions) APIDelivery return api } -// ListDeliveries handles GET /:tenantID/deliveries +// ListAttempts handles GET /:tenantID/attempts // Query params: event_id, destination_id, status, topic[], start, end, limit, next, prev, expand[], sort_order -func (h *LogHandlers) ListDeliveries(c *gin.Context) { +func (h *LogHandlers) ListAttempts(c *gin.Context) { tenant := mustTenantFromContext(c) if tenant == nil { return } - h.listDeliveriesInternal(c, tenant.ID) + h.listAttemptsInternal(c, tenant.ID) } -func (h *LogHandlers) listDeliveriesInternal(c *gin.Context, tenantID string) { +func (h *LogHandlers) listAttemptsInternal(c *gin.Context, tenantID string) { // Parse and validate cursors (next/prev are mutually exclusive) cursors, errResp := ParseCursors(c) if errResp != nil { @@ -234,7 +234,7 @@ func (h *LogHandlers) listDeliveriesInternal(c *gin.Context, tenantID string) { _ = orderBy // Parse time date filters - deliveryTimeFilter, errResp := ParseDateFilter(c, "time") + attemptTimeFilter, errResp := ParseDateFilter(c, "time") if errResp != nil { AbortWithError(c, errResp.Code, *errResp) return @@ -247,17 +247,17 @@ func (h *LogHandlers) listDeliveriesInternal(c *gin.Context, tenantID string) { destinationIDs = []string{destID} } - req := logstore.ListDeliveryRequest{ + req := logstore.ListAttemptRequest{ TenantID: tenantID, EventID: c.Query("event_id"), DestinationIDs: destinationIDs, Status: c.Query("status"), Topics: parseQueryArray(c, "topic"), TimeFilter: logstore.TimeFilter{ - GTE: deliveryTimeFilter.GTE, - LTE: deliveryTimeFilter.LTE, - GT: deliveryTimeFilter.GT, - LT: deliveryTimeFilter.LT, + GTE: attemptTimeFilter.GTE, + LTE: attemptTimeFilter.LTE, + GT: attemptTimeFilter.GT, + LT: 
attemptTimeFilter.LT, }, Limit: limit, Next: cursors.Next, @@ -265,7 +265,7 @@ func (h *LogHandlers) listDeliveriesInternal(c *gin.Context, tenantID string) { SortOrder: dir, } - response, err := h.logStore.ListDelivery(c.Request.Context(), req) + response, err := h.logStore.ListAttempt(c.Request.Context(), req) if err != nil { if errors.Is(err, cursor.ErrInvalidCursor) || errors.Is(err, cursor.ErrVersionMismatch) { AbortWithError(c, http.StatusBadRequest, NewErrBadRequest(err)) @@ -277,13 +277,13 @@ func (h *LogHandlers) listDeliveriesInternal(c *gin.Context, tenantID string) { includeOpts := parseIncludeOptions(c) - apiDeliveries := make([]APIDelivery, len(response.Data)) - for i, de := range response.Data { - apiDeliveries[i] = toAPIDelivery(de, includeOpts) + apiAttempts := make([]APIAttempt, len(response.Data)) + for i, ar := range response.Data { + apiAttempts[i] = toAPIAttempt(ar, includeOpts) } - c.JSON(http.StatusOK, DeliveryPaginatedResult{ - Models: apiDeliveries, + c.JSON(http.StatusOK, AttemptPaginatedResult{ + Models: apiAttempts, Pagination: SeekPagination{ OrderBy: orderBy, Dir: dir, @@ -323,30 +323,30 @@ func (h *LogHandlers) RetrieveEvent(c *gin.Context) { }) } -// RetrieveDelivery handles GET /:tenantID/deliveries/:deliveryID -func (h *LogHandlers) RetrieveDelivery(c *gin.Context) { +// RetrieveAttempt handles GET /:tenantID/attempts/:attemptID +func (h *LogHandlers) RetrieveAttempt(c *gin.Context) { tenant := mustTenantFromContext(c) if tenant == nil { return } - deliveryID := c.Param("deliveryID") + attemptID := c.Param("attemptID") - deliveryRecord, err := h.logStore.RetrieveDelivery(c.Request.Context(), logstore.RetrieveDeliveryRequest{ - TenantID: tenant.ID, - DeliveryID: deliveryID, + attemptRecord, err := h.logStore.RetrieveAttempt(c.Request.Context(), logstore.RetrieveAttemptRequest{ + TenantID: tenant.ID, + AttemptID: attemptID, }) if err != nil { AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) return } - if 
deliveryRecord == nil { - AbortWithError(c, http.StatusNotFound, NewErrNotFound("delivery")) + if attemptRecord == nil { + AbortWithError(c, http.StatusNotFound, NewErrNotFound("attempt")) return } includeOpts := parseIncludeOptions(c) - c.JSON(http.StatusOK, toAPIDelivery(deliveryRecord, includeOpts)) + c.JSON(http.StatusOK, toAPIAttempt(attemptRecord, includeOpts)) } // AdminListEvents handles GET /events (admin-only, cross-tenant) @@ -355,10 +355,10 @@ func (h *LogHandlers) AdminListEvents(c *gin.Context) { h.listEventsInternal(c, c.Query("tenant_id")) } -// AdminListDeliveries handles GET /deliveries (admin-only, cross-tenant) +// AdminListAttempts handles GET /attempts (admin-only, cross-tenant) // Query params: tenant_id (optional), event_id, destination_id, status, topic[], start, end, limit, next, prev, expand[], sort_order -func (h *LogHandlers) AdminListDeliveries(c *gin.Context) { - h.listDeliveriesInternal(c, c.Query("tenant_id")) +func (h *LogHandlers) AdminListAttempts(c *gin.Context) { + h.listAttemptsInternal(c, c.Query("tenant_id")) } // ListEvents handles GET /:tenantID/events diff --git a/internal/apirouter/retry_handlers.go b/internal/apirouter/retry_handlers.go index ec2844c6..f52f99d3 100644 --- a/internal/apirouter/retry_handlers.go +++ b/internal/apirouter/retry_handlers.go @@ -32,33 +32,33 @@ func NewRetryHandlers( } } -// RetryDelivery handles POST /:tenantID/deliveries/:deliveryID/retry +// RetryAttempt handles POST /:tenantID/attempts/:attemptID/retry // Constraints: -// - Only the latest delivery for an event+destination pair can be retried +// - Only the latest attempt for an event+destination pair can be retried // - Destination must exist and be enabled -func (h *RetryHandlers) RetryDelivery(c *gin.Context) { +func (h *RetryHandlers) RetryAttempt(c *gin.Context) { tenant := mustTenantFromContext(c) if tenant == nil { return } - deliveryID := c.Param("deliveryID") + attemptID := c.Param("attemptID") - // 1. 
Look up delivery by ID - deliveryRecord, err := h.logStore.RetrieveDelivery(c.Request.Context(), logstore.RetrieveDeliveryRequest{ - TenantID: tenant.ID, - DeliveryID: deliveryID, + // 1. Look up attempt by ID + attemptRecord, err := h.logStore.RetrieveAttempt(c.Request.Context(), logstore.RetrieveAttemptRequest{ + TenantID: tenant.ID, + AttemptID: attemptID, }) if err != nil { AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) return } - if deliveryRecord == nil { - AbortWithError(c, http.StatusNotFound, NewErrNotFound("delivery")) + if attemptRecord == nil { + AbortWithError(c, http.StatusNotFound, NewErrNotFound("attempt")) return } // 2. Check destination exists and is enabled - destination, err := h.entityStore.RetrieveDestination(c.Request.Context(), tenant.ID, deliveryRecord.Delivery.DestinationID) + destination, err := h.entityStore.RetrieveDestination(c.Request.Context(), tenant.ID, attemptRecord.Attempt.DestinationID) if err != nil { AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) return @@ -79,7 +79,7 @@ func (h *RetryHandlers) RetryDelivery(c *gin.Context) { } // 3. 
Create and publish manual delivery task - task := models.NewManualDeliveryTask(*deliveryRecord.Event, deliveryRecord.Delivery.DestinationID) + task := models.NewManualDeliveryTask(*attemptRecord.Event, attemptRecord.Attempt.DestinationID) if err := h.deliveryMQ.Publish(c.Request.Context(), task); err != nil { AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) @@ -87,10 +87,10 @@ func (h *RetryHandlers) RetryDelivery(c *gin.Context) { } h.logger.Ctx(c.Request.Context()).Audit("manual retry initiated", - zap.String("delivery_id", deliveryID), - zap.String("event_id", deliveryRecord.Event.ID), + zap.String("attempt_id", attemptID), + zap.String("event_id", attemptRecord.Event.ID), zap.String("tenant_id", tenant.ID), - zap.String("destination_id", deliveryRecord.Delivery.DestinationID), + zap.String("destination_id", attemptRecord.Attempt.DestinationID), zap.String("destination_type", destination.Type)) c.JSON(http.StatusAccepted, gin.H{ diff --git a/internal/apirouter/router.go b/internal/apirouter/router.go index 8d42539c..a3fad430 100644 --- a/internal/apirouter/router.go +++ b/internal/apirouter/router.go @@ -170,8 +170,8 @@ func NewRouter( }, { Method: http.MethodGet, - Path: "/deliveries", - Handler: logHandlers.AdminListDeliveries, + Path: "/attempts", + Handler: logHandlers.AdminListAttempts, AuthScope: AuthScopeAdmin, Mode: RouteModeAlways, }, @@ -353,11 +353,11 @@ func NewRouter( }, }, - // Delivery routes + // Attempt routes { Method: http.MethodGet, - Path: "/:tenantID/deliveries", - Handler: logHandlers.ListDeliveries, + Path: "/:tenantID/attempts", + Handler: logHandlers.ListAttempts, AuthScope: AuthScopeAdminOrTenant, Mode: RouteModeAlways, Middlewares: []gin.HandlerFunc{ @@ -366,8 +366,8 @@ func NewRouter( }, { Method: http.MethodGet, - Path: "/:tenantID/deliveries/:deliveryID", - Handler: logHandlers.RetrieveDelivery, + Path: "/:tenantID/attempts/:attemptID", + Handler: logHandlers.RetrieveAttempt, AuthScope: 
AuthScopeAdminOrTenant, Mode: RouteModeAlways, Middlewares: []gin.HandlerFunc{ @@ -376,8 +376,8 @@ func NewRouter( }, { Method: http.MethodPost, - Path: "/:tenantID/deliveries/:deliveryID/retry", - Handler: retryHandlers.RetryDelivery, + Path: "/:tenantID/attempts/:attemptID/retry", + Handler: retryHandlers.RetryAttempt, AuthScope: AuthScopeAdminOrTenant, Mode: RouteModeAlways, Middlewares: []gin.HandlerFunc{ diff --git a/internal/app/app.go b/internal/app/app.go index 3dd86e61..fcb55a8a 100644 --- a/internal/app/app.go +++ b/internal/app/app.go @@ -175,13 +175,13 @@ func (a *App) configureIDGenerators() error { zap.String("type", a.config.IDGen.Type), zap.String("event_prefix", a.config.IDGen.EventPrefix), zap.String("destination_prefix", a.config.IDGen.DestinationPrefix), - zap.String("delivery_prefix", a.config.IDGen.DeliveryPrefix)) + zap.String("attempt_prefix", a.config.IDGen.AttemptPrefix)) if err := idgen.Configure(idgen.IDGenConfig{ Type: a.config.IDGen.Type, EventPrefix: a.config.IDGen.EventPrefix, DestinationPrefix: a.config.IDGen.DestinationPrefix, - DeliveryPrefix: a.config.IDGen.DeliveryPrefix, + AttemptPrefix: a.config.IDGen.AttemptPrefix, }); err != nil { a.logger.Error("failed to configure ID generators", zap.Error(err)) return err diff --git a/internal/config/config.go b/internal/config/config.go index 035c20c4..35fed1d5 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -76,11 +76,11 @@ type Config struct { LogMaxConcurrency int `yaml:"log_max_concurrency" env:"LOG_MAX_CONCURRENCY" desc:"Maximum number of log writing operations to process concurrently." required:"N"` // Delivery Retry - RetrySchedule []int `yaml:"retry_schedule" env:"RETRY_SCHEDULE" envSeparator:"," desc:"Comma-separated list of retry delays in seconds. If provided, overrides retry_interval_seconds and retry_max_limit. Schedule length defines the max number of retries. Example: '5,60,600,3600,7200' for 5 retries at 5s, 1m, 10m, 1h, 2h." 
required:"N"` - RetryIntervalSeconds int `yaml:"retry_interval_seconds" env:"RETRY_INTERVAL_SECONDS" desc:"Interval in seconds for exponential backoff retry strategy (base 2). Ignored if retry_schedule is provided." required:"N"` - RetryMaxLimit int `yaml:"retry_max_limit" env:"MAX_RETRY_LIMIT" desc:"Maximum number of retry attempts for a single event delivery before giving up. Ignored if retry_schedule is provided." required:"N"` - RetryPollBackoffMs int `yaml:"retry_poll_backoff_ms" env:"RETRY_POLL_BACKOFF_MS" desc:"Backoff time in milliseconds when the retry monitor finds no messages to process. When a retry message is found, the monitor immediately polls for the next message without delay. Lower values provide faster retry processing but increase Redis load. For serverless Redis providers (Upstash, ElastiCache Serverless), consider increasing to 5000-10000ms to reduce costs. Default: 100" required:"N"` - RetryVisibilityTimeoutSeconds int `yaml:"retry_visibility_timeout_seconds" env:"RETRY_VISIBILITY_TIMEOUT_SECONDS" desc:"Time in seconds a retry message is hidden after being received before becoming visible again for reprocessing. This applies when event data is temporarily unavailable (e.g., race condition with log persistence). Default: 30" required:"N"` + RetrySchedule []int `yaml:"retry_schedule" env:"RETRY_SCHEDULE" envSeparator:"," desc:"Comma-separated list of retry delays in seconds. If provided, overrides retry_interval_seconds and retry_max_limit. Schedule length defines the max number of retries. Example: '5,60,600,3600,7200' for 5 retries at 5s, 1m, 10m, 1h, 2h." required:"N"` + RetryIntervalSeconds int `yaml:"retry_interval_seconds" env:"RETRY_INTERVAL_SECONDS" desc:"Interval in seconds for exponential backoff retry strategy (base 2). Ignored if retry_schedule is provided." required:"N"` + RetryMaxLimit int `yaml:"retry_max_limit" env:"MAX_RETRY_LIMIT" desc:"Maximum number of retry attempts for a single event delivery before giving up. 
Ignored if retry_schedule is provided." required:"N"` + RetryPollBackoffMs int `yaml:"retry_poll_backoff_ms" env:"RETRY_POLL_BACKOFF_MS" desc:"Backoff time in milliseconds when the retry monitor finds no messages to process. When a retry message is found, the monitor immediately polls for the next message without delay. Lower values provide faster retry processing but increase Redis load. For serverless Redis providers (Upstash, ElastiCache Serverless), consider increasing to 5000-10000ms to reduce costs. Default: 100" required:"N"` + RetryVisibilityTimeoutSeconds int `yaml:"retry_visibility_timeout_seconds" env:"RETRY_VISIBILITY_TIMEOUT_SECONDS" desc:"Time in seconds a retry message is hidden after being received before becoming visible again for reprocessing. This applies when event data is temporarily unavailable (e.g., race condition with log persistence). Default: 30" required:"N"` // Event Delivery MaxDestinationsPerTenant int `yaml:"max_destinations_per_tenant" env:"MAX_DESTINATIONS_PER_TENANT" desc:"Maximum number of destinations allowed per tenant/organization." required:"N"` diff --git a/internal/config/id_gen.go b/internal/config/id_gen.go index ec556b75..dd10b5aa 100644 --- a/internal/config/id_gen.go +++ b/internal/config/id_gen.go @@ -5,5 +5,5 @@ type IDGenConfig struct { Type string `yaml:"type" env:"IDGEN_TYPE" desc:"ID generation type for all entities: uuidv4, uuidv7, nanoid. Default: uuidv4" required:"N"` EventPrefix string `yaml:"event_prefix" env:"IDGEN_EVENT_PREFIX" desc:"Prefix for event IDs, prepended with underscore (e.g., 'evt_123'). Default: empty (no prefix)" required:"N"` DestinationPrefix string `yaml:"destination_prefix" env:"IDGEN_DESTINATION_PREFIX" desc:"Prefix for destination IDs, prepended with underscore (e.g., 'dst_123'). Default: empty (no prefix)" required:"N"` - DeliveryPrefix string `yaml:"delivery_prefix" env:"IDGEN_DELIVERY_PREFIX" desc:"Prefix for delivery IDs, prepended with underscore (e.g., 'dlv_123'). 
Default: empty (no prefix)" required:"N"` + AttemptPrefix string `yaml:"attempt_prefix" env:"IDGEN_ATTEMPT_PREFIX" desc:"Prefix for attempt IDs, prepended with underscore (e.g., 'att_123'). Default: empty (no prefix)" required:"N"` } diff --git a/internal/deliverymq/messagehandler.go b/internal/deliverymq/messagehandler.go index f005e047..98056dbf 100644 --- a/internal/deliverymq/messagehandler.go +++ b/internal/deliverymq/messagehandler.go @@ -40,15 +40,15 @@ func (e *PreDeliveryError) Unwrap() error { return e.err } -type DeliveryError struct { +type AttemptError struct { err error } -func (e *DeliveryError) Error() string { - return fmt.Sprintf("delivery error: %v", e.err) +func (e *AttemptError) Error() string { + return fmt.Sprintf("attempt error: %v", e.err) } -func (e *DeliveryError) Unwrap() error { +func (e *AttemptError) Unwrap() error { return e.err } @@ -78,7 +78,7 @@ type messageHandler struct { } type Publisher interface { - PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Delivery, error) + PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Attempt, error) } type LogPublisher interface { @@ -177,29 +177,29 @@ func (h *messageHandler) doHandle(ctx context.Context, task models.DeliveryTask, _, span := h.eventTracer.Deliver(ctx, &task, destination) defer span.End() - delivery, err := h.publisher.PublishEvent(ctx, destination, &task.Event) + attempt, err := h.publisher.PublishEvent(ctx, destination, &task.Event) if err != nil { - // If delivery is nil, it means no delivery was made. + // If attempt is nil, it means no attempt was made. // This is an unexpected error and considered a pre-delivery error. 
- if delivery == nil { + if attempt == nil { return &PreDeliveryError{err: err} } h.logger.Ctx(ctx).Error("failed to publish event", zap.Error(err), - zap.String("delivery_id", delivery.ID), + zap.String("attempt_id", attempt.ID), zap.String("event_id", task.Event.ID), zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), zap.String("destination_type", destination.Type)) - deliveryErr := &DeliveryError{err: err} + attemptErr := &AttemptError{err: err} if h.shouldScheduleRetry(task, err) { if retryErr := h.scheduleRetry(ctx, task); retryErr != nil { - return h.logDeliveryResult(ctx, &task, destination, delivery, errors.Join(err, retryErr)) + return h.logDeliveryResult(ctx, &task, destination, attempt, errors.Join(err, retryErr)) } } - return h.logDeliveryResult(ctx, &task, destination, delivery, deliveryErr) + return h.logDeliveryResult(ctx, &task, destination, attempt, attemptErr) } // Handle successful delivery @@ -208,52 +208,52 @@ func (h *messageHandler) doHandle(ctx context.Context, task models.DeliveryTask, if err := h.retryScheduler.Cancel(ctx, models.RetryID(task.Event.ID, task.DestinationID)); err != nil { h.logger.Ctx(ctx).Error("failed to cancel scheduled retry", zap.Error(err), - zap.String("delivery_id", delivery.ID), + zap.String("attempt_id", attempt.ID), zap.String("event_id", task.Event.ID), zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), zap.String("destination_type", destination.Type), zap.String("retry_id", models.RetryID(task.Event.ID, task.DestinationID))) - return h.logDeliveryResult(ctx, &task, destination, delivery, err) + return h.logDeliveryResult(ctx, &task, destination, attempt, err) } logger.Audit("scheduled retry canceled", - zap.String("delivery_id", delivery.ID), + zap.String("attempt_id", attempt.ID), zap.String("event_id", task.Event.ID), zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), 
zap.String("destination_type", destination.Type), zap.String("retry_id", models.RetryID(task.Event.ID, task.DestinationID))) } - return h.logDeliveryResult(ctx, &task, destination, delivery, nil) + return h.logDeliveryResult(ctx, &task, destination, attempt, nil) } -func (h *messageHandler) logDeliveryResult(ctx context.Context, task *models.DeliveryTask, destination *models.Destination, delivery *models.Delivery, err error) error { +func (h *messageHandler) logDeliveryResult(ctx context.Context, task *models.DeliveryTask, destination *models.Destination, attempt *models.Attempt, err error) error { logger := h.logger.Ctx(ctx) - // Set delivery fields from task - delivery.TenantID = task.Event.TenantID - delivery.Attempt = task.Attempt - delivery.Manual = task.Manual + // Set attempt fields from task + attempt.TenantID = task.Event.TenantID + attempt.AttemptNumber = task.Attempt + attempt.Manual = task.Manual logger.Audit("event delivered", - zap.String("delivery_id", delivery.ID), + zap.String("attempt_id", attempt.ID), zap.String("event_id", task.Event.ID), zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), zap.String("destination_type", destination.Type), - zap.String("delivery_status", delivery.Status), + zap.String("attempt_status", attempt.Status), zap.Int("attempt", task.Attempt), zap.Bool("manual", task.Manual)) - // Publish delivery log + // Publish attempt log logEntry := models.LogEntry{ - Event: &task.Event, - Delivery: delivery, + Event: &task.Event, + Attempt: attempt, } if logErr := h.logMQ.Publish(ctx, logEntry); logErr != nil { - logger.Error("failed to publish delivery log", + logger.Error("failed to publish attempt log", zap.Error(logErr), - zap.String("delivery_id", delivery.ID), + zap.String("attempt_id", attempt.ID), zap.String("event_id", task.Event.ID), zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), @@ -265,11 +265,11 @@ func (h *messageHandler) 
logDeliveryResult(ctx context.Context, task *models.Del } // Call alert monitor in goroutine - go h.handleAlertAttempt(ctx, task, destination, delivery, err) + go h.handleAlertAttempt(ctx, task, destination, attempt, err) - // If we have a DeliveryError, return it as is - var delErr *DeliveryError - if errors.As(err, &delErr) { + // If we have an AttemptError, return it as is + var attErr *AttemptError + if errors.As(err, &attErr) { return err } @@ -287,9 +287,9 @@ func (h *messageHandler) logDeliveryResult(ctx context.Context, task *models.Del return nil } -func (h *messageHandler) handleAlertAttempt(ctx context.Context, task *models.DeliveryTask, destination *models.Destination, delivery *models.Delivery, err error) { - attempt := alert.DeliveryAttempt{ - Success: delivery.Status == models.DeliveryStatusSuccess, +func (h *messageHandler) handleAlertAttempt(ctx context.Context, task *models.DeliveryTask, destination *models.Destination, attemptResult *models.Attempt, err error) { + alertAttempt := alert.DeliveryAttempt{ + Success: attemptResult.Status == models.AttemptStatusSuccess, DeliveryTask: task, Destination: &alert.AlertDestination{ ID: destination.ID, @@ -300,33 +300,33 @@ func (h *messageHandler) handleAlertAttempt(ctx context.Context, task *models.De CreatedAt: destination.CreatedAt, DisabledAt: destination.DisabledAt, }, - Timestamp: delivery.Time, + Timestamp: attemptResult.Time, } - if !attempt.Success && err != nil { + if !alertAttempt.Success && err != nil { // Extract attempt data if available - var delErr *DeliveryError - if errors.As(err, &delErr) { + var attErr *AttemptError + if errors.As(err, &attErr) { var pubErr *destregistry.ErrDestinationPublishAttempt - if errors.As(delErr.err, &pubErr) { - attempt.DeliveryResponse = pubErr.Data + if errors.As(attErr.err, &pubErr) { + alertAttempt.DeliveryResponse = pubErr.Data } else { - attempt.DeliveryResponse = map[string]interface{}{ - "error": delErr.err.Error(), + alertAttempt.DeliveryResponse = 
map[string]interface{}{ + "error": attErr.err.Error(), } } } else { - attempt.DeliveryResponse = map[string]interface{}{ + alertAttempt.DeliveryResponse = map[string]interface{}{ "error": "unexpected", "message": err.Error(), } } } - if monitorErr := h.alertMonitor.HandleAttempt(ctx, attempt); monitorErr != nil { + if monitorErr := h.alertMonitor.HandleAttempt(ctx, alertAttempt); monitorErr != nil { h.logger.Ctx(ctx).Error("failed to handle alert attempt", zap.Error(monitorErr), - zap.String("delivery_id", delivery.ID), + zap.String("attempt_id", attemptResult.ID), zap.String("event_id", task.Event.ID), zap.String("tenant_id", destination.TenantID), zap.String("destination_id", destination.ID), @@ -335,7 +335,7 @@ func (h *messageHandler) handleAlertAttempt(ctx context.Context, task *models.De } h.logger.Ctx(ctx).Info("alert attempt handled", - zap.String("delivery_id", delivery.ID), + zap.String("attempt_id", attemptResult.ID), zap.String("event_id", task.Event.ID), zap.String("tenant_id", destination.TenantID), zap.String("destination_id", destination.ID), @@ -372,18 +372,18 @@ func (h *messageHandler) shouldNackError(err error) bool { } // Handle delivery errors - var delErr *DeliveryError - if errors.As(err, &delErr) { - return h.shouldNackDeliveryError(delErr.err) + var attErr *AttemptError + if errors.As(err, &attErr) { + return h.shouldNackDeliveryError(attErr.err) } // Handle post-delivery errors var postErr *PostDeliveryError if errors.As(err, &postErr) { // Check if this wraps a delivery error - var delErr *DeliveryError - if errors.As(postErr.err, &delErr) { - return h.shouldNackDeliveryError(delErr.err) + var attErr2 *AttemptError + if errors.As(postErr.err, &attErr2) { + return h.shouldNackDeliveryError(attErr2.err) } return true // Nack other post-delivery errors } diff --git a/internal/destregistry/registry.go b/internal/destregistry/registry.go index 8b670da0..b11763f4 100644 --- a/internal/destregistry/registry.go +++ 
b/internal/destregistry/registry.go @@ -25,7 +25,7 @@ type PreprocessDestinationOpts struct { type Registry interface { // Operations ValidateDestination(ctx context.Context, destination *models.Destination) error - PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Delivery, error) + PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Attempt, error) DisplayDestination(destination *models.Destination) (*DestinationDisplay, error) PreprocessDestination(newDestination *models.Destination, originalDestination *models.Destination, opts *PreprocessDestinationOpts) error @@ -135,14 +135,14 @@ func (r *registry) ValidateDestination(ctx context.Context, destination *models. return nil } -func (r *registry) PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Delivery, error) { +func (r *registry) PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Attempt, error) { publisher, err := r.ResolvePublisher(ctx, destination) if err != nil { return nil, err } - delivery := &models.Delivery{ - ID: idgen.Delivery(), + attempt := &models.Attempt{ + ID: idgen.Attempt(), DestinationID: destination.ID, EventID: event.ID, } @@ -153,7 +153,7 @@ func (r *registry) PublishEvent(ctx context.Context, destination *models.Destina deliveryData, err := publisher.Publish(timeoutCtx, event) if err != nil { - // Context canceled = system shutdown, return nil delivery to trigger nack → requeue. + // Context canceled = system shutdown, return nil attempt to trigger nack → requeue. // This is handled centrally so individual publishers don't need to check for it. 
// See: https://github.com/hookdeck/outpost/issues/571 if errors.Is(err, context.Canceled) { @@ -161,18 +161,18 @@ func (r *registry) PublishEvent(ctx context.Context, destination *models.Destina } if deliveryData != nil { - delivery.Time = time.Now() - delivery.Status = deliveryData.Status - delivery.Code = deliveryData.Code - delivery.ResponseData = deliveryData.Response + attempt.Time = time.Now() + attempt.Status = deliveryData.Status + attempt.Code = deliveryData.Code + attempt.ResponseData = deliveryData.Response } else { - delivery = nil + attempt = nil } var publishErr *ErrDestinationPublishAttempt if errors.As(err, &publishErr) { // Check if the wrapped error is a timeout if errors.Is(publishErr.Err, context.DeadlineExceeded) { - return delivery, &ErrDestinationPublishAttempt{ + return attempt, &ErrDestinationPublishAttempt{ Err: publishErr.Err, Provider: destination.Type, Data: map[string]interface{}{ @@ -181,11 +181,11 @@ func (r *registry) PublishEvent(ctx context.Context, destination *models.Destina }, } } - return delivery, publishErr + return attempt, publishErr } if errors.Is(err, context.DeadlineExceeded) { - return delivery, &ErrDestinationPublishAttempt{ + return attempt, &ErrDestinationPublishAttempt{ Err: err, Provider: destination.Type, Data: map[string]interface{}{ @@ -195,7 +195,7 @@ func (r *registry) PublishEvent(ctx context.Context, destination *models.Destina } } - return delivery, &ErrDestinationPublishAttempt{ + return attempt, &ErrDestinationPublishAttempt{ Err: err, Provider: destination.Type, Data: map[string]interface{}{ @@ -217,12 +217,12 @@ func (r *registry) PublishEvent(ctx context.Context, destination *models.Destina } } - delivery.Time = time.Now() - delivery.Status = deliveryData.Status - delivery.Code = deliveryData.Code - delivery.ResponseData = deliveryData.Response + attempt.Time = time.Now() + attempt.Status = deliveryData.Status + attempt.Code = deliveryData.Code + attempt.ResponseData = deliveryData.Response - return 
delivery, nil + return attempt, nil } func (r *registry) RegisterProvider(destinationType string, provider Provider) error { diff --git a/internal/emetrics/emetrics.go b/internal/emetrics/emetrics.go index 26c0e3cb..5b2f3223 100644 --- a/internal/emetrics/emetrics.go +++ b/internal/emetrics/emetrics.go @@ -102,9 +102,9 @@ func (e *emetricsImpl) DeliveryLatency(ctx context.Context, latency time.Duratio func (e *emetricsImpl) EventDelivered(ctx context.Context, ok bool, destinationType string) { var status string if ok { - status = models.DeliveryStatusSuccess + status = models.AttemptStatusSuccess } else { - status = models.DeliveryStatusFailed + status = models.AttemptStatusFailed } e.eventDeliveredCounter.Add(ctx, 1, metric.WithAttributes( attribute.String("type", destinationType), diff --git a/internal/idgen/idgen.go b/internal/idgen/idgen.go index 1fff29d0..ef72ed88 100644 --- a/internal/idgen/idgen.go +++ b/internal/idgen/idgen.go @@ -17,7 +17,7 @@ func init() { generator: &uuidv4Generator{}, eventPrefix: "", destinationPrefix: "", - deliveryPrefix: "", + attemptPrefix: "", } } @@ -29,7 +29,7 @@ type IDGenerator struct { generator idGenerator eventPrefix string destinationPrefix string - deliveryPrefix string + attemptPrefix string } func (g *IDGenerator) Event() string { @@ -40,8 +40,8 @@ func (g *IDGenerator) Destination() string { return g.generate(g.destinationPrefix) } -func (g *IDGenerator) Delivery() string { - return g.generate(g.deliveryPrefix) +func (g *IDGenerator) Attempt() string { + return g.generate(g.attemptPrefix) } func (g *IDGenerator) Installation() string { @@ -110,7 +110,7 @@ type IDGenConfig struct { Type string EventPrefix string DestinationPrefix string - DeliveryPrefix string + AttemptPrefix string } func Configure(cfg IDGenConfig) error { @@ -123,7 +123,7 @@ func Configure(cfg IDGenConfig) error { generator: gen, eventPrefix: cfg.EventPrefix, destinationPrefix: cfg.DestinationPrefix, - deliveryPrefix: cfg.DeliveryPrefix, + 
attemptPrefix: cfg.AttemptPrefix, } return nil @@ -137,8 +137,8 @@ func Destination() string { return globalGenerator.Destination() } -func Delivery() string { - return globalGenerator.Delivery() +func Attempt() string { + return globalGenerator.Attempt() } func Installation() string { diff --git a/internal/logmq/batchprocessor.go b/internal/logmq/batchprocessor.go index 472931a8..cfefecaf 100644 --- a/internal/logmq/batchprocessor.go +++ b/internal/logmq/batchprocessor.go @@ -13,7 +13,7 @@ import ( ) // ErrInvalidLogEntry is returned when a LogEntry is missing required fields. -var ErrInvalidLogEntry = errors.New("invalid log entry: both event and delivery are required") +var ErrInvalidLogEntry = errors.New("invalid log entry: both event and attempt are required") // LogStore defines the interface for persisting log entries. // This is a consumer-defined interface containing only what logmq needs. @@ -87,12 +87,12 @@ func (bp *BatchProcessor) processBatch(_ string, msgs []*mqs.Message) { continue } - // Validate that both Event and Delivery are present. + // Validate that both Event and Attempt are present. // The logstore requires both for data consistency. 
- if entry.Event == nil || entry.Delivery == nil { - logger.Error("invalid log entry: both event and delivery are required", + if entry.Event == nil || entry.Attempt == nil { + logger.Error("invalid log entry: both event and attempt are required", zap.Bool("has_event", entry.Event != nil), - zap.Bool("has_delivery", entry.Delivery != nil), + zap.Bool("has_attempt", entry.Attempt != nil), zap.String("message_id", msg.LoggableID)) msg.Nack() continue diff --git a/internal/logstore/chlogstore/chlogstore.go b/internal/logstore/chlogstore/chlogstore.go index 0f42117f..eadbbd4f 100644 --- a/internal/logstore/chlogstore/chlogstore.go +++ b/internal/logstore/chlogstore/chlogstore.go @@ -16,15 +16,15 @@ import ( ) const ( - cursorResourceEvent = "evt" - cursorResourceDelivery = "dlv" - cursorVersion = 1 + cursorResourceEvent = "evt" + cursorResourceAttempt = "att" + cursorVersion = 1 ) type logStoreImpl struct { - chDB clickhouse.DB - eventsTable string - deliveriesTable string + chDB clickhouse.DB + eventsTable string + attemptsTable string } var _ driver.LogStore = (*logStoreImpl)(nil) @@ -35,9 +35,9 @@ func NewLogStore(chDB clickhouse.DB, deploymentID string) driver.LogStore { prefix = deploymentID + "_" } return &logStoreImpl{ - chDB: chDB, - eventsTable: prefix + "events", - deliveriesTable: prefix + "deliveries", + chDB: chDB, + eventsTable: prefix + "events", + attemptsTable: prefix + "attempts", } } @@ -255,13 +255,13 @@ func buildEventCursorCondition(compare, position string) (string, []any) { return condition, []any{eventTimeMs, eventTimeMs, eventID} } -// deliveryRecordWithPosition wraps a delivery record with its cursor position data. -type deliveryRecordWithPosition struct { - *driver.DeliveryRecord - deliveryTime time.Time +// attemptRecordWithPosition wraps an attempt record with its cursor position data. 
+type attemptRecordWithPosition struct { + *driver.AttemptRecord + attemptTime time.Time } -func (s *logStoreImpl) ListDelivery(ctx context.Context, req driver.ListDeliveryRequest) (driver.ListDeliveryResponse, error) { +func (s *logStoreImpl) ListAttempt(ctx context.Context, req driver.ListAttemptRequest) (driver.ListAttemptResponse, error) { sortOrder := req.SortOrder if sortOrder != "asc" && sortOrder != "desc" { sortOrder = "desc" @@ -272,48 +272,48 @@ func (s *logStoreImpl) ListDelivery(ctx context.Context, req driver.ListDelivery limit = 100 } - res, err := pagination.Run(ctx, pagination.Config[deliveryRecordWithPosition]{ + res, err := pagination.Run(ctx, pagination.Config[attemptRecordWithPosition]{ Limit: limit, Order: sortOrder, Next: req.Next, Prev: req.Prev, - Fetch: func(ctx context.Context, q pagination.QueryInput) ([]deliveryRecordWithPosition, error) { - query, args := buildDeliveryQuery(s.deliveriesTable, req, q) + Fetch: func(ctx context.Context, q pagination.QueryInput) ([]attemptRecordWithPosition, error) { + query, args := buildAttemptQuery(s.attemptsTable, req, q) rows, err := s.chDB.Query(ctx, query, args...) 
if err != nil { return nil, fmt.Errorf("query failed: %w", err) } defer rows.Close() - return scanDeliveryRecords(rows) + return scanAttemptRecords(rows) }, - Cursor: pagination.Cursor[deliveryRecordWithPosition]{ - Encode: func(dr deliveryRecordWithPosition) string { - position := fmt.Sprintf("%d::%s", dr.deliveryTime.UnixMilli(), dr.Delivery.ID) - return cursor.Encode(cursorResourceDelivery, cursorVersion, position) + Cursor: pagination.Cursor[attemptRecordWithPosition]{ + Encode: func(ar attemptRecordWithPosition) string { + position := fmt.Sprintf("%d::%s", ar.attemptTime.UnixMilli(), ar.Attempt.ID) + return cursor.Encode(cursorResourceAttempt, cursorVersion, position) }, Decode: func(c string) (string, error) { - return cursor.Decode(c, cursorResourceDelivery, cursorVersion) + return cursor.Decode(c, cursorResourceAttempt, cursorVersion) }, }, }) if err != nil { - return driver.ListDeliveryResponse{}, err + return driver.ListAttemptResponse{}, err } - // Extract delivery records from results - data := make([]*driver.DeliveryRecord, len(res.Items)) + // Extract attempt records from results + data := make([]*driver.AttemptRecord, len(res.Items)) for i, item := range res.Items { - data[i] = item.DeliveryRecord + data[i] = item.AttemptRecord } - return driver.ListDeliveryResponse{ + return driver.ListAttemptResponse{ Data: data, Next: res.Next, Prev: res.Prev, }, nil } -func buildDeliveryQuery(table string, req driver.ListDeliveryRequest, q pagination.QueryInput) (string, []any) { +func buildAttemptQuery(table string, req driver.ListAttemptRequest, q pagination.QueryInput) (string, []any) { var conditions []string var args []any @@ -343,24 +343,24 @@ func buildDeliveryQuery(table string, req driver.ListDeliveryRequest, q paginati } if req.TimeFilter.GTE != nil { - conditions = append(conditions, "delivery_time >= ?") + conditions = append(conditions, "attempt_time >= ?") args = append(args, *req.TimeFilter.GTE) } if req.TimeFilter.LTE != nil { - conditions = 
append(conditions, "delivery_time <= ?") + conditions = append(conditions, "attempt_time <= ?") args = append(args, *req.TimeFilter.LTE) } if req.TimeFilter.GT != nil { - conditions = append(conditions, "delivery_time > ?") + conditions = append(conditions, "attempt_time > ?") args = append(args, *req.TimeFilter.GT) } if req.TimeFilter.LT != nil { - conditions = append(conditions, "delivery_time < ?") + conditions = append(conditions, "attempt_time < ?") args = append(args, *req.TimeFilter.LT) } if q.CursorPos != "" { - cursorCond, cursorArgs := buildDeliveryCursorCondition(q.Compare, q.CursorPos) + cursorCond, cursorArgs := buildAttemptCursorCondition(q.Compare, q.CursorPos) conditions = append(conditions, cursorCond) args = append(args, cursorArgs...) } @@ -370,7 +370,7 @@ func buildDeliveryQuery(table string, req driver.ListDeliveryRequest, q paginati whereClause = "1=1" } - orderByClause := fmt.Sprintf("ORDER BY delivery_time %s, delivery_id %s", + orderByClause := fmt.Sprintf("ORDER BY attempt_time %s, attempt_id %s", strings.ToUpper(q.SortDir), strings.ToUpper(q.SortDir)) query := fmt.Sprintf(` @@ -383,13 +383,13 @@ func buildDeliveryQuery(table string, req driver.ListDeliveryRequest, q paginati event_time, metadata, data, - delivery_id, + attempt_id, status, - delivery_time, + attempt_time, code, response_data, manual, - attempt + attempt_number FROM %s WHERE %s %s @@ -399,8 +399,8 @@ func buildDeliveryQuery(table string, req driver.ListDeliveryRequest, q paginati return query, args } -func scanDeliveryRecords(rows clickhouse.Rows) ([]deliveryRecordWithPosition, error) { - var results []deliveryRecordWithPosition +func scanAttemptRecords(rows clickhouse.Rows) ([]attemptRecordWithPosition, error) { + var results []attemptRecordWithPosition for rows.Next() { var ( eventID string @@ -411,13 +411,13 @@ func scanDeliveryRecords(rows clickhouse.Rows) ([]deliveryRecordWithPosition, er eventTime time.Time metadataStr string dataStr string - deliveryID string + 
attemptID string status string - deliveryTime time.Time + attemptTime time.Time code string responseDataStr string manual bool - attempt uint32 + attemptNumber uint32 ) err := rows.Scan( @@ -429,13 +429,13 @@ func scanDeliveryRecords(rows clickhouse.Rows) ([]deliveryRecordWithPosition, er &eventTime, &metadataStr, &dataStr, - &deliveryID, + &attemptID, &status, - &deliveryTime, + &attemptTime, &code, &responseDataStr, &manual, - &attempt, + &attemptNumber, ) if err != nil { return nil, fmt.Errorf("scan failed: %w", err) @@ -461,17 +461,17 @@ func scanDeliveryRecords(rows clickhouse.Rows) ([]deliveryRecordWithPosition, er } } - results = append(results, deliveryRecordWithPosition{ - DeliveryRecord: &driver.DeliveryRecord{ - Delivery: &models.Delivery{ - ID: deliveryID, + results = append(results, attemptRecordWithPosition{ + AttemptRecord: &driver.AttemptRecord{ + Attempt: &models.Attempt{ + ID: attemptID, TenantID: tenantID, EventID: eventID, DestinationID: destinationID, - Attempt: int(attempt), + AttemptNumber: int(attemptNumber), Manual: manual, Status: status, - Time: deliveryTime, + Time: attemptTime, Code: code, ResponseData: responseData, }, @@ -486,7 +486,7 @@ func scanDeliveryRecords(rows clickhouse.Rows) ([]deliveryRecordWithPosition, er Metadata: metadata, }, }, - deliveryTime: deliveryTime, + attemptTime: attemptTime, }) } @@ -528,7 +528,7 @@ func (s *logStoreImpl) RetrieveEvent(ctx context.Context, req driver.RetrieveEve data FROM %s WHERE %s - LIMIT 1`, s.deliveriesTable, whereClause) + LIMIT 1`, s.attemptsTable, whereClause) rows, err := s.chDB.Query(ctx, query, args...) 
if err != nil { @@ -570,7 +570,7 @@ func (s *logStoreImpl) RetrieveEvent(ctx context.Context, req driver.RetrieveEve return event, nil } -func (s *logStoreImpl) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeliveryRequest) (*driver.DeliveryRecord, error) { +func (s *logStoreImpl) RetrieveAttempt(ctx context.Context, req driver.RetrieveAttemptRequest) (*driver.AttemptRecord, error) { var conditions []string var args []any @@ -579,8 +579,8 @@ func (s *logStoreImpl) RetrieveDelivery(ctx context.Context, req driver.Retrieve args = append(args, req.TenantID) } - conditions = append(conditions, "delivery_id = ?") - args = append(args, req.DeliveryID) + conditions = append(conditions, "attempt_id = ?") + args = append(args, req.AttemptID) whereClause := strings.Join(conditions, " AND ") @@ -594,16 +594,16 @@ func (s *logStoreImpl) RetrieveDelivery(ctx context.Context, req driver.Retrieve event_time, metadata, data, - delivery_id, + attempt_id, status, - delivery_time, + attempt_time, code, response_data, manual, - attempt + attempt_number FROM %s WHERE %s - LIMIT 1`, s.deliveriesTable, whereClause) + LIMIT 1`, s.attemptsTable, whereClause) rows, err := s.chDB.Query(ctx, query, args...) 
if err != nil { @@ -624,13 +624,13 @@ func (s *logStoreImpl) RetrieveDelivery(ctx context.Context, req driver.Retrieve eventTime time.Time metadataStr string dataStr string - deliveryID string + attemptID string status string - deliveryTime time.Time + attemptTime time.Time code string responseDataStr string manual bool - attempt uint32 + attemptNumber uint32 ) err = rows.Scan( @@ -642,13 +642,13 @@ func (s *logStoreImpl) RetrieveDelivery(ctx context.Context, req driver.Retrieve &eventTime, &metadataStr, &dataStr, - &deliveryID, + &attemptID, &status, - &deliveryTime, + &attemptTime, &code, &responseDataStr, &manual, - &attempt, + &attemptNumber, ) if err != nil { return nil, fmt.Errorf("scan failed: %w", err) @@ -674,16 +674,16 @@ func (s *logStoreImpl) RetrieveDelivery(ctx context.Context, req driver.Retrieve } } - return &driver.DeliveryRecord{ - Delivery: &models.Delivery{ - ID: deliveryID, + return &driver.AttemptRecord{ + Attempt: &models.Attempt{ + ID: attemptID, TenantID: tenantID, EventID: eventID, DestinationID: destinationID, - Attempt: int(attempt), + AttemptNumber: int(attemptNumber), Manual: manual, Status: status, - Time: deliveryTime, + Time: attemptTime, Code: code, ResponseData: responseData, }, @@ -750,20 +750,20 @@ func (s *logStoreImpl) InsertMany(ctx context.Context, entries []*models.LogEntr } } - // Insert deliveries with their paired event data - deliveryBatch, err := s.chDB.PrepareBatch(ctx, + // Insert attempts with their paired event data + attemptBatch, err := s.chDB.PrepareBatch(ctx, fmt.Sprintf(`INSERT INTO %s ( event_id, tenant_id, destination_id, topic, eligible_for_retry, event_time, metadata, data, - delivery_id, status, delivery_time, code, response_data, manual, attempt - )`, s.deliveriesTable), + attempt_id, status, attempt_time, code, response_data, manual, attempt_number + )`, s.attemptsTable), ) if err != nil { - return fmt.Errorf("prepare deliveries batch failed: %w", err) + return fmt.Errorf("prepare attempts batch failed: 
%w", err) } for _, entry := range entries { event := entry.Event - d := entry.Delivery + a := entry.Attempt metadataJSON, err := json.Marshal(event.Metadata) if err != nil { @@ -773,34 +773,34 @@ func (s *logStoreImpl) InsertMany(ctx context.Context, entries []*models.LogEntr if err != nil { return fmt.Errorf("failed to marshal data: %w", err) } - responseDataJSON, err := json.Marshal(d.ResponseData) + responseDataJSON, err := json.Marshal(a.ResponseData) if err != nil { return fmt.Errorf("failed to marshal response_data: %w", err) } - if err := deliveryBatch.Append( - d.EventID, + if err := attemptBatch.Append( + a.EventID, event.TenantID, - d.DestinationID, + a.DestinationID, event.Topic, event.EligibleForRetry, event.Time, string(metadataJSON), string(dataJSON), - d.ID, - d.Status, - d.Time, - d.Code, + a.ID, + a.Status, + a.Time, + a.Code, string(responseDataJSON), - d.Manual, - uint32(d.Attempt), + a.Manual, + uint32(a.AttemptNumber), ); err != nil { - return fmt.Errorf("deliveries batch append failed: %w", err) + return fmt.Errorf("attempts batch append failed: %w", err) } } - if err := deliveryBatch.Send(); err != nil { - return fmt.Errorf("deliveries batch send failed: %w", err) + if err := attemptBatch.Send(); err != nil { + return fmt.Errorf("attempts batch send failed: %w", err) } return nil @@ -810,21 +810,21 @@ func parseTimestampMs(s string) (int64, error) { return strconv.ParseInt(s, 10, 64) } -func buildDeliveryCursorCondition(compare, position string) (string, []any) { +func buildAttemptCursorCondition(compare, position string) (string, []any) { parts := strings.SplitN(position, "::", 2) if len(parts) != 2 { return "1=1", nil } - deliveryTimeMs, err := parseTimestampMs(parts[0]) + attemptTimeMs, err := parseTimestampMs(parts[0]) if err != nil { return "1=1", nil // invalid timestamp, return always true } - deliveryID := parts[1] + attemptID := parts[1] condition := fmt.Sprintf(`( - delivery_time %s fromUnixTimestamp64Milli(?) 
- OR (delivery_time = fromUnixTimestamp64Milli(?) AND delivery_id %s ?) + attempt_time %s fromUnixTimestamp64Milli(?) + OR (attempt_time = fromUnixTimestamp64Milli(?) AND attempt_id %s ?) )`, compare, compare) - return condition, []any{deliveryTimeMs, deliveryTimeMs, deliveryID} + return condition, []any{attemptTimeMs, attemptTimeMs, attemptID} } diff --git a/internal/logstore/driver/driver.go b/internal/logstore/driver/driver.go index b2ae46c4..ab86fa16 100644 --- a/internal/logstore/driver/driver.go +++ b/internal/logstore/driver/driver.go @@ -18,9 +18,9 @@ type TimeFilter struct { type LogStore interface { ListEvent(context.Context, ListEventRequest) (ListEventResponse, error) - ListDelivery(context.Context, ListDeliveryRequest) (ListDeliveryResponse, error) + ListAttempt(context.Context, ListAttemptRequest) (ListAttemptResponse, error) RetrieveEvent(ctx context.Context, request RetrieveEventRequest) (*models.Event, error) - RetrieveDelivery(ctx context.Context, request RetrieveDeliveryRequest) (*DeliveryRecord, error) + RetrieveAttempt(ctx context.Context, request RetrieveAttemptRequest) (*AttemptRecord, error) InsertMany(context.Context, []*models.LogEntry) error } @@ -41,11 +41,11 @@ type ListEventResponse struct { Prev string } -type ListDeliveryRequest struct { +type ListAttemptRequest struct { Next string Prev string Limit int - TimeFilter TimeFilter // optional - filter deliveries by time + TimeFilter TimeFilter // optional - filter attempts by time TenantID string // optional - filter by tenant (if empty, returns all tenants) EventID string // optional - filter for specific event DestinationIDs []string // optional @@ -54,8 +54,8 @@ type ListDeliveryRequest struct { SortOrder string // optional: "asc", "desc" (default: "desc") } -type ListDeliveryResponse struct { - Data []*DeliveryRecord +type ListAttemptResponse struct { + Data []*AttemptRecord Next string Prev string } @@ -66,13 +66,13 @@ type RetrieveEventRequest struct { DestinationID string // 
optional - if provided, scopes to that destination } -type RetrieveDeliveryRequest struct { - TenantID string // optional - filter by tenant (if empty, searches all tenants) - DeliveryID string // required +type RetrieveAttemptRequest struct { + TenantID string // optional - filter by tenant (if empty, searches all tenants) + AttemptID string // required } -// DeliveryRecord represents a delivery query result with optional Event population. -type DeliveryRecord struct { - Delivery *models.Delivery - Event *models.Event // optionally populated for query results +// AttemptRecord represents an attempt query result with optional Event population. +type AttemptRecord struct { + Attempt *models.Attempt + Event *models.Event // optionally populated for query results } diff --git a/internal/logstore/drivertest/crud.go b/internal/logstore/drivertest/crud.go index 09c3e063..34b50066 100644 --- a/internal/logstore/drivertest/crud.go +++ b/internal/logstore/drivertest/crud.go @@ -33,10 +33,10 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { startTime := baseTime.Add(-48 * time.Hour) // We'll populate these as we insert - var allDeliveries []*models.Delivery + var allDeliveries []*models.Attempt destinationEvents := map[string][]*models.Event{} topicEvents := map[string][]*models.Event{} - statusDeliveries := map[string][]*models.Delivery{} + statusDeliveries := map[string][]*models.Attempt{} t.Run("insert and verify", func(t *testing.T) { t.Run("single delivery", func(t *testing.T) { @@ -49,16 +49,16 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { testutil.EventFactory.WithTopic(topic), testutil.EventFactory.WithTime(baseTime.Add(-30*time.Minute)), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID("single_del"), - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(event.ID), - testutil.DeliveryFactory.WithDestinationID(destID), - testutil.DeliveryFactory.WithStatus("success"), - 
testutil.DeliveryFactory.WithTime(baseTime.Add(-30*time.Minute)), + delivery := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID("single_del"), + testutil.AttemptFactory.WithTenantID(tenantID), + testutil.AttemptFactory.WithEventID(event.ID), + testutil.AttemptFactory.WithDestinationID(destID), + testutil.AttemptFactory.WithStatus("success"), + testutil.AttemptFactory.WithTime(baseTime.Add(-30*time.Minute)), ) - err := logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Delivery: delivery}}) + err := logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Attempt: delivery}}) require.NoError(t, err) require.NoError(t, h.FlushWrites(ctx)) @@ -68,7 +68,7 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { statusDeliveries["success"] = append(statusDeliveries["success"], delivery) // Verify via List - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, EventID: event.ID, Limit: 10, @@ -77,7 +77,7 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { require.NoError(t, err) require.Len(t, response.Data, 1) assert.Equal(t, event.ID, response.Data[0].Event.ID) - assert.Equal(t, "success", response.Data[0].Delivery.Status) + assert.Equal(t, "success", response.Data[0].Attempt.Status) // Verify via Retrieve retrieved, err := logStore.RetrieveEvent(ctx, driver.RetrieveEventRequest{ @@ -109,16 +109,16 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { testutil.EventFactory.WithTopic(topic), testutil.EventFactory.WithTime(eventTime), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(fmt.Sprintf("batch_del_%02d", i)), - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(event.ID), - testutil.DeliveryFactory.WithDestinationID(destID), - testutil.DeliveryFactory.WithStatus(status), - testutil.DeliveryFactory.WithTime(eventTime.Add(time.Millisecond)), 
+ delivery := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(fmt.Sprintf("batch_del_%02d", i)), + testutil.AttemptFactory.WithTenantID(tenantID), + testutil.AttemptFactory.WithEventID(event.ID), + testutil.AttemptFactory.WithDestinationID(destID), + testutil.AttemptFactory.WithStatus(status), + testutil.AttemptFactory.WithTime(eventTime.Add(time.Millisecond)), ) - entries = append(entries, &models.LogEntry{Event: event, Delivery: delivery}) + entries = append(entries, &models.LogEntry{Event: event, Attempt: delivery}) allDeliveries = append(allDeliveries, delivery) destinationEvents[destID] = append(destinationEvents[destID], event) topicEvents[topic] = append(topicEvents[topic], event) @@ -130,7 +130,7 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { require.NoError(t, h.FlushWrites(ctx)) // Verify all inserted - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 100, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -203,9 +203,9 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { } }) - t.Run("ListDelivery by destination", func(t *testing.T) { + t.Run("ListAttempt by destination", func(t *testing.T) { destID := destinationIDs[0] - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, DestinationIDs: []string{destID}, Limit: 100, @@ -213,12 +213,12 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { }) require.NoError(t, err) for _, dr := range response.Data { - assert.Equal(t, destID, dr.Delivery.DestinationID) + assert.Equal(t, destID, dr.Attempt.DestinationID) } }) - t.Run("ListDelivery by status", func(t *testing.T) { - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + t.Run("ListAttempt by status", func(t *testing.T) { + response, err := logStore.ListAttempt(ctx, 
driver.ListAttemptRequest{ TenantID: tenantID, Status: "success", Limit: 100, @@ -226,13 +226,13 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { }) require.NoError(t, err) for _, dr := range response.Data { - assert.Equal(t, "success", dr.Delivery.Status) + assert.Equal(t, "success", dr.Attempt.Status) } }) - t.Run("ListDelivery by topic", func(t *testing.T) { + t.Run("ListAttempt by topic", func(t *testing.T) { topic := testutil.TestTopics[0] - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Topics: []string{topic}, Limit: 100, @@ -244,9 +244,9 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { } }) - t.Run("ListDelivery by event ID", func(t *testing.T) { + t.Run("ListAttempt by event ID", func(t *testing.T) { eventID := "batch_evt_00" - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, EventID: eventID, Limit: 100, @@ -303,29 +303,29 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { assert.Nil(t, retrieved) }) - t.Run("RetrieveDelivery existing", func(t *testing.T) { - retrieved, err := logStore.RetrieveDelivery(ctx, driver.RetrieveDeliveryRequest{ - TenantID: tenantID, - DeliveryID: knownDeliveryID, + t.Run("RetrieveAttempt existing", func(t *testing.T) { + retrieved, err := logStore.RetrieveAttempt(ctx, driver.RetrieveAttemptRequest{ + TenantID: tenantID, + AttemptID: knownDeliveryID, }) require.NoError(t, err) require.NotNil(t, retrieved) - assert.Equal(t, knownDeliveryID, retrieved.Delivery.ID) + assert.Equal(t, knownDeliveryID, retrieved.Attempt.ID) }) - t.Run("RetrieveDelivery non-existent returns nil", func(t *testing.T) { - retrieved, err := logStore.RetrieveDelivery(ctx, driver.RetrieveDeliveryRequest{ - TenantID: tenantID, - DeliveryID: "non-existent-delivery", + t.Run("RetrieveAttempt non-existent 
returns nil", func(t *testing.T) { + retrieved, err := logStore.RetrieveAttempt(ctx, driver.RetrieveAttemptRequest{ + TenantID: tenantID, + AttemptID: "non-existent-delivery", }) require.NoError(t, err) assert.Nil(t, retrieved) }) - t.Run("RetrieveDelivery wrong tenant returns nil", func(t *testing.T) { - retrieved, err := logStore.RetrieveDelivery(ctx, driver.RetrieveDeliveryRequest{ - TenantID: "wrong-tenant", - DeliveryID: knownDeliveryID, + t.Run("RetrieveAttempt wrong tenant returns nil", func(t *testing.T) { + retrieved, err := logStore.RetrieveAttempt(ctx, driver.RetrieveAttemptRequest{ + TenantID: "wrong-tenant", + AttemptID: knownDeliveryID, }) require.NoError(t, err) assert.Nil(t, retrieved) diff --git a/internal/logstore/logstore.go b/internal/logstore/logstore.go index e88f6e22..b0557792 100644 --- a/internal/logstore/logstore.go +++ b/internal/logstore/logstore.go @@ -16,18 +16,18 @@ import ( type TimeFilter = driver.TimeFilter type ListEventRequest = driver.ListEventRequest type ListEventResponse = driver.ListEventResponse -type ListDeliveryRequest = driver.ListDeliveryRequest -type ListDeliveryResponse = driver.ListDeliveryResponse +type ListAttemptRequest = driver.ListAttemptRequest +type ListAttemptResponse = driver.ListAttemptResponse type RetrieveEventRequest = driver.RetrieveEventRequest -type RetrieveDeliveryRequest = driver.RetrieveDeliveryRequest -type DeliveryRecord = driver.DeliveryRecord +type RetrieveAttemptRequest = driver.RetrieveAttemptRequest +type AttemptRecord = driver.AttemptRecord type LogEntry = models.LogEntry type LogStore interface { ListEvent(context.Context, ListEventRequest) (ListEventResponse, error) - ListDelivery(context.Context, ListDeliveryRequest) (ListDeliveryResponse, error) + ListAttempt(context.Context, ListAttemptRequest) (ListAttemptResponse, error) RetrieveEvent(ctx context.Context, request RetrieveEventRequest) (*models.Event, error) - RetrieveDelivery(ctx context.Context, request RetrieveDeliveryRequest) 
(*DeliveryRecord, error) + RetrieveAttempt(ctx context.Context, request RetrieveAttemptRequest) (*AttemptRecord, error) InsertMany(context.Context, []*models.LogEntry) error } diff --git a/internal/logstore/memlogstore/memlogstore.go b/internal/logstore/memlogstore/memlogstore.go index 99d1a5d0..1f0fb0aa 100644 --- a/internal/logstore/memlogstore/memlogstore.go +++ b/internal/logstore/memlogstore/memlogstore.go @@ -14,25 +14,25 @@ import ( ) const ( - cursorResourceEvent = "evt" - cursorResourceDelivery = "dlv" - cursorVersion = 1 + cursorResourceEvent = "evt" + cursorResourceAttempt = "att" + cursorVersion = 1 ) // memLogStore is an in-memory implementation of driver.LogStore. // It serves as a reference implementation and is useful for testing. type memLogStore struct { - mu sync.RWMutex - events map[string]*models.Event // keyed by event ID - deliveries []*models.Delivery // list of all deliveries + mu sync.RWMutex + events map[string]*models.Event // keyed by event ID + attempts []*models.Attempt // list of all attempts } var _ driver.LogStore = (*memLogStore)(nil) func NewLogStore() driver.LogStore { return &memLogStore{ - events: make(map[string]*models.Event), - deliveries: make([]*models.Delivery, 0), + events: make(map[string]*models.Event), + attempts: make([]*models.Attempt, 0), } } @@ -194,27 +194,27 @@ func (s *memLogStore) InsertMany(ctx context.Context, entries []*models.LogEntry // Insert event (dedupe by ID) s.events[entry.Event.ID] = copyEvent(entry.Event) - // Insert delivery (idempotent upsert: match on event_id + delivery_id) - d := entry.Delivery - copied := copyDelivery(d) + // Insert attempt (idempotent upsert: match on event_id + attempt_id) + a := entry.Attempt + copied := copyAttempt(a) found := false - for i, existing := range s.deliveries { - if existing.EventID == d.EventID && existing.ID == d.ID { - s.deliveries[i] = copied + for i, existing := range s.attempts { + if existing.EventID == a.EventID && existing.ID == a.ID { + 
s.attempts[i] = copied found = true break } } if !found { - s.deliveries = append(s.deliveries, copied) + s.attempts = append(s.attempts, copied) } } return nil } -func (s *memLogStore) ListDelivery(ctx context.Context, req driver.ListDeliveryRequest) (driver.ListDeliveryResponse, error) { +func (s *memLogStore) ListAttempt(ctx context.Context, req driver.ListAttemptRequest) (driver.ListAttemptResponse, error) { s.mu.RLock() defer s.mu.RUnlock() @@ -228,43 +228,43 @@ func (s *memLogStore) ListDelivery(ctx context.Context, req driver.ListDeliveryR limit = 100 } - // Filter deliveries and build records with events - var allRecords []*driver.DeliveryRecord - for _, d := range s.deliveries { - event := s.events[d.EventID] + // Filter attempts and build records with events + var allRecords []*driver.AttemptRecord + for _, a := range s.attempts { + event := s.events[a.EventID] if event == nil { - continue // skip orphan deliveries + continue // skip orphan attempts } - if !s.matchesDeliveryFilter(d, event, req) { + if !s.matchesAttemptFilter(a, event, req) { continue } - allRecords = append(allRecords, &driver.DeliveryRecord{ - Delivery: copyDelivery(d), - Event: copyEvent(event), + allRecords = append(allRecords, &driver.AttemptRecord{ + Attempt: copyAttempt(a), + Event: copyEvent(event), }) } - // deliveryRecordWithTimeID pairs a delivery record with its sortable time ID. - type deliveryRecordWithTimeID struct { - record *driver.DeliveryRecord + // attemptRecordWithTimeID pairs an attempt record with its sortable time ID. 
+ type attemptRecordWithTimeID struct { + record *driver.AttemptRecord timeID string } - // Build list with time IDs (using delivery time) - recordsWithTimeID := make([]deliveryRecordWithTimeID, len(allRecords)) + // Build list with time IDs (using attempt time) + recordsWithTimeID := make([]attemptRecordWithTimeID, len(allRecords)) for i, r := range allRecords { - recordsWithTimeID[i] = deliveryRecordWithTimeID{ + recordsWithTimeID[i] = attemptRecordWithTimeID{ record: r, - timeID: makeTimeID(r.Delivery.Time, r.Delivery.ID), + timeID: makeTimeID(r.Attempt.Time, r.Attempt.ID), } } - res, err := pagination.Run(ctx, pagination.Config[deliveryRecordWithTimeID]{ + res, err := pagination.Run(ctx, pagination.Config[attemptRecordWithTimeID]{ Limit: limit, Order: sortOrder, Next: req.Next, Prev: req.Prev, - Fetch: func(_ context.Context, q pagination.QueryInput) ([]deliveryRecordWithTimeID, error) { + Fetch: func(_ context.Context, q pagination.QueryInput) ([]attemptRecordWithTimeID, error) { // Sort based on query direction isDesc := q.SortDir == "desc" sort.Slice(recordsWithTimeID, func(i, j int) bool { @@ -275,7 +275,7 @@ func (s *memLogStore) ListDelivery(ctx context.Context, req driver.ListDeliveryR }) // Filter using q.Compare (like SQL WHERE clause) - var filtered []deliveryRecordWithTimeID + var filtered []attemptRecordWithTimeID for _, r := range recordsWithTimeID { // If no cursor, include all items // If cursor exists, filter using Compare operator @@ -289,38 +289,38 @@ func (s *memLogStore) ListDelivery(ctx context.Context, req driver.ListDeliveryR filtered = filtered[:q.Limit] } - result := make([]deliveryRecordWithTimeID, len(filtered)) + result := make([]attemptRecordWithTimeID, len(filtered)) for i, r := range filtered { - result[i] = deliveryRecordWithTimeID{ - record: &driver.DeliveryRecord{ - Delivery: copyDelivery(r.record.Delivery), - Event: copyEvent(r.record.Event), + result[i] = attemptRecordWithTimeID{ + record: &driver.AttemptRecord{ + Attempt: 
copyAttempt(r.record.Attempt), + Event: copyEvent(r.record.Event), }, timeID: r.timeID, } } return result, nil }, - Cursor: pagination.Cursor[deliveryRecordWithTimeID]{ - Encode: func(r deliveryRecordWithTimeID) string { - return cursor.Encode(cursorResourceDelivery, cursorVersion, r.timeID) + Cursor: pagination.Cursor[attemptRecordWithTimeID]{ + Encode: func(r attemptRecordWithTimeID) string { + return cursor.Encode(cursorResourceAttempt, cursorVersion, r.timeID) }, Decode: func(c string) (string, error) { - return cursor.Decode(c, cursorResourceDelivery, cursorVersion) + return cursor.Decode(c, cursorResourceAttempt, cursorVersion) }, }, }) if err != nil { - return driver.ListDeliveryResponse{}, err + return driver.ListAttemptResponse{}, err } // Extract records from results - data := make([]*driver.DeliveryRecord, len(res.Items)) + data := make([]*driver.AttemptRecord, len(res.Items)) for i, item := range res.Items { data[i] = item.record } - return driver.ListDeliveryResponse{ + return driver.ListAttemptResponse{ Data: data, Next: res.Next, Prev: res.Prev, @@ -345,42 +345,42 @@ func (s *memLogStore) RetrieveEvent(ctx context.Context, req driver.RetrieveEven return copyEvent(event), nil } -func (s *memLogStore) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeliveryRequest) (*driver.DeliveryRecord, error) { +func (s *memLogStore) RetrieveAttempt(ctx context.Context, req driver.RetrieveAttemptRequest) (*driver.AttemptRecord, error) { s.mu.RLock() defer s.mu.RUnlock() - for _, d := range s.deliveries { - if d.ID == req.DeliveryID { - event := s.events[d.EventID] + for _, a := range s.attempts { + if a.ID == req.AttemptID { + event := s.events[a.EventID] if event == nil { continue } if req.TenantID != "" && event.TenantID != req.TenantID { continue } - return &driver.DeliveryRecord{ - Delivery: copyDelivery(d), - Event: copyEvent(event), + return &driver.AttemptRecord{ + Attempt: copyAttempt(a), + Event: copyEvent(event), }, nil } } return nil, nil } 
-func (s *memLogStore) matchesDeliveryFilter(d *models.Delivery, event *models.Event, req driver.ListDeliveryRequest) bool { - // Filter by event's tenant ID since deliveries don't have tenant_id in the database +func (s *memLogStore) matchesAttemptFilter(a *models.Attempt, event *models.Event, req driver.ListAttemptRequest) bool { + // Filter by event's tenant ID since attempts don't have tenant_id in the database if req.TenantID != "" && event.TenantID != req.TenantID { return false } - if req.EventID != "" && d.EventID != req.EventID { + if req.EventID != "" && a.EventID != req.EventID { return false } if len(req.DestinationIDs) > 0 { found := false for _, destID := range req.DestinationIDs { - if d.DestinationID == destID { + if a.DestinationID == destID { found = true break } @@ -390,7 +390,7 @@ func (s *memLogStore) matchesDeliveryFilter(d *models.Delivery, event *models.Ev } } - if req.Status != "" && d.Status != req.Status { + if req.Status != "" && a.Status != req.Status { return false } @@ -407,16 +407,16 @@ func (s *memLogStore) matchesDeliveryFilter(d *models.Delivery, event *models.Ev } } - if req.TimeFilter.GTE != nil && d.Time.Before(*req.TimeFilter.GTE) { + if req.TimeFilter.GTE != nil && a.Time.Before(*req.TimeFilter.GTE) { return false } - if req.TimeFilter.LTE != nil && d.Time.After(*req.TimeFilter.LTE) { + if req.TimeFilter.LTE != nil && a.Time.After(*req.TimeFilter.LTE) { return false } - if req.TimeFilter.GT != nil && !d.Time.After(*req.TimeFilter.GT) { + if req.TimeFilter.GT != nil && !a.Time.After(*req.TimeFilter.GT) { return false } - if req.TimeFilter.LT != nil && !d.Time.Before(*req.TimeFilter.LT) { + if req.TimeFilter.LT != nil && !a.Time.Before(*req.TimeFilter.LT) { return false } @@ -449,25 +449,25 @@ func copyEvent(e *models.Event) *models.Event { return copied } -func copyDelivery(d *models.Delivery) *models.Delivery { - if d == nil { +func copyAttempt(a *models.Attempt) *models.Attempt { + if a == nil { return nil } - copied := 
&models.Delivery{ - ID: d.ID, - TenantID: d.TenantID, - EventID: d.EventID, - DestinationID: d.DestinationID, - Attempt: d.Attempt, - Manual: d.Manual, - Status: d.Status, - Time: d.Time, - Code: d.Code, - } - - if d.ResponseData != nil { - copied.ResponseData = make(map[string]any, len(d.ResponseData)) - for k, v := range d.ResponseData { + copied := &models.Attempt{ + ID: a.ID, + TenantID: a.TenantID, + EventID: a.EventID, + DestinationID: a.DestinationID, + AttemptNumber: a.AttemptNumber, + Manual: a.Manual, + Status: a.Status, + Time: a.Time, + Code: a.Code, + } + + if a.ResponseData != nil { + copied.ResponseData = make(map[string]any, len(a.ResponseData)) + for k, v := range a.ResponseData { copied.ResponseData[k] = v } } diff --git a/internal/logstore/pglogstore/pglogstore.go b/internal/logstore/pglogstore/pglogstore.go index 4c6fb28f..35feeb08 100644 --- a/internal/logstore/pglogstore/pglogstore.go +++ b/internal/logstore/pglogstore/pglogstore.go @@ -15,9 +15,9 @@ import ( ) const ( - cursorResourceEvent = "evt" - cursorResourceDelivery = "dlv" - cursorVersion = 1 + cursorResourceEvent = "evt" + cursorResourceAttempt = "att" + cursorVersion = 1 ) type logStore struct { @@ -181,13 +181,13 @@ func scanEvents(rows pgx.Rows) ([]eventWithTimeID, error) { return results, nil } -// deliveryRecordWithTimeID wraps a delivery record with its time_delivery_id for cursor encoding. -type deliveryRecordWithTimeID struct { - *driver.DeliveryRecord - TimeDeliveryID string +// attemptRecordWithTimeID wraps an attempt record with its time_attempt_id for cursor encoding. 
+type attemptRecordWithTimeID struct { + *driver.AttemptRecord + TimeAttemptID string } -func (s *logStore) ListDelivery(ctx context.Context, req driver.ListDeliveryRequest) (driver.ListDeliveryResponse, error) { +func (s *logStore) ListAttempt(ctx context.Context, req driver.ListAttemptRequest) (driver.ListAttemptResponse, error) { sortOrder := req.SortOrder if sortOrder != "asc" && sortOrder != "desc" { sortOrder = "desc" @@ -198,80 +198,80 @@ func (s *logStore) ListDelivery(ctx context.Context, req driver.ListDeliveryRequ limit = 100 } - res, err := pagination.Run(ctx, pagination.Config[deliveryRecordWithTimeID]{ + res, err := pagination.Run(ctx, pagination.Config[attemptRecordWithTimeID]{ Limit: limit, Order: sortOrder, Next: req.Next, Prev: req.Prev, - Fetch: func(ctx context.Context, q pagination.QueryInput) ([]deliveryRecordWithTimeID, error) { - query, args := buildDeliveryQuery(req, q) + Fetch: func(ctx context.Context, q pagination.QueryInput) ([]attemptRecordWithTimeID, error) { + query, args := buildAttemptQuery(req, q) rows, err := s.db.Query(ctx, query, args...) 
if err != nil { return nil, fmt.Errorf("query failed: %w", err) } defer rows.Close() - return scanDeliveryRecords(rows) + return scanAttemptRecords(rows) }, - Cursor: pagination.Cursor[deliveryRecordWithTimeID]{ - Encode: func(dr deliveryRecordWithTimeID) string { - return cursor.Encode(cursorResourceDelivery, cursorVersion, dr.TimeDeliveryID) + Cursor: pagination.Cursor[attemptRecordWithTimeID]{ + Encode: func(ar attemptRecordWithTimeID) string { + return cursor.Encode(cursorResourceAttempt, cursorVersion, ar.TimeAttemptID) }, Decode: func(c string) (string, error) { - return cursor.Decode(c, cursorResourceDelivery, cursorVersion) + return cursor.Decode(c, cursorResourceAttempt, cursorVersion) }, }, }) if err != nil { - return driver.ListDeliveryResponse{}, err + return driver.ListAttemptResponse{}, err } - // Extract delivery records from results - data := make([]*driver.DeliveryRecord, len(res.Items)) + // Extract attempt records from results + data := make([]*driver.AttemptRecord, len(res.Items)) for i, item := range res.Items { - data[i] = item.DeliveryRecord + data[i] = item.AttemptRecord } - return driver.ListDeliveryResponse{ + return driver.ListAttemptResponse{ Data: data, Next: res.Next, Prev: res.Prev, }, nil } -func buildDeliveryQuery(req driver.ListDeliveryRequest, q pagination.QueryInput) (string, []any) { - cursorCondition := fmt.Sprintf("AND ($10::text = '' OR idx.time_delivery_id %s $10::text)", q.Compare) - orderByClause := fmt.Sprintf("idx.delivery_time %s, idx.delivery_id %s", strings.ToUpper(q.SortDir), strings.ToUpper(q.SortDir)) +func buildAttemptQuery(req driver.ListAttemptRequest, q pagination.QueryInput) (string, []any) { + cursorCondition := fmt.Sprintf("AND ($10::text = '' OR idx.time_attempt_id %s $10::text)", q.Compare) + orderByClause := fmt.Sprintf("idx.attempt_time %s, idx.attempt_id %s", strings.ToUpper(q.SortDir), strings.ToUpper(q.SortDir)) query := fmt.Sprintf(` SELECT idx.event_id, - idx.delivery_id, + idx.attempt_id, 
idx.destination_id, idx.event_time, - idx.delivery_time, + idx.attempt_time, idx.topic, idx.status, - idx.time_delivery_id, + idx.time_attempt_id, e.tenant_id, e.eligible_for_retry, e.data, e.metadata, - d.code, - d.response_data, + a.code, + a.response_data, idx.manual, - idx.attempt - FROM event_delivery_index idx + idx.attempt_number + FROM event_attempt_index idx JOIN events e ON e.id = idx.event_id AND e.time = idx.event_time - JOIN deliveries d ON d.id = idx.delivery_id AND d.time = idx.delivery_time + JOIN attempts a ON a.id = idx.attempt_id AND a.time = idx.attempt_time WHERE ($1::text = '' OR idx.tenant_id = $1) AND ($2::text = '' OR idx.event_id = $2) AND (array_length($3::text[], 1) IS NULL OR idx.destination_id = ANY($3)) AND ($4::text = '' OR idx.status = $4) AND (array_length($5::text[], 1) IS NULL OR idx.topic = ANY($5)) - AND ($6::timestamptz IS NULL OR idx.delivery_time >= $6) - AND ($7::timestamptz IS NULL OR idx.delivery_time <= $7) - AND ($8::timestamptz IS NULL OR idx.delivery_time > $8) - AND ($9::timestamptz IS NULL OR idx.delivery_time < $9) + AND ($6::timestamptz IS NULL OR idx.attempt_time >= $6) + AND ($7::timestamptz IS NULL OR idx.attempt_time <= $7) + AND ($8::timestamptz IS NULL OR idx.attempt_time > $8) + AND ($9::timestamptz IS NULL OR idx.attempt_time < $9) %s ORDER BY %s LIMIT $11 @@ -294,18 +294,18 @@ func buildDeliveryQuery(req driver.ListDeliveryRequest, q pagination.QueryInput) return query, args } -func scanDeliveryRecords(rows pgx.Rows) ([]deliveryRecordWithTimeID, error) { - var results []deliveryRecordWithTimeID +func scanAttemptRecords(rows pgx.Rows) ([]attemptRecordWithTimeID, error) { + var results []attemptRecordWithTimeID for rows.Next() { var ( eventID string - deliveryID string + attemptID string destinationID string eventTime time.Time - deliveryTime time.Time + attemptTime time.Time topic string status string - timeDeliveryID string + timeAttemptID string tenantID string eligibleForRetry bool data map[string]any 
@@ -313,18 +313,18 @@ func scanDeliveryRecords(rows pgx.Rows) ([]deliveryRecordWithTimeID, error) { code string responseData map[string]any manual bool - attempt int + attemptNumber int ) if err := rows.Scan( &eventID, - &deliveryID, + &attemptID, &destinationID, &eventTime, - &deliveryTime, + &attemptTime, &topic, &status, - &timeDeliveryID, + &timeAttemptID, &tenantID, &eligibleForRetry, &data, @@ -332,22 +332,22 @@ func scanDeliveryRecords(rows pgx.Rows) ([]deliveryRecordWithTimeID, error) { &code, &responseData, &manual, - &attempt, + &attemptNumber, ); err != nil { return nil, fmt.Errorf("scan failed: %w", err) } - results = append(results, deliveryRecordWithTimeID{ - DeliveryRecord: &driver.DeliveryRecord{ - Delivery: &models.Delivery{ - ID: deliveryID, + results = append(results, attemptRecordWithTimeID{ + AttemptRecord: &driver.AttemptRecord{ + Attempt: &models.Attempt{ + ID: attemptID, TenantID: tenantID, EventID: eventID, DestinationID: destinationID, - Attempt: attempt, + AttemptNumber: attemptNumber, Manual: manual, Status: status, - Time: deliveryTime, + Time: attemptTime, Code: code, ResponseData: responseData, }, @@ -362,7 +362,7 @@ func scanDeliveryRecords(rows pgx.Rows) ([]deliveryRecordWithTimeID, error) { Metadata: metadata, }, }, - TimeDeliveryID: timeDeliveryID, + TimeAttemptID: timeAttemptID, }) } @@ -391,7 +391,7 @@ func (s *logStore) RetrieveEvent(ctx context.Context, req driver.RetrieveEventRe FROM events e WHERE ($1::text = '' OR e.tenant_id = $1) AND e.id = $2 AND EXISTS ( - SELECT 1 FROM event_delivery_index idx + SELECT 1 FROM event_attempt_index idx WHERE ($1::text = '' OR idx.tenant_id = $1) AND idx.event_id = $2 AND idx.destination_id = $3 )` args = []any{req.TenantID, req.EventID, req.DestinationID} @@ -434,38 +434,38 @@ func (s *logStore) RetrieveEvent(ctx context.Context, req driver.RetrieveEventRe return event, nil } -func (s *logStore) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeliveryRequest) 
(*driver.DeliveryRecord, error) { +func (s *logStore) RetrieveAttempt(ctx context.Context, req driver.RetrieveAttemptRequest) (*driver.AttemptRecord, error) { query := ` SELECT idx.event_id, - idx.delivery_id, + idx.attempt_id, idx.destination_id, idx.event_time, - idx.delivery_time, + idx.attempt_time, idx.topic, idx.status, e.tenant_id, e.eligible_for_retry, e.data, e.metadata, - d.code, - d.response_data, + a.code, + a.response_data, idx.manual, - idx.attempt - FROM event_delivery_index idx + idx.attempt_number + FROM event_attempt_index idx JOIN events e ON e.id = idx.event_id AND e.time = idx.event_time - JOIN deliveries d ON d.id = idx.delivery_id AND d.time = idx.delivery_time - WHERE ($1::text = '' OR idx.tenant_id = $1) AND idx.delivery_id = $2 + JOIN attempts a ON a.id = idx.attempt_id AND a.time = idx.attempt_time + WHERE ($1::text = '' OR idx.tenant_id = $1) AND idx.attempt_id = $2 LIMIT 1` - row := s.db.QueryRow(ctx, query, req.TenantID, req.DeliveryID) + row := s.db.QueryRow(ctx, query, req.TenantID, req.AttemptID) var ( eventID string - deliveryID string + attemptID string destinationID string eventTime time.Time - deliveryTime time.Time + attemptTime time.Time topic string status string tenantID string @@ -475,15 +475,15 @@ func (s *logStore) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeli code string responseData map[string]any manual bool - attempt int + attemptNumber int ) err := row.Scan( &eventID, - &deliveryID, + &attemptID, &destinationID, &eventTime, - &deliveryTime, + &attemptTime, &topic, &status, &tenantID, @@ -493,7 +493,7 @@ func (s *logStore) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeli &code, &responseData, &manual, - &attempt, + &attemptNumber, ) if err == pgx.ErrNoRows { return nil, nil @@ -502,16 +502,16 @@ func (s *logStore) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeli return nil, fmt.Errorf("scan failed: %w", err) } - return &driver.DeliveryRecord{ - Delivery: &models.Delivery{ - 
ID: deliveryID, + return &driver.AttemptRecord{ + Attempt: &models.Attempt{ + ID: attemptID, TenantID: tenantID, EventID: eventID, DestinationID: destinationID, - Attempt: attempt, + AttemptNumber: attemptNumber, Manual: manual, Status: status, - Time: deliveryTime, + Time: attemptTime, Code: code, ResponseData: responseData, }, @@ -543,10 +543,10 @@ func (s *logStore) InsertMany(ctx context.Context, entries []*models.LogEntry) e events = append(events, e) } - // Extract deliveries - deliveries := make([]*models.Delivery, 0, len(entries)) + // Extract attempts + attempts := make([]*models.Attempt, 0, len(entries)) for _, entry := range entries { - deliveries = append(deliveries, entry.Delivery) + attempts = append(attempts, entry.Attempt) } tx, err := s.db.Begin(ctx) @@ -566,41 +566,41 @@ func (s *logStore) InsertMany(ctx context.Context, entries []*models.LogEntry) e } } - if len(deliveries) > 0 { + if len(attempts) > 0 { _, err = tx.Exec(ctx, ` - INSERT INTO deliveries (id, event_id, destination_id, status, time, code, response_data, manual, attempt) + INSERT INTO attempts (id, event_id, destination_id, status, time, code, response_data, manual, attempt_number) SELECT * FROM unnest($1::text[], $2::text[], $3::text[], $4::text[], $5::timestamptz[], $6::text[], $7::jsonb[], $8::boolean[], $9::integer[]) ON CONFLICT (time, id) DO UPDATE SET status = EXCLUDED.status, code = EXCLUDED.code, response_data = EXCLUDED.response_data - `, deliveryArrays(deliveries)...) + `, attemptArrays(attempts)...) 
if err != nil { return err } _, err = tx.Exec(ctx, ` - INSERT INTO event_delivery_index ( - event_id, delivery_id, tenant_id, destination_id, - event_time, delivery_time, topic, status, manual, attempt + INSERT INTO event_attempt_index ( + event_id, attempt_id, tenant_id, destination_id, + event_time, attempt_time, topic, status, manual, attempt_number ) SELECT - d.event_id, - d.id, + a.event_id, + a.id, e.tenant_id, - d.destination_id, + a.destination_id, e.time, - d.time, + a.time, e.topic, - d.status, - d.manual, - d.attempt + a.status, + a.manual, + a.attempt_number FROM unnest($1::text[], $2::text[], $3::text[], $4::text[], $5::timestamptz[], $6::text[], $7::jsonb[], $8::boolean[], $9::integer[]) - AS d(id, event_id, destination_id, status, time, code, response_data, manual, attempt) - JOIN events e ON e.id = d.event_id - ON CONFLICT (delivery_time, event_id, delivery_id) DO UPDATE SET + AS a(id, event_id, destination_id, status, time, code, response_data, manual, attempt_number) + JOIN events e ON e.id = a.event_id + ON CONFLICT (attempt_time, event_id, attempt_id) DO UPDATE SET status = EXCLUDED.status - `, deliveryArrays(deliveries)...) + `, attemptArrays(attempts)...) 
if err != nil { return err } @@ -642,27 +642,27 @@ func eventArrays(events []*models.Event) []any { } } -func deliveryArrays(deliveries []*models.Delivery) []any { - ids := make([]string, len(deliveries)) - eventIDs := make([]string, len(deliveries)) - destinationIDs := make([]string, len(deliveries)) - statuses := make([]string, len(deliveries)) - times := make([]time.Time, len(deliveries)) - codes := make([]string, len(deliveries)) - responseDatas := make([]map[string]any, len(deliveries)) - manuals := make([]bool, len(deliveries)) - attempts := make([]int, len(deliveries)) - - for i, d := range deliveries { - ids[i] = d.ID - eventIDs[i] = d.EventID - destinationIDs[i] = d.DestinationID - statuses[i] = d.Status - times[i] = d.Time - codes[i] = d.Code - responseDatas[i] = d.ResponseData - manuals[i] = d.Manual - attempts[i] = d.Attempt +func attemptArrays(attempts []*models.Attempt) []any { + ids := make([]string, len(attempts)) + eventIDs := make([]string, len(attempts)) + destinationIDs := make([]string, len(attempts)) + statuses := make([]string, len(attempts)) + times := make([]time.Time, len(attempts)) + codes := make([]string, len(attempts)) + responseDatas := make([]map[string]any, len(attempts)) + manuals := make([]bool, len(attempts)) + attemptNumbers := make([]int, len(attempts)) + + for i, a := range attempts { + ids[i] = a.ID + eventIDs[i] = a.EventID + destinationIDs[i] = a.DestinationID + statuses[i] = a.Status + times[i] = a.Time + codes[i] = a.Code + responseDatas[i] = a.ResponseData + manuals[i] = a.Manual + attemptNumbers[i] = a.AttemptNumber } return []any{ @@ -674,6 +674,6 @@ func deliveryArrays(deliveries []*models.Delivery) []any { codes, responseDatas, manuals, - attempts, + attemptNumbers, } } diff --git a/internal/models/event.go b/internal/models/event.go index 4d7a68ff..22704c7d 100644 --- a/internal/models/event.go +++ b/internal/models/event.go @@ -128,18 +128,18 @@ func NewManualDeliveryTask(event Event, destinationID string) 
DeliveryTask { } const ( - DeliveryStatusSuccess = "success" - DeliveryStatusFailed = "failed" + AttemptStatusSuccess = "success" + AttemptStatusFailed = "failed" ) // LogEntry represents a message for the log queue. // -// IMPORTANT: Both Event and Delivery are REQUIRED. The logstore requires both +// IMPORTANT: Both Event and Attempt are REQUIRED. The logstore requires both // to exist for proper data consistency. The logmq consumer validates this // requirement and rejects entries missing either field. type LogEntry struct { - Event *Event `json:"event"` - Delivery *Delivery `json:"delivery"` + Event *Event `json:"event"` + Attempt *Attempt `json:"attempt"` } var _ mqs.IncomingMessage = &LogEntry{} @@ -156,12 +156,12 @@ func (e *LogEntry) ToMessage() (*mqs.Message, error) { return &mqs.Message{Body: data}, nil } -type Delivery struct { +type Attempt struct { ID string `json:"id"` TenantID string `json:"tenant_id"` EventID string `json:"event_id"` DestinationID string `json:"destination_id"` - Attempt int `json:"attempt"` + AttemptNumber int `json:"attempt"` Manual bool `json:"manual"` Status string `json:"status"` Time time.Time `json:"time"` diff --git a/internal/util/testutil/event.go b/internal/util/testutil/event.go index b882f903..ddf7fcbd 100644 --- a/internal/util/testutil/event.go +++ b/internal/util/testutil/event.go @@ -98,79 +98,79 @@ func (f *mockEventFactory) WithData(data map[string]interface{}) func(*models.Ev // ============================== Mock Delivery ============================== -var DeliveryFactory = &mockDeliveryFactory{} +var AttemptFactory = &mockAttemptFactory{} -type mockDeliveryFactory struct { +type mockAttemptFactory struct { } -func (f *mockDeliveryFactory) Any(opts ...func(*models.Delivery)) models.Delivery { - delivery := models.Delivery{ - ID: idgen.Delivery(), +func (f *mockAttemptFactory) Any(opts ...func(*models.Attempt)) models.Attempt { + attempt := models.Attempt{ + ID: idgen.Attempt(), TenantID: "test-tenant", EventID: 
idgen.Event(), DestinationID: idgen.Destination(), - Attempt: 1, + AttemptNumber: 1, Manual: false, Status: "success", Time: time.Now(), } for _, opt := range opts { - opt(&delivery) + opt(&attempt) } - return delivery + return attempt } -func (f *mockDeliveryFactory) AnyPointer(opts ...func(*models.Delivery)) *models.Delivery { - delivery := f.Any(opts...) - return &delivery +func (f *mockAttemptFactory) AnyPointer(opts ...func(*models.Attempt)) *models.Attempt { + attempt := f.Any(opts...) + return &attempt } -func (f *mockDeliveryFactory) WithID(id string) func(*models.Delivery) { - return func(delivery *models.Delivery) { - delivery.ID = id +func (f *mockAttemptFactory) WithID(id string) func(*models.Attempt) { + return func(attempt *models.Attempt) { + attempt.ID = id } } -func (f *mockDeliveryFactory) WithTenantID(tenantID string) func(*models.Delivery) { - return func(delivery *models.Delivery) { - delivery.TenantID = tenantID +func (f *mockAttemptFactory) WithTenantID(tenantID string) func(*models.Attempt) { + return func(attempt *models.Attempt) { + attempt.TenantID = tenantID } } -func (f *mockDeliveryFactory) WithAttempt(attempt int) func(*models.Delivery) { - return func(delivery *models.Delivery) { - delivery.Attempt = attempt +func (f *mockAttemptFactory) WithAttemptNumber(attemptNumber int) func(*models.Attempt) { + return func(attempt *models.Attempt) { + attempt.AttemptNumber = attemptNumber } } -func (f *mockDeliveryFactory) WithManual(manual bool) func(*models.Delivery) { - return func(delivery *models.Delivery) { - delivery.Manual = manual +func (f *mockAttemptFactory) WithManual(manual bool) func(*models.Attempt) { + return func(attempt *models.Attempt) { + attempt.Manual = manual } } -func (f *mockDeliveryFactory) WithEventID(eventID string) func(*models.Delivery) { - return func(delivery *models.Delivery) { - delivery.EventID = eventID +func (f *mockAttemptFactory) WithEventID(eventID string) func(*models.Attempt) { + return func(attempt 
*models.Attempt) { + attempt.EventID = eventID } } -func (f *mockDeliveryFactory) WithDestinationID(destinationID string) func(*models.Delivery) { - return func(delivery *models.Delivery) { - delivery.DestinationID = destinationID +func (f *mockAttemptFactory) WithDestinationID(destinationID string) func(*models.Attempt) { + return func(attempt *models.Attempt) { + attempt.DestinationID = destinationID } } -func (f *mockDeliveryFactory) WithStatus(status string) func(*models.Delivery) { - return func(delivery *models.Delivery) { - delivery.Status = status +func (f *mockAttemptFactory) WithStatus(status string) func(*models.Attempt) { + return func(attempt *models.Attempt) { + attempt.Status = status } } -func (f *mockDeliveryFactory) WithTime(time time.Time) func(*models.Delivery) { - return func(delivery *models.Delivery) { - delivery.Time = time +func (f *mockAttemptFactory) WithTime(time time.Time) func(*models.Attempt) { + return func(attempt *models.Attempt) { + attempt.Time = time } } From f8098c9d2f571d70aaea4fe178d6ea6367b2f81b Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Tue, 27 Jan 2026 02:42:29 +0700 Subject: [PATCH 14/23] =?UTF-8?q?test:=20rename=20Delivery=20=E2=86=92=20A?= =?UTF-8?q?ttempt=20in=20all=20test=20files?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update test files to use the new Attempt naming: - logstore/drivertest/*.go: AttemptFactory, ListAttempt, RetrieveAttempt - deliverymq/*_test.go: AttemptStatus*, entry.Attempt - apirouter/*_test.go: AttemptFactory, /attempts API paths - logmq/batchprocessor_test.go: AttemptFactory, Attempt struct fields All unit tests pass (1665 tests). 
Co-Authored-By: Claude Opus 4.5 --- internal/apirouter/log_handlers_test.go | 118 ++++----- .../logger_middleware_integration_test.go | 2 +- internal/apirouter/retry_handlers_test.go | 40 +-- internal/deliverymq/messagehandler_test.go | 12 +- internal/deliverymq/mock_test.go | 20 +- internal/logmq/batchprocessor_test.go | 28 +-- internal/logstore/drivertest/misc.go | 158 ++++++------ internal/logstore/drivertest/pagination.go | 234 +++++++++--------- 8 files changed, 306 insertions(+), 306 deletions(-) diff --git a/internal/apirouter/log_handlers_test.go b/internal/apirouter/log_handlers_test.go index 693d04e2..c2b311ec 100644 --- a/internal/apirouter/log_handlers_test.go +++ b/internal/apirouter/log_handlers_test.go @@ -37,7 +37,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should return empty list when no deliveries", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -52,7 +52,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should list deliveries", func(t *testing.T) { // Seed delivery events eventID := idgen.Event() - deliveryID := idgen.Delivery() + deliveryID := idgen.Attempt() eventTime := time.Now().Add(-1 * time.Hour).Truncate(time.Millisecond) deliveryTime := eventTime.Add(100 * time.Millisecond) @@ -64,18 +64,18 @@ func TestListDeliveries(t *testing.T) { testutil.EventFactory.WithTime(eventTime), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(deliveryTime), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(deliveryID), + 
testutil.AttemptFactory.WithEventID(eventID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("success"), + testutil.AttemptFactory.WithTime(deliveryTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Attempt: attempt}})) w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -95,7 +95,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should include event when include=event", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?include=event", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?include=event", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -116,7 +116,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should include event.data when include=event.data", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?include=event.data", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?include=event.data", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -135,7 +135,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should filter by destination_id", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?destination_id="+destinationID, nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?destination_id="+destinationID, nil) 
result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -149,7 +149,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should filter by non-existent destination_id", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?destination_id=nonexistent", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?destination_id=nonexistent", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -163,7 +163,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should return 404 for non-existent tenant", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/nonexistent/deliveries", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/nonexistent/attempts", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) @@ -171,7 +171,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should exclude response_data by default", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -189,7 +189,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should include response_data with include=response_data", func(t *testing.T) { // Seed a delivery with response_data eventID := idgen.Event() - deliveryID := idgen.Delivery() + deliveryID := idgen.Attempt() eventTime := time.Now().Add(-30 * time.Minute).Truncate(time.Millisecond) deliveryTime := eventTime.Add(100 * time.Millisecond) @@ -201,22 +201,22 @@ func TestListDeliveries(t *testing.T) { testutil.EventFactory.WithTime(eventTime), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - 
testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(deliveryTime), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(deliveryID), + testutil.AttemptFactory.WithEventID(eventID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("success"), + testutil.AttemptFactory.WithTime(deliveryTime), ) - delivery.ResponseData = map[string]interface{}{ + attempt.ResponseData = map[string]interface{}{ "body": "OK", "status": float64(200), } - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Attempt: attempt}})) w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?include=response_data", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?include=response_data", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -243,7 +243,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should support comma-separated include param", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?include=event,response_data", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?include=event,response_data", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -263,7 +263,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should return validation error for invalid dir", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?dir=invalid", nil) + req, _ := http.NewRequest("GET", 
baseAPIPath+"/tenants/"+tenantID+"/attempts?dir=invalid", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusUnprocessableEntity, w.Code) @@ -271,7 +271,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should accept valid dir param", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?dir=asc", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?dir=asc", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -279,7 +279,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should cap limit at 1000", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?limit=5000", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?limit=5000", nil) result.router.ServeHTTP(w, req) // Should succeed, limit is silently capped @@ -309,7 +309,7 @@ func TestRetrieveDelivery(t *testing.T) { // Seed a delivery event eventID := idgen.Event() - deliveryID := idgen.Delivery() + deliveryID := idgen.Attempt() eventTime := time.Now().Add(-1 * time.Hour).Truncate(time.Millisecond) deliveryTime := eventTime.Add(100 * time.Millisecond) @@ -321,19 +321,19 @@ func TestRetrieveDelivery(t *testing.T) { testutil.EventFactory.WithTime(eventTime), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("failed"), - testutil.DeliveryFactory.WithTime(deliveryTime), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(deliveryID), + testutil.AttemptFactory.WithEventID(eventID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("failed"), + testutil.AttemptFactory.WithTime(deliveryTime), ) - 
require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Attempt: attempt}})) t.Run("should retrieve delivery by ID", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries/"+deliveryID, nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts/"+deliveryID, nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -349,7 +349,7 @@ func TestRetrieveDelivery(t *testing.T) { t.Run("should include event when include=event", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries/"+deliveryID+"?include=event", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts/"+deliveryID+"?include=event", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -366,7 +366,7 @@ func TestRetrieveDelivery(t *testing.T) { t.Run("should include event.data when include=event.data", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries/"+deliveryID+"?include=event.data", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts/"+deliveryID+"?include=event.data", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -381,7 +381,7 @@ func TestRetrieveDelivery(t *testing.T) { t.Run("should return 404 for non-existent delivery", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries/nonexistent", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts/nonexistent", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) @@ 
-389,7 +389,7 @@ func TestRetrieveDelivery(t *testing.T) { t.Run("should return 404 for non-existent tenant", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/nonexistent/deliveries/"+deliveryID, nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/nonexistent/attempts/"+deliveryID, nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) @@ -418,7 +418,7 @@ func TestRetrieveEvent(t *testing.T) { // Seed a delivery event eventID := idgen.Event() - deliveryID := idgen.Delivery() + deliveryID := idgen.Attempt() eventTime := time.Now().Add(-1 * time.Hour).Truncate(time.Millisecond) deliveryTime := eventTime.Add(100 * time.Millisecond) @@ -436,15 +436,15 @@ func TestRetrieveEvent(t *testing.T) { }), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(deliveryTime), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(deliveryID), + testutil.AttemptFactory.WithEventID(eventID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("success"), + testutil.AttemptFactory.WithTime(deliveryTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Attempt: attempt}})) t.Run("should retrieve event by ID", func(t *testing.T) { w := httptest.NewRecorder() @@ -518,7 +518,7 @@ func TestListEvents(t *testing.T) { t.Run("should list events", func(t *testing.T) { // Seed delivery events eventID := idgen.Event() - deliveryID := idgen.Delivery() + deliveryID := idgen.Attempt() eventTime := time.Now().Add(-1 
* time.Hour).Truncate(time.Millisecond) deliveryTime := eventTime.Add(100 * time.Millisecond) @@ -533,15 +533,15 @@ func TestListEvents(t *testing.T) { }), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(deliveryTime), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(deliveryID), + testutil.AttemptFactory.WithEventID(eventID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("success"), + testutil.AttemptFactory.WithTime(deliveryTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Attempt: attempt}})) w := httptest.NewRecorder() req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/events", nil) diff --git a/internal/apirouter/logger_middleware_integration_test.go b/internal/apirouter/logger_middleware_integration_test.go index 2d44ea86..392b1f94 100644 --- a/internal/apirouter/logger_middleware_integration_test.go +++ b/internal/apirouter/logger_middleware_integration_test.go @@ -65,7 +65,7 @@ func (r *mockRegistry) CreatePublisher(ctx context.Context, destination *models. 
return nil, fmt.Errorf("not implemented") } -func (r *mockRegistry) PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Delivery, error) { +func (r *mockRegistry) PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Attempt, error) { return nil, fmt.Errorf("not implemented") } diff --git a/internal/apirouter/retry_handlers_test.go b/internal/apirouter/retry_handlers_test.go index 170b147c..514c010b 100644 --- a/internal/apirouter/retry_handlers_test.go +++ b/internal/apirouter/retry_handlers_test.go @@ -37,7 +37,7 @@ func TestRetryDelivery(t *testing.T) { // Seed a delivery event eventID := idgen.Event() - deliveryID := idgen.Delivery() + deliveryID := idgen.Attempt() eventTime := time.Now().Add(-1 * time.Hour).Truncate(time.Millisecond) deliveryTime := eventTime.Add(100 * time.Millisecond) @@ -49,15 +49,15 @@ func TestRetryDelivery(t *testing.T) { testutil.EventFactory.WithTime(eventTime), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("failed"), - testutil.DeliveryFactory.WithTime(deliveryTime), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(deliveryID), + testutil.AttemptFactory.WithEventID(eventID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("failed"), + testutil.AttemptFactory.WithTime(deliveryTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Attempt: attempt}})) t.Run("should retry delivery successfully with full event data", func(t *testing.T) { // Subscribe to deliveryMQ to capture published task @@ 
-69,7 +69,7 @@ func TestRetryDelivery(t *testing.T) { // Trigger manual retry w := httptest.NewRecorder() - req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/deliveries/"+deliveryID+"/retry", nil) + req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/attempts/"+deliveryID+"/retry", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusAccepted, w.Code) @@ -99,7 +99,7 @@ func TestRetryDelivery(t *testing.T) { t.Run("should return 404 for non-existent delivery", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/deliveries/nonexistent/retry", nil) + req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/attempts/nonexistent/retry", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) @@ -107,7 +107,7 @@ func TestRetryDelivery(t *testing.T) { t.Run("should return 404 for non-existent tenant", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/nonexistent/deliveries/"+deliveryID+"/retry", nil) + req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/nonexistent/attempts/"+deliveryID+"/retry", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) @@ -128,7 +128,7 @@ func TestRetryDelivery(t *testing.T) { // Create a delivery for the disabled destination disabledEventID := idgen.Event() - disabledDeliveryID := idgen.Delivery() + disabledDeliveryID := idgen.Attempt() disabledEvent := testutil.EventFactory.AnyPointer( testutil.EventFactory.WithID(disabledEventID), @@ -138,18 +138,18 @@ func TestRetryDelivery(t *testing.T) { testutil.EventFactory.WithTime(eventTime), ) - disabledDelivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(disabledDeliveryID), - testutil.DeliveryFactory.WithEventID(disabledEventID), - testutil.DeliveryFactory.WithDestinationID(disabledDestinationID), - 
testutil.DeliveryFactory.WithStatus("failed"), - testutil.DeliveryFactory.WithTime(deliveryTime), + disabledAttempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(disabledDeliveryID), + testutil.AttemptFactory.WithEventID(disabledEventID), + testutil.AttemptFactory.WithDestinationID(disabledDestinationID), + testutil.AttemptFactory.WithStatus("failed"), + testutil.AttemptFactory.WithTime(deliveryTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: disabledEvent, Delivery: disabledDelivery}})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: disabledEvent, Attempt: disabledAttempt}})) w := httptest.NewRecorder() - req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/deliveries/"+disabledDeliveryID+"/retry", nil) + req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/attempts/"+disabledDeliveryID+"/retry", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusBadRequest, w.Code) diff --git a/internal/deliverymq/messagehandler_test.go b/internal/deliverymq/messagehandler_test.go index 8ccf8b1f..0a3a5d75 100644 --- a/internal/deliverymq/messagehandler_test.go +++ b/internal/deliverymq/messagehandler_test.go @@ -256,7 +256,7 @@ func TestMessageHandler_PublishError_EligibleForRetry(t *testing.T) { assert.Equal(t, models.RetryID(task.Event.ID, task.DestinationID), retryScheduler.taskIDs[0], "should use GetRetryID for task ID") require.Len(t, logPublisher.entries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusFailed, logPublisher.entries[0].Delivery.Status, "delivery status should be Failed") + assert.Equal(t, models.AttemptStatusFailed, logPublisher.entries[0].Attempt.Status, "delivery status should be Failed") assertAlertMonitor(t, alertMonitor, false, &destination, publishErr.Data) } @@ -324,7 +324,7 @@ func TestMessageHandler_PublishError_NotEligible(t *testing.T) { 
assert.Empty(t, retryScheduler.schedules, "no retry should be scheduled") assert.Equal(t, 1, publisher.current, "should only attempt once") require.Len(t, logPublisher.entries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusFailed, logPublisher.entries[0].Delivery.Status, "delivery status should be Failed") + assert.Equal(t, models.AttemptStatusFailed, logPublisher.entries[0].Attempt.Status, "delivery status should be Failed") assertAlertMonitor(t, alertMonitor, false, &destination, publishErr.Data) } @@ -384,7 +384,7 @@ func TestMessageHandler_RetryFlow(t *testing.T) { assert.Empty(t, retryScheduler.schedules, "no retry should be scheduled") assert.Equal(t, 1, publisher.current, "publish should succeed once") require.Len(t, logPublisher.entries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusSuccess, logPublisher.entries[0].Delivery.Status, "delivery status should be OK") + assert.Equal(t, models.AttemptStatusSuccess, logPublisher.entries[0].Attempt.Status, "delivery status should be OK") } func TestMessageHandler_Idempotency(t *testing.T) { @@ -831,7 +831,7 @@ func TestManualDelivery_PublishError(t *testing.T) { assert.Equal(t, 1, publisher.current, "should attempt publish once") assert.Empty(t, retryScheduler.schedules, "should not schedule retry for manual delivery") require.Len(t, logPublisher.entries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusFailed, logPublisher.entries[0].Delivery.Status, "delivery status should be Failed") + assert.Equal(t, models.AttemptStatusFailed, logPublisher.entries[0].Attempt.Status, "delivery status should be Failed") assertAlertMonitor(t, alertMonitor, false, &destination, publishErr.Data) } @@ -894,7 +894,7 @@ func TestManualDelivery_CancelError(t *testing.T) { assert.Len(t, retryScheduler.canceled, 1, "should attempt to cancel retry") assert.Equal(t, models.RetryID(task.Event.ID, task.DestinationID), retryScheduler.canceled[0], "should cancel with correct 
retry ID") require.Len(t, logPublisher.entries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusSuccess, logPublisher.entries[0].Delivery.Status, "delivery status should be OK despite cancel error") + assert.Equal(t, models.AttemptStatusSuccess, logPublisher.entries[0].Attempt.Status, "delivery status should be OK despite cancel error") assertAlertMonitor(t, alertMonitor, true, &destination, nil) } @@ -1077,7 +1077,7 @@ func TestMessageHandler_AlertMonitorError(t *testing.T) { assert.False(t, mockMsg.nacked, "message should not be nacked despite alert monitor error") assert.Equal(t, 1, publisher.current, "should publish once") require.Len(t, logPublisher.entries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusSuccess, logPublisher.entries[0].Delivery.Status, "delivery status should be OK") + assert.Equal(t, models.AttemptStatusSuccess, logPublisher.entries[0].Attempt.Status, "delivery status should be OK") // Verify alert monitor was called but error was ignored // Wait for the HandleAttempt call to be made diff --git a/internal/deliverymq/mock_test.go b/internal/deliverymq/mock_test.go index b96e4eb3..d90f6ea8 100644 --- a/internal/deliverymq/mock_test.go +++ b/internal/deliverymq/mock_test.go @@ -25,17 +25,17 @@ func newMockPublisher(responses []error) *mockPublisher { return &mockPublisher{responses: responses} } -func (m *mockPublisher) PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Delivery, error) { +func (m *mockPublisher) PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Attempt, error) { m.mu.Lock() defer m.mu.Unlock() if m.current >= len(m.responses) { m.current++ - return &models.Delivery{ - ID: idgen.Delivery(), + return &models.Attempt{ + ID: idgen.Attempt(), EventID: event.ID, DestinationID: destination.ID, - Status: models.DeliveryStatusSuccess, + Status: models.AttemptStatusSuccess, Code: "OK", ResponseData: 
map[string]interface{}{}, Time: time.Now(), @@ -45,21 +45,21 @@ func (m *mockPublisher) PublishEvent(ctx context.Context, destination *models.De resp := m.responses[m.current] m.current++ if resp == nil { - return &models.Delivery{ - ID: idgen.Delivery(), + return &models.Attempt{ + ID: idgen.Attempt(), EventID: event.ID, DestinationID: destination.ID, - Status: models.DeliveryStatusSuccess, + Status: models.AttemptStatusSuccess, Code: "OK", ResponseData: map[string]interface{}{}, Time: time.Now(), }, nil } - return &models.Delivery{ - ID: idgen.Delivery(), + return &models.Attempt{ + ID: idgen.Attempt(), EventID: event.ID, DestinationID: destination.ID, - Status: models.DeliveryStatusFailed, + Status: models.AttemptStatusFailed, Code: "ERR", ResponseData: map[string]interface{}{}, Time: time.Now(), diff --git a/internal/logmq/batchprocessor_test.go b/internal/logmq/batchprocessor_test.go index 12dc5d63..01c50510 100644 --- a/internal/logmq/batchprocessor_test.go +++ b/internal/logmq/batchprocessor_test.go @@ -31,14 +31,14 @@ func (m *mockLogStore) InsertMany(ctx context.Context, entries []*models.LogEntr return nil } -func (m *mockLogStore) getInserted() (events []*models.Event, deliveries []*models.Delivery) { +func (m *mockLogStore) getInserted() (events []*models.Event, attempts []*models.Attempt) { m.mu.Lock() defer m.mu.Unlock() for _, entry := range m.entries { events = append(events, entry.Event) - deliveries = append(deliveries, entry.Delivery) + attempts = append(attempts, entry.Attempt) } - return events, deliveries + return events, attempts } // mockQueueMessage implements mqs.QueueMessage for testing. 
@@ -84,10 +84,10 @@ func TestBatchProcessor_ValidEntry(t *testing.T) { defer bp.Shutdown() event := testutil.EventFactory.Any() - delivery := testutil.DeliveryFactory.Any() + attempt := testutil.AttemptFactory.Any() entry := models.LogEntry{ Event: &event, - Delivery: &delivery, + Attempt: &attempt, } mock, msg := newMockMessage(entry) @@ -117,10 +117,10 @@ func TestBatchProcessor_InvalidEntry_MissingEvent(t *testing.T) { require.NoError(t, err) defer bp.Shutdown() - delivery := testutil.DeliveryFactory.Any() + attempt := testutil.AttemptFactory.Any() entry := models.LogEntry{ Event: nil, // Missing event - Delivery: &delivery, + Attempt: &attempt, } mock, msg := newMockMessage(entry) @@ -153,7 +153,7 @@ func TestBatchProcessor_InvalidEntry_MissingDelivery(t *testing.T) { event := testutil.EventFactory.Any() entry := models.LogEntry{ Event: &event, - Delivery: nil, // Missing delivery + Attempt: nil, // Missing delivery } mock, msg := newMockMessage(entry) @@ -185,19 +185,19 @@ func TestBatchProcessor_InvalidEntry_DoesNotBlockBatch(t *testing.T) { // Create valid entry 1 event1 := testutil.EventFactory.Any() - delivery1 := testutil.DeliveryFactory.Any() - validEntry1 := models.LogEntry{Event: &event1, Delivery: &delivery1} + attempt1 := testutil.AttemptFactory.Any() + validEntry1 := models.LogEntry{Event: &event1, Attempt: &attempt1} mock1, msg1 := newMockMessage(validEntry1) // Create invalid entry (missing event) - delivery2 := testutil.DeliveryFactory.Any() - invalidEntry := models.LogEntry{Event: nil, Delivery: &delivery2} + attempt2 := testutil.AttemptFactory.Any() + invalidEntry := models.LogEntry{Event: nil, Attempt: &attempt2} mock2, msg2 := newMockMessage(invalidEntry) // Create valid entry 2 event3 := testutil.EventFactory.Any() - delivery3 := testutil.DeliveryFactory.Any() - validEntry2 := models.LogEntry{Event: &event3, Delivery: &delivery3} + attempt3 := testutil.AttemptFactory.Any() + validEntry2 := models.LogEntry{Event: &event3, Attempt: &attempt3} 
mock3, msg3 := newMockMessage(validEntry2) // Add all messages diff --git a/internal/logstore/drivertest/misc.go b/internal/logstore/drivertest/misc.go index 022b481e..ce65a2f8 100644 --- a/internal/logstore/drivertest/misc.go +++ b/internal/logstore/drivertest/misc.go @@ -54,12 +54,12 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.EventFactory.WithTopic("test.topic"), testutil.EventFactory.WithTime(baseTime.Add(-10*time.Minute)), ) - delivery1 := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID("tenant1-delivery"), - testutil.DeliveryFactory.WithEventID(event1.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(baseTime.Add(-10*time.Minute)), + attempt1 := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID("tenant1-delivery"), + testutil.AttemptFactory.WithEventID(event1.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("success"), + testutil.AttemptFactory.WithTime(baseTime.Add(-10*time.Minute)), ) event2 := testutil.EventFactory.AnyPointer( @@ -69,23 +69,23 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.EventFactory.WithTopic("test.topic"), testutil.EventFactory.WithTime(baseTime.Add(-5*time.Minute)), ) - delivery2 := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID("tenant2-delivery"), - testutil.DeliveryFactory.WithEventID(event2.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("failed"), - testutil.DeliveryFactory.WithTime(baseTime.Add(-5*time.Minute)), + attempt2 := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID("tenant2-delivery"), + testutil.AttemptFactory.WithEventID(event2.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), + 
testutil.AttemptFactory.WithStatus("failed"), + testutil.AttemptFactory.WithTime(baseTime.Add(-5*time.Minute)), ) require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{ - {Event: event1, Delivery: delivery1}, - {Event: event2, Delivery: delivery2}, + {Event: event1, Attempt: attempt1}, + {Event: event2, Attempt: attempt2}, })) require.NoError(t, h.FlushWrites(ctx)) t.Run("TenantIsolation", func(t *testing.T) { - t.Run("ListDelivery isolates by tenant", func(t *testing.T) { - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + t.Run("ListAttempt isolates by tenant", func(t *testing.T) { + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenant1ID, Limit: 100, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -94,7 +94,7 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, require.Len(t, response.Data, 1) assert.Equal(t, "tenant1-event", response.Data[0].Event.ID) - response, err = logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err = logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenant2ID, Limit: 100, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -140,8 +140,8 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, assert.True(t, tenantsSeen[tenant2ID]) }) - t.Run("ListDelivery returns all tenants when TenantID empty", func(t *testing.T) { - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + t.Run("ListAttempt returns all tenants when TenantID empty", func(t *testing.T) { + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: "", DestinationIDs: []string{destinationID}, Limit: 100, @@ -169,18 +169,18 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, assert.Equal(t, tenant2ID, retrieved2.TenantID) }) - t.Run("RetrieveDelivery finds delivery across tenants when TenantID empty", func(t *testing.T) { - retrieved1, err := 
logStore.RetrieveDelivery(ctx, driver.RetrieveDeliveryRequest{ + t.Run("RetrieveAttempt finds attempt across tenants when TenantID empty", func(t *testing.T) { + retrieved1, err := logStore.RetrieveAttempt(ctx, driver.RetrieveAttemptRequest{ TenantID: "", - DeliveryID: "tenant1-delivery", + AttemptID: "tenant1-delivery", }) require.NoError(t, err) require.NotNil(t, retrieved1) assert.Equal(t, tenant1ID, retrieved1.Event.TenantID) - retrieved2, err := logStore.RetrieveDelivery(ctx, driver.RetrieveDeliveryRequest{ + retrieved2, err := logStore.RetrieveAttempt(ctx, driver.RetrieveAttemptRequest{ TenantID: "", - DeliveryID: "tenant2-delivery", + AttemptID: "tenant2-delivery", }) require.NoError(t, err) require.NotNil(t, retrieved2) @@ -203,21 +203,21 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.EventFactory.WithDestinationID(destinationID), testutil.EventFactory.WithTime(baseTime.Add(-time.Duration(i)*time.Hour)), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(fmt.Sprintf("sort_del_%d", i)), - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(event.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithTime(baseTime.Add(-time.Duration(i)*time.Hour)), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(fmt.Sprintf("sort_del_%d", i)), + testutil.AttemptFactory.WithTenantID(tenantID), + testutil.AttemptFactory.WithEventID(event.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithTime(baseTime.Add(-time.Duration(i)*time.Hour)), ) - entries = append(entries, &models.LogEntry{Event: event, Delivery: delivery}) + entries = append(entries, &models.LogEntry{Event: event, Attempt: attempt}) } require.NoError(t, logStore.InsertMany(ctx, entries)) startTime := baseTime.Add(-48 * time.Hour) t.Run("invalid SortOrder uses default (desc)", func(t 
*testing.T) { - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, SortOrder: "sideways", TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -225,8 +225,8 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, }) require.NoError(t, err) require.Len(t, response.Data, 3) - assert.Equal(t, "sort_del_0", response.Data[0].Delivery.ID) - assert.Equal(t, "sort_del_2", response.Data[2].Delivery.ID) + assert.Equal(t, "sort_del_0", response.Data[0].Attempt.ID) + assert.Equal(t, "sort_del_2", response.Data[2].Attempt.ID) }) }) @@ -240,15 +240,15 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.EventFactory.WithDestinationID(destinationID), testutil.EventFactory.WithTopic("test.topic"), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(event.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithTenantID(tenantID), + testutil.AttemptFactory.WithEventID(event.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), ) - require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Attempt: attempt}})) t.Run("nil DestinationIDs equals empty DestinationIDs", func(t *testing.T) { - responseNil, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + responseNil, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, DestinationIDs: nil, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -256,7 +256,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, }) require.NoError(t, err) - responseEmpty, err := logStore.ListDelivery(ctx, 
driver.ListDeliveryRequest{ + responseEmpty, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, DestinationIDs: []string{}, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -296,18 +296,18 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, ) for _, evt := range []*models.Event{eventBefore, eventAt, eventAfter} { - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(fmt.Sprintf("del_%s", evt.ID)), - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(evt.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithTime(evt.Time), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(fmt.Sprintf("del_%s", evt.ID)), + testutil.AttemptFactory.WithTenantID(tenantID), + testutil.AttemptFactory.WithEventID(evt.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithTime(evt.Time), ) - require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: evt, Delivery: delivery}})) + require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: evt, Attempt: attempt}})) } t.Run("GTE is inclusive (>=)", func(t *testing.T) { - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, TimeFilter: driver.TimeFilter{GTE: &boundaryTime}, Limit: 10, @@ -318,7 +318,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, t.Run("LTE is inclusive (<=)", func(t *testing.T) { farPast := boundaryTime.Add(-1 * time.Hour) - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, TimeFilter: driver.TimeFilter{GTE: &farPast, LTE: &boundaryTime}, Limit: 10, @@ -337,15 +337,15 @@ func testEdgeCases(t *testing.T, ctx 
context.Context, logStore driver.LogStore, testutil.EventFactory.WithTenantID(tenantID), testutil.EventFactory.WithDestinationID(destinationID), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(event.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithTenantID(tenantID), + testutil.AttemptFactory.WithEventID(event.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), ) - require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Attempt: attempt}})) - t.Run("modifying ListDelivery result doesn't affect subsequent queries", func(t *testing.T) { - response1, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + t.Run("modifying ListAttempt result doesn't affect subsequent queries", func(t *testing.T) { + response1, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 10, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -356,7 +356,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, originalID := response1.Data[0].Event.ID response1.Data[0].Event.ID = "MODIFIED" - response2, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response2, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 10, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -371,7 +371,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, tenantID := idgen.String() destinationID := idgen.Destination() eventTime := time.Now().Add(-30 * time.Minute).Truncate(time.Second) - deliveryTime := eventTime.Add(1 * time.Second) + attemptTime := eventTime.Add(1 * time.Second) startTime := eventTime.Add(-1 * time.Hour) event := 
testutil.EventFactory.AnyPointer( @@ -379,14 +379,14 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.EventFactory.WithDestinationID(destinationID), testutil.EventFactory.WithTime(eventTime), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(event.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(deliveryTime), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithTenantID(tenantID), + testutil.AttemptFactory.WithEventID(event.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("success"), + testutil.AttemptFactory.WithTime(attemptTime), ) - entries := []*models.LogEntry{{Event: event, Delivery: delivery}} + entries := []*models.LogEntry{{Event: event, Attempt: attempt}} // Race N goroutines all inserting the same record const numGoroutines = 10 @@ -402,7 +402,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, require.NoError(t, h.FlushWrites(ctx)) // Assert: still exactly 1 record - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 100, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -427,7 +427,7 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - _, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + _, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, SortOrder: "desc", Next: tc.cursor, @@ -453,19 +453,19 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log testutil.EventFactory.WithDestinationID(destinationID), 
testutil.EventFactory.WithTime(baseTime.Add(time.Duration(i)*time.Second)), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(fmt.Sprintf("cursor_del_%d", i)), - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(event.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithTime(baseTime.Add(time.Duration(i)*time.Second)), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(fmt.Sprintf("cursor_del_%d", i)), + testutil.AttemptFactory.WithTenantID(tenantID), + testutil.AttemptFactory.WithEventID(event.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithTime(baseTime.Add(time.Duration(i)*time.Second)), ) - require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Attempt: attempt}})) } require.NoError(t, h.FlushWrites(ctx)) t.Run("delivery_time desc", func(t *testing.T) { - page1, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + page1, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, SortOrder: "desc", TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -474,7 +474,7 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log require.NoError(t, err) require.NotEmpty(t, page1.Next) - page2, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + page2, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, SortOrder: "desc", Next: page1.Next, @@ -486,7 +486,7 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log }) t.Run("delivery_time asc", func(t *testing.T) { - page1, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + page1, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, SortOrder: 
"asc", TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -495,7 +495,7 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log require.NoError(t, err) require.NotEmpty(t, page1.Next) - page2, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + page2, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, SortOrder: "asc", Next: page1.Next, diff --git a/internal/logstore/drivertest/pagination.go b/internal/logstore/drivertest/pagination.go index d1bc7f2e..67de5176 100644 --- a/internal/logstore/drivertest/pagination.go +++ b/internal/logstore/drivertest/pagination.go @@ -28,11 +28,11 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { baseTime := time.Now().Truncate(time.Second) farPast := baseTime.Add(-48 * time.Hour) - t.Run("ListDelivery", func(t *testing.T) { + t.Run("ListAttempt", func(t *testing.T) { var tenantID, destinationID, idPrefix string - suite := paginationtest.Suite[*driver.DeliveryRecord]{ - Name: "ListDelivery", + suite := paginationtest.Suite[*driver.AttemptRecord]{ + Name: "ListAttempt", Cleanup: func(ctx context.Context) error { tenantID = idgen.String() @@ -41,9 +41,9 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { return nil }, - NewItem: func(i int) *driver.DeliveryRecord { + NewItem: func(i int) *driver.AttemptRecord { eventTime := baseTime.Add(time.Duration(i) * time.Second) - deliveryTime := eventTime.Add(100 * time.Millisecond) + attemptTime := eventTime.Add(100 * time.Millisecond) event := &models.Event{ ID: fmt.Sprintf("%s_evt_%03d", idPrefix, i), @@ -56,32 +56,32 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { Data: map[string]any{}, } - delivery := &models.Delivery{ + attempt := &models.Attempt{ ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), TenantID: tenantID, EventID: event.ID, DestinationID: destinationID, Status: "success", - Time: deliveryTime, + Time: attemptTime, Code: "200", } - return &driver.DeliveryRecord{ - Event: 
event, - Delivery: delivery, + return &driver.AttemptRecord{ + Event: event, + Attempt: attempt, } }, - InsertMany: func(ctx context.Context, items []*driver.DeliveryRecord) error { + InsertMany: func(ctx context.Context, items []*driver.AttemptRecord) error { entries := make([]*models.LogEntry, len(items)) for i, dr := range items { - entries[i] = &models.LogEntry{Event: dr.Event, Delivery: dr.Delivery} + entries[i] = &models.LogEntry{Event: dr.Event, Attempt: dr.Attempt} } return logStore.InsertMany(ctx, entries) }, - List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*driver.DeliveryRecord], error) { - res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*driver.AttemptRecord], error) { + res, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: opts.Limit, SortOrder: opts.Order, @@ -90,17 +90,17 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { TimeFilter: driver.TimeFilter{GTE: &farPast}, }) if err != nil { - return paginationtest.ListResult[*driver.DeliveryRecord]{}, err + return paginationtest.ListResult[*driver.AttemptRecord]{}, err } - return paginationtest.ListResult[*driver.DeliveryRecord]{ + return paginationtest.ListResult[*driver.AttemptRecord]{ Items: res.Data, Next: res.Next, Prev: res.Prev, }, nil }, - GetID: func(dr *driver.DeliveryRecord) string { - return dr.Delivery.ID + GetID: func(dr *driver.AttemptRecord) string { + return dr.Attempt.ID }, AfterInsert: func(ctx context.Context) error { @@ -111,11 +111,11 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { suite.Run(t) }) - t.Run("ListDelivery_WithDestinationFilter", func(t *testing.T) { + t.Run("ListAttempt_WithDestinationFilter", func(t *testing.T) { var tenantID, targetDestID, otherDestID, idPrefix string - suite := paginationtest.Suite[*driver.DeliveryRecord]{ - Name: 
"ListDelivery_WithDestinationFilter", + suite := paginationtest.Suite[*driver.AttemptRecord]{ + Name: "ListAttempt_WithDestinationFilter", Cleanup: func(ctx context.Context) error { tenantID = idgen.String() @@ -125,9 +125,9 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { return nil }, - NewItem: func(i int) *driver.DeliveryRecord { + NewItem: func(i int) *driver.AttemptRecord { eventTime := baseTime.Add(time.Duration(i) * time.Second) - deliveryTime := eventTime.Add(100 * time.Millisecond) + attemptTime := eventTime.Add(100 * time.Millisecond) destID := targetDestID if i%2 == 1 { @@ -145,32 +145,32 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { Data: map[string]any{}, } - delivery := &models.Delivery{ + attempt := &models.Attempt{ ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), TenantID: tenantID, EventID: event.ID, DestinationID: destID, Status: "success", - Time: deliveryTime, + Time: attemptTime, Code: "200", } - return &driver.DeliveryRecord{ - Event: event, - Delivery: delivery, + return &driver.AttemptRecord{ + Event: event, + Attempt: attempt, } }, - InsertMany: func(ctx context.Context, items []*driver.DeliveryRecord) error { + InsertMany: func(ctx context.Context, items []*driver.AttemptRecord) error { entries := make([]*models.LogEntry, len(items)) for i, dr := range items { - entries[i] = &models.LogEntry{Event: dr.Event, Delivery: dr.Delivery} + entries[i] = &models.LogEntry{Event: dr.Event, Attempt: dr.Attempt} } return logStore.InsertMany(ctx, entries) }, - List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*driver.DeliveryRecord], error) { - res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*driver.AttemptRecord], error) { + res, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, DestinationIDs: []string{targetDestID}, Limit: opts.Limit, @@ -180,21 
+180,21 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { TimeFilter: driver.TimeFilter{GTE: &farPast}, }) if err != nil { - return paginationtest.ListResult[*driver.DeliveryRecord]{}, err + return paginationtest.ListResult[*driver.AttemptRecord]{}, err } - return paginationtest.ListResult[*driver.DeliveryRecord]{ + return paginationtest.ListResult[*driver.AttemptRecord]{ Items: res.Data, Next: res.Next, Prev: res.Prev, }, nil }, - GetID: func(dr *driver.DeliveryRecord) string { - return dr.Delivery.ID + GetID: func(dr *driver.AttemptRecord) string { + return dr.Attempt.ID }, - Matches: func(dr *driver.DeliveryRecord) bool { - return dr.Delivery.DestinationID == targetDestID + Matches: func(dr *driver.AttemptRecord) bool { + return dr.Attempt.DestinationID == targetDestID }, AfterInsert: func(ctx context.Context) error { @@ -236,16 +236,16 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { InsertMany: func(ctx context.Context, items []*models.Event) error { entries := make([]*models.LogEntry, len(items)) for i, evt := range items { - deliveryTime := evt.Time.Add(100 * time.Millisecond) + attemptTime := evt.Time.Add(100 * time.Millisecond) entries[i] = &models.LogEntry{ Event: evt, - Delivery: &models.Delivery{ + Attempt: &models.Attempt{ ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), TenantID: evt.TenantID, EventID: evt.ID, DestinationID: evt.DestinationID, Status: "success", - Time: deliveryTime, + Time: attemptTime, Code: "200", }, } @@ -321,16 +321,16 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { InsertMany: func(ctx context.Context, items []*models.Event) error { entries := make([]*models.LogEntry, len(items)) for i, evt := range items { - deliveryTime := evt.Time.Add(100 * time.Millisecond) + attemptTime := evt.Time.Add(100 * time.Millisecond) entries[i] = &models.LogEntry{ Event: evt, - Delivery: &models.Delivery{ + Attempt: &models.Attempt{ ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), TenantID: evt.TenantID, 
EventID: evt.ID, DestinationID: evt.DestinationID, Status: "success", - Time: deliveryTime, + Time: attemptTime, Code: "200", }, } @@ -379,8 +379,8 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { // time-based filters (GTE, LTE, GT, LT), which is critical for // "paginate within a time window" use cases. // - // IMPORTANT: ListDelivery filters by DELIVERY time, ListEvent filters by EVENT time. - // In this test, delivery_time = event_time + 100ms. + // IMPORTANT: ListAttempt filters by ATTEMPT time, ListEvent filters by EVENT time. + // In this test, attempt_time = event_time + 100ms. t.Run("TimeFilterWithCursor", func(t *testing.T) { tenantID := idgen.String() destinationID := idgen.Destination() @@ -392,17 +392,17 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { // - Events 15-19: far future (should be excluded by LTE filter) // // Event times are spaced 2 minutes apart within the window. - // Delivery times are 1 second after event times (not sub-second) + // Attempt times are 1 second after event times (not sub-second) // to ensure GT/LT tests work consistently across databases. 
eventWindowStart := baseTime.Add(-10 * time.Minute) eventWindowEnd := baseTime.Add(10 * time.Minute) - // Delivery window accounts for the 1 second offset - deliveryWindowStart := eventWindowStart.Add(time.Second) - deliveryWindowEnd := eventWindowEnd.Add(time.Second) + // Attempt window accounts for the 1 second offset + attemptWindowStart := eventWindowStart.Add(time.Second) + attemptWindowEnd := eventWindowEnd.Add(time.Second) - var allRecords []*driver.DeliveryRecord + var allRecords []*driver.AttemptRecord var allEvents []*models.Event - var allDeliveries []*models.Delivery + var allAttempts []*models.Attempt for i := range 20 { var eventTime time.Time switch { @@ -418,7 +418,7 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { eventTime = eventWindowEnd.Add(time.Duration(i-14) * time.Hour) } - deliveryTime := eventTime.Add(time.Second) + attemptTime := eventTime.Add(time.Second) event := &models.Event{ ID: fmt.Sprintf("%s_evt_%03d", idPrefix, i), @@ -430,45 +430,45 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { Metadata: map[string]string{}, Data: map[string]any{}, } - delivery := &models.Delivery{ + attempt := &models.Attempt{ ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), TenantID: tenantID, EventID: event.ID, DestinationID: destinationID, Status: "success", - Time: deliveryTime, + Time: attemptTime, Code: "200", } - allRecords = append(allRecords, &driver.DeliveryRecord{ - Event: event, - Delivery: delivery, + allRecords = append(allRecords, &driver.AttemptRecord{ + Event: event, + Attempt: attempt, }) allEvents = append(allEvents, event) - allDeliveries = append(allDeliveries, delivery) + allAttempts = append(allAttempts, attempt) } entries := make([]*models.LogEntry, len(allEvents)) for i := range allEvents { - entries[i] = &models.LogEntry{Event: allEvents[i], Delivery: allDeliveries[i]} + entries[i] = &models.LogEntry{Event: allEvents[i], Attempt: allAttempts[i]} } require.NoError(t, logStore.InsertMany(ctx, entries)) 
require.NoError(t, h.FlushWrites(ctx)) t.Run("paginate within time-bounded window", func(t *testing.T) { - // Paginate through deliveries within the window with limit=3 - // ListDelivery filters by DELIVERY time, not event time. - // Should only see deliveries 5-14 (10 total), not 0-4 or 15-19 + // Paginate through attempts within the window with limit=3 + // ListAttempt filters by ATTEMPT time, not event time. + // Should only see attempts 5-14 (10 total), not 0-4 or 15-19 var collectedIDs []string var nextCursor string pageCount := 0 for { - res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + res, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 3, SortOrder: "asc", Next: nextCursor, - TimeFilter: driver.TimeFilter{GTE: &deliveryWindowStart, LTE: &deliveryWindowEnd}, + TimeFilter: driver.TimeFilter{GTE: &attemptWindowStart, LTE: &attemptWindowEnd}, }) require.NoError(t, err) @@ -488,18 +488,18 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { } } - // Should have collected exactly deliveries 5-14 - require.Len(t, collectedIDs, 10, "should have 10 deliveries in window") + // Should have collected exactly attempts 5-14 + require.Len(t, collectedIDs, 10, "should have 10 attempts in window") for i, id := range collectedIDs { expectedID := fmt.Sprintf("%s_evt_%03d", idPrefix, i+5) - require.Equal(t, expectedID, id, "delivery %d mismatch", i) + require.Equal(t, expectedID, id, "attempt %d mismatch", i) } require.Equal(t, 4, pageCount, "should take 4 pages (3+3+3+1)") }) - t.Run("cursor excludes deliveries outside time filter", func(t *testing.T) { - // First page with no time filter gets all deliveries - resAll, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + t.Run("cursor excludes attempts outside time filter", func(t *testing.T) { + // First page with no time filter gets all attempts + resAll, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 5, 
SortOrder: "asc", @@ -509,35 +509,35 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { require.Len(t, resAll.Data, 5) // Use the cursor but add a time filter that excludes some results - // The cursor points to position after delivery 4 (far past deliveries) - // But with deliveryWindowStart filter, we should start from delivery 5 - res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + // The cursor points to position after attempt 4 (far past attempts) + // But with attemptWindowStart filter, we should start from attempt 5 + res, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 5, SortOrder: "asc", Next: resAll.Next, - TimeFilter: driver.TimeFilter{GTE: &deliveryWindowStart, LTE: &deliveryWindowEnd}, + TimeFilter: driver.TimeFilter{GTE: &attemptWindowStart, LTE: &attemptWindowEnd}, }) require.NoError(t, err) - // Results should respect the time filter (on delivery time) + // Results should respect the time filter (on attempt time) for _, dr := range res.Data { - require.True(t, !dr.Delivery.Time.Before(deliveryWindowStart), "delivery time should be >= deliveryWindowStart") - require.True(t, !dr.Delivery.Time.After(deliveryWindowEnd), "delivery time should be <= deliveryWindowEnd") + require.True(t, !dr.Attempt.Time.Before(attemptWindowStart), "attempt time should be >= attemptWindowStart") + require.True(t, !dr.Attempt.Time.After(attemptWindowEnd), "attempt time should be <= attemptWindowEnd") } }) - t.Run("delivery time filter with GT/LT operators", func(t *testing.T) { - // Test exclusive bounds (GT/LT instead of GTE/LTE) on delivery time - // Use delivery times slightly after delivery 5 and slightly before delivery 14 - gtTime := allRecords[5].Delivery.Time.Add(time.Second) // After delivery 5, before delivery 6 - ltTime := allRecords[14].Delivery.Time.Add(-time.Second) // Before delivery 14, after delivery 13 + t.Run("attempt time filter with GT/LT operators", func(t *testing.T) { + // Test 
exclusive bounds (GT/LT instead of GTE/LTE) on attempt time + // Use attempt times slightly after attempt 5 and slightly before attempt 14 + gtTime := allRecords[5].Attempt.Time.Add(time.Second) // After attempt 5, before attempt 6 + ltTime := allRecords[14].Attempt.Time.Add(-time.Second) // Before attempt 14, after attempt 13 var collectedIDs []string var nextCursor string for { - res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + res, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 3, SortOrder: "asc", @@ -572,10 +572,10 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { // comparison across databases with different timestamp precision // (PostgreSQL microseconds, ClickHouse DateTime64, etc.). // - // Important: ListDelivery filters by DELIVERY time, not event time. + // Important: ListAttempt filters by ATTEMPT time, not event time. - // First, retrieve all deliveries to find delivery 10's time - res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + // First, retrieve all attempts to find attempt 10's time + res, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 100, SortOrder: "asc", @@ -584,90 +584,90 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { }, }) require.NoError(t, err) - require.GreaterOrEqual(t, len(res.Data), 11, "need at least 11 deliveries") + require.GreaterOrEqual(t, len(res.Data), 11, "need at least 11 attempts") - // Find delivery 10's stored delivery time, truncated to seconds - var storedDelivery10Time time.Time + // Find attempt 10's stored attempt time, truncated to seconds + var storedAttempt10Time time.Time for _, dr := range res.Data { if dr.Event.ID == allRecords[10].Event.ID { - storedDelivery10Time = dr.Delivery.Time.Truncate(time.Second) + storedAttempt10Time = dr.Attempt.Time.Truncate(time.Second) break } } - require.False(t, storedDelivery10Time.IsZero(), "should find delivery 10") + 
require.False(t, storedAttempt10Time.IsZero(), "should find attempt 10") - // GT with exact time should exclude delivery 10 - resGT, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + // GT with exact time should exclude attempt 10 + resGT, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 100, SortOrder: "asc", - TimeFilter: driver.TimeFilter{GT: &storedDelivery10Time}, + TimeFilter: driver.TimeFilter{GT: &storedAttempt10Time}, }) require.NoError(t, err) for _, dr := range resGT.Data { - drTimeTrunc := dr.Delivery.Time.Truncate(time.Second) - require.True(t, drTimeTrunc.After(storedDelivery10Time), - "GT filter should exclude delivery with exact timestamp, got delivery %s with time %v (filter time: %v)", - dr.Delivery.ID, drTimeTrunc, storedDelivery10Time) + drTimeTrunc := dr.Attempt.Time.Truncate(time.Second) + require.True(t, drTimeTrunc.After(storedAttempt10Time), + "GT filter should exclude attempt with exact timestamp, got attempt %s with time %v (filter time: %v)", + dr.Attempt.ID, drTimeTrunc, storedAttempt10Time) } - // LT with exact time should exclude delivery 10 - resLT, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + // LT with exact time should exclude attempt 10 + resLT, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 100, SortOrder: "asc", - TimeFilter: driver.TimeFilter{LT: &storedDelivery10Time}, + TimeFilter: driver.TimeFilter{LT: &storedAttempt10Time}, }) require.NoError(t, err) for _, dr := range resLT.Data { - drTimeTrunc := dr.Delivery.Time.Truncate(time.Second) - require.True(t, drTimeTrunc.Before(storedDelivery10Time), - "LT filter should exclude delivery with exact timestamp, got delivery %s with time %v (filter time: %v)", - dr.Delivery.ID, drTimeTrunc, storedDelivery10Time) + drTimeTrunc := dr.Attempt.Time.Truncate(time.Second) + require.True(t, drTimeTrunc.Before(storedAttempt10Time), + "LT filter should exclude attempt with exact 
timestamp, got attempt %s with time %v (filter time: %v)", + dr.Attempt.ID, drTimeTrunc, storedAttempt10Time) } - // Verify delivery 10 is included with GTE/LTE (inclusive bounds) - resGTE, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + // Verify attempt 10 is included with GTE/LTE (inclusive bounds) + resGTE, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 100, SortOrder: "asc", - TimeFilter: driver.TimeFilter{GTE: &storedDelivery10Time, LTE: &storedDelivery10Time}, + TimeFilter: driver.TimeFilter{GTE: &storedAttempt10Time, LTE: &storedAttempt10Time}, }) require.NoError(t, err) - require.GreaterOrEqual(t, len(resGTE.Data), 1, "GTE/LTE with same time should include delivery at that second") + require.GreaterOrEqual(t, len(resGTE.Data), 1, "GTE/LTE with same time should include attempt at that second") }) t.Run("prev cursor respects time filter", func(t *testing.T) { - // Get first page (ListDelivery filters by delivery time) - res1, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + // Get first page (ListAttempt filters by attempt time) + res1, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 3, SortOrder: "asc", - TimeFilter: driver.TimeFilter{GTE: &deliveryWindowStart, LTE: &deliveryWindowEnd}, + TimeFilter: driver.TimeFilter{GTE: &attemptWindowStart, LTE: &attemptWindowEnd}, }) require.NoError(t, err) require.NotEmpty(t, res1.Next) // Get second page - res2, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + res2, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 3, SortOrder: "asc", Next: res1.Next, - TimeFilter: driver.TimeFilter{GTE: &deliveryWindowStart, LTE: &deliveryWindowEnd}, + TimeFilter: driver.TimeFilter{GTE: &attemptWindowStart, LTE: &attemptWindowEnd}, }) require.NoError(t, err) require.NotEmpty(t, res2.Prev) // Go back to first page using prev cursor - resPrev, err := 
logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + resPrev, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 3, SortOrder: "asc", Prev: res2.Prev, - TimeFilter: driver.TimeFilter{GTE: &deliveryWindowStart, LTE: &deliveryWindowEnd}, + TimeFilter: driver.TimeFilter{GTE: &attemptWindowStart, LTE: &attemptWindowEnd}, }) require.NoError(t, err) From 78f3ed50bc797d6179ddf3160d82e9fef327f1a9 Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Tue, 27 Jan 2026 03:02:07 +0700 Subject: [PATCH 15/23] =?UTF-8?q?chore:=20add=20database=20migrations=20fo?= =?UTF-8?q?r=20Delivery=20=E2=86=92=20Attempt=20rename?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-Authored-By: Claude Opus 4.5 --- cmd/e2e/log_test.go | 146 +++++++++--------- cmd/e2e/suites_test.go | 8 +- internal/logmq/batchprocessor_test.go | 6 +- .../logstore/chlogstore/chlogstore_test.go | 6 +- internal/logstore/drivertest/misc.go | 4 +- .../clickhouse/000001_init.down.sql | 2 +- .../migrations/clickhouse/000001_init.up.sql | 20 +-- .../migrations/postgres/000001_init.down.sql | 2 +- .../migrations/postgres/000001_init.up.sql | 2 +- .../000002_delivery_response.down.sql | 2 +- .../postgres/000002_delivery_response.up.sql | 2 +- .../000003_event_delivery_index.down.sql | 2 +- .../000003_event_delivery_index.up.sql | 2 +- ...000005_rename_delivery_to_attempt.down.sql | 48 ++++++ .../000005_rename_delivery_to_attempt.up.sql | 47 ++++++ internal/migrator/migrator_test.go | 12 +- 16 files changed, 203 insertions(+), 108 deletions(-) create mode 100644 internal/migrator/migrations/postgres/000005_rename_delivery_to_attempt.down.sql create mode 100644 internal/migrator/migrations/postgres/000005_rename_delivery_to_attempt.up.sql diff --git a/cmd/e2e/log_test.go b/cmd/e2e/log_test.go index 5dc72343..7785343e 100644 --- a/cmd/e2e/log_test.go +++ b/cmd/e2e/log_test.go @@ -18,14 +18,14 @@ func parseTime(s string) time.Time { return t } -// 
TestLogAPI tests the Log API endpoints (deliveries, events). +// TestLogAPI tests the Log API endpoints (attempts, events). // // Setup: // 1. Create a tenant and destination // 2. Publish 10 events with small delays for distinct timestamps // // Test Groups: -// - deliveries: list, filter, expand +// - attempts: list, filter, expand // - events: list, filter, retrieve // - sort_order: sort by time ascending/descending // - pagination: paginate through results @@ -111,17 +111,17 @@ func (suite *basicSuite) TestLogAPI() { suite.Require().Equal(http.StatusAccepted, resp.StatusCode, "failed to publish event %d", i) } - // Wait for all deliveries (30s timeout for slow CI environments) - suite.waitForDeliveries(suite.T(), "/tenants/"+tenantID+"/deliveries", 10, 10*time.Second) + // Wait for all attempts (30s timeout for slow CI environments) + suite.waitForAttempts(suite.T(), "/tenants/"+tenantID+"/attempts", 10, 10*time.Second) // ========================================================================= - // Deliveries Tests + // Attempts Tests // ========================================================================= - suite.Run("deliveries", func() { + suite.Run("attempts", func() { suite.Run("list all", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries", + Path: "/tenants/" + tenantID + "/attempts", })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) @@ -142,7 +142,7 @@ func (suite *basicSuite) TestLogAPI() { suite.Run("filter by destination_id", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries?destination_id=" + destinationID, + Path: "/tenants/" + tenantID + "/attempts?destination_id=" + destinationID, })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) @@ -155,7 +155,7 @@ func (suite 
*basicSuite) TestLogAPI() { suite.Run("filter by event_id", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries?event_id=" + eventIDs[0], + Path: "/tenants/" + tenantID + "/attempts?event_id=" + eventIDs[0], })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) @@ -168,7 +168,7 @@ func (suite *basicSuite) TestLogAPI() { suite.Run("include=event returns event object without data", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries?include=event&limit=1", + Path: "/tenants/" + tenantID + "/attempts?include=event&limit=1", })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) @@ -177,8 +177,8 @@ func (suite *basicSuite) TestLogAPI() { models := body["models"].([]interface{}) suite.Require().Len(models, 1) - delivery := models[0].(map[string]interface{}) - event := delivery["event"].(map[string]interface{}) + attempt := models[0].(map[string]interface{}) + event := attempt["event"].(map[string]interface{}) suite.NotEmpty(event["id"]) suite.NotEmpty(event["topic"]) suite.NotEmpty(event["time"]) @@ -188,7 +188,7 @@ func (suite *basicSuite) TestLogAPI() { suite.Run("include=event.data returns event object with data", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries?include=event.data&limit=1", + Path: "/tenants/" + tenantID + "/attempts?include=event.data&limit=1", })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) @@ -197,8 +197,8 @@ func (suite *basicSuite) TestLogAPI() { models := body["models"].([]interface{}) suite.Require().Len(models, 1) - delivery := models[0].(map[string]interface{}) - event := delivery["event"].(map[string]interface{}) + attempt := 
models[0].(map[string]interface{}) + event := attempt["event"].(map[string]interface{}) suite.NotEmpty(event["id"]) suite.NotNil(event["data"]) // include=event.data SHOULD include data }) @@ -206,7 +206,7 @@ func (suite *basicSuite) TestLogAPI() { suite.Run("include=response_data returns response data", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries?include=response_data&limit=1", + Path: "/tenants/" + tenantID + "/attempts?include=response_data&limit=1", })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) @@ -215,8 +215,8 @@ func (suite *basicSuite) TestLogAPI() { models := body["models"].([]interface{}) suite.Require().Len(models, 1) - delivery := models[0].(map[string]interface{}) - suite.NotNil(delivery["response_data"]) + attempt := models[0].(map[string]interface{}) + suite.NotNil(attempt["response_data"]) }) }) @@ -508,14 +508,14 @@ func (suite *basicSuite) TestLogAPI() { // 2. Configure mock webhook server to FAIL (return 500) // 3. Create a destination pointing to the mock server // 4. Publish an event with eligible_for_retry=false (fails once, no auto-retry) -// 5. Wait for delivery to fail, then fetch the delivery ID +// 5. Wait for attempt to fail, then fetch the attempt ID // 6. 
Update mock server to SUCCEED (return 200) // // Test Cases: -// - POST /:tenantID/deliveries/:deliveryID/retry - Successful retry returns 202 Accepted -// - POST /:tenantID/deliveries/:deliveryID/retry (non-existent) - Returns 404 -// - Verify retry created new delivery - Event now has 2+ deliveries -// - POST /:tenantID/deliveries/:deliveryID/retry (disabled destination) - Returns 400 +// - POST /:tenantID/attempts/:attemptID/retry - Successful retry returns 202 Accepted +// - POST /:tenantID/attempts/:attemptID/retry (non-existent) - Returns 404 +// - Verify retry created new attempt - Event now has 2+ attempts +// - POST /:tenantID/attempts/:attemptID/retry (disabled destination) - Returns 400 func (suite *basicSuite) TestRetryAPI() { tenantID := idgen.String() destinationID := idgen.Destination() @@ -548,7 +548,7 @@ func (suite *basicSuite) TestRetryAPI() { "url": fmt.Sprintf("%s/webhook/%s", suite.mockServerBaseURL, destinationID), }, "response": map[string]interface{}{ - "status": 500, // Fail deliveries + "status": 500, // Fail attempts }, }, }, @@ -602,22 +602,22 @@ func (suite *basicSuite) TestRetryAPI() { } suite.RunAPITests(suite.T(), setupTests) - // Wait for delivery to complete (and fail) - suite.waitForDeliveries(suite.T(), "/tenants/"+tenantID+"/deliveries?event_id="+eventID, 1, 5*time.Second) + // Wait for attempt to complete (and fail) + suite.waitForAttempts(suite.T(), "/tenants/"+tenantID+"/attempts?event_id="+eventID, 1, 5*time.Second) - // Get the delivery ID - deliveriesResp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ + // Get the attempt ID + attemptsResp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries?event_id=" + eventID, + Path: "/tenants/" + tenantID + "/attempts?event_id=" + eventID, })) suite.Require().NoError(err) - suite.Require().Equal(http.StatusOK, deliveriesResp.StatusCode) + suite.Require().Equal(http.StatusOK, 
attemptsResp.StatusCode) - body := deliveriesResp.Body.(map[string]interface{}) + body := attemptsResp.Body.(map[string]interface{}) models := body["models"].([]interface{}) - suite.Require().NotEmpty(models, "should have at least one delivery") - firstDelivery := models[0].(map[string]interface{}) - deliveryID := firstDelivery["id"].(string) + suite.Require().NotEmpty(models, "should have at least one attempt") + firstAttempt := models[0].(map[string]interface{}) + attemptID := firstAttempt["id"].(string) // Update mock to succeed for retry updateMockTests := []APITest{ @@ -649,12 +649,12 @@ func (suite *basicSuite) TestRetryAPI() { // Test retry endpoint retryTests := []APITest{ - // POST /:tenantID/deliveries/:deliveryID/retry - successful retry + // POST /:tenantID/attempts/:attemptID/retry - successful retry { - Name: "POST /:tenantID/deliveries/:deliveryID/retry - retry delivery", + Name: "POST /:tenantID/attempts/:attemptID/retry - retry attempt", Request: suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodPOST, - Path: "/tenants/" + tenantID + "/deliveries/" + deliveryID + "/retry", + Path: "/tenants/" + tenantID + "/attempts/" + attemptID + "/retry", }), Expected: APITestExpectation{ Match: &httpclient.Response{ @@ -665,12 +665,12 @@ func (suite *basicSuite) TestRetryAPI() { }, }, }, - // POST /:tenantID/deliveries/:deliveryID/retry - non-existent delivery + // POST /:tenantID/attempts/:attemptID/retry - non-existent attempt { - Name: "POST /:tenantID/deliveries/:deliveryID/retry - not found", + Name: "POST /:tenantID/attempts/:attemptID/retry - not found", Request: suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodPOST, - Path: "/tenants/" + tenantID + "/deliveries/" + idgen.Delivery() + "/retry", + Path: "/tenants/" + tenantID + "/attempts/" + idgen.Attempt() + "/retry", }), Expected: APITestExpectation{ Match: &httpclient.Response{ @@ -681,16 +681,16 @@ func (suite *basicSuite) TestRetryAPI() { } suite.RunAPITests(suite.T(), 
retryTests) - // Wait for retry delivery to complete - suite.waitForDeliveries(suite.T(), "/tenants/"+tenantID+"/deliveries?event_id="+eventID, 2, 5*time.Second) + // Wait for retry attempt to complete + suite.waitForAttempts(suite.T(), "/tenants/"+tenantID+"/attempts?event_id="+eventID, 2, 5*time.Second) - // Verify we have more deliveries after retry + // Verify we have more attempts after retry verifyTests := []APITest{ { - Name: "GET /:tenantID/deliveries?event_id=X - verify retry created new delivery", + Name: "GET /:tenantID/attempts?event_id=X - verify retry created new attempt", Request: suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries?event_id=" + eventID, + Path: "/tenants/" + tenantID + "/attempts?event_id=" + eventID, }), Expected: APITestExpectation{ Validate: map[string]interface{}{ @@ -728,10 +728,10 @@ func (suite *basicSuite) TestRetryAPI() { }, }, { - Name: "POST /:tenantID/deliveries/:deliveryID/retry - disabled destination", + Name: "POST /:tenantID/attempts/:attemptID/retry - disabled destination", Request: suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodPOST, - Path: "/tenants/" + tenantID + "/deliveries/" + deliveryID + "/retry", + Path: "/tenants/" + tenantID + "/attempts/" + attemptID + "/retry", }), Expected: APITestExpectation{ Match: &httpclient.Response{ @@ -776,24 +776,24 @@ func (suite *basicSuite) TestRetryAPI() { suite.RunAPITests(suite.T(), cleanupTests) } -// TestAdminLogEndpoints tests the admin-only /events and /deliveries endpoints. +// TestAdminLogEndpoints tests the admin-only /events and /attempts endpoints. // // These endpoints allow cross-tenant queries with optional tenant_id filter. // // Setup: // 1. Create two tenants with destinations // 2. Publish events to each tenant -// 3. Wait for deliveries to complete +// 3. 
Wait for attempts to complete // // Test Cases: // - GET /events without auth returns 401 -// - GET /deliveries without auth returns 401 +// - GET /attempts without auth returns 401 // - GET /events with JWT returns 401 (admin-only) -// - GET /deliveries with JWT returns 401 (admin-only) +// - GET /attempts with JWT returns 401 (admin-only) // - GET /events with admin key returns all events (cross-tenant) -// - GET /deliveries with admin key returns all deliveries (cross-tenant) +// - GET /attempts with admin key returns all attempts (cross-tenant) // - GET /events?tenant_id=X filters to single tenant -// - GET /deliveries?tenant_id=X filters to single tenant +// - GET /attempts?tenant_id=X filters to single tenant func (suite *basicSuite) TestAdminLogEndpoints() { tenant1ID := idgen.String() tenant2ID := idgen.String() @@ -931,9 +931,9 @@ func (suite *basicSuite) TestAdminLogEndpoints() { } suite.RunAPITests(suite.T(), setupTests) - // Wait for deliveries for both tenants - suite.waitForDeliveries(suite.T(), "/tenants/"+tenant1ID+"/deliveries", 1, 5*time.Second) - suite.waitForDeliveries(suite.T(), "/tenants/"+tenant2ID+"/deliveries", 1, 5*time.Second) + // Wait for attempts for both tenants + suite.waitForAttempts(suite.T(), "/tenants/"+tenant1ID+"/attempts", 1, 5*time.Second) + suite.waitForAttempts(suite.T(), "/tenants/"+tenant2ID+"/attempts", 1, 5*time.Second) // Get JWT token for tenant1 to test that JWT auth is rejected on admin endpoints tokenResp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ @@ -959,10 +959,10 @@ func (suite *basicSuite) TestAdminLogEndpoints() { suite.Equal(http.StatusUnauthorized, resp.StatusCode) }) - suite.Run("GET /deliveries without auth returns 401", func() { + suite.Run("GET /attempts without auth returns 401", func() { resp, err := suite.client.Do(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/deliveries", + Path: "/attempts", }) suite.Require().NoError(err) suite.Equal(http.StatusUnauthorized, 
resp.StatusCode) @@ -977,10 +977,10 @@ func (suite *basicSuite) TestAdminLogEndpoints() { suite.Equal(http.StatusUnauthorized, resp.StatusCode) }) - suite.Run("GET /deliveries with JWT returns 401 (admin-only)", func() { + suite.Run("GET /attempts with JWT returns 401 (admin-only)", func() { resp, err := suite.client.Do(suite.AuthJWTRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/deliveries", + Path: "/attempts", }, jwtToken)) suite.Require().NoError(err) suite.Equal(http.StatusUnauthorized, resp.StatusCode) @@ -1016,31 +1016,31 @@ func (suite *basicSuite) TestAdminLogEndpoints() { suite.True(eventsSeen[event2ID], "should include tenant2 event") }) - suite.Run("GET /deliveries returns deliveries from all tenants", func() { + suite.Run("GET /attempts returns attempts from all tenants", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/deliveries?include=event", + Path: "/attempts?include=event", })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) body := resp.Body.(map[string]interface{}) models := body["models"].([]interface{}) - // Should have at least 2 deliveries (one from each tenant we created) + // Should have at least 2 attempts (one from each tenant we created) suite.GreaterOrEqual(len(models), 2) - // Verify we have deliveries from both tenants by checking event IDs + // Verify we have attempts from both tenants by checking event IDs eventsSeen := map[string]bool{} for _, item := range models { - delivery := item.(map[string]interface{}) - if event, ok := delivery["event"].(map[string]interface{}); ok { + attempt := item.(map[string]interface{}) + if event, ok := attempt["event"].(map[string]interface{}); ok { if id, ok := event["id"].(string); ok { eventsSeen[id] = true } } } - suite.True(eventsSeen[event1ID], "should include tenant1 delivery") - suite.True(eventsSeen[event2ID], "should include tenant2 delivery") + 
suite.True(eventsSeen[event1ID], "should include tenant1 attempt") + suite.True(eventsSeen[event2ID], "should include tenant2 attempt") }) }) @@ -1065,10 +1065,10 @@ func (suite *basicSuite) TestAdminLogEndpoints() { suite.Equal(event1ID, event["id"]) }) - suite.Run("GET /deliveries?tenant_id=X filters to single tenant", func() { + suite.Run("GET /attempts?tenant_id=X filters to single tenant", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/deliveries?tenant_id=" + tenant2ID + "&include=event", + Path: "/attempts?tenant_id=" + tenant2ID + "&include=event", })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) @@ -1077,9 +1077,9 @@ func (suite *basicSuite) TestAdminLogEndpoints() { models := body["models"].([]interface{}) suite.Len(models, 1) - // Verify only tenant2 delivery by event ID - delivery := models[0].(map[string]interface{}) - event := delivery["event"].(map[string]interface{}) + // Verify only tenant2 attempt by event ID + attempt := models[0].(map[string]interface{}) + event := attempt["event"].(map[string]interface{}) suite.Equal(event2ID, event["id"]) }) }) diff --git a/cmd/e2e/suites_test.go b/cmd/e2e/suites_test.go index 48321bf4..f2531e19 100644 --- a/cmd/e2e/suites_test.go +++ b/cmd/e2e/suites_test.go @@ -41,8 +41,8 @@ func waitForHealthy(t *testing.T, port int, timeout time.Duration) { t.Fatalf("timed out waiting for health check at %s", healthURL) } -// waitForDeliveries polls until at least minCount deliveries exist for the given path. -func (s *e2eSuite) waitForDeliveries(t *testing.T, path string, minCount int, timeout time.Duration) { +// waitForAttempts polls until at least minCount attempts exist for the given path. 
+func (s *e2eSuite) waitForAttempts(t *testing.T, path string, minCount int, timeout time.Duration) { t.Helper() deadline := time.Now().Add(timeout) var lastCount int @@ -72,9 +72,9 @@ func (s *e2eSuite) waitForDeliveries(t *testing.T, path string, minCount int, ti time.Sleep(100 * time.Millisecond) } if lastErr != nil { - t.Fatalf("timed out waiting for %d deliveries at %s: last error: %v", minCount, path, lastErr) + t.Fatalf("timed out waiting for %d attempts at %s: last error: %v", minCount, path, lastErr) } - t.Fatalf("timed out waiting for %d deliveries at %s: got %d (status %d)", minCount, path, lastCount, lastStatus) + t.Fatalf("timed out waiting for %d attempts at %s: got %d (status %d)", minCount, path, lastCount, lastStatus) } // waitForDestinationDisabled polls until the destination has disabled_at set (non-null). diff --git a/internal/logmq/batchprocessor_test.go b/internal/logmq/batchprocessor_test.go index 01c50510..c7a23ee6 100644 --- a/internal/logmq/batchprocessor_test.go +++ b/internal/logmq/batchprocessor_test.go @@ -86,7 +86,7 @@ func TestBatchProcessor_ValidEntry(t *testing.T) { event := testutil.EventFactory.Any() attempt := testutil.AttemptFactory.Any() entry := models.LogEntry{ - Event: &event, + Event: &event, Attempt: &attempt, } @@ -119,7 +119,7 @@ func TestBatchProcessor_InvalidEntry_MissingEvent(t *testing.T) { attempt := testutil.AttemptFactory.Any() entry := models.LogEntry{ - Event: nil, // Missing event + Event: nil, // Missing event Attempt: &attempt, } @@ -152,7 +152,7 @@ func TestBatchProcessor_InvalidEntry_MissingDelivery(t *testing.T) { event := testutil.EventFactory.Any() entry := models.LogEntry{ - Event: &event, + Event: &event, Attempt: nil, // Missing delivery } diff --git a/internal/logstore/chlogstore/chlogstore_test.go b/internal/logstore/chlogstore/chlogstore_test.go index 4eaad64d..827c35c1 100644 --- a/internal/logstore/chlogstore/chlogstore_test.go +++ b/internal/logstore/chlogstore/chlogstore_test.go @@ -77,15 
+77,15 @@ func (h *harness) Close() { func (h *harness) FlushWrites(ctx context.Context) error { // Force ClickHouse to merge parts and deduplicate rows on both tables eventsTable := "events" - deliveriesTable := "deliveries" + attemptsTable := "attempts" if h.deploymentID != "" { eventsTable = h.deploymentID + "_events" - deliveriesTable = h.deploymentID + "_deliveries" + attemptsTable = h.deploymentID + "_attempts" } if err := h.chDB.Exec(ctx, "OPTIMIZE TABLE "+eventsTable+" FINAL"); err != nil { return err } - return h.chDB.Exec(ctx, "OPTIMIZE TABLE "+deliveriesTable+" FINAL") + return h.chDB.Exec(ctx, "OPTIMIZE TABLE "+attemptsTable+" FINAL") } func (h *harness) MakeDriver(ctx context.Context) (driver.LogStore, error) { diff --git a/internal/logstore/drivertest/misc.go b/internal/logstore/drivertest/misc.go index ce65a2f8..b9dc9cbe 100644 --- a/internal/logstore/drivertest/misc.go +++ b/internal/logstore/drivertest/misc.go @@ -171,7 +171,7 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, t.Run("RetrieveAttempt finds attempt across tenants when TenantID empty", func(t *testing.T) { retrieved1, err := logStore.RetrieveAttempt(ctx, driver.RetrieveAttemptRequest{ - TenantID: "", + TenantID: "", AttemptID: "tenant1-delivery", }) require.NoError(t, err) @@ -179,7 +179,7 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, assert.Equal(t, tenant1ID, retrieved1.Event.TenantID) retrieved2, err := logStore.RetrieveAttempt(ctx, driver.RetrieveAttemptRequest{ - TenantID: "", + TenantID: "", AttemptID: "tenant2-delivery", }) require.NoError(t, err) diff --git a/internal/migrator/migrations/clickhouse/000001_init.down.sql b/internal/migrator/migrations/clickhouse/000001_init.down.sql index d0465d4e..70b20422 100644 --- a/internal/migrator/migrations/clickhouse/000001_init.down.sql +++ b/internal/migrator/migrations/clickhouse/000001_init.down.sql @@ -1,2 +1,2 @@ -DROP TABLE IF EXISTS 
{deployment_prefix}deliveries; +DROP TABLE IF EXISTS {deployment_prefix}attempts; DROP TABLE IF EXISTS {deployment_prefix}events; diff --git a/internal/migrator/migrations/clickhouse/000001_init.up.sql b/internal/migrator/migrations/clickhouse/000001_init.up.sql index 1e44f64f..ac077085 100644 --- a/internal/migrator/migrations/clickhouse/000001_init.up.sql +++ b/internal/migrator/migrations/clickhouse/000001_init.up.sql @@ -21,11 +21,11 @@ CREATE TABLE IF NOT EXISTS {deployment_prefix}events ( PARTITION BY toYYYYMM(event_time) ORDER BY (event_time, event_id); --- Deliveries table for delivery queries --- Each row represents a delivery attempt for an event +-- Attempts table for attempt queries +-- Each row represents an attempt for an event -- Stateless queries: no GROUP BY, no aggregation, direct row access -CREATE TABLE IF NOT EXISTS {deployment_prefix}deliveries ( +CREATE TABLE IF NOT EXISTS {deployment_prefix}attempts ( -- Event fields event_id String, tenant_id String, @@ -36,22 +36,22 @@ CREATE TABLE IF NOT EXISTS {deployment_prefix}deliveries ( metadata String, -- JSON serialized data String, -- JSON serialized - -- Delivery fields - delivery_id String, + -- Attempt fields + attempt_id String, status String, -- 'success', 'failed' - delivery_time DateTime64(3), + attempt_time DateTime64(3), code String, response_data String, -- JSON serialized manual Bool DEFAULT false, - attempt UInt32 DEFAULT 0, + attempt_number UInt32 DEFAULT 0, -- Indexes for filtering (bloom filters help skip granules) INDEX idx_tenant_id tenant_id TYPE bloom_filter GRANULARITY 1, INDEX idx_destination_id destination_id TYPE bloom_filter GRANULARITY 1, INDEX idx_event_id event_id TYPE bloom_filter GRANULARITY 1, - INDEX idx_delivery_id delivery_id TYPE bloom_filter GRANULARITY 1, + INDEX idx_attempt_id attempt_id TYPE bloom_filter GRANULARITY 1, INDEX idx_topic topic TYPE bloom_filter GRANULARITY 1, INDEX idx_status status TYPE set(100) GRANULARITY 1 ) ENGINE = ReplacingMergeTree 
-PARTITION BY toYYYYMM(delivery_time) -ORDER BY (delivery_time, delivery_id); +PARTITION BY toYYYYMM(attempt_time) +ORDER BY (attempt_time, attempt_id); diff --git a/internal/migrator/migrations/postgres/000001_init.down.sql b/internal/migrator/migrations/postgres/000001_init.down.sql index f55d529e..e0dca179 100644 --- a/internal/migrator/migrations/postgres/000001_init.down.sql +++ b/internal/migrator/migrations/postgres/000001_init.down.sql @@ -3,4 +3,4 @@ BEGIN; DROP TABLE IF EXISTS events CASCADE; DROP TABLE IF EXISTS deliveries CASCADE; -COMMIT; \ No newline at end of file +COMMIT; diff --git a/internal/migrator/migrations/postgres/000001_init.up.sql b/internal/migrator/migrations/postgres/000001_init.up.sql index f9e5234c..884dc93b 100644 --- a/internal/migrator/migrations/postgres/000001_init.up.sql +++ b/internal/migrator/migrations/postgres/000001_init.up.sql @@ -43,4 +43,4 @@ CREATE TABLE deliveries_default PARTITION OF deliveries DEFAULT; CREATE INDEX ON deliveries (event_id); CREATE INDEX ON deliveries (event_id, status); -COMMIT; \ No newline at end of file +COMMIT; diff --git a/internal/migrator/migrations/postgres/000002_delivery_response.down.sql b/internal/migrator/migrations/postgres/000002_delivery_response.down.sql index 0ecc7d93..4699a411 100644 --- a/internal/migrator/migrations/postgres/000002_delivery_response.down.sql +++ b/internal/migrator/migrations/postgres/000002_delivery_response.down.sql @@ -3,4 +3,4 @@ BEGIN; ALTER TABLE deliveries DROP COLUMN code, DROP COLUMN response_data; -COMMIT; \ No newline at end of file +COMMIT; diff --git a/internal/migrator/migrations/postgres/000002_delivery_response.up.sql b/internal/migrator/migrations/postgres/000002_delivery_response.up.sql index 24e0d728..7a224676 100644 --- a/internal/migrator/migrations/postgres/000002_delivery_response.up.sql +++ b/internal/migrator/migrations/postgres/000002_delivery_response.up.sql @@ -4,4 +4,4 @@ ALTER TABLE deliveries ADD COLUMN code TEXT, ADD COLUMN 
response_data JSONB; -COMMIT; \ No newline at end of file +COMMIT; diff --git a/internal/migrator/migrations/postgres/000003_event_delivery_index.down.sql b/internal/migrator/migrations/postgres/000003_event_delivery_index.down.sql index 93623032..2218227c 100644 --- a/internal/migrator/migrations/postgres/000003_event_delivery_index.down.sql +++ b/internal/migrator/migrations/postgres/000003_event_delivery_index.down.sql @@ -3,4 +3,4 @@ BEGIN; DROP TABLE IF EXISTS event_delivery_index_default; DROP TABLE IF EXISTS event_delivery_index CASCADE; -COMMIT; \ No newline at end of file +COMMIT; diff --git a/internal/migrator/migrations/postgres/000003_event_delivery_index.up.sql b/internal/migrator/migrations/postgres/000003_event_delivery_index.up.sql index 23fc772f..bba2ad01 100644 --- a/internal/migrator/migrations/postgres/000003_event_delivery_index.up.sql +++ b/internal/migrator/migrations/postgres/000003_event_delivery_index.up.sql @@ -50,4 +50,4 @@ CREATE INDEX IF NOT EXISTS idx_event_delivery_index_main ON event_delivery_index time_delivery_id ); -COMMIT; \ No newline at end of file +COMMIT; diff --git a/internal/migrator/migrations/postgres/000005_rename_delivery_to_attempt.down.sql b/internal/migrator/migrations/postgres/000005_rename_delivery_to_attempt.down.sql new file mode 100644 index 00000000..21f8575d --- /dev/null +++ b/internal/migrator/migrations/postgres/000005_rename_delivery_to_attempt.down.sql @@ -0,0 +1,48 @@ +BEGIN; + +-- Drop new index and restore old one +DROP INDEX IF EXISTS idx_event_attempt_index_main; + +-- Restore generated column with old name +ALTER TABLE event_attempt_index DROP COLUMN time_attempt_id; +ALTER TABLE event_attempt_index ADD COLUMN time_delivery_id text GENERATED ALWAYS AS ( + LPAD( + CAST( + EXTRACT( + EPOCH + FROM attempt_time AT TIME ZONE 'UTC' + ) AS BIGINT + )::text, + 10, + '0' + ) || '_' || attempt_id +) STORED; + +-- Rename columns back in event_attempt_index +ALTER TABLE event_attempt_index RENAME COLUMN 
attempt_number TO attempt; +ALTER TABLE event_attempt_index RENAME COLUMN attempt_time TO delivery_time; +ALTER TABLE event_attempt_index RENAME COLUMN attempt_id TO delivery_id; + +-- Rename tables back +ALTER TABLE event_attempt_index RENAME TO event_delivery_index; +ALTER TABLE event_attempt_index_default RENAME TO event_delivery_index_default; + +-- Rename column back in attempts: attempt_number -> attempt +ALTER TABLE attempts RENAME COLUMN attempt_number TO attempt; + +ALTER TABLE attempts RENAME TO deliveries; +ALTER TABLE attempts_default RENAME TO deliveries_default; + +-- Recreate old index +CREATE INDEX IF NOT EXISTS idx_event_delivery_index_main ON event_delivery_index( + tenant_id, + destination_id, + topic, + status, + event_time DESC, + delivery_time DESC, + time_event_id, + time_delivery_id +); + +COMMIT; diff --git a/internal/migrator/migrations/postgres/000005_rename_delivery_to_attempt.up.sql b/internal/migrator/migrations/postgres/000005_rename_delivery_to_attempt.up.sql new file mode 100644 index 00000000..ea143623 --- /dev/null +++ b/internal/migrator/migrations/postgres/000005_rename_delivery_to_attempt.up.sql @@ -0,0 +1,47 @@ +BEGIN; + +-- Rename deliveries table to attempts +ALTER TABLE deliveries RENAME TO attempts; +ALTER TABLE deliveries_default RENAME TO attempts_default; + +-- Rename column in attempts: attempt -> attempt_number +ALTER TABLE attempts RENAME COLUMN attempt TO attempt_number; + +-- Rename event_delivery_index table to event_attempt_index +ALTER TABLE event_delivery_index RENAME TO event_attempt_index; +ALTER TABLE event_delivery_index_default RENAME TO event_attempt_index_default; + +-- Rename columns in event_attempt_index +ALTER TABLE event_attempt_index RENAME COLUMN delivery_id TO attempt_id; +ALTER TABLE event_attempt_index RENAME COLUMN delivery_time TO attempt_time; +ALTER TABLE event_attempt_index RENAME COLUMN attempt TO attempt_number; + +-- Drop and recreate generated column with new name +ALTER TABLE 
event_attempt_index DROP COLUMN time_delivery_id; +ALTER TABLE event_attempt_index ADD COLUMN time_attempt_id text GENERATED ALWAYS AS ( + LPAD( + CAST( + EXTRACT( + EPOCH + FROM attempt_time AT TIME ZONE 'UTC' + ) AS BIGINT + )::text, + 10, + '0' + ) || '_' || attempt_id +) STORED; + +-- Drop old index and create new one with updated column names +DROP INDEX IF EXISTS idx_event_delivery_index_main; +CREATE INDEX IF NOT EXISTS idx_event_attempt_index_main ON event_attempt_index( + tenant_id, + destination_id, + topic, + status, + event_time DESC, + attempt_time DESC, + time_event_id, + time_attempt_id +); + +COMMIT; diff --git a/internal/migrator/migrator_test.go b/internal/migrator/migrator_test.go index 51762911..8edeb651 100644 --- a/internal/migrator/migrator_test.go +++ b/internal/migrator/migrator_test.go @@ -342,9 +342,9 @@ func TestMigrator_DeploymentID_TableNaming(t *testing.T) { assert.Equal(t, uint64(1), count, "testdeploy_events table should exist") err = chDB.QueryRow(ctx, "SELECT count() FROM system.tables WHERE database = ? AND name = ?", - chConfig.Database, "testdeploy_deliveries").Scan(&count) + chConfig.Database, "testdeploy_attempts").Scan(&count) require.NoError(t, err) - assert.Equal(t, uint64(1), count, "testdeploy_deliveries table should exist") + assert.Equal(t, uint64(1), count, "testdeploy_attempts table should exist") } // TestMigrator_DeploymentID_Isolation tests that multiple deployments are isolated. 
@@ -390,8 +390,8 @@ func TestMigrator_DeploymentID_Isolation(t *testing.T) { defer chDB.Close() tables := []string{ - "deploy_a_events", "deploy_a_deliveries", - "deploy_b_events", "deploy_b_deliveries", + "deploy_a_events", "deploy_a_attempts", + "deploy_b_events", "deploy_b_attempts", } for _, table := range tables { var count uint64 @@ -466,9 +466,9 @@ func TestMigrator_NoDeploymentID_DefaultTables(t *testing.T) { assert.Equal(t, uint64(1), count, "events table should exist") err = chDB.QueryRow(ctx, "SELECT count() FROM system.tables WHERE database = ? AND name = ?", - chConfig.Database, "deliveries").Scan(&count) + chConfig.Database, "attempts").Scan(&count) require.NoError(t, err) - assert.Equal(t, uint64(1), count, "deliveries table should exist") + assert.Equal(t, uint64(1), count, "attempts table should exist") } func setupClickHouseConfig(t *testing.T) clickhouse.ClickHouseConfig { From 2ef43d2bdc7200b00ed8bba43e9acc7f4b0a60c4 Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Tue, 27 Jan 2026 03:07:46 +0700 Subject: [PATCH 16/23] docs: rename Delivery to Attempt in OpenAPI spec Co-Authored-By: Claude Opus 4.5 --- docs/apis/openapi.yaml | 272 ++++++++++++++++++++--------------------- 1 file changed, 136 insertions(+), 136 deletions(-) diff --git a/docs/apis/openapi.yaml b/docs/apis/openapi.yaml index 66b2165f..f657ce26 100644 --- a/docs/apis/openapi.yaml +++ b/docs/apis/openapi.yaml @@ -470,12 +470,12 @@ components: $ref: "#/components/schemas/WebhookConfig" credentials: $ref: "#/components/schemas/WebhookCredentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. 
example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -557,12 +557,12 @@ components: $ref: "#/components/schemas/AWSSQSConfig" credentials: $ref: "#/components/schemas/AWSSQSCredentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -644,12 +644,12 @@ components: $ref: "#/components/schemas/RabbitMQConfig" credentials: $ref: "#/components/schemas/RabbitMQCredentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -721,12 +721,12 @@ components: config: {} # Empty config credentials: $ref: "#/components/schemas/HookdeckCredentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -805,12 +805,12 @@ components: $ref: "#/components/schemas/AWSKinesisConfig" credentials: $ref: "#/components/schemas/AWSKinesisCredentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. 
example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -882,12 +882,12 @@ components: $ref: "#/components/schemas/AzureServiceBusConfig" credentials: $ref: "#/components/schemas/AzureServiceBusCredentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -968,12 +968,12 @@ components: $ref: "#/components/schemas/AWSS3Config" credentials: $ref: "#/components/schemas/AWSS3Credentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1055,12 +1055,12 @@ components: $ref: "#/components/schemas/GCPPubSubConfig" credentials: $ref: "#/components/schemas/GCPPubSubCredentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1138,12 +1138,12 @@ components: credentials: # Secret is optional on create for admin, forbidden for tenant $ref: "#/components/schemas/WebhookCredentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. 
example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1172,12 +1172,12 @@ components: $ref: "#/components/schemas/AWSSQSConfig" credentials: $ref: "#/components/schemas/AWSSQSCredentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1206,12 +1206,12 @@ components: $ref: "#/components/schemas/RabbitMQConfig" credentials: $ref: "#/components/schemas/RabbitMQCredentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1239,12 +1239,12 @@ components: config: {} credentials: $ref: "#/components/schemas/HookdeckCredentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1273,12 +1273,12 @@ components: $ref: "#/components/schemas/AWSKinesisConfig" credentials: $ref: "#/components/schemas/AWSKinesisCredentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. 
example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1308,12 +1308,12 @@ components: $ref: "#/components/schemas/AzureServiceBusConfig" credentials: $ref: "#/components/schemas/AzureServiceBusCredentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1343,12 +1343,12 @@ components: $ref: "#/components/schemas/AWSS3Config" credentials: $ref: "#/components/schemas/AWSS3Credentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1377,12 +1377,12 @@ components: $ref: "#/components/schemas/GCPPubSubConfig" credentials: $ref: "#/components/schemas/GCPPubSubCredentials" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1444,12 +1444,12 @@ components: $ref: "#/components/schemas/WebhookConfig" # URL is required here, but PATCH means it's optional in the request credentials: $ref: "#/components/schemas/WebhookCredentialsUpdate" - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. 
+ description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1470,12 +1470,12 @@ components: $ref: "#/components/schemas/AWSSQSConfig" # queue_url is required here, but PATCH means it's optional credentials: $ref: "#/components/schemas/AWSSQSCredentials" # key/secret required here, but PATCH means optional - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1496,12 +1496,12 @@ components: $ref: "#/components/schemas/RabbitMQConfig" # server_url/exchange required here, but PATCH means optional credentials: $ref: "#/components/schemas/RabbitMQCredentials" # username/password required here, but PATCH means optional - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1521,12 +1521,12 @@ components: config: {} # Empty config, cannot be updated credentials: $ref: "#/components/schemas/HookdeckCredentials" # token required here, but PATCH means optional - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. 
example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1547,12 +1547,12 @@ components: $ref: "#/components/schemas/AWSKinesisConfig" # stream_name/region required here, but PATCH means optional credentials: $ref: "#/components/schemas/AWSKinesisCredentials" # key/secret required here, but PATCH means optional - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1573,12 +1573,12 @@ components: $ref: "#/components/schemas/AzureServiceBusConfig" # name required here, but PATCH means optional credentials: $ref: "#/components/schemas/AzureServiceBusCredentials" # connection_string required here, but PATCH means optional - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1600,12 +1600,12 @@ components: $ref: "#/components/schemas/AWSS3Config" # bucket/region required here, but PATCH means optional credentials: $ref: "#/components/schemas/AWSS3Credentials" # key/secret required here, but PATCH means optional - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. 
example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1626,12 +1626,12 @@ components: $ref: "#/components/schemas/GCPPubSubConfig" # project_id/topic required here, but PATCH means optional credentials: $ref: "#/components/schemas/GCPPubSubCredentials" # service_account_json required here, but PATCH means optional - delivery_metadata: + attempt_metadata: type: object additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1769,24 +1769,24 @@ components: type: string example: { "content-type": "application/json" } - # Delivery schemas for deliveries-first API - Delivery: + # Attempt schemas for attempts-first API + Attempt: type: object - description: A delivery represents a single delivery attempt of an event to a destination. + description: An attempt represents a single delivery attempt of an event to a destination. properties: id: type: string - description: Unique identifier for this delivery. - example: "del_123" + description: Unique identifier for this attempt. + example: "att_123" status: type: string enum: [success, failed] - description: The delivery status. + description: The attempt status. example: "success" delivered_at: type: string format: date-time - description: Time the delivery was attempted. + description: Time the attempt was made. example: "2024-01-01T00:00:05Z" code: type: string @@ -1794,7 +1794,7 @@ components: example: "200" response_data: type: object - description: Response data from the delivery attempt. Only included when include=response_data. + description: Response data from the attempt. Only included when include=response_data. 
additionalProperties: true example: { "status_code": 200, "body": '{"status":"ok"}', "headers": { "content-type": "application/json" } } attempt: @@ -1803,7 +1803,7 @@ components: example: 1 manual: type: boolean - description: Whether this delivery was manually triggered (e.g., a retry initiated by a user). + description: Whether this attempt was manually triggered (e.g., a retry initiated by a user). example: false event: oneOf: @@ -1815,7 +1815,7 @@ components: description: The associated event. Returns event ID by default, or included event object when include=event or include=event.data. destination: type: string - description: The destination ID this delivery was sent to. + description: The destination ID this attempt was sent to. example: "des_456" EventSummary: type: object @@ -1872,15 +1872,15 @@ components: additionalProperties: true description: The event payload data. example: { "user_id": "userid", "status": "active" } - DeliveryPaginatedResult: + AttemptPaginatedResult: type: object - description: Paginated list of deliveries. + description: Paginated list of attempts. properties: models: type: array items: - $ref: "#/components/schemas/Delivery" - description: Array of delivery objects. + $ref: "#/components/schemas/Attempt" + description: Array of attempt objects. pagination: $ref: "#/components/schemas/SeekPagination" @@ -2028,14 +2028,14 @@ tags: description: Operations for retrieving destination type schemas. - name: Topics description: Operations for retrieving available event topics. - - name: Deliveries + - name: Attempts description: | - Deliveries represent individual delivery attempts of events to destinations. The deliveries API provides a delivery-centric view of event processing. + Attempts represent individual delivery attempts of events to destinations. The attempts API provides an attempt-centric view of event processing. 
- Each delivery contains: - - `id`: Unique delivery identifier + Each attempt contains: + - `id`: Unique attempt identifier - `status`: success or failed - - `delivered_at`: Timestamp of the delivery attempt + - `delivered_at`: Timestamp of the attempt - `code`: HTTP status code or error code - `attempt`: Attempt number (1 for first attempt, 2+ for retries) - `event`: Associated event (ID or included object) @@ -2044,7 +2044,7 @@ tags: Use the `include` query parameter to include related data: - `include=event`: Include event summary (id, topic, time, eligible_for_retry, metadata) - `include=event.data`: Include full event with payload data - - `include=response_data`: Include response body and headers from the delivery attempt + - `include=response_data`: Include response body and headers from the attempt - name: Events description: Operations related to event history. @@ -2456,15 +2456,15 @@ paths: schema: $ref: "#/components/schemas/APIErrorResponse" - /deliveries: + /attempts: get: - tags: [Deliveries] - summary: List Deliveries (Admin) + tags: [Attempts] + summary: List Attempts (Admin) description: | - Retrieves a paginated list of deliveries across all tenants. This is an admin-only endpoint that requires the Admin API Key. + Retrieves a paginated list of attempts across all tenants. This is an admin-only endpoint that requires the Admin API Key. - When `tenant_id` is not provided, returns deliveries from all tenants. When `tenant_id` is provided, returns only deliveries for that tenant. - operationId: adminListDeliveries + When `tenant_id` is not provided, returns attempts from all tenants. When `tenant_id` is provided, returns only attempts for that tenant. + operationId: adminListAttempts security: - AdminApiKey: [] parameters: @@ -2473,26 +2473,26 @@ paths: required: false schema: type: string - description: Filter deliveries by tenant ID. If not provided, returns deliveries from all tenants. + description: Filter attempts by tenant ID. 
If not provided, returns attempts from all tenants. - name: event_id in: query required: false schema: type: string - description: Filter deliveries by event ID. + description: Filter attempts by event ID. - name: destination_id in: query required: false schema: type: string - description: Filter deliveries by destination ID. + description: Filter attempts by destination ID. - name: status in: query required: false schema: type: string enum: [success, failed] - description: Filter deliveries by status. + description: Filter attempts by status. - name: topic in: query required: false @@ -2502,21 +2502,21 @@ paths: - type: array items: type: string - description: Filter deliveries by event topic(s). Can be specified multiple times or comma-separated. + description: Filter attempts by event topic(s). Can be specified multiple times or comma-separated. - name: time[gte] in: query required: false schema: type: string format: date-time - description: Filter deliveries by event time >= value (RFC3339 or YYYY-MM-DD format). + description: Filter attempts by event time >= value (RFC3339 or YYYY-MM-DD format). - name: time[lte] in: query required: false schema: type: string format: date-time - description: Filter deliveries by event time <= value (RFC3339 or YYYY-MM-DD format). + description: Filter attempts by event time <= value (RFC3339 or YYYY-MM-DD format). - name: limit in: query required: false @@ -2570,23 +2570,23 @@ paths: description: Sort direction. responses: "200": - description: A paginated list of deliveries. + description: A paginated list of attempts. 
content: application/json: schema: - $ref: "#/components/schemas/DeliveryPaginatedResult" + $ref: "#/components/schemas/AttemptPaginatedResult" examples: - AdminDeliveriesListExample: + AdminAttemptsListExample: value: models: - - id: "del_123" + - id: "att_123" status: "success" delivered_at: "2024-01-01T00:00:05Z" code: "200" attempt: 1 event: "evt_123" destination: "des_456" - - id: "del_124" + - id: "att_124" status: "failed" delivered_at: "2024-01-02T10:00:01Z" code: "503" @@ -2599,7 +2599,7 @@ paths: limit: 100 next: "MTcwNDA2NzIwMA==" prev: null - AdminDeliveriesWithIncludeExample: + AdminAttemptsWithIncludeExample: summary: Response with include=event value: models: @@ -3307,8 +3307,8 @@ paths: "404": description: Tenant not found. - # Deliveries (Tenant Specific - Admin or JWT) - /tenants/{tenant_id}/deliveries: + # Attempts (Tenant Specific - Admin or JWT) + /tenants/{tenant_id}/attempts: parameters: - name: tenant_id in: path @@ -3317,30 +3317,30 @@ paths: type: string description: The ID of the tenant. Required when using AdminApiKey authentication. get: - tags: [Deliveries] - summary: List Deliveries - description: Retrieves a paginated list of deliveries for the tenant, with filtering and sorting options. - operationId: listTenantDeliveries + tags: [Attempts] + summary: List Attempts + description: Retrieves a paginated list of attempts for the tenant, with filtering and sorting options. + operationId: listTenantAttempts parameters: - name: destination_id in: query required: false schema: type: string - description: Filter deliveries by destination ID. + description: Filter attempts by destination ID. - name: event_id in: query required: false schema: type: string - description: Filter deliveries by event ID. + description: Filter attempts by event ID. - name: status in: query required: false schema: type: string enum: [success, failed] - description: Filter deliveries by status. + description: Filter attempts by status. 
- name: topic in: query required: false @@ -3350,21 +3350,21 @@ paths: - type: array items: type: string - description: Filter deliveries by event topic(s). Can be specified multiple times or comma-separated. + description: Filter attempts by event topic(s). Can be specified multiple times or comma-separated. - name: time[gte] in: query required: false schema: type: string format: date-time - description: Filter deliveries by event time >= value (RFC3339 or YYYY-MM-DD format). + description: Filter attempts by event time >= value (RFC3339 or YYYY-MM-DD format). - name: time[lte] in: query required: false schema: type: string format: date-time - description: Filter deliveries by event time <= value (RFC3339 or YYYY-MM-DD format). + description: Filter attempts by event time <= value (RFC3339 or YYYY-MM-DD format). - name: limit in: query required: false @@ -3418,23 +3418,23 @@ paths: description: Sort direction. responses: "200": - description: A paginated list of deliveries. + description: A paginated list of attempts. 
content: application/json: schema: - $ref: "#/components/schemas/DeliveryPaginatedResult" + $ref: "#/components/schemas/AttemptPaginatedResult" examples: - DeliveriesListExample: + AttemptsListExample: value: models: - - id: "del_123" + - id: "att_123" status: "success" delivered_at: "2024-01-01T00:00:05Z" code: "200" attempt: 1 event: "evt_123" destination: "des_456" - - id: "del_124" + - id: "att_124" status: "failed" delivered_at: "2024-01-02T10:00:01Z" code: "503" @@ -3447,11 +3447,11 @@ paths: limit: 100 next: "MTcwNDA2NzIwMA==" prev: null - DeliveriesWithIncludeExample: + AttemptsWithIncludeExample: summary: Response with include=event value: models: - - id: "del_123" + - id: "att_123" status: "success" delivered_at: "2024-01-01T00:00:05Z" code: "200" @@ -3478,7 +3478,7 @@ paths: schema: $ref: "#/components/schemas/APIErrorResponse" - /tenants/{tenant_id}/deliveries/{delivery_id}: + /tenants/{tenant_id}/attempts/{attempt_id}: parameters: - name: tenant_id in: path @@ -3486,17 +3486,17 @@ paths: schema: type: string description: The ID of the tenant. Required when using AdminApiKey authentication. - - name: delivery_id + - name: attempt_id in: path required: true schema: type: string - description: The ID of the delivery. + description: The ID of the attempt. get: - tags: [Deliveries] - summary: Get Delivery - description: Retrieves details for a specific delivery. - operationId: getTenantDelivery + tags: [Attempts] + summary: Get Attempt + description: Retrieves details for a specific attempt. + operationId: getTenantAttempt parameters: - name: include in: query @@ -3514,25 +3514,25 @@ paths: - `response_data`: Include response body and headers responses: "200": - description: Delivery details. + description: Attempt details. 
content: application/json: schema: - $ref: "#/components/schemas/Delivery" + $ref: "#/components/schemas/Attempt" examples: - DeliveryExample: + AttemptExample: value: - id: "del_123" + id: "att_123" status: "success" delivered_at: "2024-01-01T00:00:05Z" code: "200" attempt: 1 event: "evt_123" destination: "des_456" - DeliveryWithIncludeExample: + AttemptWithIncludeExample: summary: Response with include=event.data,response_data value: - id: "del_123" + id: "att_123" status: "success" delivered_at: "2024-01-01T00:00:05Z" code: "200" @@ -3550,9 +3550,9 @@ paths: data: { "user_id": "userid", "status": "active" } destination: "des_456" "404": - description: Tenant or Delivery not found. + description: Tenant or Attempt not found. - /tenants/{tenant_id}/deliveries/{delivery_id}/retry: + /tenants/{tenant_id}/attempts/{attempt_id}/retry: parameters: - name: tenant_id in: path @@ -3560,28 +3560,28 @@ paths: schema: type: string description: The ID of the tenant. Required when using AdminApiKey authentication. - - name: delivery_id + - name: attempt_id in: path required: true schema: type: string - description: The ID of the delivery to retry. + description: The ID of the attempt to retry. post: - tags: [Deliveries] - summary: Retry Delivery + tags: [Attempts] + summary: Retry Attempt description: | - Triggers a retry for a delivery. Only the latest delivery for an event+destination pair can be retried. + Triggers a retry for an attempt. Only the latest attempt for an event+destination pair can be retried. The destination must exist and be enabled. - operationId: retryTenantDelivery + operationId: retryTenantAttempt responses: "202": description: Retry accepted for processing. "404": - description: Tenant or Delivery not found. + description: Tenant or Attempt not found. "409": description: | - Delivery not eligible for retry. This can happen when: - - The delivery is not the latest for this event+destination pair + Attempt not eligible for retry. 
This can happen when: + - The attempt is not the latest for this event+destination pair - The destination is disabled or deleted # Events (Tenant Specific - Admin or JWT) @@ -3746,7 +3746,7 @@ paths: "404": description: Tenant or Event not found. - /tenants/{tenant_id}/events/{event_id}/deliveries: + /tenants/{tenant_id}/events/{event_id}/attempts: parameters: - name: tenant_id in: path @@ -3762,12 +3762,12 @@ paths: description: The ID of the event. get: tags: [Events] - summary: List Event Delivery Attempts - description: Retrieves a list of delivery attempts for a specific event, including response details. - operationId: listTenantEventDeliveries + summary: List Event Attempts + description: Retrieves a list of attempts for a specific event, including response details. + operationId: listTenantEventAttempts responses: "200": - description: A list of delivery attempts. + description: A list of attempts. content: application/json: schema: @@ -3775,7 +3775,7 @@ paths: items: $ref: "#/components/schemas/DeliveryAttempt" examples: - DeliveriesListExample: + AttemptsListExample: value: - delivered_at: "2024-01-01T00:00:05Z" status: "success" From fc4a57f60bb6650f069bb8ea25caf54a6ec7facb Mon Sep 17 00:00:00 2001 From: Alex Luong Date: Tue, 27 Jan 2026 03:29:02 +0700 Subject: [PATCH 17/23] feat: rename Delivery to Attempt in UI components Co-Authored-By: Claude Opus 4.5 --- .../RetryAttemptButton.tsx} | 18 ++-- .../src/scenes/Destination/Destination.tsx | 8 +- ...DeliveryDetails.tsx => AttemptDetails.tsx} | 64 +++++------ .../Events/{Deliveries.scss => Attempts.scss} | 6 +- .../Events/{Deliveries.tsx => Attempts.tsx} | 100 +++++++++--------- internal/portal/src/typings/Event.ts | 12 +-- 6 files changed, 104 insertions(+), 104 deletions(-) rename internal/portal/src/common/{RetryDeliveryButton/RetryDeliveryButton.tsx => RetryAttemptButton/RetryAttemptButton.tsx} (76%) rename internal/portal/src/scenes/Destination/Events/{DeliveryDetails.tsx => AttemptDetails.tsx} (65%) 
rename internal/portal/src/scenes/Destination/Events/{Deliveries.scss => Attempts.scss} (98%) rename internal/portal/src/scenes/Destination/Events/{Deliveries.tsx => Attempts.tsx} (77%) diff --git a/internal/portal/src/common/RetryDeliveryButton/RetryDeliveryButton.tsx b/internal/portal/src/common/RetryAttemptButton/RetryAttemptButton.tsx similarity index 76% rename from internal/portal/src/common/RetryDeliveryButton/RetryDeliveryButton.tsx rename to internal/portal/src/common/RetryAttemptButton/RetryAttemptButton.tsx index 8746eaca..b5c15d70 100644 --- a/internal/portal/src/common/RetryDeliveryButton/RetryDeliveryButton.tsx +++ b/internal/portal/src/common/RetryAttemptButton/RetryAttemptButton.tsx @@ -4,8 +4,8 @@ import { ReplayIcon } from "../Icons"; import { showToast } from "../Toast/Toast"; import { ApiContext, formatError } from "../../app"; -interface RetryDeliveryButtonProps { - deliveryId: string; +interface RetryAttemptButtonProps { + attemptId: string; disabled: boolean; loading: boolean; completed: (success: boolean) => void; @@ -13,8 +13,8 @@ interface RetryDeliveryButtonProps { iconLabel?: string; } -const RetryDeliveryButton: React.FC = ({ - deliveryId, +const RetryAttemptButton: React.FC = ({ + attemptId, disabled, loading, completed, @@ -24,12 +24,12 @@ const RetryDeliveryButton: React.FC = ({ const apiClient = useContext(ApiContext); const [retrying, setRetrying] = useState(false); - const retryDelivery = useCallback( + const retryAttempt = useCallback( async (e: MouseEvent) => { e.stopPropagation(); setRetrying(true); try { - await apiClient.fetch(`deliveries/${deliveryId}/retry`, { + await apiClient.fetch(`attempts/${attemptId}/retry`, { method: "POST", }); showToast("success", "Retry successful."); @@ -41,7 +41,7 @@ const RetryDeliveryButton: React.FC = ({ setRetrying(false); }, - [apiClient, deliveryId, completed], + [apiClient, attemptId, completed], ); return ( @@ -49,7 +49,7 @@ const RetryDeliveryButton: React.FC = ({ minimal icon={icon} 
iconLabel={iconLabel} - onClick={(e) => retryDelivery(e)} + onClick={(e) => retryAttempt(e)} disabled={disabled || retrying} loading={loading || retrying} > @@ -58,4 +58,4 @@ const RetryDeliveryButton: React.FC = ({ ); }; -export default RetryDeliveryButton; +export default RetryAttemptButton; diff --git a/internal/portal/src/scenes/Destination/Destination.tsx b/internal/portal/src/scenes/Destination/Destination.tsx index d6029b90..34dc192d 100644 --- a/internal/portal/src/scenes/Destination/Destination.tsx +++ b/internal/portal/src/scenes/Destination/Destination.tsx @@ -14,7 +14,7 @@ import { } from "../../typings/Destination"; import getLogo from "../../utils/logo"; import DestinationSettings from "./DestinationSettings/DestinationSettings"; -import { DeliveryRoutes } from "./Events/Deliveries"; +import { AttemptRoutes } from "./Events/Attempts"; // Define the tab interface interface Tab { @@ -26,7 +26,7 @@ interface Tab { const tabs: Tab[] = [ { label: "Overview", path: "" }, { label: "Settings", path: "/settings" }, - { label: "Deliveries", path: "/deliveries" }, + { label: "Attempts", path: "/attempts" }, ]; const Destination = () => { @@ -133,8 +133,8 @@ const Destination = () => { } /> } + path="/attempts/*" + element={} /> void; + navigateAttempt: (path: string, params?: any) => void; }) => { - const { delivery_id: deliveryId } = useParams(); + const { attempt_id: attemptId } = useParams(); - const { data: delivery } = useSWR( - `deliveries/${deliveryId}?include=event.data,response_data`, + const { data: attempt } = useSWR( + `attempts/${attemptId}?include=event.data,response_data`, ); - if (!delivery) { + if (!attempt) { return
Loading...
; } const event = - typeof delivery.event === "object" ? (delivery.event as EventFull) : null; + typeof attempt.event === "object" ? (attempt.event as EventFull) : null; return (

- {event?.topic || "Delivery"} + {event?.topic || "Attempt"}

- {}} @@ -45,7 +45,7 @@ const DeliveryDetails = ({ icon iconLabel="Close" minimal - onClick={() => navigateDelivery("/")} + onClick={() => navigateAttempt("/")} > @@ -53,30 +53,30 @@ const DeliveryDetails = ({
-
-
+
+
Status
- {delivery.code && ( + {attempt.code && (
Response Code
-
{delivery.code}
+
{attempt.code}
)}
Attempt
-
{delivery.attempt}
+
{attempt.attempt}
{event && (
@@ -87,7 +87,7 @@ const DeliveryDetails = ({
Delivered at
- {new Date(delivery.delivered_at).toLocaleString("en-US", { + {new Date(attempt.delivered_at).toLocaleString("en-US", { year: "numeric", month: "numeric", day: "numeric", @@ -99,10 +99,10 @@ const DeliveryDetails = ({
-
Delivery ID
+
Attempt ID
- {delivery.id} - + {attempt.id} +
{event && ( @@ -118,7 +118,7 @@ const DeliveryDetails = ({
{event?.data && ( -
+

Data

                 {JSON.stringify(event.data, null, 2)}
@@ -127,7 +127,7 @@ const DeliveryDetails = ({
           )}
 
           {event?.metadata && Object.keys(event.metadata).length > 0 && (
-            
+

Metadata

                 {JSON.stringify(event.metadata, null, 2)}
@@ -135,11 +135,11 @@ const DeliveryDetails = ({
             
)} - {delivery.response_data && ( -
+ {attempt.response_data && ( +

Response

-                {JSON.stringify(delivery.response_data, null, 2)}
+                {JSON.stringify(attempt.response_data, null, 2)}
               
)} @@ -149,4 +149,4 @@ const DeliveryDetails = ({ ); }; -export default DeliveryDetails; +export default AttemptDetails; diff --git a/internal/portal/src/scenes/Destination/Events/Deliveries.scss b/internal/portal/src/scenes/Destination/Events/Attempts.scss similarity index 98% rename from internal/portal/src/scenes/Destination/Events/Deliveries.scss rename to internal/portal/src/scenes/Destination/Events/Attempts.scss index fff9146c..5d37d6d2 100644 --- a/internal/portal/src/scenes/Destination/Events/Deliveries.scss +++ b/internal/portal/src/scenes/Destination/Events/Attempts.scss @@ -1,4 +1,4 @@ -.destination-deliveries { +.destination-attempts { margin-top: var(--spacing-5); margin-bottom: var(--spacing-20); @@ -32,7 +32,7 @@ display: grid; min-height: 713px; - .delivery-time-cell { + .attempt-time-cell { text-transform: uppercase; } @@ -117,7 +117,7 @@ } } -.delivery-data { +.attempt-data { height: 100%; box-sizing: border-box; diff --git a/internal/portal/src/scenes/Destination/Events/Deliveries.tsx b/internal/portal/src/scenes/Destination/Events/Attempts.tsx similarity index 77% rename from internal/portal/src/scenes/Destination/Events/Deliveries.tsx rename to internal/portal/src/scenes/Destination/Events/Attempts.tsx index 3cfb8b8b..529a21d1 100644 --- a/internal/portal/src/scenes/Destination/Events/Deliveries.tsx +++ b/internal/portal/src/scenes/Destination/Events/Attempts.tsx @@ -1,9 +1,9 @@ import { useCallback, useMemo, useState } from "react"; import Badge from "../../../common/Badge/Badge"; import Button from "../../../common/Button/Button"; -import "./Deliveries.scss"; +import "./Attempts.scss"; import Table from "../../../common/Table/Table"; -import { DeliveryListResponse, EventSummary } from "../../../typings/Event"; +import { AttemptListResponse, EventSummary } from "../../../typings/Event"; import useSWR from "swr"; import Dropdown from "../../../common/Dropdown/Dropdown"; import { @@ -13,7 +13,7 @@ import { RefreshIcon, NextIcon, } from 
"../../../common/Icons"; -import RetryDeliveryButton from "../../../common/RetryDeliveryButton/RetryDeliveryButton"; +import RetryAttemptButton from "../../../common/RetryAttemptButton/RetryAttemptButton"; import { Checkbox } from "../../../common/Checkbox/Checkbox"; import { Route, @@ -24,20 +24,20 @@ import { useParams, } from "react-router-dom"; import CONFIGS from "../../../config"; -import DeliveryDetails from "./DeliveryDetails"; +import AttemptDetails from "./AttemptDetails"; -interface DeliveriesProps { +interface AttemptsProps { destination: any; - navigateDelivery: (path: string, state?: any) => void; + navigateAttempt: (path: string, state?: any) => void; } -const Deliveries: React.FC = ({ +const Attempts: React.FC = ({ destination, - navigateDelivery, + navigateAttempt, }) => { const [timeRange, setTimeRange] = useState("24h"); - const { delivery_id: deliveryId } = useParams<{ delivery_id: string }>(); - const { status, topics, pagination, urlSearchParams } = useDeliveryFilter(); + const { attempt_id: attemptId } = useParams<{ attempt_id: string }>(); + const { status, topics, pagination, urlSearchParams } = useAttemptFilter(); const queryUrl = useMemo(() => { const searchParams = new URLSearchParams(urlSearchParams); @@ -70,31 +70,31 @@ const Deliveries: React.FC = ({ searchParams.set("destination_id", destination.id); searchParams.set("include", "event"); - return `deliveries?${searchParams.toString()}`; + return `attempts?${searchParams.toString()}`; }, [destination.id, timeRange, urlSearchParams]); const { - data: deliveriesList, + data: attemptsList, mutate, isValidating, - } = useSWR(queryUrl, { + } = useSWR(queryUrl, { revalidateOnFocus: false, }); const topicsList = CONFIGS.TOPICS.split(","); - const table_rows = deliveriesList?.models - ? deliveriesList.models.map((delivery) => { + const table_rows = attemptsList?.models + ? attemptsList.models.map((attempt) => { const event = - typeof delivery.event === "object" - ? 
(delivery.event as EventSummary) + typeof attempt.event === "object" + ? (attempt.event as EventSummary) : null; return { - id: delivery.id, - active: delivery.id === (deliveryId || ""), + id: attempt.id, + active: attempt.id === (attemptId || ""), entries: [ - - {new Date(delivery.delivered_at).toLocaleString("en-US", { + + {new Date(attempt.delivered_at).toLocaleString("en-US", { month: "short", day: "numeric", hour: "numeric", @@ -103,13 +103,13 @@ const Deliveries: React.FC = ({ })} , - {delivery.status === "success" ? ( + {attempt.status === "success" ? ( ) : ( )} - { @@ -120,21 +120,21 @@ const Deliveries: React.FC = ({ /> , {event?.topic || "-"}, - {delivery.id}, + {attempt.id}, ], - onClick: () => navigateDelivery(`/${delivery.id}`), + onClick: () => navigateAttempt(`/${attempt.id}`), }; }) : []; return ( -
-
-

- Deliveries{" "} - +
+
+

+ Attempts{" "} +

-
+
} trigger={`Last ${timeRange}`} @@ -230,8 +230,8 @@ const Deliveries: React.FC = ({
= ({ header: "Topic", }, { - header: "Delivery ID", + header: "Attempt ID", }, ]} rows={table_rows} @@ -256,7 +256,7 @@ const Deliveries: React.FC = ({
- {deliveriesList?.models.length ?? 0} deliveries + {attemptsList?.models.length ?? 0} attempts
@@ -264,9 +264,9 @@ const Deliveries: React.FC = ({