diff --git a/REFERENCE.md b/REFERENCE.md index 1e9014d..f02f9aa 100644 --- a/REFERENCE.md +++ b/REFERENCE.md @@ -19,6 +19,7 @@ The Hookdeck CLI provides comprehensive webhook infrastructure management includ - [Events](#events) - [Requests](#requests) - [Attempts](#attempts) +- [Metrics](#metrics) - [Utilities](#utilities) ## Global Options @@ -1782,6 +1783,24 @@ hookdeck gateway attempt get [flags] hookdeck gateway attempt get atm_abc123 ``` +## Metrics + +Query Event Gateway metrics (events, requests, attempts, queue depth, pending events, events by issue, transformations). All metrics commands require `--start` and `--end` (ISO 8601 date-time). + +**Use cases and examples:** + +| Use case | Example command | +|----------|-----------------| +| Event volume and failure rate over time | `hookdeck gateway metrics events --start 2026-02-01T00:00:00Z --end 2026-02-25T00:00:00Z --granularity 1d --measures count,failed_count,error_rate` | +| Request acceptance vs rejection | `hookdeck gateway metrics requests --start 2026-02-01T00:00:00Z --end 2026-02-25T00:00:00Z --measures count,accepted_count,rejected_count` | +| Delivery latency (attempts) | `hookdeck gateway metrics attempts --start 2026-02-01T00:00:00Z --end 2026-02-25T00:00:00Z --measures response_latency_avg,response_latency_p95` | +| Queue backlog per destination | `hookdeck gateway metrics queue-depth --start 2026-02-01T00:00:00Z --end 2026-02-25T00:00:00Z --measures max_depth,max_age --destination-id dest_xxx` | +| Pending events over time | `hookdeck gateway metrics pending --start 2026-02-01T00:00:00Z --end 2026-02-25T00:00:00Z --granularity 1h --measures count` | +| Events grouped by issue (debugging) | `hookdeck gateway metrics events-by-issue iss_xxx --start 2026-02-01T00:00:00Z --end 2026-02-25T00:00:00Z --measures count` | +| Transformation errors | `hookdeck gateway metrics transformations --start 2026-02-01T00:00:00Z --end 2026-02-25T00:00:00Z --measures count,failed_count,error_rate` | + +**Common 
flags (all metrics subcommands):** `--start`, `--end` (required), `--granularity` (e.g. 1h, 5m, 1d), `--measures`, `--dimensions`, `--source-id`, `--destination-id`, `--connection-id`, `--status`, `--output` (json). + ## Utilities diff --git a/package-lock.json b/package-lock.json index e198cbd..b70a512 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "hookdeck-cli", - "version": "1.6.0", + "version": "1.8.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "hookdeck-cli", - "version": "1.6.0", + "version": "1.8.0", "license": "Apache-2.0", "bin": { "hookdeck": "bin/hookdeck.js" diff --git a/package.json b/package.json index 5f53b52..261e932 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "hookdeck-cli", - "version": "1.8.1", + "version": "1.9.0-beta.1", "description": "Hookdeck CLI", "repository": { "type": "git", diff --git a/pkg/cmd/gateway.go b/pkg/cmd/gateway.go index 3e5b9a0..51c1e7e 100644 --- a/pkg/cmd/gateway.go +++ b/pkg/cmd/gateway.go @@ -39,6 +39,7 @@ The gateway command group provides full access to all Event Gateway resources.`, addEventCmdTo(g.cmd) addRequestCmdTo(g.cmd) addAttemptCmdTo(g.cmd) + addMetricsCmdTo(g.cmd) return g } diff --git a/pkg/cmd/metrics.go b/pkg/cmd/metrics.go new file mode 100644 index 0000000..9e2a78e --- /dev/null +++ b/pkg/cmd/metrics.go @@ -0,0 +1,155 @@ +package cmd + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/hookdeck/hookdeck-cli/pkg/hookdeck" + "github.com/hookdeck/hookdeck-cli/pkg/validators" +) + +// printMetricsResponse prints data as JSON or a human-readable table. 
+func printMetricsResponse(data hookdeck.MetricsResponse, output string) error { + if output == "json" { + bytes, err := json.MarshalIndent(data, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal metrics: %w", err) + } + fmt.Println(string(bytes)) + return nil + } + if len(data) == 0 { + fmt.Println("No data points.") + return nil + } + for i, pt := range data { + tb := "" + if pt.TimeBucket != nil { + tb = *pt.TimeBucket + } + fmt.Printf("time_bucket: %s\n", tb) + if len(pt.Dimensions) > 0 { + for k, v := range pt.Dimensions { + fmt.Printf(" %s: %v\n", k, v) + } + } + if len(pt.Metrics) > 0 { + for k, v := range pt.Metrics { + fmt.Printf(" %s: %v\n", k, v) + } + } + if i < len(data)-1 { + fmt.Println("---") + } + } + return nil +} + +const granularityHelp = `Time bucket size. Format: <number><unit> (e.g. 1h, 5m, 1d). +Units: s (seconds), m (minutes), h (hours), d (days), w (weeks), M (months).` + +// metricsCommonFlags holds the common flags for all metrics subcommands. +// Used by addMetricsCommonFlags and to build hookdeck.MetricsQueryParams. +type metricsCommonFlags struct { + start string + end string + granularity string + measures string + dimensions string + sourceID string + destinationID string + connectionID string + status string + issueID string + output string +} + +// addMetricsCommonFlags adds common metrics flags to cmd and binds them to f. +// For subcommands that take a required resource id as an argument (e.g. events-by-issue <issue-id>), +// use addMetricsCommonFlagsEx with skipIssueID true so --issue-id is not added as a flag.
+func addMetricsCommonFlags(cmd *cobra.Command, f *metricsCommonFlags) { + addMetricsCommonFlagsEx(cmd, f, false) +} + +func addMetricsCommonFlagsEx(cmd *cobra.Command, f *metricsCommonFlags, skipIssueID bool) { + cmd.Flags().StringVar(&f.start, "start", "", "Start of time range (ISO 8601 date-time, required)") + cmd.Flags().StringVar(&f.end, "end", "", "End of time range (ISO 8601 date-time, required)") + cmd.Flags().StringVar(&f.granularity, "granularity", "", granularityHelp) + cmd.Flags().StringVar(&f.measures, "measures", "", "Comma-separated list of measures to return") + cmd.Flags().StringVar(&f.dimensions, "dimensions", "", "Comma-separated list of dimensions") + cmd.Flags().StringVar(&f.sourceID, "source-id", "", "Filter by source ID") + cmd.Flags().StringVar(&f.destinationID, "destination-id", "", "Filter by destination ID") + cmd.Flags().StringVar(&f.connectionID, "connection-id", "", "Filter by connection ID") + cmd.Flags().StringVar(&f.status, "status", "", "Filter by status (e.g. SUCCESSFUL, FAILED)") + if !skipIssueID { + cmd.Flags().StringVar(&f.issueID, "issue-id", "", "Filter by issue ID") + } + cmd.Flags().StringVar(&f.output, "output", "", "Output format (json)") + _ = cmd.MarkFlagRequired("start") + _ = cmd.MarkFlagRequired("end") +} + +// metricsParamsFromFlags builds hookdeck.MetricsQueryParams from common flags. +// Measures and dimensions are split from comma-separated strings. 
+func metricsParamsFromFlags(f *metricsCommonFlags) hookdeck.MetricsQueryParams { + var measures, dimensions []string + if f.measures != "" { + for _, s := range strings.Split(f.measures, ",") { + if t := strings.TrimSpace(s); t != "" { + measures = append(measures, t) + } + } + } + if f.dimensions != "" { + for _, s := range strings.Split(f.dimensions, ",") { + if t := strings.TrimSpace(s); t != "" { + dimensions = append(dimensions, t) + } + } + } + return hookdeck.MetricsQueryParams{ + Start: f.start, + End: f.end, + Granularity: f.granularity, + Measures: measures, + Dimensions: dimensions, + SourceID: f.sourceID, + DestinationID: f.destinationID, + ConnectionID: f.connectionID, + Status: f.status, + IssueID: f.issueID, + } +} + +type metricsCmd struct { + cmd *cobra.Command +} + +func newMetricsCmd() *metricsCmd { + mc := &metricsCmd{} + + mc.cmd = &cobra.Command{ + Use: "metrics", + Args: validators.NoArgs, + Short: ShortBeta("Query Event Gateway metrics"), + Long: LongBeta(`Query metrics for events, requests, attempts, queue depth, pending events, events by issue, and transformations. +Requires --start and --end (ISO 8601 date-time). 
Use subcommands to choose the metric type.`), + } + + mc.cmd.AddCommand(newMetricsEventsCmd().cmd) + mc.cmd.AddCommand(newMetricsRequestsCmd().cmd) + mc.cmd.AddCommand(newMetricsAttemptsCmd().cmd) + mc.cmd.AddCommand(newMetricsQueueDepthCmd().cmd) + mc.cmd.AddCommand(newMetricsPendingCmd().cmd) + mc.cmd.AddCommand(newMetricsEventsByIssueCmd().cmd) + mc.cmd.AddCommand(newMetricsTransformationsCmd().cmd) + + return mc +} + +func addMetricsCmdTo(parent *cobra.Command) { + parent.AddCommand(newMetricsCmd().cmd) +} diff --git a/pkg/cmd/metrics_attempts.go b/pkg/cmd/metrics_attempts.go new file mode 100644 index 0000000..9644744 --- /dev/null +++ b/pkg/cmd/metrics_attempts.go @@ -0,0 +1,40 @@ +package cmd + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" +) + +const metricsAttemptsMeasures = "count, successful_count, failed_count, delivered_count, error_rate, response_latency_avg, response_latency_max, response_latency_p95, response_latency_p99, delivery_latency_avg" + +type metricsAttemptsCmd struct { + cmd *cobra.Command + flags metricsCommonFlags +} + +func newMetricsAttemptsCmd() *metricsAttemptsCmd { + c := &metricsAttemptsCmd{} + c.cmd = &cobra.Command{ + Use: "attempts", + Args: cobra.NoArgs, + Short: ShortBeta("Query attempt metrics"), + Long: LongBeta(`Query metrics for delivery attempts (latency, success/failure). 
Measures: ` + metricsAttemptsMeasures + `.`), + RunE: c.runE, + } + addMetricsCommonFlags(c.cmd, &c.flags) + return c +} + +func (c *metricsAttemptsCmd) runE(cmd *cobra.Command, args []string) error { + if err := Config.Profile.ValidateAPIKey(); err != nil { + return err + } + params := metricsParamsFromFlags(&c.flags) + data, err := Config.GetAPIClient().QueryAttemptMetrics(context.Background(), params) + if err != nil { + return fmt.Errorf("query attempt metrics: %w", err) + } + return printMetricsResponse(data, c.flags.output) +} diff --git a/pkg/cmd/metrics_events.go b/pkg/cmd/metrics_events.go new file mode 100644 index 0000000..3fe9229 --- /dev/null +++ b/pkg/cmd/metrics_events.go @@ -0,0 +1,40 @@ +package cmd + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" +) + +const metricsEventsMeasures = "count, successful_count, failed_count, scheduled_count, paused_count, error_rate, avg_attempts, scheduled_retry_count" + +type metricsEventsCmd struct { + cmd *cobra.Command + flags metricsCommonFlags +} + +func newMetricsEventsCmd() *metricsEventsCmd { + c := &metricsEventsCmd{} + c.cmd = &cobra.Command{ + Use: "events", + Args: cobra.NoArgs, + Short: ShortBeta("Query event metrics"), + Long: LongBeta(`Query metrics for events (volume, success/failure counts, error rate, etc.). 
Measures: ` + metricsEventsMeasures + `.`), + RunE: c.runE, + } + addMetricsCommonFlags(c.cmd, &c.flags) + return c +} + +func (c *metricsEventsCmd) runE(cmd *cobra.Command, args []string) error { + if err := Config.Profile.ValidateAPIKey(); err != nil { + return err + } + params := metricsParamsFromFlags(&c.flags) + data, err := Config.GetAPIClient().QueryEventMetrics(context.Background(), params) + if err != nil { + return fmt.Errorf("query event metrics: %w", err) + } + return printMetricsResponse(data, c.flags.output) +} diff --git a/pkg/cmd/metrics_events_by_issue.go b/pkg/cmd/metrics_events_by_issue.go new file mode 100644 index 0000000..51997bf --- /dev/null +++ b/pkg/cmd/metrics_events_by_issue.go @@ -0,0 +1,41 @@ +package cmd + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + + "github.com/hookdeck/hookdeck-cli/pkg/validators" +) + +type metricsEventsByIssueCmd struct { + cmd *cobra.Command + flags metricsCommonFlags +} + +func newMetricsEventsByIssueCmd() *metricsEventsByIssueCmd { + c := &metricsEventsByIssueCmd{} + c.cmd = &cobra.Command{ + Use: "events-by-issue <issue-id>", + Args: validators.ExactArgs(1), + Short: ShortBeta("Query events grouped by issue"), + Long: LongBeta(`Query metrics for events grouped by issue (for debugging).
Requires issue ID as argument.`), + RunE: c.runE, + } + addMetricsCommonFlagsEx(c.cmd, &c.flags, true) + return c +} + +func (c *metricsEventsByIssueCmd) runE(cmd *cobra.Command, args []string) error { + if err := Config.Profile.ValidateAPIKey(); err != nil { + return err + } + params := metricsParamsFromFlags(&c.flags) + params.IssueID = args[0] + data, err := Config.GetAPIClient().QueryEventsByIssue(context.Background(), params) + if err != nil { + return fmt.Errorf("query events by issue: %w", err) + } + return printMetricsResponse(data, c.flags.output) +} diff --git a/pkg/cmd/metrics_pending.go b/pkg/cmd/metrics_pending.go new file mode 100644 index 0000000..eb8e4d4 --- /dev/null +++ b/pkg/cmd/metrics_pending.go @@ -0,0 +1,38 @@ +package cmd + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" +) + +type metricsPendingCmd struct { + cmd *cobra.Command + flags metricsCommonFlags +} + +func newMetricsPendingCmd() *metricsPendingCmd { + c := &metricsPendingCmd{} + c.cmd = &cobra.Command{ + Use: "pending", + Args: cobra.NoArgs, + Short: ShortBeta("Query events pending timeseries"), + Long: LongBeta(`Query events pending over time (timeseries). 
Measures: count.`), + RunE: c.runE, + } + addMetricsCommonFlags(c.cmd, &c.flags) + return c +} + +func (c *metricsPendingCmd) runE(cmd *cobra.Command, args []string) error { + if err := Config.Profile.ValidateAPIKey(); err != nil { + return err + } + params := metricsParamsFromFlags(&c.flags) + data, err := Config.GetAPIClient().QueryEventsPendingTimeseries(context.Background(), params) + if err != nil { + return fmt.Errorf("query events pending: %w", err) + } + return printMetricsResponse(data, c.flags.output) +} diff --git a/pkg/cmd/metrics_queue_depth.go b/pkg/cmd/metrics_queue_depth.go new file mode 100644 index 0000000..b1f0fea --- /dev/null +++ b/pkg/cmd/metrics_queue_depth.go @@ -0,0 +1,41 @@ +package cmd + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" +) + +const metricsQueueDepthMeasures = "max_depth, max_age" +const metricsQueueDepthDimensions = "destination_id" + +type metricsQueueDepthCmd struct { + cmd *cobra.Command + flags metricsCommonFlags +} + +func newMetricsQueueDepthCmd() *metricsQueueDepthCmd { + c := &metricsQueueDepthCmd{} + c.cmd = &cobra.Command{ + Use: "queue-depth", + Args: cobra.NoArgs, + Short: ShortBeta("Query queue depth metrics"), + Long: LongBeta(`Query queue depth metrics. Measures: ` + metricsQueueDepthMeasures + `. 
Dimensions: ` + metricsQueueDepthDimensions + `.`), + RunE: c.runE, + } + addMetricsCommonFlags(c.cmd, &c.flags) + return c +} + +func (c *metricsQueueDepthCmd) runE(cmd *cobra.Command, args []string) error { + if err := Config.Profile.ValidateAPIKey(); err != nil { + return err + } + params := metricsParamsFromFlags(&c.flags) + data, err := Config.GetAPIClient().QueryQueueDepth(context.Background(), params) + if err != nil { + return fmt.Errorf("query queue depth: %w", err) + } + return printMetricsResponse(data, c.flags.output) +} diff --git a/pkg/cmd/metrics_requests.go b/pkg/cmd/metrics_requests.go new file mode 100644 index 0000000..084dbf1 --- /dev/null +++ b/pkg/cmd/metrics_requests.go @@ -0,0 +1,40 @@ +package cmd + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" +) + +const metricsRequestsMeasures = "count, accepted_count, rejected_count, discarded_count, avg_events_per_request, avg_ignored_per_request" + +type metricsRequestsCmd struct { + cmd *cobra.Command + flags metricsCommonFlags +} + +func newMetricsRequestsCmd() *metricsRequestsCmd { + c := &metricsRequestsCmd{} + c.cmd = &cobra.Command{ + Use: "requests", + Args: cobra.NoArgs, + Short: ShortBeta("Query request metrics"), + Long: LongBeta(`Query metrics for requests (acceptance, rejection, etc.). 
Measures: ` + metricsRequestsMeasures + `.`), + RunE: c.runE, + } + addMetricsCommonFlags(c.cmd, &c.flags) + return c +} + +func (c *metricsRequestsCmd) runE(cmd *cobra.Command, args []string) error { + if err := Config.Profile.ValidateAPIKey(); err != nil { + return err + } + params := metricsParamsFromFlags(&c.flags) + data, err := Config.GetAPIClient().QueryRequestMetrics(context.Background(), params) + if err != nil { + return fmt.Errorf("query request metrics: %w", err) + } + return printMetricsResponse(data, c.flags.output) +} diff --git a/pkg/cmd/metrics_transformations.go b/pkg/cmd/metrics_transformations.go new file mode 100644 index 0000000..a47b6e8 --- /dev/null +++ b/pkg/cmd/metrics_transformations.go @@ -0,0 +1,40 @@ +package cmd + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" +) + +const metricsTransformationsMeasures = "count, successful_count, failed_count, error_rate, error_count, warn_count, info_count, debug_count" + +type metricsTransformationsCmd struct { + cmd *cobra.Command + flags metricsCommonFlags +} + +func newMetricsTransformationsCmd() *metricsTransformationsCmd { + c := &metricsTransformationsCmd{} + c.cmd = &cobra.Command{ + Use: "transformations", + Args: cobra.NoArgs, + Short: ShortBeta("Query transformation metrics"), + Long: LongBeta(`Query metrics for transformations. 
Measures: ` + metricsTransformationsMeasures + `.`), + RunE: c.runE, + } + addMetricsCommonFlags(c.cmd, &c.flags) + return c +} + +func (c *metricsTransformationsCmd) runE(cmd *cobra.Command, args []string) error { + if err := Config.Profile.ValidateAPIKey(); err != nil { + return err + } + params := metricsParamsFromFlags(&c.flags) + data, err := Config.GetAPIClient().QueryTransformationMetrics(context.Background(), params) + if err != nil { + return fmt.Errorf("query transformation metrics: %w", err) + } + return printMetricsResponse(data, c.flags.output) +} diff --git a/pkg/hookdeck/metrics.go b/pkg/hookdeck/metrics.go new file mode 100644 index 0000000..9313332 --- /dev/null +++ b/pkg/hookdeck/metrics.go @@ -0,0 +1,134 @@ +package hookdeck + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/url" +) + +// MetricDataPoint is a single metric data point with time bucket, dimensions, and metrics. +// All metrics endpoints return an array of MetricDataPoint. +type MetricDataPoint struct { + TimeBucket *string `json:"time_bucket,omitempty"` + Dimensions map[string]interface{} `json:"dimensions,omitempty"` + Metrics map[string]float64 `json:"metrics,omitempty"` +} + +// MetricsResponse is the response from any of the metrics GET endpoints. +type MetricsResponse = []MetricDataPoint + +// MetricsQueryParams holds shared query parameters for all metrics endpoints. +// Start and End are required (ISO 8601 date-time). +// ConnectionID is mapped to API webhook_id in the CLI layer. +type MetricsQueryParams struct { + Start string // required, ISO 8601 + End string // required, ISO 8601 + Granularity string // e.g. 1h, 5m, 1d (pattern: \d+(s|m|h|d|w|M)) + Measures []string + Dimensions []string + SourceID string + DestinationID string + ConnectionID string // sent as filters[webhook_id] + Status string // e.g. 
SUCCESSFUL, FAILED + IssueID string // sent as filters[issue_id]; required for events-by-issue +} + +// buildMetricsQuery builds the query string for metrics endpoints. +// Uses bracket notation: date_range[start], date_range[end], filters[webhook_id], etc. +func buildMetricsQuery(p MetricsQueryParams) string { + q := url.Values{} + q.Set("date_range[start]", p.Start) + q.Set("date_range[end]", p.End) + if p.Granularity != "" { + q.Set("granularity", p.Granularity) + } + for _, m := range p.Measures { + q.Add("measures[]", m) + } + for _, d := range p.Dimensions { + q.Add("dimensions[]", d) + } + if p.SourceID != "" { + q.Set("filters[source_id]", p.SourceID) + } + if p.DestinationID != "" { + q.Set("filters[destination_id]", p.DestinationID) + } + if p.ConnectionID != "" { + q.Set("filters[webhook_id]", p.ConnectionID) + } + if p.Status != "" { + q.Set("filters[status]", p.Status) + } + if p.IssueID != "" { + q.Set("filters[issue_id]", p.IssueID) + } + return q.Encode() +} + +// metricsResponseWrapper is used when the API returns an object with a "data" array instead of a raw array. +type metricsResponseWrapper struct { + Data MetricsResponse `json:"data"` +} + +func (c *Client) queryMetrics(ctx context.Context, path string, params MetricsQueryParams) (MetricsResponse, error) { + queryStr := buildMetricsQuery(params) + resp, err := c.Get(ctx, APIPathPrefix+path, queryStr, nil) + if err != nil { + return nil, err + } + body, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, fmt.Errorf("failed to read metrics response: %w", err) + } + // Try as array first (most endpoints return []MetricDataPoint). + var result MetricsResponse + if err := json.Unmarshal(body, &result); err == nil { + return result, nil + } + // Some endpoints may return {"data": [...]}. 
+ var wrapped metricsResponseWrapper + if err := json.NewDecoder(bytes.NewReader(body)).Decode(&wrapped); err != nil { + return nil, fmt.Errorf("failed to parse metrics response: %w", err) + } + return wrapped.Data, nil +} + +// QueryEventMetrics returns event metrics (GET /metrics/events). +func (c *Client) QueryEventMetrics(ctx context.Context, params MetricsQueryParams) (MetricsResponse, error) { + return c.queryMetrics(ctx, "/metrics/events", params) +} + +// QueryRequestMetrics returns request metrics (GET /metrics/requests). +func (c *Client) QueryRequestMetrics(ctx context.Context, params MetricsQueryParams) (MetricsResponse, error) { + return c.queryMetrics(ctx, "/metrics/requests", params) +} + +// QueryAttemptMetrics returns attempt metrics (GET /metrics/attempts). +func (c *Client) QueryAttemptMetrics(ctx context.Context, params MetricsQueryParams) (MetricsResponse, error) { + return c.queryMetrics(ctx, "/metrics/attempts", params) +} + +// QueryQueueDepth returns queue depth metrics (GET /metrics/queue-depth). +func (c *Client) QueryQueueDepth(ctx context.Context, params MetricsQueryParams) (MetricsResponse, error) { + return c.queryMetrics(ctx, "/metrics/queue-depth", params) +} + +// QueryEventsPendingTimeseries returns events pending timeseries (GET /metrics/events-pending-timeseries). +func (c *Client) QueryEventsPendingTimeseries(ctx context.Context, params MetricsQueryParams) (MetricsResponse, error) { + return c.queryMetrics(ctx, "/metrics/events-pending-timeseries", params) +} + +// QueryEventsByIssue returns events grouped by issue (GET /metrics/events-by-issue). +func (c *Client) QueryEventsByIssue(ctx context.Context, params MetricsQueryParams) (MetricsResponse, error) { + return c.queryMetrics(ctx, "/metrics/events-by-issue", params) +} + +// QueryTransformationMetrics returns transformation metrics (GET /metrics/transformations). 
+func (c *Client) QueryTransformationMetrics(ctx context.Context, params MetricsQueryParams) (MetricsResponse, error) { + return c.queryMetrics(ctx, "/metrics/transformations", params) +} diff --git a/test/acceptance/metrics_test.go b/test/acceptance/metrics_test.go new file mode 100644 index 0000000..6367a9f --- /dev/null +++ b/test/acceptance/metrics_test.go @@ -0,0 +1,270 @@ +package acceptance + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// metricsStart and metricsEnd define a fixed date range for metrics acceptance tests. +// Use a past range that the API will accept. +const metricsStart = "2025-01-01T00:00:00Z" +const metricsEnd = "2025-01-02T00:00:00Z" + +func metricsArgs(subcmd string, extra ...string) []string { + args := []string{"gateway", "metrics", subcmd, "--start", metricsStart, "--end", metricsEnd} + return append(args, extra...) +} + +// TestMetricsHelp verifies that hookdeck gateway metrics --help lists all 7 subcommands. +func TestMetricsHelp(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + stdout := cli.RunExpectSuccess("gateway", "metrics", "--help") + assert.Contains(t, stdout, "events") + assert.Contains(t, stdout, "requests") + assert.Contains(t, stdout, "attempts") + assert.Contains(t, stdout, "queue-depth") + assert.Contains(t, stdout, "pending") + assert.Contains(t, stdout, "events-by-issue") + assert.Contains(t, stdout, "transformations") +} + +// Baseline: one success test per endpoint. API requires at least one measure for most endpoints. +func TestMetricsEvents(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + stdout := cli.RunExpectSuccess(append(metricsArgs("events"), "--measures", "count")...) 
+ assert.NotEmpty(t, stdout) +} + +func TestMetricsRequests(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + stdout := cli.RunExpectSuccess(append(metricsArgs("requests"), "--measures", "count")...) + assert.NotEmpty(t, stdout) +} + +func TestMetricsAttempts(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + stdout := cli.RunExpectSuccess(append(metricsArgs("attempts"), "--measures", "count")...) + assert.NotEmpty(t, stdout) +} + +func TestMetricsQueueDepth(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + stdout := cli.RunExpectSuccess(append(metricsArgs("queue-depth"), "--measures", "max_depth")...) + assert.NotEmpty(t, stdout) +} + +func TestMetricsPending(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + stdout := cli.RunExpectSuccess(append(metricsArgs("pending"), "--measures", "count")...) + assert.NotEmpty(t, stdout) +} + +func TestMetricsEventsByIssue(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + // events-by-issue requires issue-id as positional argument and --measures + stdout := cli.RunExpectSuccess("gateway", "metrics", "events-by-issue", "iss_placeholder", "--start", metricsStart, "--end", metricsEnd, "--measures", "count") + assert.NotEmpty(t, stdout) +} + +func TestMetricsTransformations(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + stdout := cli.RunExpectSuccess(append(metricsArgs("transformations"), "--measures", "count")...) + assert.NotEmpty(t, stdout) +} + +// Common flags: granularity, measures, dimensions, source-id, destination-id, connection-id, output. 
+func TestMetricsEventsWithGranularity(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + stdout := cli.RunExpectSuccess(append(metricsArgs("events"), "--granularity", "1d", "--measures", "count")...) + assert.NotEmpty(t, stdout) +} + +func TestMetricsEventsWithMeasures(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + stdout := cli.RunExpectSuccess(append(metricsArgs("events"), "--measures", "count,failed_count")...) + assert.NotEmpty(t, stdout) +} + +func TestMetricsQueueDepthWithMeasuresAndDimensions(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + stdout := cli.RunExpectSuccess(append(metricsArgs("queue-depth"), "--measures", "max_depth,max_age", "--dimensions", "destination_id")...) + assert.NotEmpty(t, stdout) +} + +func TestMetricsEventsWithSourceID(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + // Filter by a placeholder ID; API may return empty data but command should succeed + stdout := cli.RunExpectSuccess(append(metricsArgs("events"), "--measures", "count", "--source-id", "src_placeholder")...) + assert.NotEmpty(t, stdout) +} + +func TestMetricsEventsWithConnectionID(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + stdout := cli.RunExpectSuccess(append(metricsArgs("events"), "--measures", "count", "--connection-id", "web_placeholder")...) + assert.NotEmpty(t, stdout) +} + +func TestMetricsEventsWithDestinationID(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + stdout := cli.RunExpectSuccess(append(metricsArgs("events"), "--measures", "count", "--destination-id", "dst_placeholder")...) 
+ assert.NotEmpty(t, stdout) +} + +func TestMetricsEventsWithStatus(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + stdout := cli.RunExpectSuccess(append(metricsArgs("events"), "--measures", "count", "--status", "SUCCESSFUL")...) + assert.NotEmpty(t, stdout) +} + +// Output: JSON structure (array of objects with time_bucket, dimensions, metrics). +func TestMetricsEventsOutputJSON(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + var data []struct { + TimeBucket *string `json:"time_bucket"` + Dimensions map[string]interface{} `json:"dimensions"` + Metrics map[string]float64 `json:"metrics"` + } + require.NoError(t, cli.RunJSON(&data, append(metricsArgs("events"), "--measures", "count")...)) + // Response is an array; may be empty + assert.NotNil(t, data) +} + +func TestMetricsQueueDepthOutputJSON(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + var data []struct { + TimeBucket *string `json:"time_bucket"` + Dimensions map[string]interface{} `json:"dimensions"` + Metrics map[string]float64 `json:"metrics"` + } + require.NoError(t, cli.RunJSON(&data, append(metricsArgs("queue-depth"), "--measures", "max_depth")...)) + assert.NotNil(t, data) +} + +// Validation: missing --start or --end should fail. 
+func TestMetricsEventsMissingStart(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + _, _, err := cli.Run("gateway", "metrics", "events", "--end", metricsEnd) + require.Error(t, err) +} + +func TestMetricsEventsMissingEnd(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + _, _, err := cli.Run("gateway", "metrics", "events", "--start", metricsStart) + require.Error(t, err) +} + +func TestMetricsRequestsMissingStart(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + _, _, err := cli.Run("gateway", "metrics", "requests", "--end", metricsEnd) + require.Error(t, err) +} + +func TestMetricsAttemptsMissingEnd(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + _, _, err := cli.Run("gateway", "metrics", "attempts", "--start", metricsStart) + require.Error(t, err) +} + +// Missing --measures: API returns 422 (measures required for all endpoints). +func TestMetricsEventsMissingMeasures(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + _, _, err := cli.Run("gateway", "metrics", "events", "--start", metricsStart, "--end", metricsEnd) + require.Error(t, err) +} + +// events-by-issue without required argument: Cobra rejects (ExactArgs(1)). +func TestMetricsEventsByIssueMissingIssueID(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + _, _, err := cli.Run("gateway", "metrics", "events-by-issue", "--start", metricsStart, "--end", metricsEnd, "--measures", "count") + require.Error(t, err) +} + +// Pending and transformations with minimal flags. 
+func TestMetricsPendingWithGranularity(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + stdout := cli.RunExpectSuccess(append(metricsArgs("pending"), "--granularity", "1h", "--measures", "count")...) + assert.NotEmpty(t, stdout) +} + +func TestMetricsTransformationsWithMeasures(t *testing.T) { + if testing.Short() { + t.Skip("Skipping acceptance test in short mode") + } + cli := NewCLIRunner(t) + stdout := cli.RunExpectSuccess(append(metricsArgs("transformations"), "--measures", "count,error_rate")...) + assert.NotEmpty(t, stdout) +} diff --git a/tools/generate-reference/main.go b/tools/generate-reference/main.go index 6f80efc..06c3dc4 100644 --- a/tools/generate-reference/main.go +++ b/tools/generate-reference/main.go @@ -361,7 +361,8 @@ func globalFlagsTable(root *cobra.Command) string { } else { flag = fmt.Sprintf("`--%s`", f.name) } - usage := strings.ReplaceAll(f.usage, "|", "\\|") + usage := normalizeUsageForTable(f.usage) + usage = strings.ReplaceAll(usage, "|", "\\|") b.WriteString(fmt.Sprintf("| %s | `%s` | %s |\n", flag, f.ftype, usage)) } return b.String() @@ -412,7 +413,8 @@ func generateGlobalFlags(root *cobra.Command) string { } else { flag = fmt.Sprintf("`--%s`", f.name) } - usage := strings.ReplaceAll(f.usage, "|", "\\|") + usage := normalizeUsageForTable(f.usage) + usage = strings.ReplaceAll(usage, "|", "\\|") b.WriteString(fmt.Sprintf("| %s | `%s` | %s |\n", flag, f.ftype, usage)) } return b.String() @@ -569,7 +571,8 @@ func commandSection(root *cobra.Command, c *cobra.Command, wrap wrapConfig, glob mainBuf.WriteString("| Flag | Type | Description |\n") mainBuf.WriteString("|------|------|-------------|\n") for _, r := range flagRows { - usage := wrapFlagsInBackticks(r.usage) + usage := normalizeUsageForTable(r.usage) + usage = wrapFlagsInBackticks(usage) usage = strings.ReplaceAll(usage, "|", "\\|") mainBuf.WriteString(fmt.Sprintf("| %s | %s | %s |\n", r.flag, r.ftype, 
usage)) } @@ -688,6 +691,27 @@ func extractExamplesFromLong(long string) (prose, examplesBlock string) { return prose, examplesBlock } +// escapeAngleBracketsForMarkdown replaces < and > with HTML entities so Markdoc and +// other parsers do not treat placeholders like or as HTML tags. +// Use for any generator output that is embedded in markdown (usage lines, table cells). +func escapeAngleBracketsForMarkdown(s string) string { + s = strings.ReplaceAll(s, "<", "<") + s = strings.ReplaceAll(s, ">", ">") + return s +} + +// normalizeUsageForTable collapses newlines and extra spaces in flag usage so markdown +// table rows stay on one line. Escapes angle brackets so Markdoc does not treat them +// as HTML tags (e.g. "" in granularity help). Use for any flag description +// emitted into a markdown table. +func normalizeUsageForTable(s string) string { + s = strings.ReplaceAll(s, "\n", " ") + s = strings.ReplaceAll(s, "\r", " ") + s = regexp.MustCompile(`\s+`).ReplaceAllString(s, " ") + s = escapeAngleBracketsForMarkdown(s) + return strings.TrimSpace(s) +} + // wrapFlagsInBackticks wraps flag references (--flag-name) in backticks for markdown. // Skips segments already inside backticks to avoid double-wrapping (RE2 has no lookbehind). var flagLongRE = regexp.MustCompile(`--([a-zA-Z][a-zA-Z0-9_-]*)`)