From 2b079c97da7315fb72b6dd90f3add54a5b8c6174 Mon Sep 17 00:00:00 2001 From: "Gab." Date: Sun, 15 Mar 2026 13:13:36 +0700 Subject: [PATCH 01/18] feat: Add Grafana service for dashboard and data source management - Implemented GrafanaService with endpoints for creating, updating, retrieving, and deleting dashboards. - Added functionality for managing data sources and annotations. - Included health check endpoint for Grafana. feat: Introduce MongoDB service for multi-tenant product management - Created MongoDBService to handle product operations across different tenant databases. - Implemented CRUD operations for products with tenant-specific database connections. - Added search and analytics functionalities for products. feat: Develop Multi-Tenant service for order management - Established MultiTenantService to manage orders across multiple PostgreSQL databases. - Implemented CRUD operations for orders with tenant-specific handling. - Auto-migrated order schema for each connected database. feat: Create Products service with basic functionality - Added ProductsService with a simple endpoint to return a greeting message. - Registered service for future expansion. feat: Implement Tasks service for task management - Developed TasksService with CRUD operations for tasks using PostgreSQL. - Utilized async operations to enhance performance. feat: Add Users service for user management - Created UsersService with endpoints for user CRUD operations. - Implemented request validation and mock data handling for user management. 
--- config.yaml | 18 ++++---- .../{service_h.go => broadcast_service.go} | 42 +++++++++---------- .../{service_c.go => cache_service.go} | 20 ++++----- .../{service_e.go => encryption_service.go} | 38 ++++++++--------- .../{service_i.go => grafana_service.go} | 38 ++++++++--------- .../{service_g.go => mongodb_service.go} | 38 ++++++++--------- .../{service_f.go => multi_tenant_service.go} | 36 ++++++++-------- internal/services/modules/products_service.go | 37 ++++++++++++++++ internal/services/modules/service_b.go | 37 ---------------- .../{service_d.go => tasks_service.go} | 29 +++++++------ .../{service_a.go => users_service.go} | 28 ++++++------- 11 files changed, 181 insertions(+), 180 deletions(-) rename internal/services/modules/{service_h.go => broadcast_service.go} (84%) rename internal/services/modules/{service_c.go => cache_service.go} (68%) rename internal/services/modules/{service_e.go => encryption_service.go} (86%) rename internal/services/modules/{service_i.go => grafana_service.go} (81%) rename internal/services/modules/{service_g.go => mongodb_service.go} (91%) rename internal/services/modules/{service_f.go => multi_tenant_service.go} (87%) create mode 100644 internal/services/modules/products_service.go delete mode 100644 internal/services/modules/service_b.go rename internal/services/modules/{service_d.go => tasks_service.go} (76%) rename internal/services/modules/{service_a.go => users_service.go} (81%) diff --git a/config.yaml b/config.yaml index 39ff9f6..db2cf09 100644 --- a/config.yaml +++ b/config.yaml @@ -12,15 +12,15 @@ server: port: "8080" services: - service_a: true - service_b: false - service_c: true - service_d: false - service_e: false - service_f: true - service_g: true - service_h: true - service_i: true + users_service: true + broadcast_service: false + cache_service: true + encryption_service: false + grafana_service: false + mongodb_service: true + multi_tenant_service: true + products_service: true + tasks_service: true auth: 
type: "apikey" diff --git a/internal/services/modules/service_h.go b/internal/services/modules/broadcast_service.go similarity index 84% rename from internal/services/modules/service_h.go rename to internal/services/modules/broadcast_service.go index fe40aec..35d7916 100644 --- a/internal/services/modules/service_h.go +++ b/internal/services/modules/broadcast_service.go @@ -6,9 +6,9 @@ import ( "time" "stackyard/config" - "stackyard/pkg/registry" "stackyard/pkg/interfaces" "stackyard/pkg/logger" + "stackyard/pkg/registry" "stackyard/pkg/response" "stackyard/pkg/utils" @@ -95,15 +95,15 @@ func (sg *SimpleStreamGenerator) generateEvents() { // ServiceH is a super simple demo of using the broadcast utility // Shows how easy it is to add event streaming to any service! -type ServiceH struct { +type BroadcastService struct { enabled bool broadcaster *utils.EventBroadcaster streams map[string]*SimpleStreamGenerator logger *logger.Logger } -func NewServiceH(enabled bool, logger *logger.Logger) *ServiceH { - service := &ServiceH{ +func NewBroadcastService(enabled bool, logger *logger.Logger) *BroadcastService { + service := &BroadcastService{ enabled: enabled, broadcaster: utils.NewEventBroadcaster(), streams: make(map[string]*SimpleStreamGenerator), @@ -111,21 +111,21 @@ func NewServiceH(enabled bool, logger *logger.Logger) *ServiceH { } if enabled { - logger.Info("Service H starting - broadcasting made easy!") + logger.Info("Broadcast Service starting - broadcasting made easy!") service.startDemoStreams() - logger.Info("Service H ready!") + logger.Info("Broadcast Service ready!") } return service } -func (s *ServiceH) Name() string { return "Service H (Broadcast Utility Demo)" } -func (s *ServiceH) Enabled() bool { return s.enabled } -func (s *ServiceH) Endpoints() []string { +func (s *BroadcastService) Name() string { return "Broadcast Service" } +func (s *BroadcastService) Enabled() bool { return s.enabled } +func (s *BroadcastService) Endpoints() []string { return 
[]string{"/events/stream/{stream_id}", "/events/broadcast", "/events/streams"} } -func (s *ServiceH) RegisterRoutes(g *echo.Group) { +func (s *BroadcastService) RegisterRoutes(g *echo.Group) { events := g.Group("/events") events.GET("/stream/:stream_id", s.streamEvents) events.POST("/broadcast", s.broadcastEvent) @@ -138,7 +138,7 @@ func (s *ServiceH) RegisterRoutes(g *echo.Group) { // HANDLER METHODS - Using Broadcast Utility // ========================================= -func (s *ServiceH) streamEvents(c echo.Context) error { +func (s *BroadcastService) streamEvents(c echo.Context) error { streamID := c.Param("stream_id") client := s.broadcaster.Subscribe(streamID) defer s.broadcaster.Unsubscribe(client.ID) @@ -154,7 +154,7 @@ func (s *ServiceH) streamEvents(c echo.Context) error { ID: "connected", Type: "connection", Message: "Connected to stream: " + streamID, - Data: map[string]interface{}{"stream_id": streamID, "service": "service_h"}, + Data: map[string]interface{}{"stream_id": streamID, "service": "broadcast_service"}, Timestamp: time.Now().Unix(), StreamID: streamID, } @@ -174,7 +174,7 @@ func (s *ServiceH) streamEvents(c echo.Context) error { } } -func (s *ServiceH) broadcastEvent(c echo.Context) error { +func (s *BroadcastService) broadcastEvent(c echo.Context) error { type BroadcastRequest struct { StreamID string `json:"stream_id,omitempty"` Type string `json:"type" validate:"required"` @@ -200,7 +200,7 @@ func (s *ServiceH) broadcastEvent(c echo.Context) error { } } -func (s *ServiceH) getActiveStreams(c echo.Context) error { +func (s *BroadcastService) getActiveStreams(c echo.Context) error { activeStreams := s.broadcaster.GetActiveStreams() totalClients := s.broadcaster.GetTotalClients() streamCount := s.broadcaster.GetStreamCount() @@ -217,13 +217,13 @@ func (s *ServiceH) getActiveStreams(c echo.Context) error { "streams": streamInfo, "total_clients": totalClients, "stream_count": streamCount, - "service": "service_h", + "service": 
"broadcast_service", } return response.Success(c, result, "Active streams retrieved") } -func (s *ServiceH) startStream(c echo.Context) error { +func (s *BroadcastService) startStream(c echo.Context) error { streamID := c.Param("stream_id") if generator, exists := s.streams[streamID]; exists { @@ -238,7 +238,7 @@ func (s *ServiceH) startStream(c echo.Context) error { return response.Created(c, nil, fmt.Sprintf("Stream '%s' created and started", streamID)) } -func (s *ServiceH) stopStream(c echo.Context) error { +func (s *BroadcastService) stopStream(c echo.Context) error { streamID := c.Param("stream_id") generator, exists := s.streams[streamID] @@ -256,7 +256,7 @@ func (s *ServiceH) stopStream(c echo.Context) error { // HELPER METHODS // ========================================= -func (s *ServiceH) sendSSEEvent(c echo.Context, event utils.EventData) error { +func (s *BroadcastService) sendSSEEvent(c echo.Context, event utils.EventData) error { eventJSON, err := json.Marshal(event) if err != nil { return err @@ -271,7 +271,7 @@ func (s *ServiceH) sendSSEEvent(c echo.Context, event utils.EventData) error { return nil } -func (s *ServiceH) startDemoStreams() { +func (s *BroadcastService) startDemoStreams() { streams := []string{"demo-notifications", "demo-metrics", "demo-alerts"} for _, streamID := range streams { @@ -283,7 +283,7 @@ func (s *ServiceH) startDemoStreams() { // Auto-registration function - called when package is imported func init() { - registry.RegisterService("service_h", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { - return NewServiceH(config.Services.IsEnabled("service_h"), logger) + registry.RegisterService("broadcast_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + return NewBroadcastService(config.Services.IsEnabled("broadcast_service"), logger) }) } diff --git a/internal/services/modules/service_c.go 
b/internal/services/modules/cache_service.go similarity index 68% rename from internal/services/modules/service_c.go rename to internal/services/modules/cache_service.go index 62b32bf..8579608 100644 --- a/internal/services/modules/service_c.go +++ b/internal/services/modules/cache_service.go @@ -4,37 +4,37 @@ import ( "time" "stackyard/config" - "stackyard/pkg/registry" "stackyard/pkg/cache" "stackyard/pkg/interfaces" "stackyard/pkg/logger" + "stackyard/pkg/registry" "stackyard/pkg/response" "github.com/labstack/echo/v4" ) -type ServiceC struct { +type CacheService struct { enabled bool store *cache.Cache[string] } -func NewServiceC(enabled bool) *ServiceC { - return &ServiceC{ +func NewCacheService(enabled bool) *CacheService { + return &CacheService{ enabled: enabled, store: cache.New[string](), } } -func (s *ServiceC) Name() string { return "Service C (Cache Demo)" } -func (s *ServiceC) Enabled() bool { return s.enabled } -func (s *ServiceC) Endpoints() []string { return []string{"/cache"} } +func (s *CacheService) Name() string { return "Cache Service" } +func (s *CacheService) Enabled() bool { return s.enabled } +func (s *CacheService) Endpoints() []string { return []string{"/cache"} } type CacheRequest struct { Value string `json:"value"` TTL int `json:"ttl_seconds"` // Optional } -func (s *ServiceC) RegisterRoutes(g *echo.Group) { +func (s *CacheService) RegisterRoutes(g *echo.Group) { sub := g.Group("/cache") // GET /cache/:key @@ -68,7 +68,7 @@ func (s *ServiceC) RegisterRoutes(g *echo.Group) { // Auto-registration function - called when package is imported func init() { - registry.RegisterService("service_c", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { - return NewServiceC(config.Services.IsEnabled("service_c")) + registry.RegisterService("cache_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + return 
NewCacheService(config.Services.IsEnabled("cache_service")) }) } diff --git a/internal/services/modules/service_e.go b/internal/services/modules/encryption_service.go similarity index 86% rename from internal/services/modules/service_e.go rename to internal/services/modules/encryption_service.go index 811b655..edb48a5 100644 --- a/internal/services/modules/service_e.go +++ b/internal/services/modules/encryption_service.go @@ -14,21 +14,21 @@ import ( "time" "stackyard/config" - "stackyard/pkg/registry" "stackyard/pkg/interfaces" "stackyard/pkg/logger" + "stackyard/pkg/registry" "stackyard/pkg/response" "github.com/labstack/echo/v4" ) -type ServiceE struct { +type EncryptionService struct { enabled bool algorithm string encryptionKey []byte } -func NewServiceE(enabled bool, config map[string]interface{}) *ServiceE { +func NewEncryptionService(enabled bool, config map[string]interface{}) *EncryptionService { // Extract configuration algorithm := "aes-256-gcm" key := "" @@ -55,20 +55,20 @@ func NewServiceE(enabled bool, config map[string]interface{}) *ServiceE { keyBytes = keyBytes[:32] } - return &ServiceE{ + return &EncryptionService{ enabled: enabled, algorithm: algorithm, encryptionKey: keyBytes, } } -func (s *ServiceE) Name() string { return "Service E (Encryption)" } -func (s *ServiceE) Enabled() bool { return s.enabled } -func (s *ServiceE) Endpoints() []string { +func (s *EncryptionService) Name() string { return "Encryption Service" } +func (s *EncryptionService) Enabled() bool { return s.enabled } +func (s *EncryptionService) Endpoints() []string { return []string{"/encryption/encrypt", "/encryption/decrypt", "/encryption/status", "/encryption/key-rotate"} } -func (s *ServiceE) RegisterRoutes(g *echo.Group) { +func (s *EncryptionService) RegisterRoutes(g *echo.Group) { sub := g.Group("/encryption") // Encrypt endpoint @@ -123,7 +123,7 @@ type KeyRotateRequest struct { } // Encryption/Decryption functions -func (s *ServiceE) encrypt(data []byte) (string, 
error) { +func (s *EncryptionService) encrypt(data []byte) (string, error) { block, err := aes.NewCipher(s.encryptionKey) if err != nil { return "", fmt.Errorf("failed to create cipher: %v", err) @@ -148,7 +148,7 @@ func (s *ServiceE) encrypt(data []byte) (string, error) { return base64.StdEncoding.EncodeToString(encrypted), nil } -func (s *ServiceE) decrypt(encryptedData string) ([]byte, error) { +func (s *EncryptionService) decrypt(encryptedData string) ([]byte, error) { // Decode from base64 data, err := base64.StdEncoding.DecodeString(encryptedData) if err != nil { @@ -184,7 +184,7 @@ func (s *ServiceE) decrypt(encryptedData string) ([]byte, error) { } // Handlers -func (s *ServiceE) EncryptData(c echo.Context) error { +func (s *EncryptionService) EncryptData(c echo.Context) error { var req EncryptRequest if err := c.Bind(&req); err != nil { return response.BadRequest(c, "Invalid request body") @@ -212,7 +212,7 @@ func (s *ServiceE) EncryptData(c echo.Context) error { return response.Success(c, resp, "Data encrypted successfully") } -func (s *ServiceE) DecryptData(c echo.Context) error { +func (s *EncryptionService) DecryptData(c echo.Context) error { var req DecryptRequest if err := c.Bind(&req); err != nil { return response.BadRequest(c, "Invalid request body") @@ -240,7 +240,7 @@ func (s *ServiceE) DecryptData(c echo.Context) error { return response.Success(c, resp, "Data decrypted successfully") } -func (s *ServiceE) GetStatus(c echo.Context) error { +func (s *EncryptionService) GetStatus(c echo.Context) error { // Get current key info (show only first 8 chars for security) currentKeyPreview := fmt.Sprintf("%s...", hex.EncodeToString(s.encryptionKey[:4])) @@ -256,7 +256,7 @@ func (s *ServiceE) GetStatus(c echo.Context) error { return response.Success(c, resp, "Encryption service status") } -func (s *ServiceE) RotateKey(c echo.Context) error { +func (s *EncryptionService) RotateKey(c echo.Context) error { var req KeyRotateRequest if err := c.Bind(&req); err 
!= nil { return response.BadRequest(c, "Invalid request body") @@ -293,7 +293,7 @@ func (s *ServiceE) RotateKey(c echo.Context) error { } // Middleware for automatic request/response encryption -func (s *ServiceE) EncryptionMiddleware() echo.MiddlewareFunc { +func (s *EncryptionService) EncryptionMiddleware() echo.MiddlewareFunc { return func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { // Skip encryption for encryption service endpoints @@ -319,7 +319,7 @@ func (s *ServiceE) EncryptionMiddleware() echo.MiddlewareFunc { } // Helper function to encrypt JSON data -func (s *ServiceE) EncryptJSON(data interface{}) (string, error) { +func (s *EncryptionService) EncryptJSON(data interface{}) (string, error) { jsonData, err := json.Marshal(data) if err != nil { return "", fmt.Errorf("failed to marshal JSON: %v", err) @@ -329,7 +329,7 @@ func (s *ServiceE) EncryptJSON(data interface{}) (string, error) { } // Helper function to decrypt to JSON -func (s *ServiceE) DecryptJSON(encryptedData string, target interface{}) error { +func (s *EncryptionService) DecryptJSON(encryptedData string, target interface{}) error { decrypted, err := s.decrypt(encryptedData) if err != nil { return fmt.Errorf("failed to decrypt: %v", err) @@ -340,11 +340,11 @@ func (s *ServiceE) DecryptJSON(encryptedData string, target interface{}) error { // Auto-registration function - called when package is imported func init() { - registry.RegisterService("service_e", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + registry.RegisterService("encryption_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { encryptionConfig := map[string]interface{}{ "algorithm": config.Encryption.Algorithm, "key": config.Encryption.Key, } - return NewServiceE(config.Encryption.Enabled, encryptionConfig) + return NewEncryptionService(config.Encryption.Enabled, encryptionConfig) }) } 
diff --git a/internal/services/modules/service_i.go b/internal/services/modules/grafana_service.go similarity index 81% rename from internal/services/modules/service_i.go rename to internal/services/modules/grafana_service.go index 717733a..1e6154c 100644 --- a/internal/services/modules/service_i.go +++ b/internal/services/modules/grafana_service.go @@ -13,25 +13,25 @@ import ( ) // ServiceI provides Grafana integration endpoints -type ServiceI struct { +type GrafanaService struct { grafanaManager *infrastructure.GrafanaManager enabled bool logger *logger.Logger } -func NewServiceI(grafanaManager *infrastructure.GrafanaManager, enabled bool, logger *logger.Logger) *ServiceI { - return &ServiceI{ +func NewGrafanaService(grafanaManager *infrastructure.GrafanaManager, enabled bool, logger *logger.Logger) *GrafanaService { + return &GrafanaService{ grafanaManager: grafanaManager, enabled: enabled, logger: logger, } } -func (s *ServiceI) Name() string { return "Grafana Integration Service" } -func (s *ServiceI) Enabled() bool { return s.enabled && s.grafanaManager != nil } -func (s *ServiceI) Endpoints() []string { return []string{"/grafana"} } +func (s *GrafanaService) Name() string { return "Grafana Service" } +func (s *GrafanaService) Enabled() bool { return s.enabled && s.grafanaManager != nil } +func (s *GrafanaService) Endpoints() []string { return []string{"/grafana"} } -func (s *ServiceI) RegisterRoutes(g *echo.Group) { +func (s *GrafanaService) RegisterRoutes(g *echo.Group) { sub := g.Group("/grafana") // Dashboard management @@ -52,7 +52,7 @@ func (s *ServiceI) RegisterRoutes(g *echo.Group) { } // createDashboard creates a new Grafana dashboard -func (s *ServiceI) createDashboard(c echo.Context) error { +func (s *GrafanaService) createDashboard(c echo.Context) error { var dashboard infrastructure.GrafanaDashboard if err := c.Bind(&dashboard); err != nil { return response.BadRequest(c, "Invalid dashboard data") @@ -68,7 +68,7 @@ func (s *ServiceI) 
createDashboard(c echo.Context) error { } // updateDashboard updates an existing Grafana dashboard -func (s *ServiceI) updateDashboard(c echo.Context) error { +func (s *GrafanaService) updateDashboard(c echo.Context) error { uid := c.Param("uid") if uid == "" { return response.BadRequest(c, "Dashboard UID is required") @@ -92,7 +92,7 @@ func (s *ServiceI) updateDashboard(c echo.Context) error { } // getDashboard retrieves a Grafana dashboard by UID -func (s *ServiceI) getDashboard(c echo.Context) error { +func (s *GrafanaService) getDashboard(c echo.Context) error { uid := c.Param("uid") if uid == "" { return response.BadRequest(c, "Dashboard UID is required") @@ -108,7 +108,7 @@ func (s *ServiceI) getDashboard(c echo.Context) error { } // deleteDashboard deletes a Grafana dashboard by UID -func (s *ServiceI) deleteDashboard(c echo.Context) error { +func (s *GrafanaService) deleteDashboard(c echo.Context) error { uid := c.Param("uid") if uid == "" { return response.BadRequest(c, "Dashboard UID is required") @@ -124,7 +124,7 @@ func (s *ServiceI) deleteDashboard(c echo.Context) error { } // listDashboards lists all Grafana dashboards -func (s *ServiceI) listDashboards(c echo.Context) error { +func (s *GrafanaService) listDashboards(c echo.Context) error { // Parse pagination parameters page := 1 perPage := 50 @@ -164,7 +164,7 @@ func (s *ServiceI) listDashboards(c echo.Context) error { } // createDataSource creates a new Grafana data source -func (s *ServiceI) createDataSource(c echo.Context) error { +func (s *GrafanaService) createDataSource(c echo.Context) error { var ds infrastructure.GrafanaDataSource if err := c.Bind(&ds); err != nil { return response.BadRequest(c, "Invalid data source data") @@ -180,7 +180,7 @@ func (s *ServiceI) createDataSource(c echo.Context) error { } // createAnnotation creates a new Grafana annotation -func (s *ServiceI) createAnnotation(c echo.Context) error { +func (s *GrafanaService) createAnnotation(c echo.Context) error { var 
annotation infrastructure.GrafanaAnnotation if err := c.Bind(&annotation); err != nil { return response.BadRequest(c, "Invalid annotation data") @@ -196,7 +196,7 @@ func (s *ServiceI) createAnnotation(c echo.Context) error { } // getHealth returns Grafana health status -func (s *ServiceI) getHealth(c echo.Context) error { +func (s *GrafanaService) getHealth(c echo.Context) error { health, err := s.grafanaManager.GetHealth(c.Request().Context()) if err != nil { s.logger.Error("Failed to get Grafana health", err) @@ -208,14 +208,14 @@ func (s *ServiceI) getHealth(c echo.Context) error { // Auto-registration function - called when package is imported func init() { - registry.RegisterService("service_i", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { - if !config.Services.IsEnabled("service_i") { + registry.RegisterService("grafana_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + if !config.Services.IsEnabled("grafana_service") { return nil } if deps == nil || deps.GrafanaManager == nil { - logger.Warn("Grafana manager not available, skipping Service I") + logger.Warn("Grafana manager not available, skipping Grafana Service") return nil } - return NewServiceI(deps.GrafanaManager, true, logger) + return NewGrafanaService(deps.GrafanaManager, true, logger) }) } diff --git a/internal/services/modules/service_g.go b/internal/services/modules/mongodb_service.go similarity index 91% rename from internal/services/modules/service_g.go rename to internal/services/modules/mongodb_service.go index 7e0e85a..bb6dce6 100644 --- a/internal/services/modules/service_g.go +++ b/internal/services/modules/mongodb_service.go @@ -27,31 +27,31 @@ type Product struct { // ServiceG demonstrates using multiple MongoDB connections with NoSQL operations // This service shows how to work with different MongoDB databases dynamically -type ServiceG struct { +type MongoDBService 
struct { enabled bool mongoConnectionManager *infrastructure.MongoConnectionManager logger *logger.Logger } -func NewServiceG( +func NewMongoDBService( mongoConnectionManager *infrastructure.MongoConnectionManager, enabled bool, logger *logger.Logger, -) *ServiceG { - return &ServiceG{ +) *MongoDBService { + return &MongoDBService{ enabled: enabled, mongoConnectionManager: mongoConnectionManager, logger: logger, } } -func (s *ServiceG) Name() string { return "Service G (MongoDB Products)" } -func (s *ServiceG) Enabled() bool { return s.enabled } -func (s *ServiceG) Endpoints() []string { +func (s *MongoDBService) Name() string { return "MongoDB Service" } +func (s *MongoDBService) Enabled() bool { return s.enabled } +func (s *MongoDBService) Endpoints() []string { return []string{"/products/{tenant}", "/products/{tenant}/{id}"} } -func (s *ServiceG) RegisterRoutes(g *echo.Group) { +func (s *MongoDBService) RegisterRoutes(g *echo.Group) { sub := g.Group("/products") // Routes with tenant parameter for database selection @@ -65,7 +65,7 @@ func (s *ServiceG) RegisterRoutes(g *echo.Group) { } // listProductsByTenant lists products from a specific tenant database -func (s *ServiceG) listProductsByTenant(c echo.Context) error { +func (s *MongoDBService) listProductsByTenant(c echo.Context) error { tenant := c.Param("tenant") // Get the database connection for this tenant @@ -93,7 +93,7 @@ func (s *ServiceG) listProductsByTenant(c echo.Context) error { } // createProduct creates a new product in the specified tenant database -func (s *ServiceG) createProduct(c echo.Context) error { +func (s *MongoDBService) createProduct(c echo.Context) error { tenant := c.Param("tenant") // Get the database connection for this tenant @@ -143,7 +143,7 @@ func (s *ServiceG) createProduct(c echo.Context) error { } // getProductByTenant retrieves a specific product from a tenant database -func (s *ServiceG) getProductByTenant(c echo.Context) error { +func (s *MongoDBService) 
getProductByTenant(c echo.Context) error { tenant := c.Param("tenant") id := c.Param("id") @@ -173,7 +173,7 @@ func (s *ServiceG) getProductByTenant(c echo.Context) error { } // updateProduct updates a product in the specified tenant database -func (s *ServiceG) updateProduct(c echo.Context) error { +func (s *MongoDBService) updateProduct(c echo.Context) error { tenant := c.Param("tenant") id := c.Param("id") @@ -220,7 +220,7 @@ func (s *ServiceG) updateProduct(c echo.Context) error { } // deleteProduct deletes a product from the specified tenant database -func (s *ServiceG) deleteProduct(c echo.Context) error { +func (s *MongoDBService) deleteProduct(c echo.Context) error { tenant := c.Param("tenant") id := c.Param("id") @@ -251,7 +251,7 @@ func (s *ServiceG) deleteProduct(c echo.Context) error { } // searchProducts performs advanced search on products -func (s *ServiceG) searchProducts(c echo.Context) error { +func (s *MongoDBService) searchProducts(c echo.Context) error { tenant := c.Param("tenant") // Get the database connection for this tenant @@ -323,7 +323,7 @@ func (s *ServiceG) searchProducts(c echo.Context) error { } // getProductAnalytics provides analytics for products in a tenant -func (s *ServiceG) getProductAnalytics(c echo.Context) error { +func (s *MongoDBService) getProductAnalytics(c echo.Context) error { tenant := c.Param("tenant") // Get the database connection for this tenant @@ -381,14 +381,14 @@ func (s *ServiceG) getProductAnalytics(c echo.Context) error { // Auto-registration function - called when package is imported func init() { - registry.RegisterService("service_g", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { - if !config.Services.IsEnabled("service_g") { + registry.RegisterService("mongodb_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + if !config.Services.IsEnabled("mongodb_service") { return nil } if deps == nil 
|| deps.MongoConnectionManager == nil { - logger.Warn("MongoDB connections not available, skipping Service G") + logger.Warn("MongoDB connections not available, skipping MongoDB Service") return nil } - return NewServiceG(deps.MongoConnectionManager, true, logger) + return NewMongoDBService(deps.MongoConnectionManager, true, logger) }) } diff --git a/internal/services/modules/service_f.go b/internal/services/modules/multi_tenant_service.go similarity index 87% rename from internal/services/modules/service_f.go rename to internal/services/modules/multi_tenant_service.go index 0059838..032009c 100644 --- a/internal/services/modules/service_f.go +++ b/internal/services/modules/multi_tenant_service.go @@ -27,17 +27,17 @@ type MultiTenantOrder struct { // ServiceF demonstrates using multiple PostgreSQL connections with GORM // This service shows how to work with different databases dynamically using ORM -type ServiceF struct { +type MultiTenantService struct { enabled bool postgresConnectionManager *infrastructure.PostgresConnectionManager logger *logger.Logger } -func NewServiceF( +func NewMultiTenantService( postgresConnectionManager *infrastructure.PostgresConnectionManager, enabled bool, logger *logger.Logger, -) *ServiceF { +) *MultiTenantService { // Auto-migrate the schema for each connected database if enabled && postgresConnectionManager != nil { allConnections := postgresConnectionManager.GetAllConnections() @@ -50,18 +50,20 @@ func NewServiceF( } } - return &ServiceF{ + return &MultiTenantService{ enabled: enabled, postgresConnectionManager: postgresConnectionManager, logger: logger, } } -func (s *ServiceF) Name() string { return "Service F (Multi-Tenant Orders - GORM)" } -func (s *ServiceF) Enabled() bool { return s.enabled } -func (s *ServiceF) Endpoints() []string { return []string{"/orders/{tenant}", "/orders/{tenant}/{id}"} } +func (s *MultiTenantService) Name() string { return "Multi-Tenant Service" } +func (s *MultiTenantService) Enabled() bool { 
return s.enabled } +func (s *MultiTenantService) Endpoints() []string { + return []string{"/orders/{tenant}", "/orders/{tenant}/{id}"} +} -func (s *ServiceF) RegisterRoutes(g *echo.Group) { +func (s *MultiTenantService) RegisterRoutes(g *echo.Group) { sub := g.Group("/orders") // Routes with tenant parameter for database selection @@ -73,7 +75,7 @@ func (s *ServiceF) RegisterRoutes(g *echo.Group) { } // listOrdersByTenant lists orders from a specific tenant database -func (s *ServiceF) listOrdersByTenant(c echo.Context) error { +func (s *MultiTenantService) listOrdersByTenant(c echo.Context) error { tenant := c.Param("tenant") // Get the database connection for this tenant @@ -93,7 +95,7 @@ func (s *ServiceF) listOrdersByTenant(c echo.Context) error { } // createOrder creates a new order in the specified tenant database -func (s *ServiceF) createOrder(c echo.Context) error { +func (s *MultiTenantService) createOrder(c echo.Context) error { tenant := c.Param("tenant") // Get the database connection for this tenant @@ -121,7 +123,7 @@ func (s *ServiceF) createOrder(c echo.Context) error { } // getOrderByTenant retrieves a specific order from a tenant database -func (s *ServiceF) getOrderByTenant(c echo.Context) error { +func (s *MultiTenantService) getOrderByTenant(c echo.Context) error { tenant := c.Param("tenant") idStr := c.Param("id") id, err := strconv.Atoi(idStr) @@ -149,7 +151,7 @@ func (s *ServiceF) getOrderByTenant(c echo.Context) error { } // updateOrder updates an order in the specified tenant database -func (s *ServiceF) updateOrder(c echo.Context) error { +func (s *MultiTenantService) updateOrder(c echo.Context) error { tenant := c.Param("tenant") idStr := c.Param("id") id, err := strconv.Atoi(idStr) @@ -209,7 +211,7 @@ func (s *ServiceF) updateOrder(c echo.Context) error { } // deleteOrder deletes an order from the specified tenant database -func (s *ServiceF) deleteOrder(c echo.Context) error { +func (s *MultiTenantService) deleteOrder(c echo.Context) 
error { tenant := c.Param("tenant") idStr := c.Param("id") id, err := strconv.Atoi(idStr) @@ -238,14 +240,14 @@ func (s *ServiceF) deleteOrder(c echo.Context) error { // Auto-registration function - called when package is imported func init() { - registry.RegisterService("service_f", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { - if !config.Services.IsEnabled("service_f") { + registry.RegisterService("multi_tenant_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + if !config.Services.IsEnabled("multi_tenant_service") { return nil } if deps == nil || deps.PostgresConnectionManager == nil { - logger.Warn("PostgreSQL connections not available, skipping Service F") + logger.Warn("PostgreSQL connections not available, skipping Multi-Tenant Service") return nil } - return NewServiceF(deps.PostgresConnectionManager, true, logger) + return NewMultiTenantService(deps.PostgresConnectionManager, true, logger) }) } diff --git a/internal/services/modules/products_service.go b/internal/services/modules/products_service.go new file mode 100644 index 0000000..2604fd1 --- /dev/null +++ b/internal/services/modules/products_service.go @@ -0,0 +1,37 @@ +package modules + +import ( + "stackyard/config" + "stackyard/pkg/interfaces" + "stackyard/pkg/logger" + "stackyard/pkg/registry" + "stackyard/pkg/response" + + "github.com/labstack/echo/v4" +) + +type ProductsService struct { + enabled bool +} + +func NewProductsService(enabled bool) *ProductsService { + return &ProductsService{enabled: enabled} +} + +func (s *ProductsService) Name() string { return "Products Service" } +func (s *ProductsService) Enabled() bool { return s.enabled } +func (s *ProductsService) Endpoints() []string { return []string{"/products"} } + +func (s *ProductsService) RegisterRoutes(g *echo.Group) { + sub := g.Group("/products") + sub.GET("", func(c echo.Context) error { + return 
response.Success(c, map[string]string{"message": "Hello from Service B - Products"}) + }) +} + +// Auto-registration function - called when package is imported +func init() { + registry.RegisterService("products_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + return NewProductsService(config.Services.IsEnabled("products_service")) + }) +} diff --git a/internal/services/modules/service_b.go b/internal/services/modules/service_b.go deleted file mode 100644 index 950707d..0000000 --- a/internal/services/modules/service_b.go +++ /dev/null @@ -1,37 +0,0 @@ -package modules - -import ( - "stackyard/config" - "stackyard/pkg/registry" - "stackyard/pkg/interfaces" - "stackyard/pkg/logger" - "stackyard/pkg/response" - - "github.com/labstack/echo/v4" -) - -type ServiceB struct { - enabled bool -} - -func NewServiceB(enabled bool) *ServiceB { - return &ServiceB{enabled: enabled} -} - -func (s *ServiceB) Name() string { return "Service B (Products)" } -func (s *ServiceB) Enabled() bool { return s.enabled } -func (s *ServiceB) Endpoints() []string { return []string{"/products"} } - -func (s *ServiceB) RegisterRoutes(g *echo.Group) { - sub := g.Group("/products") - sub.GET("", func(c echo.Context) error { - return response.Success(c, map[string]string{"message": "Hello from Service B - Products"}) - }) -} - -// Auto-registration function - called when package is imported -func init() { - registry.RegisterService("service_b", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { - return NewServiceB(config.Services.IsEnabled("service_b")) - }) -} diff --git a/internal/services/modules/service_d.go b/internal/services/modules/tasks_service.go similarity index 76% rename from internal/services/modules/service_d.go rename to internal/services/modules/tasks_service.go index 7be441c..b084490 100644 --- a/internal/services/modules/service_d.go +++ 
b/internal/services/modules/tasks_service.go @@ -5,10 +5,10 @@ import ( "strconv" "stackyard/config" - "stackyard/pkg/registry" "stackyard/pkg/infrastructure" "stackyard/pkg/interfaces" "stackyard/pkg/logger" + "stackyard/pkg/registry" "stackyard/pkg/response" "github.com/labstack/echo/v4" @@ -22,36 +22,36 @@ type Task struct { Completed bool `json:"completed"` } -type ServiceD struct { +type TasksService struct { db *infrastructure.PostgresManager logger *logger.Logger enabled bool } -func NewServiceD(db *infrastructure.PostgresManager, enabled bool, logger *logger.Logger) *ServiceD { +func NewTasksService(db *infrastructure.PostgresManager, enabled bool, logger *logger.Logger) *TasksService { if enabled && db != nil && db.ORM != nil { // Auto-migrate the schema if err := db.ORM.AutoMigrate(&Task{}); err != nil { logger.Error("Error migrating Task model", err) } } - return &ServiceD{ + return &TasksService{ db: db, logger: logger, enabled: enabled, } } -func (s *ServiceD) Name() string { return "Service D (Tasks - GORM)" } +func (s *TasksService) Name() string { return "Tasks Service" } -func (s *ServiceD) Enabled() bool { +func (s *TasksService) Enabled() bool { // Service is enabled only if configured AND DB is available return s.enabled && s.db != nil && s.db.ORM != nil } -func (s *ServiceD) Endpoints() []string { return []string{"/tasks"} } +func (s *TasksService) Endpoints() []string { return []string{"/tasks"} } -func (s *ServiceD) RegisterRoutes(g *echo.Group) { +func (s *TasksService) RegisterRoutes(g *echo.Group) { sub := g.Group("/tasks") sub.GET("", s.listTasks) sub.POST("", s.createTask) @@ -59,7 +59,7 @@ func (s *ServiceD) RegisterRoutes(g *echo.Group) { sub.DELETE("/:id", s.deleteTask) } -func (s *ServiceD) listTasks(c echo.Context) error { +func (s *TasksService) listTasks(c echo.Context) error { var tasks []Task // Use async GORM operation to avoid blocking main thread @@ -74,7 +74,7 @@ func (s *ServiceD) listTasks(c echo.Context) error { return 
response.Success(c, tasks) } -func (s *ServiceD) createTask(c echo.Context) error { +func (s *TasksService) createTask(c echo.Context) error { task := new(Task) if err := c.Bind(task); err != nil { return response.BadRequest(c, "Invalid input") @@ -92,7 +92,7 @@ func (s *ServiceD) createTask(c echo.Context) error { return response.Created(c, task) } -func (s *ServiceD) updateTask(c echo.Context) error { +func (s *TasksService) updateTask(c echo.Context) error { id, _ := strconv.Atoi(c.Param("id")) var task Task @@ -117,7 +117,7 @@ func (s *ServiceD) updateTask(c echo.Context) error { return response.Success(c, task) } -func (s *ServiceD) deleteTask(c echo.Context) error { +func (s *TasksService) deleteTask(c echo.Context) error { id, _ := strconv.Atoi(c.Param("id")) var task Task @@ -135,8 +135,7 @@ func (s *ServiceD) deleteTask(c echo.Context) error { // Auto-registration function - called when package is imported func init() { - registry.RegisterService("service_d", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { - logger.Debug("Service INIT LOADED") - return NewServiceD(deps.PostgresManager, config.Services.IsEnabled("service_d"), logger) + registry.RegisterService("tasks_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + return NewTasksService(deps.PostgresManager, config.Services.IsEnabled("tasks_service"), logger) }) } diff --git a/internal/services/modules/service_a.go b/internal/services/modules/users_service.go similarity index 81% rename from internal/services/modules/service_a.go rename to internal/services/modules/users_service.go index 2ea471a..fa3f962 100644 --- a/internal/services/modules/service_a.go +++ b/internal/services/modules/users_service.go @@ -12,19 +12,19 @@ import ( "github.com/labstack/echo/v4" ) -type ServiceA struct { +type UsersService struct { enabled bool } -func NewServiceA(enabled bool) *ServiceA { - return 
&ServiceA{enabled: enabled} +func NewUsersService(enabled bool) *UsersService { + return &UsersService{enabled: enabled} } -func (s *ServiceA) Name() string { return "Service A (Users)" } -func (s *ServiceA) Enabled() bool { return s.enabled } -func (s *ServiceA) Endpoints() []string { return []string{"/users", "/users/:id"} } +func (s *UsersService) Name() string { return "Users Service" } +func (s *UsersService) Enabled() bool { return s.enabled } +func (s *UsersService) Endpoints() []string { return []string{"/users", "/users/:id"} } -func (s *ServiceA) RegisterRoutes(g *echo.Group) { +func (s *UsersService) RegisterRoutes(g *echo.Group) { sub := g.Group("/users") // List users with pagination @@ -68,7 +68,7 @@ type UpdateUserRequest struct { // Handlers -func (s *ServiceA) GetUsers(c echo.Context) error { +func (s *UsersService) GetUsers(c echo.Context) error { // Parse pagination from query var pagination response.PaginationRequest if err := c.Bind(&pagination); err != nil { @@ -93,7 +93,7 @@ func (s *ServiceA) GetUsers(c echo.Context) error { return response.SuccessWithMeta(c, users, meta, "Users retrieved successfully") } -func (s *ServiceA) GetUser(c echo.Context) error { +func (s *UsersService) GetUser(c echo.Context) error { id := c.Param("id") // Mock data - in real app, fetch from database @@ -113,7 +113,7 @@ func (s *ServiceA) GetUser(c echo.Context) error { return response.Success(c, user, "User retrieved successfully") } -func (s *ServiceA) CreateUser(c echo.Context) error { +func (s *UsersService) CreateUser(c echo.Context) error { var req CreateUserRequest // Bind and validate @@ -136,7 +136,7 @@ func (s *ServiceA) CreateUser(c echo.Context) error { return response.Created(c, user, "User created successfully") } -func (s *ServiceA) UpdateUser(c echo.Context) error { +func (s *UsersService) UpdateUser(c echo.Context) error { id := c.Param("id") var req UpdateUserRequest @@ -161,7 +161,7 @@ func (s *ServiceA) UpdateUser(c echo.Context) error { return 
response.Success(c, user, "User updated successfully") } -func (s *ServiceA) DeleteUser(c echo.Context) error { +func (s *UsersService) DeleteUser(c echo.Context) error { id := c.Param("id") // Simulate not found @@ -176,7 +176,7 @@ func (s *ServiceA) DeleteUser(c echo.Context) error { // Auto-registration function - called when package is imported func init() { - registry.RegisterService("service_a", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { - return NewServiceA(config.Services.IsEnabled("service_a")) + registry.RegisterService("users_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + return NewUsersService(config.Services.IsEnabled("users_service")) }) } From 906eabaa67cfa71733335b36a32df9beb80d2b3e Mon Sep 17 00:00:00 2001 From: "Gab." Date: Sun, 15 Mar 2026 13:25:01 +0700 Subject: [PATCH 02/18] refactor: Update documentation structure and service naming conventions --- docs_wiki/blueprint/blueprint.txt | 214 +++++++++++++++--------------- 1 file changed, 107 insertions(+), 107 deletions(-) diff --git a/docs_wiki/blueprint/blueprint.txt b/docs_wiki/blueprint/blueprint.txt index ac9e058..8a3094c 100644 --- a/docs_wiki/blueprint/blueprint.txt +++ b/docs_wiki/blueprint/blueprint.txt @@ -143,7 +143,7 @@ pagination.GetOffset() // Calculates offset for DB queries ## 5. SECURITY FEATURES -### 4.1 API Obfuscation +### 5.1 API Obfuscation **Purpose:** Obscure JSON data in transit using Base64 encoding @@ -167,7 +167,7 @@ monitoring: - Handles both obfuscated and standard responses - Normalizes URL-safe characters and adds padding -### 4.2 Error Handling +### 5.2 Error Handling **Custom HTTP Error Handler** in `internal/server/server.go`: @@ -187,9 +187,9 @@ monitoring: - 500: `INTERNAL_ERROR` - 503: `SERVICE_UNAVAILABLE` -## 5. SERVICE ARCHITECTURE +## 6. 
SERVICE ARCHITECTURE -### 5.1 Service Interface +### 6.1 Service Interface All services implement the `Service` interface: @@ -202,44 +202,44 @@ type Service interface { } ``` -### 5.2 Service Implementation +### 6.2 Service Implementation **Basic Service Example:** ```go -type OrdersService struct { +type UsersService struct { enabled bool } -func NewOrdersService(enabled bool) *OrdersService { - return &OrdersService{enabled: enabled} +func NewUsersService(enabled bool) *UsersService { + return &UsersService{enabled: enabled} } -func (s *OrdersService) Name() string { return "Orders Service" } -func (s *OrdersService) Enabled() bool { return s.enabled } -func (s *OrdersService) Endpoints() []string { return []string{"/orders", "/orders/:id"} } +func (s *UsersService) Name() string { return "Users Service" } +func (s *UsersService) Enabled() bool { return s.enabled } +func (s *UsersService) Endpoints() []string { return []string{"/users", "/users/:id"} } -func (s *OrdersService) RegisterRoutes(g *echo.Group) { - sub := g.Group("/orders") - sub.GET("", s.listOrders) - sub.GET("/:id", s.getOrder) - sub.POST("", s.createOrder) +func (s *UsersService) RegisterRoutes(g *echo.Group) { + sub := g.Group("/users") + sub.GET("", s.listUsers) + sub.GET("/:id", s.getUser) + sub.POST("", s.createUser) } ``` **Service with Dependencies:** ```go -type InventoryService struct { +type ProductsService struct { db *infrastructure.PostgresManager redis *infrastructure.RedisManager enabled bool } -func NewInventoryService( +func NewProductsService( db *infrastructure.PostgresManager, redis *infrastructure.RedisManager, enabled bool, -) *InventoryService { - return &InventoryService{ +) *ProductsService { + return &ProductsService{ db: db, redis: redis, enabled: enabled, @@ -247,7 +247,7 @@ func NewInventoryService( } ``` -### 5.3 Service Registration +### 6.3 Service Registration Services are registered using an automatic discovery system with factory functions: @@ -288,8 +288,8 @@ 
func init() { ```yaml services: - service_a: true - service_b: false + users_service: true + products_service: false orders: true inventory: true ``` @@ -298,29 +298,29 @@ services: Services can require infrastructure dependencies: -- **Service A**: No dependencies (always available) -- **Service C**: No dependencies (cache demo) -- **Service D**: PostgreSQL database connection -- **Service F**: PostgreSQL connection manager (multi-tenant) -- **Service G**: MongoDB connection manager (multi-tenant) -- **Service H**: No dependencies (broadcast utility) -- **Service I**: Grafana manager +- **Users Service**: No dependencies (always available) +- **Cache Service**: No dependencies (cache demo) +- **Tasks Service**: PostgreSQL database connection +- **Multi-Tenant Service**: PostgreSQL connection manager (multi-tenant) +- **MongoDB Service**: MongoDB connection manager (multi-tenant) +- **Broadcast Service**: No dependencies (broadcast utility) +- **Grafana Service**: Grafana manager Services with missing dependencies are gracefully skipped during auto-discovery. -### 5.4 Service Configuration +### 6.4 Service Configuration ```yaml services: - service_a: true - service_b: false + users_service: true + products_service: false orders: true inventory: true ``` -## 6. TERMINAL USER INTERFACE (TUI) +## 7. TERMINAL USER INTERFACE (TUI) -### 6.1 Overview +### 7.1 Overview The TUI provides visual feedback during boot and real-time monitoring using Bubble Tea framework. @@ -329,7 +329,7 @@ The TUI provides visual feedback during boot and real-time monitoring using Bubb - Lipgloss: Styling and layout - Bubbles: Pre-built components -### 6.2 Boot Sequence (`pkg/tui/boot.go`) +### 7.2 Boot Sequence (`pkg/tui/boot.go`) **Features:** - Phased execution (Starting → Initializing → Complete/Countdown) @@ -342,7 +342,7 @@ The TUI provides visual feedback during boot and real-time monitoring using Bubb 2. **Initializing**: Service initialization with real-time feedback 3.
**Complete/Countdown**: Success message with optional countdown -### 6.3 Live Logs (`pkg/tui/live.go`) +### 7.3 Live Logs (`pkg/tui/live.go`) **Features:** - Real-time log display with full scrolling support @@ -367,11 +367,11 @@ The TUI provides visual feedback during boot and real-time monitoring using Bubb - **F2**: Clear all logs - **q/Esc**: Exit application -### 6.4 Service Architecture Showcase +### 7.4 Service Architecture Showcase The application demonstrates different service implementation patterns through specialized services: -#### Service G (`internal/services/modules/service_g.go`) - MongoDB Multi-Tenant Service +#### MongoDB Service (`internal/services/modules/mongodb_service.go`) - MongoDB Multi-Tenant Service **Database-focused service** providing comprehensive MongoDB operations: - **Multi-tenant support**: Operations across isolated tenant databases - **Full CRUD operations**: Create, read, update, delete for products @@ -379,14 +379,14 @@ The application demonstrates different service implementation patterns through s - **Analytics**: Aggregation pipelines for tenant-specific insights - **API Endpoints**: `/api/v1/products/{tenant}/*` - Tenant-scoped operations -#### Service H (`internal/services/modules/service_h.go`) - Clean Event Streaming Demo +#### Broadcast Service (`internal/services/modules/broadcast_service.go`) - Clean Event Streaming Demo **Simple, user-friendly implementation** using the broadcast utility: - **Clean and simple**: Only 150 lines of code using `pkg/utils/broadcast.go` - **Easy to understand**: Demonstrates how easy event streaming can be - **Broadcasting made easy**: Shows the power of reusable utilities - **Educational value**: Perfect example of clean architecture -#### Service I (`internal/services/modules/service_i.go`) - Grafana Integration Service +#### Grafana Service (`internal/services/modules/grafana_service.go`) - Grafana Integration Service **Monitoring and visualization service** providing comprehensive 
Grafana API integration: - **Dashboard Management**: Create, update, retrieve, and delete Grafana dashboards - **Data Source Configuration**: Set up Prometheus, InfluxDB, and other data sources @@ -412,10 +412,10 @@ broadcaster.Broadcast("my-stream", "event", "message", data) #### Service Implementation Patterns -**Simple Utility Pattern (Service H)** +**Simple Utility Pattern (Broadcast Service)** ```go // Clean and simple implementation -type ServiceH struct { +type BroadcastService struct { broadcaster *utils.EventBroadcaster // Just use the utility! streams map[string]*SimpleStreamGenerator } @@ -424,10 +424,10 @@ type ServiceH struct { // Cons: Minimal - just use the utility ``` -**Infrastructure Integration Pattern (Service I)** +**Infrastructure Integration Pattern (Grafana Service)** ```go // Infrastructure-focused service -type ServiceI struct { +type GrafanaService struct { grafanaManager *infrastructure.GrafanaManager // External service integration enabled bool logger *logger.Logger @@ -441,11 +441,11 @@ type ServiceI struct { | Service | Purpose | API Prefix | Implementation | |---------|---------|------------|----------------| -| **Service G** | MongoDB multi-tenant | `/products/{tenant}/` | Database operations | -| **Service H** | Event streaming | `/events/` | Clean utility usage | -| **Service I** | Grafana integration | `/grafana/` | External API integration | +| **MongoDB Service** | MongoDB multi-tenant | `/products/{tenant}/` | Database operations | +| **Broadcast Service** | Event streaming | `/events/` | Clean utility usage | +| **Grafana Service** | Grafana integration | `/grafana/` | External API integration | -#### MongoDB Multi-Tenant Operations (Service G) +#### MongoDB Multi-Tenant Operations (MongoDB Service) - `GET /api/v1/products/{tenant}` - List tenant products - `POST /api/v1/products/{tenant}` - Create product in tenant - `GET /api/v1/products/{tenant}/{id}` - Get specific product @@ -454,13 +454,13 @@ type ServiceI struct { - 
`GET /api/v1/products/{tenant}/search` - Advanced search - `GET /api/v1/products/{tenant}/analytics` - Analytics dashboard -#### Event Streaming Operations (Service H) +#### Event Streaming Operations (Broadcast Service) - `GET /api/v1/events/stream/{stream_id}` - SSE subscription - `POST /api/v1/events/broadcast` - Event broadcasting - `GET /api/v1/events/streams` - Stream information - `POST /api/v1/events/stream/{id}/start|stop` - Simple stream management -### 6.4 Reusable Dialog System (`pkg/tui/template/dialog.go`) +### 7.4 Reusable Dialog System (`pkg/tui/template/dialog.go`) **Features:** - **Template-based Dialogs**: Reusable dialog components moved to `pkg/tui/template/` @@ -470,7 +470,7 @@ type ServiceI struct { - **Helper Functions**: Pre-built common dialogs (exit confirmation, filter input) - **State Management**: Proper active/inactive state handling -### 6.5 Enhanced Live Logs (`pkg/tui/live.go`) +### 7.5 Enhanced Live Logs (`pkg/tui/live.go`) **Advanced Features:** - **Unlimited Log Storage**: Removed 1000 log limit for unlimited storage @@ -481,7 +481,7 @@ type ServiceI struct { - **Real-time Filtering**: Press "/" for modal log filtering - **Scroll Management**: Full keyboard navigation (arrows, page up/down, home/end) -### 6.6 Styling System (`pkg/tui/styles.go`) +### 7.6 Styling System (`pkg/tui/styles.go`) **Color Palette (Dracula-inspired):** - Primary: Pink (#FF79C6), Purple (#BD93F9), Cyan (#8BE9FD) @@ -492,13 +492,13 @@ type ServiceI struct { - Wave animation (boot screen) - Pulse effects (color cycling) -## 7. ASYNC INFRASTRUCTURE SYSTEM +## 8. ASYNC INFRASTRUCTURE SYSTEM -### 7.1 Overview +### 8.1 Overview The application implements a comprehensive async infrastructure system that ensures all database operations, caching, message queuing, and file operations run asynchronously to avoid blocking the main application thread. 
This implementation uses Go's goroutines, channels, and worker pools to provide non-blocking operations while maintaining thread safety. -### 7.2 Simplified Service Registration +### 8.2 Simplified Service Registration The service registration system has been completely simplified to make adding new services straightforward: @@ -578,7 +578,7 @@ func getServiceConfigs(cfg *config.Config) []ServiceConfig { - **Type Safety**: Structured `ServiceConfig` type prevents errors - **Future-Proof**: Easy to add new services in one location -### 7.2 Core Components +### 8.2 Core Components #### AsyncResult Types Generic `AsyncResult[T]` types handle asynchronous operations: @@ -604,7 +604,7 @@ type WorkerPool struct { #### Batch Operations Support for batching multiple operations with `BatchAsyncResult[T]` types. -### 7.3 Infrastructure Managers +### 8.3 Infrastructure Managers All infrastructure components (Redis, Kafka, MinIO, PostgreSQL, MongoDB, Cron) now support async operations: @@ -638,7 +638,7 @@ All infrastructure components (Redis, Kafka, MinIO, PostgreSQL, MongoDB, Cron) n - Jobs execute in worker pool - Worker pool with 5 workers -### 7.4 Usage Patterns +### 8.4 Usage Patterns #### Synchronous Usage (Wait for Result) ```go @@ -666,14 +666,14 @@ result := redisManager.GetBatchAsync(ctx, keys) values, errors := result.WaitAll() ``` -### 7.5 Performance Benefits +### 8.5 Performance Benefits - **Non-blocking Operations**: HTTP handlers return immediately while operations run in background - **Concurrent Processing**: Multiple operations run simultaneously - **Resource Efficiency**: Better utilization of system resources - **Scalability**: Batch operations and connection management -### 7.6 Error Handling +### 8.6 Error Handling Async operations include comprehensive error handling: - Panic recovery in goroutines @@ -681,7 +681,7 @@ Async operations include comprehensive error handling: - Timeout handling - Batch operation error aggregation -### 7.7 Configuration +### 
8.7 Configuration Worker pool sizes are configurable: ```yaml @@ -700,7 +700,7 @@ infrastructure: workers: 5 ``` -### 7.8 Graceful Shutdown +### 8.8 Graceful Shutdown The application implements **graceful shutdown** that properly disconnects all infrastructure components when receiving SIGTERM or SIGINT signals. This ensures clean resource cleanup and prevents data corruption. @@ -727,9 +727,9 @@ Components are shut down in reverse order to ensure dependencies are handled cor - **Clean Termination**: No hanging processes or zombie goroutines - **Kubernetes Compatibility**: Works with container orchestration systems -## 7. BUILD AND DEPLOYMENT SYSTEM +## 9. BUILD AND DEPLOYMENT SYSTEM -### 7.1 Build Scripts +### 9.1 Build Scripts The project includes comprehensive build scripts for automated compilation, backup management, and deployment across multiple platforms. @@ -787,7 +787,7 @@ scripts/build.bat - **Jenkins Pipeline**: Build and archive artifacts - **Artifact Management**: Upload build artifacts to repositories -### 7.2 Docker Containerization +### 9.2 Docker Containerization The project includes comprehensive Docker containerization with multi-stage builds, automated build scripts, and container-optimized configuration. @@ -837,7 +837,7 @@ The project includes comprehensive Docker containerization with multi-stage buil - **Jenkins Pipeline**: Containerized build and deployment pipelines - **Registry Support**: Push to Docker registries and artifact repositories -### 7.3 Package Name Change Scripts +### 9.3 Package Name Change Scripts The project includes automated scripts for changing the Go module package name across the entire codebase. These scripts are essential when renaming or refactoring the project module. 
@@ -891,9 +891,9 @@ scripts\change_package.bat github.com/new-org/new-project - **Backup Files**: Remove `.bak` files after verifying changes (Unix/Linux/macOS) - **IDE Restart**: May need to restart IDE/editor after module name changes -### 7.2 Infrastructure Integration +### 8.2 Infrastructure Integration -#### 7.2.1 Redis +#### 8.2.1 Redis **Configuration:** ```yaml @@ -913,7 +913,7 @@ val, err := redis.Get(ctx, "my-key") err = redis.Delete(ctx, "my-key") ``` -#### 7.2.2 MongoDB +#### 8.2.2 MongoDB **Configuration (Multiple Connections):** ```yaml @@ -965,7 +965,7 @@ result, err := mongoManager.DeleteOne(ctx, "collection", filter) result, err := mongoManager.DeleteMany(ctx, "collection", filter) ``` -### 7.2 Postgres +### 8.2 Postgres **Configuration (Multiple Connections):** ```yaml @@ -1036,7 +1036,7 @@ var users []User err := db.DB.Select(&users, "SELECT * FROM users WHERE active = $1", true) ``` -### 7.3 Kafka +### 8.3 Kafka **Configuration:** ```yaml @@ -1052,7 +1052,7 @@ kafka: err := kafka.Publish("notification-topic", []byte("Hello Kafka")) ``` -### 7.4 MinIO (Object Storage) +### 8.4 MinIO (Object Storage) **Configuration:** ```yaml @@ -1076,7 +1076,7 @@ info, err := storage.UploadFile(context.Background(), "avatars/user-1.jpg", file url := storage.GetFileUrl("avatars/user-1.jpg") ``` -### 7.5 Cron Jobs +### 8.5 Cron Jobs **Configuration:** ```yaml @@ -1094,9 +1094,9 @@ id, err := cron.AddJob("database_backup", "0 3 * * *", func() { }) ``` -## 8. BEST PRACTICES +## 9. BEST PRACTICES -### 8.1 API Development +### 9.1 API Development 1. **Use response helpers** - Never manually construct responses 2. **Validate all requests** - Use built-in and custom validators @@ -1107,7 +1107,7 @@ id, err := cron.AddJob("database_backup", "0 3 * * *", func() { 7. **Include timestamps** - All responses have Unix timestamps 8. **Keep responses consistent** - Same structure across endpoints -### 8.2 Service Development +### 9.2 Service Development 1. 
**Implement Service interface** - Name, RegisterRoutes, Enabled, Endpoints 2. **Use dependency injection** - For infrastructure components @@ -1116,14 +1116,14 @@ id, err := cron.AddJob("database_backup", "0 3 * * *", func() { 5. **Handle errors gracefully** - Use response helpers 6. **Validate inputs** - Use request.Bind and validation tags -### 8.3 Configuration Management +### 9.3 Configuration Management 1. **Use config.yaml** - For all runtime configuration 2. **Default to enabled** - Services enabled by default if not specified 3. **Environment variables** - Can override config values 4. **Validation** - Config values are validated at startup -## 9. SIMPLIFIED DOCUMENTATION STRUCTURE +## 10. SIMPLIFIED DOCUMENTATION STRUCTURE ``` docs_wiki/ @@ -1135,7 +1135,7 @@ docs_wiki/ └── blueprint.txt # 🔍 Internal technical blueprint (this file) ``` -## 10. PROJECT STRUCTURE SUMMARY +## 11. PROJECT STRUCTURE SUMMARY ``` stackyard/ @@ -1178,9 +1178,9 @@ stackyard/ └── monitoring/ # Monitoring UI ``` -## 10. KEY FEATURES SUMMARY +## 11. 
KEY FEATURES SUMMARY -### 10.1 Core Features +### 11.1 Core Features - **Modular Architecture**: Services can be enabled/disabled via config - **Multi-Tenant Database Support**: Multiple PostgreSQL connections with dynamic switching @@ -1194,7 +1194,7 @@ stackyard/ - **Infrastructure Integrations**: Redis, Postgres, Kafka, MinIO, Grafana, Cron - **Customizable Parameter Parsing**: Dynamic flag system for command-line configuration -### 10.2 Technical Highlights +### 11.2 Technical Highlights - **Echo Framework**: High-performance HTTP server - **Bubble Tea**: Modern TUI framework @@ -1205,7 +1205,7 @@ stackyard/ - **Dependency Injection**: For service components - **Configuration Management**: YAML-based with defaults -### 10.3 Production Readiness +### 11.3 Production Readiness - **Comprehensive Logging**: Structured logging with context - **Error Tracking**: Correlation IDs for debugging @@ -1215,9 +1215,9 @@ stackyard/ - **Security Features**: API obfuscation, proper error handling - **Scalability**: Modular design for horizontal scaling -## 11. GETTING STARTED +## 12. GETTING STARTED -### 11.1 Setup +### 12.1 Setup 1. **Install Dependencies**: ```bash @@ -1227,8 +1227,8 @@ go mod tidy 2. **Configure Services** in `config.yaml`: ```yaml services: - service_a: true - service_b: false + users_service: true + products_service: false # Add your services here ``` @@ -1237,7 +1237,7 @@ services: go run cmd/app/main.go ``` -### 11.2 Development Workflow +### 12.2 Development Workflow 1. **Create New Service**: - Implement `Service` interface in `internal/services/modules/` @@ -1259,7 +1259,7 @@ go run cmd/app/main.go - Check logs for errors and debugging - Verify API responses with Postman/curl -## 12. CONCLUSION +## 13. CONCLUSION This project represents a sophisticated, production-ready Go boilerplate with comprehensive features for API development, service management, monitoring, and infrastructure integration. 
The modular architecture, standardized patterns, and extensive documentation make it an excellent foundation for building scalable, maintainable applications. @@ -1274,9 +1274,9 @@ The combination of: makes this boilerplate suitable for a wide range of applications from simple APIs to complex multi-tenant SaaS systems. -## 13. LICENSING +## 14. LICENSING -### 13.1 License Information +### 14.1 License Information This project is licensed under the **Apache License 2.0**, a permissive open-source license that allows for: @@ -1286,7 +1286,7 @@ This project is licensed under the **Apache License 2.0**, a permissive open-sou - **Patent Grant**: Includes an explicit patent license - **Private Use**: Licensed works can be used privately -### 13.2 Copyright Notice +### 14.2 Copyright Notice ``` Copyright 2025 diameter-tscd @@ -1304,11 +1304,11 @@ See the License for the specific language governing permissions and limitations under the License. ``` -### 13.3 License File +### 14.3 License File The complete license text is available in the `LICENSE` file at the root of the project repository. -### 13.4 Attribution Requirements +### 14.4 Attribution Requirements When using this project, proper attribution should be given according to the Apache License 2.0 terms. If you modify and distribute this software, you must: @@ -1317,7 +1317,7 @@ When using this project, proper attribution should be given according to the Apa 3. **Include Attribution**: If the work includes a NOTICE file, include the attribution notices 4. 
**State Changes**: Clearly mark any modifications made to the original work -### 13.5 Open Source Compliance +### 14.5 Open Source Compliance This project complies with open-source licensing best practices: @@ -1326,7 +1326,7 @@ This project complies with open-source licensing best practices: - **Global Compatibility**: Apache 2.0 is recognized worldwide - **Corporate Friendly**: Suitable for both individual and commercial use -### 13.6 Third-Party Dependencies +### 14.6 Third-Party Dependencies The project uses various third-party dependencies, each with their own licenses. Key dependencies include: @@ -1339,7 +1339,7 @@ The project uses various third-party dependencies, each with their own licenses. All third-party dependencies are compatible with the Apache License 2.0. -### 13.7 Contributing and Licensing +### 14.7 Contributing and Licensing When contributing to this project: @@ -1347,7 +1347,7 @@ When contributing to this project: 2. **Original Work**: Contributions must be original work or properly licensed 3. **No Additional Restrictions**: Contributions cannot add additional licensing restrictions -### 13.8 Service Registration System +### 14.8 Service Registration System The application uses an automatic service discovery and registration system that allows services to be dynamically enabled/disabled through configuration. 
@@ -1382,13 +1382,13 @@ func init() { Services can require infrastructure dependencies: -- **Service A**: No dependencies (always available) -- **Service C**: No dependencies (cache demo) -- **Service D**: PostgreSQL database connection -- **Service F**: PostgreSQL connection manager (multi-tenant) -- **Service G**: MongoDB connection manager (multi-tenant) -- **Service H**: No dependencies (broadcast utility) -- **Service I**: Grafana manager +- **Users Service**: No dependencies (always available) +- **Products Service**: No dependencies (cache demo) +- **Tasks Service**: PostgreSQL database connection +- **Multi-Tenant Service**: PostgreSQL connection manager (multi-tenant) +- **MongoDB Service**: MongoDB connection manager (multi-tenant) +- **Broadcast Service**: No dependencies (broadcast utility) +- **Grafana Service**: Grafana manager Services with missing dependencies are gracefully skipped during auto-discovery. @@ -1410,6 +1410,6 @@ go run -tags debug cmd/app/main.go --debug-services cat config.yaml | grep -A 10 "services:" ``` -### 13.9 Commercial Support +### 14.9 Commercial Support While the software is provided under Apache License 2.0, commercial support, consulting, and custom development services may be available through the project maintainer. From 90e7da8958c802b557a7e55ac9d63ce7a79dbb7b Mon Sep 17 00:00:00 2001 From: "Gab." 
Date: Tue, 17 Mar 2026 05:58:13 +0700 Subject: [PATCH 03/18] refactor: simplify flag parsing and config loading in main.go - Replaced complex flag definitions system with simple flag package usage - Consolidated config loading logic into dedicated loadConfig function - Removed unused flag definitions and validation code - Simplified main function flow for better readability - Maintained all existing functionality while reducing complexity --- cmd/app/main.go | 401 ++++++++++++++---------- docs_wiki/blueprint/blueprint.txt | 127 +++++++- pkg/infrastructure/afero.go | 360 +++++++++++++++++++++ pkg/infrastructure/afero_test.go | 166 ++++++++++ pkg/infrastructure/testdata/README.md | 3 + pkg/infrastructure/testdata/config.yaml | 2 + pkg/infrastructure/testdata/test.txt | 1 + pkg/tui/boot.go | 14 +- pkg/tui/simple.go | 48 +-- 9 files changed, 918 insertions(+), 204 deletions(-) create mode 100644 pkg/infrastructure/afero.go create mode 100644 pkg/infrastructure/afero_test.go create mode 100644 pkg/infrastructure/testdata/README.md create mode 100644 pkg/infrastructure/testdata/config.yaml create mode 100644 pkg/infrastructure/testdata/test.txt diff --git a/cmd/app/main.go b/cmd/app/main.go index 5cce144..e30d3fa 100644 --- a/cmd/app/main.go +++ b/cmd/app/main.go @@ -2,6 +2,7 @@ package main import ( "context" + "flag" "fmt" "io" "net/url" @@ -15,77 +16,19 @@ import ( "stackyard/pkg/utils" "syscall" "time" -) -// Flag definitions - configure flags here -var flagDefinitions = []utils.FlagDefinition{ - { - Name: "c", - DefaultValue: "", - Description: "URL to load configuration from (YAML format)", - Validator: func(value interface{}) error { - if str, ok := value.(string); ok && str != "" { - if _, err := url.ParseRequestURI(str); err != nil { - return fmt.Errorf("invalid config URL format: %w", err) - } - } - return nil - }, - }, - // Add new flags here easily: - // { - // Name: "port", - // DefaultValue: 8080, - // Description: "Server port to listen on", - // 
Validator: func(value interface{}) error { - // if port, ok := value.(int); ok && (port < 1 || port > 65535) { - // return fmt.Errorf("port must be between 1 and 65535") - // } - // return nil - // }, - // }, - // { - // Name: "debug", - // DefaultValue: false, - // Description: "Enable debug mode", - // Validator: nil, // No validation needed for bool flags - // }, -} + _ "stackyard/internal/services/modules" +) func main() { // Clear the terminal screen for a fresh start utils.ClearScreen() - // 1. Parse command line flags - parsedFlags, err := utils.ParseFlags(flagDefinitions) - if err != nil { - fmt.Printf("Flag parsing error: %s\n", err.Error()) - utils.PrintUsage(flagDefinitions, "stackyard") - os.Exit(1) - } + // Parse command line flags + configURL := parseFlags() - // 2. Load Config - var cfg *config.Config - if parsedFlags.ConfigURL != "" { - // Load config from URL - fmt.Printf("Loading config from URL: %s\n", parsedFlags.ConfigURL) - if err := utils.LoadConfigFromURL(parsedFlags.ConfigURL); err != nil { - fmt.Printf("Failed to load config from URL: %s\n", err.Error()) - os.Exit(1) - } - - // Parse the loaded config - cfg, err = config.LoadConfigWithURL(parsedFlags.ConfigURL) - if err != nil { - panic("Failed to parse config from URL: " + err.Error()) - } - } else { - // Load config from local file - cfg, err = config.LoadConfig() - if err != nil { - panic("Failed to load config: " + err.Error()) - } - } + // Load configuration + cfg := loadConfig(configURL) // Check if "web" folder exists, if not, disable web monitoring if _, err := os.Stat("web"); os.IsNotExist(err) { @@ -93,42 +36,35 @@ func main() { cfg.Monitoring.Enabled = false } - // 2. Load Banner - var bannerText string - if cfg.App.BannerPath != "" { - banner, err := os.ReadFile(cfg.App.BannerPath) - if err == nil { - bannerText = string(banner) - } - } + // Load banner text + bannerText := loadBanner(cfg) - // 3. 
Check port availability + // Check port availability if err := utils.CheckPortAvailability(cfg.Server.Port, cfg.Monitoring.Port, cfg.Monitoring.Enabled); err != nil { fmt.Printf("\033[31m Port Error: %s\033[0m\n", err.Error()) fmt.Println("\033[33mPlease stop the conflicting service or change the port in config.yaml\033[0m") os.Exit(1) } - // 4. Init Broadcaster for monitoring + // Initialize broadcaster for monitoring broadcaster := monitoring.NewLogBroadcaster() - // Check if TUI mode is enabled + // Start application based on TUI mode if cfg.App.EnableTUI { - // ===== TUI MODE ===== runWithTUI(cfg, bannerText, broadcaster) } else { - // ===== TRADITIONAL CONSOLE MODE ===== runWithConsole(cfg, bannerText, broadcaster) } } // runWithTUI runs the application with fancy TUI interface func runWithTUI(cfg *config.Config, bannerText string, broadcaster *monitoring.LogBroadcaster) { - // Config conditions + // Configure monitoring port for TUI if !cfg.Monitoring.Enabled { cfg.Monitoring.Port = "disabled" } + // Setup TUI configuration tuiConfig := tui.StartupConfig{ AppName: cfg.App.Name, AppVersion: cfg.App.Version, @@ -139,61 +75,25 @@ func runWithTUI(cfg *config.Config, bannerText string, broadcaster *monitoring.L IdleSeconds: cfg.App.StartupDelay, } - // Get service configurations - serviceConfigs := getServiceConfigs(cfg) - - // Define boot sequence - initQueue := []tui.ServiceInit{ - {Name: "Configuration", Enabled: true, InitFunc: nil}, - } - - // Add infrastructure services to boot queue - for _, svc := range serviceConfigs { - initQueue = append(initQueue, tui.ServiceInit{ - Name: svc.Name, Enabled: svc.Enabled, InitFunc: nil, - }) - } - - initQueue = append(initQueue, tui.ServiceInit{Name: "Middleware", Enabled: true, InitFunc: nil}) - - // Dynamically add services from config - for name, enabled := range cfg.Services { - initQueue = append(initQueue, tui.ServiceInit{Name: "Service: " + name, Enabled: enabled, InitFunc: nil}) - } - - // Add monitoring last - 
initQueue = append(initQueue, tui.ServiceInit{Name: "Monitoring", Enabled: cfg.Monitoring.Enabled, InitFunc: nil}) + // Create service initialization queue + initQueue := createServiceQueue(cfg) // Run the boot sequence TUI _, _ = tui.RunBootSequence(tuiConfig, initQueue) - // Create Live TUI for continuous display - liveTUI := tui.NewLiveTUI(tui.LiveConfig{ - AppName: cfg.App.Name, - AppVersion: cfg.App.Version, - Banner: bannerText, - Port: cfg.Server.Port, - MonitorPort: cfg.Monitoring.Port, - Env: cfg.App.Env, - OnShutdown: utils.TriggerShutdown, // Pass the shutdown callback - }) + // Create and start Live TUI + liveTUI := createLiveTUI(cfg, bannerText) + liveTUI.Start() - // Init Logger (quiet mode so logs go to TUI only) - // We also broadcast to the monitoring system so the Web UI Live Logs work + // Initialize logger with TUI output multiWriter := io.MultiWriter(liveTUI, broadcaster) l := logger.NewQuiet(cfg.App.Debug, multiWriter) - // Start Live TUI in background - liveTUI.Start() - - // Give TUI a moment to initialize - time.Sleep(100 * time.Millisecond) - // Add initial logs liveTUI.AddLog("info", "Server starting on port "+cfg.Server.Port) liveTUI.AddLog("info", "Environment: "+cfg.App.Env) - // Start Server in background - infrastructure will be initialized by the server + // Start server srv := server.New(cfg, l, broadcaster) go func() { liveTUI.AddLog("info", "HTTP server listening...") @@ -202,33 +102,103 @@ func runWithTUI(cfg *config.Config, bannerText string, broadcaster *monitoring.L } }() - // Give server a moment to start + // Wait for server to start time.Sleep(500 * time.Millisecond) liveTUI.AddLog("info", "Server ready at http://localhost:"+cfg.Server.Port) if cfg.Monitoring.Enabled { liveTUI.AddLog("info", "Monitoring at http://localhost:"+cfg.Monitoring.Port) } - // Handle shutdown signals - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + // Handle shutdown + handleShutdown(liveTUI, srv, l) 
+} - // Block until signal or shutdown channel - select { - case <-sigChan: - liveTUI.AddLog("warn", "Shutting down...") - srv.Shutdown(context.Background(), l) - case <-utils.ShutdownChan: - liveTUI.AddLog("warn", "Shutting down...") - srv.Shutdown(context.Background(), l) +// exampleAferoUsage demonstrates how to use the Global Singleton Afero Manager +// This function is commented out as it's for demonstration purposes only +/* +func exampleAferoUsage() { + fmt.Println("=== Global Singleton Afero Manager Example ===") + + // Mock alias configuration + aliasMap := map[string]string{ + "config": "all:config.yaml", + "banner": "all:banner.txt", + "readme": "all:README.md", + "web-app": "all:web/monitoring/index.html", } - liveTUI.Stop() + // Initialize the Afero manager in development mode + // Note: In a real application, you would use //go:embed directives + // For this example, we'll just show the API usage + fmt.Println("Initializing Afero Manager...") + + // In a real application, you would have: + // //go:embed all:dist + // var embedFS embed.FS + // infrastructure.Init(embedFS, aliasMap, true) + + // For demonstration purposes, we'll just show the API + fmt.Println("✓ Afero Manager initialized") + fmt.Println("✓ Development mode: CopyOnWriteFs (embed.FS + OS overrides)") + fmt.Println("✓ Production mode: ReadOnlyFs (embed.FS only)") + fmt.Println() + + // Show available aliases + fmt.Println("Available aliases:") + for alias, path := range aliasMap { + fmt.Printf(" - %s -> %s\n", alias, path) + } + fmt.Println() - // Give a moment for cleanup and then exit - time.Sleep(100 * time.Millisecond) - os.Exit(0) + // Example of checking if files exist + fmt.Println("Checking file existence:") + for alias := range aliasMap { + exists := infrastructure.Exists(alias) + fmt.Printf(" - %s: %v\n", alias, exists) + } + fmt.Println() + + // Example of reading a file + fmt.Println("Reading banner file:") + if content, err := infrastructure.Read("banner"); err == nil { + 
fmt.Printf(" Content length: %d bytes\n", len(content)) + if len(content) > 100 { + fmt.Printf(" Preview: %s...\n", string(content[:100])) + } else { + fmt.Printf(" Content: %s\n", string(content)) + } + } else { + fmt.Printf(" Error reading file: %v\n", err) + } + fmt.Println() + + // Example of streaming a file + fmt.Println("Streaming README file:") + if stream, err := infrastructure.Stream("readme"); err == nil { + defer stream.Close() + content := make([]byte, 200) + n, err := stream.Read(content) + if err == nil || err == io.EOF { + fmt.Printf(" Read %d bytes from stream\n", n) + fmt.Printf(" Preview: %s...\n", string(content[:n])) + } + } else { + fmt.Printf(" Error streaming file: %v\n", err) + } + fmt.Println() + + // Show all configured aliases + fmt.Println("All configured aliases:") + aliases := infrastructure.GetAliases() + for alias, path := range aliases { + fmt.Printf(" - %s -> %s\n", alias, path) + } + fmt.Println() + + fmt.Println("=== Afero Manager Example Complete ===") + fmt.Println() } +*/ // runWithConsole runs the application with traditional console logging func runWithConsole(cfg *config.Config, bannerText string, broadcaster *monitoring.LogBroadcaster) { @@ -239,30 +209,18 @@ func runWithConsole(cfg *config.Config, bannerText string, broadcaster *monitori fmt.Print("\033[0m") // Reset color } - // Init Logger (normal mode with console output) + // Initialize logger l := logger.New(cfg.App.Debug, broadcaster) - // Log startup info + // Log startup information l.Info("Starting Application", "name", cfg.App.Name, "env", cfg.App.Env) l.Info("TUI mode disabled, using traditional console logging") - - // Log enabled services l.Info("Initializing services...") - // Log infrastructure services using unified config - serviceConfigs := getServiceConfigs(cfg) - for _, svc := range serviceConfigs { - logServiceStatus(l, svc.Name, svc.Enabled) - } - - // Dynamically log all services from config - for name, enabled := range cfg.Services { - 
logServiceStatus(l, "Service: "+name, enabled) - } - - logServiceStatus(l, "Monitoring", cfg.Monitoring.Enabled) + // Log all services + logAllServices(l, cfg) - // Start Server + // Start server srv := server.New(cfg, l, broadcaster) go func() { l.Info("HTTP server listening", "port", cfg.Server.Port) @@ -271,7 +229,7 @@ func runWithConsole(cfg *config.Config, bannerText string, broadcaster *monitori } }() - // Give server a moment to start + // Wait for server to start time.Sleep(500 * time.Millisecond) l.Info("Server ready", "url", "http://localhost:"+cfg.Server.Port) if cfg.Monitoring.Enabled { @@ -279,21 +237,146 @@ func runWithConsole(cfg *config.Config, bannerText string, broadcaster *monitori l.Info("Monitoring dashboard", "url", "http://localhost:"+cfg.Monitoring.Port) } - // Handle shutdown signals + // Handle shutdown sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - - // Block until signal <-sigChan l.Warn("Shutting down...") srv.Shutdown(context.Background(), l) + time.Sleep(100 * time.Millisecond) + os.Exit(0) +} + +// parseFlags parses command line flags using standard Go flag package +func parseFlags() string { + var configURL string + flag.StringVar(&configURL, "c", "", "URL to load configuration from (YAML format)") + flag.Parse() + + // Validate URL if provided + if configURL != "" { + if _, err := url.ParseRequestURI(configURL); err != nil { + fmt.Printf("Invalid config URL format: %v\n", err) + fmt.Println("Usage: stackyard [-c config-url]") + os.Exit(1) + } + } + + return configURL +} + +// loadConfig loads configuration from local file or URL +func loadConfig(configURL string) *config.Config { + if configURL != "" { + fmt.Printf("Loading config from URL: %s\n", configURL) + if err := utils.LoadConfigFromURL(configURL); err != nil { + fmt.Printf("Failed to load config from URL: %s\n", err.Error()) + os.Exit(1) + } + + cfg, err := config.LoadConfigWithURL(configURL) + if err != nil { + panic("Failed to 
parse config from URL: " + err.Error()) + } + return cfg + } + + cfg, err := config.LoadConfig() + if err != nil { + panic("Failed to load config: " + err.Error()) + } + return cfg +} + +// loadBanner loads banner text from file if configured +func loadBanner(cfg *config.Config) string { + if cfg.App.BannerPath != "" { + banner, err := os.ReadFile(cfg.App.BannerPath) + if err == nil { + return string(banner) + } + } + return "" +} + +// createServiceQueue creates the service initialization queue for TUI +func createServiceQueue(cfg *config.Config) []tui.ServiceInit { + serviceConfigs := getServiceConfigs(cfg) + + initQueue := []tui.ServiceInit{ + {Name: "Configuration", Enabled: true, InitFunc: nil}, + } + + // Add infrastructure services + for _, svc := range serviceConfigs { + initQueue = append(initQueue, tui.ServiceInit{ + Name: svc.Name, Enabled: svc.Enabled, InitFunc: nil, + }) + } + + initQueue = append(initQueue, tui.ServiceInit{Name: "Middleware", Enabled: true, InitFunc: nil}) + + // Add application services + for name, enabled := range cfg.Services { + initQueue = append(initQueue, tui.ServiceInit{Name: "Service: " + name, Enabled: enabled, InitFunc: nil}) + } + + // Add monitoring last + initQueue = append(initQueue, tui.ServiceInit{Name: "Monitoring", Enabled: cfg.Monitoring.Enabled, InitFunc: nil}) + + return initQueue +} + +// createLiveTUI creates and configures the Live TUI +func createLiveTUI(cfg *config.Config, bannerText string) *tui.LiveTUI { + return tui.NewLiveTUI(tui.LiveConfig{ + AppName: cfg.App.Name, + AppVersion: cfg.App.Version, + Banner: bannerText, + Port: cfg.Server.Port, + MonitorPort: cfg.Monitoring.Port, + Env: cfg.App.Env, + OnShutdown: utils.TriggerShutdown, + }) +} + +// handleShutdown handles graceful shutdown for TUI mode +func handleShutdown(liveTUI *tui.LiveTUI, srv *server.Server, l *logger.Logger) { + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - // Give a moment for 
cleanup and then exit + select { + case <-sigChan: + liveTUI.AddLog("warn", "Shutting down...") + srv.Shutdown(context.Background(), l) + case <-utils.ShutdownChan: + liveTUI.AddLog("warn", "Shutting down...") + srv.Shutdown(context.Background(), l) + } + + liveTUI.Stop() time.Sleep(100 * time.Millisecond) os.Exit(0) } +// logAllServices logs the status of all services +func logAllServices(l *logger.Logger, cfg *config.Config) { + // Log infrastructure services + serviceConfigs := getServiceConfigs(cfg) + for _, svc := range serviceConfigs { + logServiceStatus(l, svc.Name, svc.Enabled) + } + + // Log application services + for name, enabled := range cfg.Services { + logServiceStatus(l, "Service: "+name, enabled) + } + + // Log monitoring + logServiceStatus(l, "Monitoring", cfg.Monitoring.Enabled) +} + // ServiceConfig represents a service with its name and enabled status type ServiceConfig struct { Name string diff --git a/docs_wiki/blueprint/blueprint.txt b/docs_wiki/blueprint/blueprint.txt index 8a3094c..cfa03b6 100644 --- a/docs_wiki/blueprint/blueprint.txt +++ b/docs_wiki/blueprint/blueprint.txt @@ -483,10 +483,33 @@ type GrafanaService struct { ### 7.6 Styling System (`pkg/tui/styles.go`) -**Color Palette (Dracula-inspired):** -- Primary: Pink (#FF79C6), Purple (#BD93F9), Cyan (#8BE9FD) -- Status: Green (#50FA7B), Yellow (#F1FA8C), Red (#FF5555) -- UI Elements: Dark Grey (#6272A4) +**Unified Color Palette (Live.go inspired):** +- **Primary Cyan**: `#8daea5` - Used for main headers, titles, and primary elements +- **Light Gray**: `#626262ff` - Used for secondary elements and disabled states +- **Light Yellow**: `#f5fac0ff` - Used for warning/starting states +- **Light Green**: `#9af8b1ff` - Used for success states +- **Light Red**: `#f67373ff` - Used for error states +- **Off-white text**: `#F8F8F2` - Used for service names and content + +**Color Consistency:** +- **Simple Renderer**: Updated to match Live.go color scheme for visual consistency +- **Live TUI**: 
Maintains original color palette with enhanced accessibility +- **Boot Sequence**: Uses unified colors for cohesive appearance +- **Service Status**: Consistent color coding across all TUI components + +**Recent Color Unification (v2.0):** +- **Purpose**: Unified color schemes between `pkg/tui/simple.go` and `pkg/tui/live.go` +- **Benefits**: Visual consistency across all TUI implementations +- **Changes Applied**: + - Banner: `#BD93F9` → `#8daea5` (purple to cyan) + - Divider: `#44475A` → `#626262ff` (dark gray to gray) + - Section headers: `#8BE9FD` → `#8daea5` (light blue to cyan) + - Service starting: `#F1FA8C` → `#f5fac0ff` (yellow to light yellow) + - Service success: `#50FA7B` → `#9af8b1ff` (green to light green) + - Service error: `#FF5555` → `#f67373ff` (red to light red) + - Server ready elements: Unified to `#8daea5` (cyan) + - Box styling: `#6272A4` → `#8daea5` (blue to cyan) + - Animation colors: Updated for consistency **Animations:** - Wave animation (boot screen) @@ -893,7 +916,77 @@ scripts\change_package.bat github.com/new-org/new-project ### 8.2 Infrastructure Integration -#### 8.2.1 Redis +#### 8.2.1 Global Singleton Afero Manager + +**Purpose:** Provides a unified, thread-safe interface for filesystem operations with support for embedded files and aliasing. 
+ +**Configuration:** No configuration required - automatically initialized with embed.FS + +**Key Features:** +- **Singleton Pattern**: Thread-safe singleton using `sync.Once` +- **Hybrid Filesystem**: Development mode uses `CopyOnWriteFs`, production uses `ReadOnlyFs` +- **File Aliasing**: Map aliases to physical paths (e.g., "config" → "all:config.yaml") +- **Embed.FS Integration**: Seamless integration with Go's `//go:embed` directive +- **Thread Safety**: Read operations protected with `sync.RWMutex` + +**API Functions:** +```go +// Initialize the manager +func Init(embedFS embed.FS, aliasMap map[string]string, isDev bool) + +// Read file content by alias +func Read(alias string) ([]byte, error) + +// Stream file content by alias +func Stream(alias string) (io.ReadCloser, error) + +// Check if alias exists +func Exists(alias string) bool + +// Get all aliases +func GetAliases() map[string]string + +// Get underlying filesystem +func GetFileSystem() afero.Fs +``` + +**Usage Examples:** +```go +// In main.go +//go:embed all:config.yaml +var configFS embed.FS + +aliasMap := map[string]string{ + "config": "all:config.yaml", + "banner": "all:banner.txt", + "readme": "all:README.md", +} + +infrastructure.Init(configFS, aliasMap, true) // Development mode + +// Read embedded file +content, err := infrastructure.Read("config") +if err != nil { + log.Fatal(err) +} +fmt.Println(string(content)) + +// Check file existence +if infrastructure.Exists("banner") { + fmt.Println("Banner file exists") +} +``` + +**Development vs Production:** +- **Development Mode**: `CopyOnWriteFs` allows local file overrides +- **Production Mode**: `ReadOnlyFs` ensures embedded files only + +**Testing:** +- Comprehensive test suite in `pkg/infrastructure/afero_test.go` +- Tests singleton behavior, alias resolution, and error handling +- Validates both development and production modes + +#### 8.2.2 Redis **Configuration:** ```yaml @@ -1127,12 +1220,12 @@ id, err := 
cron.AddJob("database_backup", "0 3 * * *", func() { ``` docs_wiki/ -├── GETTING_STARTED.md # 🚀 Quick start guide for new users -├── DEVELOPMENT.md # 🔧 Development guide for extending the app -├── ARCHITECTURE.md # 🏗️ Technical overview and design decisions -├── REFERENCE.md # 📖 Complete technical reference (config, APIs, advanced) +├── GETTING_STARTED.md # Quick start guide for new users +├── DEVELOPMENT.md # Development guide for extending the app +├── ARCHITECTURE.md # Technical overview and design decisions +├── REFERENCE.md # Complete technical reference (config, APIs, advanced) └── blueprint/ - └── blueprint.txt # 🔍 Internal technical blueprint (this file) + └── blueprint.txt # Internal technical blueprint (this file) ``` ## 11. PROJECT STRUCTURE SUMMARY @@ -1165,6 +1258,19 @@ stackyard/ │ ├── response/ # Standardized API responses │ ├── tui/ # Terminal UI │ ├── infrastructure/ # External integrations +│ │ ├── afero.go # Global Singleton Afero Manager +│ │ ├── afero_test.go # Afero Manager tests +│ │ ├── async.go # Async infrastructure operations +│ │ ├── async_init.go # Async initialization +│ │ ├── cron_manager.go # Cron job management +│ │ ├── grafana.go # Grafana integration +│ │ ├── http_monitor.go # HTTP monitoring +│ │ ├── kafka.go # Kafka messaging +│ │ ├── minio.go # MinIO object storage +│ │ ├── mongo.go # MongoDB multi-tenant support +│ │ ├── postgres.go # PostgreSQL multi-tenant support +│ │ ├── redis.go # Redis caching +│ │ └── system_monitor.go # System monitoring │ ├── logger/ # Logging │ └── utils/ # Utility functions ├── scripts/ # Build and utility scripts @@ -1192,6 +1298,7 @@ stackyard/ - **Real-time Monitoring**: System and service health dashboard with web interface - **Terminal UI**: Interactive boot sequence and dashboard - **Infrastructure Integrations**: Redis, Postgres, Kafka, MinIO, Grafana, Cron +- **Global Singleton Afero Manager**: Thread-safe filesystem operations with embed.FS support - **Customizable Parameter Parsing**: 
Dynamic flag system for command-line configuration ### 11.2 Technical Highlights diff --git a/pkg/infrastructure/afero.go b/pkg/infrastructure/afero.go new file mode 100644 index 0000000..41b8150 --- /dev/null +++ b/pkg/infrastructure/afero.go @@ -0,0 +1,360 @@ +package infrastructure + +import ( + "embed" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "sync" + "time" + + "github.com/spf13/afero" +) + +// assets is the global singleton Afero manager +var ( + instance *aferoManager + once sync.Once +) + +// aferoManager represents the singleton Afero filesystem manager +type aferoManager struct { + fs afero.Fs + aliases map[string]string + mu sync.RWMutex +} + +// embedFSWrapper wraps embed.FS to implement afero.Fs interface +type embedFSWrapper struct { + fs embed.FS +} + +// Chtimes changes file access and modification times (not supported for embed.FS) +func (e *embedFSWrapper) Chtimes(name string, atime, mtime time.Time) error { + return fmt.Errorf("chtimes not supported for embedded filesystem") +} + +// OpenFile opens a file with the given flags and permissions (not supported for embed.FS) +func (e *embedFSWrapper) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) { + if flag != os.O_RDONLY { + return nil, fmt.Errorf("openfile not supported for embedded filesystem (only read-only mode)") + } + return e.Open(name) +} + +// Open opens a file from the embedded filesystem +func (e *embedFSWrapper) Open(name string) (afero.File, error) { + file, err := e.fs.Open(name) + if err != nil { + return nil, err + } + return &embedFile{File: file}, nil +} + +// Create creates a new file (not supported for embed.FS) +func (e *embedFSWrapper) Create(name string) (afero.File, error) { + return nil, fmt.Errorf("create not supported for embedded filesystem") +} + +// Mkdir creates a directory (not supported for embed.FS) +func (e *embedFSWrapper) Mkdir(name string, perm os.FileMode) error { + return fmt.Errorf("mkdir not supported for embedded 
filesystem") +} + +// MkdirAll creates a directory path (not supported for embed.FS) +func (e *embedFSWrapper) MkdirAll(path string, perm os.FileMode) error { + return fmt.Errorf("mkdirall not supported for embedded filesystem") +} + +// Remove removes a file (not supported for embed.FS) +func (e *embedFSWrapper) Remove(name string) error { + return fmt.Errorf("remove not supported for embedded filesystem") +} + +// RemoveAll removes a directory path (not supported for embed.FS) +func (e *embedFSWrapper) RemoveAll(path string) error { + return fmt.Errorf("removeall not supported for embedded filesystem") +} + +// Rename renames a file (not supported for embed.FS) +func (e *embedFSWrapper) Rename(oldname, newname string) error { + return fmt.Errorf("rename not supported for embedded filesystem") +} + +// Stat returns file info +func (e *embedFSWrapper) Stat(name string) (os.FileInfo, error) { + file, err := e.fs.Open(name) + if err != nil { + return nil, err + } + defer file.Close() + + // Get file info from the opened file + if stat, ok := file.(fs.FileInfo); ok { + return stat, nil + } + + // Fallback: try to get info from the file itself + if stater, ok := file.(interface{ Stat() (fs.FileInfo, error) }); ok { + return stater.Stat() + } + + return nil, fmt.Errorf("stat not supported for this file") +} + +// Name returns the name of the filesystem +func (e *embedFSWrapper) Name() string { + return "embedFS" +} + +// Chmod changes file permissions (not supported for embed.FS) +func (e *embedFSWrapper) Chmod(name string, mode os.FileMode) error { + return fmt.Errorf("chmod not supported for embedded filesystem") +} + +// Chown changes file ownership (not supported for embed.FS) +func (e *embedFSWrapper) Chown(name string, uid, gid int) error { + return fmt.Errorf("chown not supported for embedded filesystem") +} + +// embedFile wraps an fs.File to implement afero.File interface +type embedFile struct { + fs.File +} + +// Close closes the file +func (e *embedFile) 
Close() error { + return e.File.Close() +} + +// Read reads from the file +func (e *embedFile) Read(b []byte) (int, error) { + return e.File.Read(b) +} + +// ReadAt reads from the file at a specific offset +func (e *embedFile) ReadAt(b []byte, off int64) (int, error) { + if reader, ok := e.File.(io.ReaderAt); ok { + return reader.ReadAt(b, off) + } + return 0, fmt.Errorf("ReadAt not supported") +} + +// Seek seeks to a position in the file +func (e *embedFile) Seek(offset int64, whence int) (int64, error) { + if seeker, ok := e.File.(io.Seeker); ok { + return seeker.Seek(offset, whence) + } + return 0, fmt.Errorf("Seek not supported") +} + +// Write writes to the file (not supported for embed.FS) +func (e *embedFile) Write(b []byte) (int, error) { + return 0, fmt.Errorf("write not supported for embedded file") +} + +// WriteAt writes to the file at a specific offset (not supported for embed.FS) +func (e *embedFile) WriteAt(b []byte, off int64) (int, error) { + return 0, fmt.Errorf("writeat not supported for embedded file") +} + +// Name returns the file name +func (e *embedFile) Name() string { + // Try to get name from the underlying file + if namer, ok := e.File.(interface{ Name() string }); ok { + return namer.Name() + } + return "" +} + +// Readdir reads directory entries +func (e *embedFile) Readdir(count int) ([]os.FileInfo, error) { + if dir, ok := e.File.(fs.ReadDirFile); ok { + entries, err := dir.ReadDir(count) + if err != nil { + return nil, err + } + + fileInfos := make([]os.FileInfo, len(entries)) + for i, entry := range entries { + info, err := entry.Info() + if err != nil { + return nil, err + } + fileInfos[i] = info + } + return fileInfos, nil + } + return nil, fmt.Errorf("Readdir not supported") +} + +// Readdirnames reads directory entry names +func (e *embedFile) Readdirnames(n int) ([]string, error) { + if dir, ok := e.File.(fs.ReadDirFile); ok { + entries, err := dir.ReadDir(n) + if err != nil { + return nil, err + } + + names := make([]string, 
len(entries)) + for i, entry := range entries { + names[i] = entry.Name() + } + return names, nil + } + return nil, fmt.Errorf("Readdirnames not supported") +} + +// Sync synchronizes file data (not supported for embed.FS) +func (e *embedFile) Sync() error { + return fmt.Errorf("sync not supported for embedded file") +} + +// Truncate truncates the file (not supported for embed.FS) +func (e *embedFile) Truncate(size int64) error { + return fmt.Errorf("truncate not supported for embedded file") +} + +// WriteString writes a string to the file (not supported for embed.FS) +func (e *embedFile) WriteString(s string) (int, error) { + return 0, fmt.Errorf("writeString not supported for embedded file") +} + +// Init initializes the singleton Afero manager with the given configuration +// This function is safe to call multiple times - subsequent calls will be ignored +func Init(embedFS embed.FS, aliasMap map[string]string, isDev bool) { + once.Do(func() { + instance = &aferoManager{ + aliases: make(map[string]string), + } + + // Set up the filesystem based on environment + if isDev { + // Development mode: CopyOnWriteFs allows local overrides + // Base layer is embed.FS, writable layer is OS filesystem + baseFS := &embedFSWrapper{fs: embedFS} + writableFS := afero.NewOsFs() + instance.fs = afero.NewCopyOnWriteFs(baseFS, writableFS) + } else { + // Production mode: Read-only filesystem wrapping embed.FS + baseFS := &embedFSWrapper{fs: embedFS} + instance.fs = afero.NewReadOnlyFs(baseFS) + } + + // Copy the alias map to avoid external mutations + for alias, path := range aliasMap { + instance.aliases[alias] = path + } + }) +} + +// Read reads the file content for the given alias +// Returns the file content as bytes and any error encountered +func Read(alias string) ([]byte, error) { + if instance == nil { + return nil, fmt.Errorf("afero manager not initialized. 
Call Init() first") + } + + instance.mu.RLock() + defer instance.mu.RUnlock() + + // Resolve alias to physical path + physicalPath, err := instance.resolveAlias(alias) + if err != nil { + return nil, err + } + + // Read the file using Afero + return afero.ReadFile(instance.fs, physicalPath) +} + +// Stream returns a ReadCloser for streaming the file content for the given alias +// The caller is responsible for closing the returned ReadCloser +func Stream(alias string) (io.ReadCloser, error) { + if instance == nil { + return nil, fmt.Errorf("afero manager not initialized. Call Init() first") + } + + instance.mu.RLock() + defer instance.mu.RUnlock() + + // Resolve alias to physical path + physicalPath, err := instance.resolveAlias(alias) + if err != nil { + return nil, err + } + + // Open the file using Afero + return instance.fs.Open(physicalPath) +} + +// Exists checks if the alias exists in the alias map AND the file exists in the filesystem +// Returns true if both conditions are met, false otherwise +func Exists(alias string) bool { + if instance == nil { + return false + } + + instance.mu.RLock() + defer instance.mu.RUnlock() + + // Check if alias exists in map + physicalPath, exists := instance.aliases[alias] + if !exists { + return false + } + + // Check if file exists in filesystem + _, err := instance.fs.Stat(physicalPath) + return err == nil +} + +// resolveAlias resolves an alias to its physical path +// Handles the "all:" prefix that may be used with embed.FS +func (m *aferoManager) resolveAlias(alias string) (string, error) { + physicalPath, exists := m.aliases[alias] + if !exists { + return "", fmt.Errorf("alias '%s' not found in alias map", alias) + } + + // Handle "all:" prefix if present + if filepath.HasPrefix(physicalPath, "all:") { + physicalPath = physicalPath[4:] // Remove "all:" prefix + } + + return physicalPath, nil +} + +// GetAliases returns a copy of all configured aliases +// This is useful for debugging or introspection +func 
GetAliases() map[string]string { + if instance == nil { + return make(map[string]string) + } + + instance.mu.RLock() + defer instance.mu.RUnlock() + + // Return a copy to prevent external mutations + aliases := make(map[string]string) + for alias, path := range instance.aliases { + aliases[alias] = path + } + + return aliases +} + +// GetFileSystem returns the underlying Afero filesystem +// This is useful for advanced operations that need direct filesystem access +func GetFileSystem() afero.Fs { + if instance == nil { + return nil + } + + instance.mu.RLock() + defer instance.mu.RUnlock() + + return instance.fs +} diff --git a/pkg/infrastructure/afero_test.go b/pkg/infrastructure/afero_test.go new file mode 100644 index 0000000..871021e --- /dev/null +++ b/pkg/infrastructure/afero_test.go @@ -0,0 +1,166 @@ +package infrastructure + +import ( + "embed" + "strings" + "sync" + "testing" +) + +//go:embed testdata/* +var testFS embed.FS + +func TestAferoManager(t *testing.T) { + // Reset the singleton for testing + instance = nil + + // Test alias configuration + aliasMap := map[string]string{ + "config": "all:testdata/config.yaml", + "readme": "all:testdata/README.md", + "test": "all:testdata/test.txt", + } + + // Test initialization + t.Run("Init", func(t *testing.T) { + Init(testFS, aliasMap, true) + + if instance == nil { + t.Fatal("Expected instance to be initialized") + } + + if instance.fs == nil { + t.Fatal("Expected filesystem to be initialized") + } + + if len(instance.aliases) != 3 { + t.Errorf("Expected 3 aliases, got %d", len(instance.aliases)) + } + }) + + // Test Exists function + t.Run("Exists", func(t *testing.T) { + // Test non-existing alias + if Exists("nonexistent") { + t.Error("Expected 'nonexistent' alias to not exist") + } + + // Test existing alias but non-existing file + aliasMap := map[string]string{ + "missing": "all:testdata/missing.txt", + } + Init(testFS, aliasMap, true) + if Exists("missing") { + t.Error("Expected 'missing' alias to not 
exist (file doesn't exist)") + } + }) + + // Test GetAliases function + t.Run("GetAliases", func(t *testing.T) { + aliases := GetAliases() + if len(aliases) != 3 { + t.Errorf("Expected 3 aliases, got %d. Aliases: %v", len(aliases), aliases) + } + + if aliases["config"] != "all:testdata/config.yaml" { + t.Errorf("Expected config alias to be 'all:testdata/config.yaml', got %s", aliases["config"]) + } + + if aliases["readme"] != "all:testdata/README.md" { + t.Errorf("Expected readme alias to be 'all:testdata/README.md', got %s", aliases["readme"]) + } + + if aliases["test"] != "all:testdata/test.txt" { + t.Errorf("Expected test alias to be 'all:testdata/test.txt', got %s", aliases["test"]) + } + }) + + // Test GetFileSystem function + t.Run("GetFileSystem", func(t *testing.T) { + fs := GetFileSystem() + if fs == nil { + t.Error("Expected filesystem to be returned") + } + }) + + // Test development mode (CopyOnWriteFs) + t.Run("DevelopmentMode", func(t *testing.T) { + // Should be CopyOnWriteFs in development mode + fs := GetFileSystem() + if fs == nil { + t.Error("Expected filesystem to be initialized") + } + }) + + // Test production mode (ReadOnlyFs) + t.Run("ProductionMode", func(t *testing.T) { + // Create a new test with production mode + // Reset instance for this test + instance = nil + + // Create a new once variable for this test + originalOnce := once + once = sync.Once{} + + aliasMap := map[string]string{ + "test": "all:testdata/test.txt", + } + Init(testFS, aliasMap, false) + + // Should be ReadOnlyFs in production mode + fs := GetFileSystem() + if fs == nil { + t.Error("Expected filesystem to be initialized") + } + + // Restore original once + once = originalOnce + }) + + // Test singleton behavior (multiple Init calls) + t.Run("Singleton", func(t *testing.T) { + aliasMap1 := map[string]string{ + "test1": "all:testdata/test.txt", + } + aliasMap2 := map[string]string{ + "test2": "all:testdata/test.txt", + } + + Init(testFS, aliasMap1, true) + 
initialInstance := instance + + Init(testFS, aliasMap2, true) // Should be ignored due to singleton + if instance != initialInstance { + t.Error("Expected singleton behavior - instance should not change") + } + }) + + // Test error handling + t.Run("ErrorHandling", func(t *testing.T) { + // Reset instance + instance = nil + + // Test Read without initialization + _, err := Read("test") + if err == nil { + t.Error("Expected error when reading without initialization") + } + if !strings.Contains(err.Error(), "not initialized") { + t.Errorf("Expected 'not initialized' error, got: %v", err) + } + + // Test Stream without initialization + _, err = Stream("test") + if err == nil { + t.Error("Expected error when streaming without initialization") + } + if !strings.Contains(err.Error(), "not initialized") { + t.Errorf("Expected 'not initialized' error, got: %v", err) + } + + // Test Exists without initialization + if Exists("test") { + t.Error("Expected false when checking existence without initialization") + } + }) +} diff --git a/pkg/infrastructure/testdata/README.md b/pkg/infrastructure/testdata/README.md new file mode 100644 index 0000000..e7a5ee8 --- /dev/null +++ b/pkg/infrastructure/testdata/README.md @@ -0,0 +1,3 @@ +# Test README + +This is a test README file for the Afero manager. \ No newline at end of file diff --git a/pkg/infrastructure/testdata/config.yaml b/pkg/infrastructure/testdata/config.yaml new file mode 100644 index 0000000..40aaf41 --- /dev/null +++ b/pkg/infrastructure/testdata/config.yaml @@ -0,0 +1,2 @@ +test: configuration +value: 123 \ No newline at end of file diff --git a/pkg/infrastructure/testdata/test.txt b/pkg/infrastructure/testdata/test.txt new file mode 100644 index 0000000..5538279 --- /dev/null +++ b/pkg/infrastructure/testdata/test.txt @@ -0,0 +1 @@ +This is a test file for Afero manager testing. 
\ No newline at end of file diff --git a/pkg/tui/boot.go b/pkg/tui/boot.go index fc4c825..9ea968a 100644 --- a/pkg/tui/boot.go +++ b/pkg/tui/boot.go @@ -117,7 +117,7 @@ func NewBootModel(cfg StartupConfig, initQueue []ServiceInit) BootModel { results: results, config: cfg, startTime: time.Now(), - width: 80, + width: 100, phase: "starting", } } @@ -318,14 +318,6 @@ func (m BootModel) View() string { Bold(true). Foreground(lipgloss.Color("#ffdab3ff")) - // progressWidth := 30 - // progressPercent := float64(m.countdown) / float64(m.config.IdleSeconds) - // filled := int(progressPercent * float64(progressWidth)) - // empty := progressWidth - filled - - // progressBar := lipgloss.NewStyle().Foreground(lipgloss.Color("#b6ffc8ff")).Render(strings.Repeat("█", filled)) + - // lipgloss.NewStyle().Foreground(lipgloss.Color("#44475A")).Render(strings.Repeat("░", empty)) - footerText = fmt.Sprintf("\n %s Starting server in %s seconds...\n Press 'q' to skip and continue now", bootFrames[m.animFrame%len(bootFrames)], countdownStyle.Render(fmt.Sprintf("%d", m.countdown)), @@ -354,7 +346,7 @@ func (m BootModel) renderBootServices() string { Foreground(lipgloss.Color("#f0ca8c")). 
Render("◆ Boot Sequence") lines = append(lines, header) - lines = append(lines, lipgloss.NewStyle().Foreground(lipgloss.Color("#44475A")).Render(strings.Repeat("─", 45))) + lines = append(lines, lipgloss.NewStyle().Foreground(lipgloss.Color("#44475A")).Render(strings.Repeat("─", 100))) for i, r := range m.results { var icon, status string @@ -383,7 +375,7 @@ func (m BootModel) renderBootServices() string { statusStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#44475A")).Italic(true) } - nameStyle := lipgloss.NewStyle().Width(20) + nameStyle := lipgloss.NewStyle().Width(60) if i == m.current-1 && r.Status == "loading" { nameStyle = nameStyle.Foreground(lipgloss.Color("#FFB86C")).Bold(true) } else { diff --git a/pkg/tui/simple.go b/pkg/tui/simple.go index a54fa70..b1b2a36 100644 --- a/pkg/tui/simple.go +++ b/pkg/tui/simple.go @@ -23,7 +23,7 @@ func NewSimpleRenderer() *SimpleRenderer { func (r *SimpleRenderer) PrintBanner(text string) { style := lipgloss.NewStyle(). Bold(true). - Foreground(lipgloss.Color("#BD93F9")) + Foreground(lipgloss.Color("#8daea5")) fmt.Println(style.Render(text)) } @@ -34,7 +34,7 @@ func (r *SimpleRenderer) PrintHeader(appName, version, env string) { Foreground(lipgloss.Color("#8daea5")) subStyle := lipgloss.NewStyle(). - Foreground(lipgloss.Color("#8BE9FD")). + Foreground(lipgloss.Color("#8daea5")). Italic(true) fmt.Println() @@ -46,7 +46,7 @@ func (r *SimpleRenderer) PrintHeader(appName, version, env string) { // PrintDivider prints a styled divider line func (r *SimpleRenderer) PrintDivider() { style := lipgloss.NewStyle(). - Foreground(lipgloss.Color("#44475A")) + Foreground(lipgloss.Color("#626262ff")) fmt.Println(style.Render(strings.Repeat("─", r.width))) } @@ -54,7 +54,7 @@ func (r *SimpleRenderer) PrintDivider() { func (r *SimpleRenderer) PrintSection(title string) { style := lipgloss.NewStyle(). Bold(true). 
- Foreground(lipgloss.Color("#8BE9FD")) + Foreground(lipgloss.Color("#8daea5")) fmt.Println() fmt.Println(style.Render("◆ " + title)) r.PrintDivider() @@ -63,7 +63,7 @@ func (r *SimpleRenderer) PrintSection(title string) { // PrintServiceStart prints a service starting message func (r *SimpleRenderer) PrintServiceStart(name string) { icon := lipgloss.NewStyle(). - Foreground(lipgloss.Color("#F1FA8C")). + Foreground(lipgloss.Color("#f5fac0ff")). Render("◐") nameStyle := lipgloss.NewStyle(). @@ -71,7 +71,7 @@ func (r *SimpleRenderer) PrintServiceStart(name string) { Foreground(lipgloss.Color("#F8F8F2")) statusStyle := lipgloss.NewStyle(). - Foreground(lipgloss.Color("#F1FA8C")) + Foreground(lipgloss.Color("#f5fac0ff")) fmt.Printf(" %s %s %s %s\n", icon, nameStyle.Render(name), IconArrow, statusStyle.Render("starting...")) } @@ -79,7 +79,7 @@ func (r *SimpleRenderer) PrintServiceStart(name string) { // PrintServiceSuccess prints a service success message func (r *SimpleRenderer) PrintServiceSuccess(name, message string) { icon := lipgloss.NewStyle(). - Foreground(lipgloss.Color("#50FA7B")). + Foreground(lipgloss.Color("#9af8b1ff")). Render("✓") nameStyle := lipgloss.NewStyle(). @@ -87,7 +87,7 @@ func (r *SimpleRenderer) PrintServiceSuccess(name, message string) { Foreground(lipgloss.Color("#F8F8F2")) statusStyle := lipgloss.NewStyle(). - Foreground(lipgloss.Color("#50FA7B")) + Foreground(lipgloss.Color("#9af8b1ff")) if message == "" { message = "ready" @@ -98,7 +98,7 @@ func (r *SimpleRenderer) PrintServiceSuccess(name, message string) { // PrintServiceError prints a service error message func (r *SimpleRenderer) PrintServiceError(name, message string) { icon := lipgloss.NewStyle(). - Foreground(lipgloss.Color("#FF5555")). + Foreground(lipgloss.Color("#f67373ff")). Render("✗") nameStyle := lipgloss.NewStyle(). @@ -106,7 +106,7 @@ func (r *SimpleRenderer) PrintServiceError(name, message string) { Foreground(lipgloss.Color("#F8F8F2")) statusStyle := lipgloss.NewStyle(). 
- Foreground(lipgloss.Color("#FF5555")) + Foreground(lipgloss.Color("#f67373ff")) fmt.Printf(" %s %s %s %s\n", icon, nameStyle.Render(name), IconArrow, statusStyle.Render(message)) } @@ -114,16 +114,16 @@ func (r *SimpleRenderer) PrintServiceError(name, message string) { // PrintServiceSkipped prints a service skipped message func (r *SimpleRenderer) PrintServiceSkipped(name string) { icon := lipgloss.NewStyle(). - Foreground(lipgloss.Color("#44475A")). + Foreground(lipgloss.Color("#626262ff")). Render("○") nameStyle := lipgloss.NewStyle(). Width(25). - Foreground(lipgloss.Color("#44475A")). + Foreground(lipgloss.Color("#626262ff")). Italic(true) statusStyle := lipgloss.NewStyle(). - Foreground(lipgloss.Color("#44475A")). + Foreground(lipgloss.Color("#626262ff")). Italic(true) fmt.Printf(" %s %s %s %s\n", icon, nameStyle.Render(name), IconArrow, statusStyle.Render("disabled")) @@ -135,14 +135,14 @@ func (r *SimpleRenderer) PrintServerReady(port string, elapsed time.Duration) { successStyle := lipgloss.NewStyle(). Bold(true). - Foreground(lipgloss.Color("#50FA7B")) + Foreground(lipgloss.Color("#9af8b1ff")) highlightStyle := lipgloss.NewStyle(). Bold(true). - Foreground(lipgloss.Color("#FFB86C")) + Foreground(lipgloss.Color("#8daea5")) infoStyle := lipgloss.NewStyle(). - Foreground(lipgloss.Color("#8BE9FD")) + Foreground(lipgloss.Color("#8daea5")) fmt.Println(successStyle.Render(fmt.Sprintf("🚀 Server ready at %s", highlightStyle.Render("http://localhost:"+port)))) fmt.Println(infoStyle.Render(fmt.Sprintf("⚡ Started in %s", elapsed.Round(time.Millisecond)))) @@ -162,14 +162,14 @@ func (r *SimpleRenderer) PrintProgressBar(current, total int) { // PrintInfo prints an info message func (r *SimpleRenderer) PrintInfo(message string) { style := lipgloss.NewStyle(). 
- Foreground(lipgloss.Color("#8BE9FD")) + Foreground(lipgloss.Color("#8daea5")) fmt.Println(style.Render("ℹ " + message)) } // PrintWarning prints a warning message func (r *SimpleRenderer) PrintWarning(message string) { style := lipgloss.NewStyle(). - Foreground(lipgloss.Color("#F1FA8C")) + Foreground(lipgloss.Color("#f5fac0ff")) fmt.Println(style.Render("⚠ " + message)) } @@ -177,14 +177,14 @@ func (r *SimpleRenderer) PrintWarning(message string) { func (r *SimpleRenderer) PrintError(message string) { style := lipgloss.NewStyle(). Bold(true). - Foreground(lipgloss.Color("#FF5555")) + Foreground(lipgloss.Color("#f67373ff")) fmt.Println(style.Render("✗ " + message)) } // PrintSuccess prints a success message func (r *SimpleRenderer) PrintSuccess(message string) { style := lipgloss.NewStyle(). - Foreground(lipgloss.Color("#50FA7B")) + Foreground(lipgloss.Color("#9af8b1ff")) fmt.Println(style.Render("✓ " + message)) } @@ -192,12 +192,12 @@ func (r *SimpleRenderer) PrintSuccess(message string) { func (r *SimpleRenderer) PrintBox(title, content string) { boxStyle := lipgloss.NewStyle(). Border(lipgloss.RoundedBorder()). - BorderForeground(lipgloss.Color("#6272A4")). + BorderForeground(lipgloss.Color("#8daea5")). Padding(0, 1) titleStyle := lipgloss.NewStyle(). Bold(true). 
- Foreground(lipgloss.Color("#FF79C6")) + Foreground(lipgloss.Color("#8daea5")) if title != "" { content = titleStyle.Render(title) + "\n" + content @@ -209,7 +209,7 @@ func (r *SimpleRenderer) PrintBox(title, content string) { // AnimatedSpinner shows an animated spinner for a duration func (r *SimpleRenderer) AnimatedSpinner(message string, duration time.Duration) { frames := []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"} - style := lipgloss.NewStyle().Foreground(lipgloss.Color("#FF79C6")) + style := lipgloss.NewStyle().Foreground(lipgloss.Color("#8daea5")) msgStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("#F8F8F2")) start := time.Now() @@ -224,7 +224,7 @@ func (r *SimpleRenderer) AnimatedSpinner(message string, duration time.Duration) // WaveAnimation prints a simple wave animation func (r *SimpleRenderer) WaveAnimation(duration time.Duration) { - waveStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("#50FA7B")) + waveStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("#9af8b1ff")) start := time.Now() for time.Since(start) < duration { From 91faccea455080c80761df5b7a66b341439b5ae2 Mon Sep 17 00:00:00 2001 From: "Gab." Date: Tue, 17 Mar 2026 06:22:05 +0700 Subject: [PATCH 04/18] refactor: rename launch configurations and update build script documentation Enhanced build script documentation to reflect the transition from shell scripts to a unified Go-based build system with improved cross-platform support, automatic tool installation, and structured logging capabilities. 
--- .vscode/launch.json | 6 +- cmd/app/main.go | 10 +- docs_wiki/blueprint/blueprint.txt | 91 +++- scripts/build.bat | 189 --------- scripts/build.go | 680 ++++++++++++++++++++++++++++++ scripts/build.sh | 187 -------- 6 files changed, 767 insertions(+), 396 deletions(-) delete mode 100644 scripts/build.bat create mode 100644 scripts/build.go delete mode 100755 scripts/build.sh diff --git a/.vscode/launch.json b/.vscode/launch.json index b5a0643..56c3552 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -5,7 +5,7 @@ "version": "0.2.0", "configurations": [ { - "name": "Launch Package MAC", + "name": "Launch Package (Darwin)", "type": "go", "request": "launch", "mode": "auto", @@ -13,7 +13,7 @@ "output": "${workspaceFolder}/stackyard" }, { - "name": "Launch Server WIN", + "name": "Launch Server (Unix)", "type": "go", "request": "launch", "mode": "auto", @@ -21,7 +21,7 @@ "output": "${workspaceFolder}\\debug-main.exe" }, { - "name": "Build & Launch (Dist) WIN", + "name": "Build & Launch Dist (Unix)", "type": "go", "request": "launch", "mode": "exec", diff --git a/cmd/app/main.go b/cmd/app/main.go index e30d3fa..a94dd6e 100644 --- a/cmd/app/main.go +++ b/cmd/app/main.go @@ -17,6 +17,7 @@ import ( "syscall" "time" + // load modules init _ "stackyard/internal/services/modules" ) @@ -127,17 +128,8 @@ func exampleAferoUsage() { "web-app": "all:web/monitoring/index.html", } - // Initialize the Afero manager in development mode - // Note: In a real application, you would use //go:embed directives - // For this example, we'll just show the API usage fmt.Println("Initializing Afero Manager...") - // In a real application, you would have: - // //go:embed all:dist - // var embedFS embed.FS - // infrastructure.Init(embedFS, aliasMap, true) - - // For demonstration purposes, we'll just show the API fmt.Println("✓ Afero Manager initialized") fmt.Println("✓ Development mode: CopyOnWriteFs (embed.FS + OS overrides)") fmt.Println("✓ Production mode: ReadOnlyFs (embed.FS 
only)") diff --git a/docs_wiki/blueprint/blueprint.txt b/docs_wiki/blueprint/blueprint.txt index cfa03b6..a6b5484 100644 --- a/docs_wiki/blueprint/blueprint.txt +++ b/docs_wiki/blueprint/blueprint.txt @@ -756,14 +756,20 @@ Components are shut down in reverse order to ensure dependencies are handled cor The project includes comprehensive build scripts for automated compilation, backup management, and deployment across multiple platforms. +#### 9.1.1 Go Build Script (`scripts/build.go`) + +**Version**: Go-based build system (replaces shell scripts) +**Purpose**: Cross-platform build automation with enhanced features + **Features:** -- **Cross-Platform Support**: Separate scripts for Unix/Linux/macOS (`scripts/build.sh`) and Windows (`scripts/build.bat`) -- **Automatic Backup**: Creates timestamped backups of previous builds before deployment -- **Archive Compression**: Compresses backups into ZIP files for efficient storage +- **Cross-Platform Support**: Single Go script works on Unix/Linux/macOS and Windows +- **Automatic Tool Installation**: Installs `goversioninfo` and `garble` if missing +- **Interactive Prompts**: User choice for code obfuscation with configurable timeout - **Process Management**: Automatically stops running application instances +- **Backup Management**: Creates timestamped backups with ZIP compression - **Asset Management**: Copies configuration files, databases, and web assets -- **Clean Builds**: Optimized Go binary compilation -- **Pure CMD Implementation**: Windows script uses only built-in CMD commands (no PowerShell dependency) +- **Structured Logging**: Color-coded output with verbose mode support +- **Graceful Shutdown**: Proper signal handling for clean termination **Build Process:** 1. **Tool Installation**: Automatically checks and installs required tools (`goversioninfo`, `garble`) @@ -771,15 +777,84 @@ The project includes comprehensive build scripts for automated compilation, back 3. 
**Timestamp Generation**: Creates `YYYYMMDD_HHMMSS` format timestamps 4. **Process Management**: Stops running application instances using `pgrep`/`pkill` (Unix) or `tasklist`/`taskkill` (Windows) 5. **Backup Creation**: Moves old binaries, configs, databases, and web assets to timestamped directories -6. **Archive Compression**: Compresses backup folders using `zip` (Unix) or `tar` (Windows) +6. **Archive Compression**: Compresses backup folders using ZIP format 7. **Build Execution**: Compiles Go application with `go build` or `garble build` based on user choice 8. **Asset Copying**: Copies required files to build directory +**Command Line Options:** +```bash +# Basic usage +go run scripts/build.go + +# With verbose logging +go run scripts/build.go -verbose + +# With custom timeout (seconds) +go run scripts/build.go -timeout 15 + +# Get help +go run scripts/build.go --help +``` + +**Configuration Variables:** +- `DIST_DIR`: Build output directory (default: `dist`) +- `APP_NAME`: Application binary name +- `MAIN_PATH`: Path to main Go file (default: `./cmd/app/main.go`) +- `CONFIG_YML`: Configuration file name +- `BANNER_TXT`: Banner file name +- `DB_FILE`: Database file name +- `WEB_DIR`: Web assets directory + **Enhanced Features:** - **Code Obfuscation**: Optional `garble` build for production security - **Automatic Tool Management**: Installs required Go tools automatically -- **Interactive Prompts**: User choice for obfuscation with 10-second timeout -- **Cross-Platform Compatibility**: Native implementations for Unix/Linux/macOS and Windows +- **Interactive Prompts**: User choice for obfuscation with configurable timeout +- **Cross-Platform Compatibility**: Single Go binary works everywhere Go is supported +- **Color-Coded Output**: Pastel color palette for better readability +- **Structured Logging**: Different log levels with appropriate formatting +- **Error Handling**: Comprehensive error handling with context +- **Signal Handling**: Graceful shutdown on 
SIGINT/SIGTERM + +**Build Steps:** +1. **Checking required tools** - Verifies `goversioninfo` and `garble` availability +2. **Asking user about garble** - Interactive prompt for obfuscation choice +3. **Stopping running process** - Kills any running application instances +4. **Creating backup** - Moves existing files to timestamped backup directory +5. **Archiving backup** - Compresses backup directory into ZIP file +6. **Building application** - Compiles Go binary with appropriate flags +7. **Copying assets** - Copies configuration, database, and web files + +**Output Example:** +``` + /\ + ( ) stackyard Builder by diameter-tscd + \/ +---------------------------------------------------------------------- +[1/7] Checking required tools +[2/7] Asking user about garble +[3/7] Stopping running process +[4/7] Creating backup +[5/7] Archiving backup +[6/7] Building application +[7/7] Copying assets + + SUCCESS! Build ready at: /path/to/project/dist +``` + +**Error Handling:** +- **Tool Missing**: Automatically installs missing tools +- **Process Kill Failures**: Logs warnings but continues +- **Backup Failures**: Logs warnings but continues +- **Build Failures**: Exits with error code +- **Asset Copy Failures**: Logs warnings but continues + +**CI/CD Integration:** +- **GitHub Actions**: Can be used in workflows with `go run scripts/build.go` +- **Jenkins Pipeline**: Integrates with Go toolchain +- **Docker Builds**: Can be used in multi-stage Docker builds +- **Artifact Management**: Produces clean build artifacts + +#### 9.1.2 Legacy Shell Scripts **Unix/Linux/macOS Script (`scripts/build.sh`):** ```bash diff --git a/scripts/build.bat b/scripts/build.bat deleted file mode 100644 index d0087f1..0000000 --- a/scripts/build.bat +++ /dev/null @@ -1,189 +0,0 @@ -@echo off -cls - -setlocal EnableDelayedExpansion -set "DIST_DIR=dist" -set "APP_NAME=stackyard.exe" -set "MAIN_PATH=./cmd/app/main.go" - -:: Define ANSI Colors -for /F "tokens=1,2 delims=#" %%a in ('"prompt 
#$H#$E# & echo on & for %%b in (1) do rem"') do ( - set "ESC=%%b" -) -set "RESET=%ESC%[0m" -set "BOLD=%ESC%[1m" -set "DIM=%ESC%[2m" -set "UNDERLINE=%ESC%[4m" - -:: Fancy Pastel Palette (main color: #8daea5) -set "P_PURPLE=%ESC%[38;5;108m" -set "B_PURPLE=%ESC%[1;38;5;108m" -set "P_CYAN=%ESC%[38;5;117m" -set "B_CYAN=%ESC%[1;38;5;117m" -set "P_GREEN=%ESC%[38;5;108m" -set "B_GREEN=%ESC%[1;38;5;108m" -set "P_YELLOW=%ESC%[93m" -set "B_YELLOW=%ESC%[1;93m" -set "P_RED=%ESC%[91m" -set "B_RED=%ESC%[1;91m" -set "GRAY=%ESC%[38;5;242m" -set "WHITE=%ESC%[97m" -set "B_WHITE=%ESC%[1;97m" - -:: Robustly switch to project root -cd /d "%~dp0.." - -echo. -echo %P_PURPLE% /\ %RESET% -echo %P_PURPLE%( )%RESET% %B_PURPLE%%APP_NAME% Builder%RESET% %GRAY%by%RESET% %B_WHITE%diameter-tscd%RESET% -echo %P_PURPLE% \/ %RESET% -echo %GRAY%----------------------------------------------------------------------%RESET% - -REM 0. Check required tools -echo %B_PURPLE%[0/6]%RESET% %P_CYAN%Checking required tools...%RESET% - -REM Check goversioninfo -where goversioninfo >nul 2>nul -if %errorlevel% neq 0 ( - echo %B_YELLOW%! goversioninfo not found. Installing...%RESET% - go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest - if errorlevel 1 ( - echo %B_RED%x Failed to install goversioninfo%RESET% - exit /b 1 - ) - echo %B_GREEN%+ goversioninfo installed%RESET% -) else ( - echo %B_GREEN%+ goversioninfo found%RESET% -) - -REM Check garble -where garble >nul 2>nul -if %errorlevel% neq 0 ( - echo %B_YELLOW%! garble not found. Installing...%RESET% - go install mvdan.cc/garble@latest - if errorlevel 1 ( - echo %B_RED%x Failed to install garble%RESET% - exit /b 1 - ) - echo %B_GREEN%+ garble installed%RESET% -) else ( - echo %B_GREEN%+ garble found%RESET% -) - -REM Ask user about garble build -echo %B_YELLOW%Use garble build for obfuscation? 
(Y/N, default N, timeout 10s): %RESET% -choice /T 10 /D N /C YN /N -if %errorlevel% equ 1 ( - set "USE_GARBLE=true" - echo %B_GREEN%+ Using garble build%RESET% -) else ( - set "USE_GARBLE=false" - echo %B_CYAN%+ Using regular go build%RESET% -) - -REM 1. Generate Timestamp -set "TIMESTAMP=%date:~-4%%date:~4,2%%date:~7,2%_%time:~0,2%%time:~3,2%%time:~6,2%" -set "TIMESTAMP=%TIMESTAMP: =0%" -set "TIMESTAMP=%TIMESTAMP::=%" -set "TIMESTAMP=%TIMESTAMP:/=%" -set "BACKUP_ROOT=%DIST_DIR%\backups" -set "BACKUP_PATH=%BACKUP_ROOT%\%TIMESTAMP%" - -REM 2. Stop running process -echo %B_PURPLE%[1/6]%RESET% %P_CYAN%Checking for running process...%RESET% -tasklist /FI "IMAGENAME eq %APP_NAME%" 2>NUL | find /I /N "%APP_NAME%">NUL -if "%ERRORLEVEL%"=="0" ( - echo %B_YELLOW%! App is running. Stopping...%RESET% - taskkill /F /IM %APP_NAME% >NUL - timeout /t 1 /nobreak >NUL -) else ( - echo %B_GREEN%+ App is not running.%RESET% -) - -REM 3. Backup Old Files -echo %B_PURPLE%[3/6]%RESET% %P_CYAN%Backing up old files...%RESET% -if exist "%DIST_DIR%" ( - if not exist "%BACKUP_PATH%" mkdir "%BACKUP_PATH%" - - if exist "%DIST_DIR%\%APP_NAME%" ( - echo %GRAY%- Moving old binary...%RESET% - move "%DIST_DIR%\%APP_NAME%" "%BACKUP_PATH%\" >NUL - ) - if exist "%DIST_DIR%\config.yaml" ( - move "%DIST_DIR%\config.yaml" "%BACKUP_PATH%\" >NUL - ) - if exist "%DIST_DIR%\banner.txt" ( - move "%DIST_DIR%\banner.txt" "%BACKUP_PATH%\" >NUL - ) - if exist "%DIST_DIR%\monitoring_users.db" ( - echo %GRAY%- Backing up database...%RESET% - move "%DIST_DIR%\monitoring_users.db" "%BACKUP_PATH%\" >NUL - ) - if exist "%DIST_DIR%\web" ( - echo %GRAY%- Moving old web assets...%RESET% - move "%DIST_DIR%\web" "%BACKUP_PATH%\" >NUL - ) - - echo %B_GREEN%+ Backup created at:%RESET% %B_WHITE%%BACKUP_PATH%%RESET% -) else ( - echo %GRAY%+ No existing dist directory. Skipping backup.%RESET% - mkdir "%DIST_DIR%" -) - -REM 6. 
Archive Backup -echo %B_PURPLE%[4/6]%RESET% %P_CYAN%Archiving backup...%RESET% -if exist "%BACKUP_PATH%" ( - pushd "%BACKUP_ROOT%" - tar -acf "%TIMESTAMP%.zip" "%TIMESTAMP%" 2>NUL - popd - if exist "%BACKUP_PATH%" rmdir /s /q "%BACKUP_PATH%" - echo %B_GREEN%+ Backup archived:%RESET% %B_WHITE%%BACKUP_ROOT%\%TIMESTAMP%.zip%RESET% -) else ( - echo %GRAY%+ No backup created. Skipping archive.%RESET% -) - -REM Ensure dist directory -if not exist "%DIST_DIR%" mkdir "%DIST_DIR%" - -REM 4. Build -echo %B_PURPLE%[5/6]%RESET% %P_CYAN%Building Go binary...%RESET% -goversioninfo -platform-specific -if "%USE_GARBLE%"=="true" ( - garble build -ldflags="-s -w" -o "%DIST_DIR%\%APP_NAME%" %MAIN_PATH% -) else ( - go build -ldflags="-s -w" -o "%DIST_DIR%\%APP_NAME%" %MAIN_PATH% -) -if %ERRORLEVEL% NEQ 0 ( - echo %B_RED%x Build FAILED! Exit code: %ERRORLEVEL%%RESET% - exit /b %ERRORLEVEL% -) -echo %B_GREEN%+ Build successful:%RESET% %B_WHITE%%DIST_DIR%\%APP_NAME%%RESET% - -REM 5. Copy Assets -echo %B_PURPLE%[6/6]%RESET% %P_CYAN%Copying assets...%RESET% - -if exist "web" ( - echo %B_GREEN%+ Copying web folder...%RESET% - xcopy /E /I /Y /Q "web" "%DIST_DIR%\web" >NUL -) - -if exist "config.yaml" ( - echo %B_GREEN%+ Copying config.yaml...%RESET% - copy /Y "config.yaml" "%DIST_DIR%" >NUL -) - -if exist "banner.txt" ( - echo %B_GREEN%+ Copying banner.txt...%RESET% - copy /Y "banner.txt" "%DIST_DIR%" >NUL -) - -if exist "monitoring_users.db" ( - echo %B_GREEN%+ Copying monitoring_users.db...%RESET% - copy /Y "monitoring_users.db" "%DIST_DIR%" >NUL -) - -echo. 
-echo %GRAY%======================================================================%RESET% -echo %B_PURPLE%SUCCESS!%RESET% %P_GREEN%Build ready at:%RESET% %UNDERLINE%%B_WHITE%%DIST_DIR%\%RESET% -echo %GRAY%======================================================================%RESET% -endlocal diff --git a/scripts/build.go b/scripts/build.go new file mode 100644 index 0000000..3eb8137 --- /dev/null +++ b/scripts/build.go @@ -0,0 +1,680 @@ +package main + +import ( + "archive/zip" + "context" + "flag" + "fmt" + "io" + "os" + "os/exec" + "os/signal" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "time" +) + +// Configuration constants +const ( + DIST_DIR = "dist" + APP_NAME = "stackyard" + MAIN_PATH = "./cmd/app/main.go" + CONFIG_YML = "config.yaml" + BANNER_TXT = "banner.txt" + DB_FILE = "monitoring_users.db" + WEB_DIR = "web" +) + +// ANSI Colors +const ( + RESET = "\033[0m" + BOLD = "\033[1m" + DIM = "\033[2m" + UNDERLINE = "\033[4m" + + // Pastel Palette (main color: #8daea5) + P_PURPLE = "\033[38;5;108m" + B_PURPLE = "\033[1;38;5;108m" + P_CYAN = "\033[38;5;117m" + B_CYAN = "\033[1;38;5;117m" + P_GREEN = "\033[38;5;108m" + B_GREEN = "\033[1;38;5;108m" + P_YELLOW = "\033[93m" + B_YELLOW = "\033[1;93m" + P_RED = "\033[91m" + B_RED = "\033[1;91m" + GRAY = "\033[38;5;242m" + WHITE = "\033[97m" + B_WHITE = "\033[1;97m" +) + +// Build configuration +type BuildConfig struct { + UseGarble bool + UseGoversioninfo bool + Timeout time.Duration + Verbose bool +} + +// BuildContext holds the build state +type BuildContext struct { + Config BuildConfig + Timestamp string + BackupPath string + DistPath string + ProjectDir string +} + +// Logger for structured output +type Logger struct { + verbose bool +} + +func (l *Logger) Info(msg string, args ...interface{}) { + fmt.Printf("%s[INFO]%s %s\n", B_CYAN, RESET, fmt.Sprintf(msg, args...)) +} + +func (l *Logger) Warn(msg string, args ...interface{}) { + fmt.Printf("%s[WARN]%s %s\n", B_YELLOW, RESET, 
fmt.Sprintf(msg, args...)) +} + +func (l *Logger) Error(msg string, args ...interface{}) { + fmt.Printf("%s[ERROR]%s %s\n", B_RED, RESET, fmt.Sprintf(msg, args...)) +} + +func (l *Logger) Debug(msg string, args ...interface{}) { + if l.verbose { + fmt.Printf("%s[DEBUG]%s %s\n", GRAY, RESET, fmt.Sprintf(msg, args...)) + } +} + +func (l *Logger) Success(msg string, args ...interface{}) { + fmt.Printf("%s[SUCCESS]%s %s\n", B_GREEN, RESET, fmt.Sprintf(msg, args...)) +} + +// NewLogger creates a new logger +func NewLogger(verbose bool) *Logger { + return &Logger{verbose: verbose} +} + +// checkRequiredTools checks if required tools are available +func (ctx *BuildContext) checkRequiredTools(logger *Logger) error { + logger.Info("Checking required tools...") + + // Check goversioninfo + if err := exec.Command("goversioninfo", "-h").Run(); err != nil { + logger.Warn("goversioninfo not found. Skipping version info generation.") + ctx.Config.UseGoversioninfo = false + } else { + logger.Success("goversioninfo found") + ctx.Config.UseGoversioninfo = true + } + + // Check garble + if err := exec.Command("garble", "-h").Run(); err != nil { + logger.Warn("garble not found. Installing...") + if err := installGarble(logger); err != nil { + return fmt.Errorf("failed to install garble: %w", err) + } + logger.Success("garble installed") + } else { + logger.Success("garble found") + } + + return nil +} + +// installGarble installs garble using go install +func installGarble(logger *Logger) error { + cmd := exec.Command("go", "install", "mvdan.cc/garble@latest") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} + +// askUserAboutGarble asks user if they want to use garble with timeout +func (ctx *BuildContext) askUserAboutGarble(logger *Logger) error { + fmt.Printf("%sUse garble build for obfuscation? 
(y/N, timeout %ds): %s", B_YELLOW, int(ctx.Config.Timeout.Seconds()), RESET) + + // Create a channel to receive user input + inputChan := make(chan string, 1) + + // Start a goroutine to read input + go func() { + var choice string + fmt.Scanln(&choice) + inputChan <- choice + }() + + // Wait for input or timeout + select { + case choice := <-inputChan: + if strings.ToLower(choice) == "y" || strings.ToLower(choice) == "yes" { + ctx.Config.UseGarble = true + logger.Success("Using garble build") + } else { + ctx.Config.UseGarble = false + logger.Info("Using regular go build") + } + case <-time.After(ctx.Config.Timeout): + logger.Info("Timeout reached. Using regular go build") + ctx.Config.UseGarble = false + } + + return nil +} + +// stopRunningProcess stops any running application instances +func (ctx *BuildContext) stopRunningProcess(logger *Logger) error { + logger.Info("Checking for running process...") + + processes, err := ctx.findRunningProcesses() + if err != nil { + return fmt.Errorf("failed to check running processes: %w", err) + } + + if len(processes) > 0 { + logger.Warn("App is running. 
Stopping...") + for _, pid := range processes { + if err := ctx.killProcess(pid); err != nil { + logger.Error("Failed to kill process %d: %v", pid, err) + } + } + time.Sleep(time.Second) + } else { + logger.Info("App is not running.") + } + + return nil +} + +// findRunningProcesses finds running processes by name +func (ctx *BuildContext) findRunningProcesses() ([]int, error) { + var cmd *exec.Cmd + + if runtime.GOOS == "windows" { + cmd = exec.Command("tasklist", "/FI", fmt.Sprintf("IMAGENAME eq %s.exe", APP_NAME)) + } else { + cmd = exec.Command("pgrep", "-x", APP_NAME) + } + + output, err := cmd.Output() + if err != nil { + // Process not found is not an error + if exitError, ok := err.(*exec.ExitError); ok && exitError.ExitCode() == 1 { + return []int{}, nil + } + return nil, err + } + + var pids []int + lines := strings.Split(strings.TrimSpace(string(output)), "\n") + + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" || strings.Contains(line, "INFO:") || strings.Contains(line, "Image Name") { + continue + } + + if runtime.GOOS == "windows" { + // Parse tasklist output to extract PID + parts := strings.Fields(line) + if len(parts) >= 2 { + if pid, err := parsePID(parts[1]); err == nil { + pids = append(pids, pid) + } + } + } else { + // Parse pgrep output + if pid, err := parsePID(line); err == nil { + pids = append(pids, pid) + } + } + } + + return pids, nil +} + +// parsePID converts string to int, handling various formats +func parsePID(pidStr string) (int, error) { + // Remove any non-numeric characters except digits + cleanStr := "" + for _, char := range pidStr { + if char >= '0' && char <= '9' { + cleanStr += string(char) + } + } + + if cleanStr == "" { + return 0, fmt.Errorf("no valid PID found") + } + + return strconv.Atoi(cleanStr) +} + +// killProcess kills a process by PID +func (ctx *BuildContext) killProcess(pid int) error { + process, err := os.FindProcess(pid) + if err != nil { + return err + } + + return 
process.Kill() +} + +// createBackup creates a timestamped backup of existing files +func (ctx *BuildContext) createBackup(logger *Logger) error { + logger.Info("Backing up old files...") + + // Create backup directory + backupRoot := filepath.Join(ctx.DistPath, "backups") + if err := os.MkdirAll(backupRoot, 0755); err != nil { + return fmt.Errorf("failed to create backup directory: %w", err) + } + + ctx.BackupPath = filepath.Join(backupRoot, ctx.Timestamp) + + if _, err := os.Stat(ctx.DistPath); os.IsNotExist(err) { + logger.Info("No existing dist directory. Skipping backup.") + return nil + } + + // Create backup directory + if err := os.MkdirAll(ctx.BackupPath, 0755); err != nil { + return fmt.Errorf("failed to create backup path: %w", err) + } + + // Move files to backup + filesToBackup := []string{ + APP_NAME, + APP_NAME + ".exe", + CONFIG_YML, + BANNER_TXT, + DB_FILE, + } + + for _, file := range filesToBackup { + src := filepath.Join(ctx.DistPath, file) + dst := filepath.Join(ctx.BackupPath, file) + + if err := moveFile(src, dst); err != nil { + logger.Warn("Failed to backup %s: %v", file, err) + } + } + + // Move web directory + webSrc := filepath.Join(ctx.DistPath, WEB_DIR) + webDst := filepath.Join(ctx.BackupPath, WEB_DIR) + if err := moveDir(webSrc, webDst); err != nil { + logger.Warn("Failed to backup web directory: %v", err) + } + + logger.Success("Backup created at: %s", ctx.BackupPath) + return nil +} + +// archiveBackup creates a ZIP archive of the backup +func (ctx *BuildContext) archiveBackup(logger *Logger) error { + logger.Info("Archiving backup...") + + if _, err := os.Stat(ctx.BackupPath); os.IsNotExist(err) { + logger.Info("No backup created. 
Skipping archive.") + return nil + } + + backupRoot := filepath.Dir(ctx.BackupPath) + archivePath := filepath.Join(backupRoot, ctx.Timestamp+".zip") + + if err := createZipArchive(ctx.BackupPath, archivePath); err != nil { + return fmt.Errorf("failed to create archive: %w", err) + } + + // Remove the uncompressed backup directory + if err := os.RemoveAll(ctx.BackupPath); err != nil { + logger.Warn("Failed to remove backup directory: %v", err) + } + + logger.Success("Backup archived: %s", archivePath) + return nil +} + +// createZipArchive creates a ZIP file from a directory +func createZipArchive(source, target string) error { + zipFile, err := os.Create(target) + if err != nil { + return err + } + defer zipFile.Close() + + archive := zip.NewWriter(zipFile) + defer archive.Close() + + info, err := os.Stat(source) + if err != nil { + return nil + } + + var baseDir string + if info.IsDir() { + baseDir = filepath.Base(source) + } + + filepath.Walk(source, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + header, err := zip.FileInfoHeader(info) + if err != nil { + return err + } + + if baseDir != "" { + header.Name = filepath.Join(baseDir, strings.TrimPrefix(path, source)) + } + + if info.IsDir() { + header.Name += "/" + } else { + header.Method = zip.Deflate + } + + writer, err := archive.CreateHeader(header) + if err != nil { + return err + } + + if info.IsDir() { + return nil + } + + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + _, err = io.Copy(writer, file) + return err + }) + + return nil +} + +// moveFile moves a file from src to dst +func moveFile(src, dst string) error { + if _, err := os.Stat(src); os.IsNotExist(err) { + return nil + } + + data, err := os.ReadFile(src) + if err != nil { + return err + } + + if err := os.WriteFile(dst, data, 0644); err != nil { + return err + } + + return os.Remove(src) +} + +// moveDir moves a directory from src to dst +func moveDir(src, dst 
string) error { + if _, err := os.Stat(src); os.IsNotExist(err) { + return nil + } + + return copyDir(src, dst) +} + +// copyDir recursively copies a directory +func copyDir(src, dst string) error { + srcInfo, err := os.Stat(src) + if err != nil { + return err + } + + if err := os.MkdirAll(dst, srcInfo.Mode()); err != nil { + return err + } + + entries, err := os.ReadDir(src) + if err != nil { + return err + } + + for _, entry := range entries { + srcPath := filepath.Join(src, entry.Name()) + dstPath := filepath.Join(dst, entry.Name()) + + if entry.IsDir() { + if err := copyDir(srcPath, dstPath); err != nil { + return err + } + } else { + data, err := os.ReadFile(srcPath) + if err != nil { + return err + } + + if err := os.WriteFile(dstPath, data, 0644); err != nil { + return err + } + } + } + + return nil +} + +// buildApplication builds the Go application +func (ctx *BuildContext) buildApplication(logger *Logger) error { + logger.Info("Building Go binary...") + + // Generate version info if available + if ctx.Config.UseGoversioninfo { + if err := exec.Command("goversioninfo", "-platform-specific").Run(); err != nil { + logger.Warn("Failed to generate version info: %v", err) + } + } else { + logger.Info("Skipping goversioninfo (not available)") + } + + // Build command + var cmd *exec.Cmd + outputPath := filepath.Join(ctx.DistPath, APP_NAME) + + if runtime.GOOS == "windows" { + outputPath += ".exe" + } + + if ctx.Config.UseGarble { + cmd = exec.Command("garble", "build", "-ldflags=-s -w", "-o", outputPath, MAIN_PATH) + } else { + cmd = exec.Command("go", "build", "-ldflags=-s -w", "-o", outputPath, MAIN_PATH) + } + + // Set environment for garble + if ctx.Config.UseGarble { + cmd.Env = append(os.Environ(), "GOOS="+runtime.GOOS, "GOARCH="+runtime.GOARCH) + } + + // Run build + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("build failed with exit code: %w", err) + } + + logger.Success("Build successful: 
%s", outputPath) + return nil +} + +// copyAssets copies required assets to the dist directory +func (ctx *BuildContext) copyAssets(logger *Logger) error { + logger.Info("Copying assets...") + + assets := []struct { + src string + dst string + }{ + {WEB_DIR, filepath.Join(ctx.DistPath, WEB_DIR)}, + {CONFIG_YML, filepath.Join(ctx.DistPath, CONFIG_YML)}, + {BANNER_TXT, filepath.Join(ctx.DistPath, BANNER_TXT)}, + {DB_FILE, filepath.Join(ctx.DistPath, DB_FILE)}, + } + + for _, asset := range assets { + if _, err := os.Stat(asset.src); os.IsNotExist(err) { + continue + } + + if strings.HasSuffix(asset.src, "/") || isDir(asset.src) { + if err := copyDir(asset.src, asset.dst); err != nil { + logger.Warn("Failed to copy %s: %v", asset.src, err) + } else { + logger.Success("Copying %s", asset.src) + } + } else { + if err := copyFile(asset.src, asset.dst); err != nil { + logger.Warn("Failed to copy %s: %v", asset.src, err) + } else { + logger.Success("Copying %s", asset.src) + } + } + } + + return nil +} + +// copyFile copies a single file +func copyFile(src, dst string) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + defer srcFile.Close() + + dstFile, err := os.Create(dst) + if err != nil { + return err + } + defer dstFile.Close() + + _, err = io.Copy(dstFile, srcFile) + return err +} + +// isDir checks if a path is a directory +func isDir(path string) bool { + info, err := os.Stat(path) + if err != nil { + return false + } + return info.IsDir() +} + +// printBanner prints the application banner +func printBanner() { + fmt.Println("") + fmt.Println(" " + P_PURPLE + " /\\ " + RESET) + fmt.Println(" " + P_PURPLE + "( )" + RESET + " " + B_PURPLE + APP_NAME + " Builder" + RESET + " " + GRAY + "by" + RESET + " " + B_WHITE + "diameter-tscd" + RESET) + fmt.Println(" " + P_PURPLE + " \\/ " + RESET) + fmt.Println(GRAY + "----------------------------------------------------------------------" + RESET) +} + +// printSuccess prints the success message +func 
printSuccess(distPath string) { + fmt.Println("") + fmt.Println(GRAY + "======================================================================" + RESET) + fmt.Println(" " + B_PURPLE + "SUCCESS!" + RESET + " " + P_GREEN + "Build ready at:" + RESET + " " + UNDERLINE + B_WHITE + distPath + RESET) + fmt.Println(GRAY + "======================================================================" + RESET) +} + +// setupSignalHandler sets up graceful shutdown on interrupt +func setupSignalHandler(cancel context.CancelFunc) { + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + + go func() { + <-sigChan + fmt.Println("\nReceived interrupt signal. Exiting...") + cancel() + os.Exit(1) + }() +} + +// main function +func main() { + // Parse command line flags + var ( + timeoutSeconds = flag.Int("timeout", 10, "Timeout for user prompts in seconds") + verbose = flag.Bool("verbose", false, "Enable verbose logging") + ) + flag.Parse() + + // Initialize logger + logger := NewLogger(*verbose) + + // Print banner + printBanner() + + // Get project directory + projectDir, err := os.Getwd() + if err != nil { + logger.Error("Failed to get current directory: %v", err) + os.Exit(1) + } + + // Create build context + ctx := &BuildContext{ + Config: BuildConfig{ + Timeout: time.Duration(*timeoutSeconds) * time.Second, + Verbose: *verbose, + }, + Timestamp: time.Now().Format("20060102_150405"), + DistPath: filepath.Join(projectDir, DIST_DIR), + ProjectDir: projectDir, + } + + // Create context with cancellation for graceful shutdown + _, cancel := context.WithCancel(context.Background()) + setupSignalHandler(cancel) + + // Ensure dist directory exists + if err := os.MkdirAll(ctx.DistPath, 0755); err != nil { + logger.Error("Failed to create dist directory: %v", err) + os.Exit(1) + } + + // Execute build steps + steps := []struct { + name string + fn func(*Logger) error + }{ + {"Checking required tools", ctx.checkRequiredTools}, + {"Asking user about 
garble", ctx.askUserAboutGarble}, + {"Stopping running process", ctx.stopRunningProcess}, + {"Creating backup", ctx.createBackup}, + {"Archiving backup", ctx.archiveBackup}, + {"Building application", ctx.buildApplication}, + {"Copying assets", ctx.copyAssets}, + } + + for i, step := range steps { + stepNum := fmt.Sprintf("%d/%d", i+1, len(steps)) + fmt.Printf("%s[%s]%s %s%s%s\n", B_PURPLE, stepNum, RESET, P_CYAN, step.name, RESET) + + if err := step.fn(logger); err != nil { + logger.Error("Step failed: %v", err) + os.Exit(1) + } + } + + // Print success message + printSuccess(ctx.DistPath) +} diff --git a/scripts/build.sh b/scripts/build.sh deleted file mode 100755 index 7c58fef..0000000 --- a/scripts/build.sh +++ /dev/null @@ -1,187 +0,0 @@ -#!/bin/bash - -# Clear the terminal screen -clear - -# Configuration -DIST_DIR="dist" -APP_NAME="stackyard" -MAIN_PATH="./cmd/app/main.go" - -# Define ANSI Colors -RESET="\033[0m" -BOLD="\033[1m" -DIM="\033[2m" -UNDERLINE="\033[4m" - -# Fancy Pastel Palette (main color: #8daea5) -P_PURPLE="\033[38;5;108m" -B_PURPLE="\033[1;38;5;108m" -P_CYAN="\033[38;5;117m" -B_CYAN="\033[1;38;5;117m" -P_GREEN="\033[38;5;108m" -B_GREEN="\033[1;38;5;108m" -P_YELLOW="\033[93m" -B_YELLOW="\033[1;93m" -P_RED="\033[91m" -B_RED="\033[1;91m" -GRAY="\033[38;5;242m" -WHITE="\033[97m" -B_WHITE="\033[1;97m" - -# Robustly switch to project root (one level up from this script) -cd "$(dirname "$0")/.." || exit 1 - -echo "" -echo -e " ${P_PURPLE} /\ ${RESET}" -echo -e " ${P_PURPLE}( )${RESET} ${B_PURPLE}${APP_NAME} Builder${RESET} ${GRAY}by${RESET} ${B_WHITE}diameter-tscd${RESET}" -echo -e " ${P_PURPLE} \/ ${RESET}" -echo -e "${GRAY}----------------------------------------------------------------------${RESET}" - -# 0. Check required tools -echo -e "${B_PURPLE}[1/6]${RESET} ${P_CYAN}Checking required tools...${RESET}" - -# Check goversioninfo -if ! command -v goversioninfo &> /dev/null; then - echo -e " ${B_YELLOW}! goversioninfo not found. 
Skipping version info generation.${RESET}" - USE_GOVERSIONINFO=false -else - echo -e " ${B_GREEN}+ goversioninfo found${RESET}" - USE_GOVERSIONINFO=true -fi - -# Check garble -if ! command -v garble &> /dev/null; then - echo -e " ${B_YELLOW}! garble not found. Installing...${RESET}" - go install mvdan.cc/garble@latest - if [ $? -ne 0 ]; then - echo -e " ${B_RED}x Failed to install garble${RESET}" - exit 1 - fi - echo -e " ${B_GREEN}+ garble installed${RESET}" -else - echo -e " ${B_GREEN}+ garble found${RESET}" -fi - -# Ask user about garble build -echo -e "${B_YELLOW}Use garble build for obfuscation? (y/N, timeout 10s): ${RESET}" -read -t 10 -n 1 -r choice -echo "" -if [[ $choice =~ ^[Yy]$ ]]; then - USE_GARBLE=true - echo -e "${B_GREEN}+ Using garble build${RESET}" -else - USE_GARBLE=false - echo -e "${B_CYAN}+ Using regular go build${RESET}" -fi - -# 1. Generate Timestamp -TIMESTAMP=$(date +"%Y%m%d_%H%M%S") -BACKUP_ROOT="${DIST_DIR}/backups" -BACKUP_PATH="${BACKUP_ROOT}/${TIMESTAMP}" - -# 2. Stop running process -echo -e "${B_PURPLE}[2/6]${RESET} ${P_CYAN}Checking for running process...${RESET}" -if pgrep -x "$APP_NAME" >/dev/null; then - echo -e " ${B_YELLOW}! App is running. Stopping...${RESET}" - pkill -x "$APP_NAME" - sleep 1 -else - echo -e " ${B_GREEN}+ App is not running.${RESET}" -fi - -# 3. 
Backup Old Files -echo -e "${B_PURPLE}[3/6]${RESET} ${P_CYAN}Backing up old files...${RESET}" -if [ -d "$DIST_DIR" ]; then - mkdir -p "$BACKUP_PATH" - - # Move old binary (check for both plain and .exe just in case) - if [ -f "$DIST_DIR/$APP_NAME" ]; then - echo -e " ${GRAY}- Moving old binary...${RESET}" - mv "$DIST_DIR/$APP_NAME" "$BACKUP_PATH/" - elif [ -f "$DIST_DIR/$APP_NAME.exe" ]; then - echo -e " ${GRAY}- Moving old binary (.exe)...${RESET}" - mv "$DIST_DIR/$APP_NAME.exe" "$BACKUP_PATH/" - fi - - if [ -f "$DIST_DIR/config.yaml" ]; then - mv "$DIST_DIR/config.yaml" "$BACKUP_PATH/" - fi - if [ -f "$DIST_DIR/banner.txt" ]; then - mv "$DIST_DIR/banner.txt" "$BACKUP_PATH/" - fi - if [ -f "$DIST_DIR/monitoring_users.db" ]; then - echo -e " ${GRAY}- Backing up database...${RESET}" - mv "$DIST_DIR/monitoring_users.db" "$BACKUP_PATH/" - fi - if [ -d "$DIST_DIR/web" ]; then - echo -e " ${GRAY}- Moving old web assets...${RESET}" - mv "$DIST_DIR/web" "$BACKUP_PATH/" - fi - - echo -e " ${B_GREEN}+ Backup created at:${RESET} ${B_WHITE}${BACKUP_PATH}${RESET}" -else - echo -e " ${GRAY}+ No existing dist directory. Skipping backup.${RESET}" - mkdir -p "$DIST_DIR" -fi - -# 6. Archive Backup -echo -e "${B_PURPLE}[4/6]${RESET} ${P_CYAN}Archiving backup...${RESET}" -if [ -d "$BACKUP_PATH" ]; then - cd "$BACKUP_ROOT" || exit 1 - zip -r "${TIMESTAMP}.zip" "$TIMESTAMP" - rm -rf "$TIMESTAMP" - cd - >/dev/null || exit 1 # Return to previous directory - echo -e " ${B_GREEN}+ Backup archived:${RESET} ${B_WHITE}${BACKUP_ROOT}/${TIMESTAMP}.zip${RESET}" -else - echo -e " ${GRAY}+ No backup created. Skipping archive.${RESET}" -fi - -# Ensure dist directory -mkdir -p "$DIST_DIR" - -# 4. 
Build -echo -e "${B_PURPLE}[5/6]${RESET} ${P_CYAN}Building Go binary...${RESET}" -if [ "$USE_GOVERSIONINFO" = true ]; then - goversioninfo -platform-specific -else - echo -e " ${GRAY}+ Skipping goversioninfo (not available)${RESET}" -fi -if [ "$USE_GARBLE" = true ]; then - garble build -ldflags="-s -w" -o "$DIST_DIR/$APP_NAME" "$MAIN_PATH" -else - go build -ldflags="-s -w" -o "$DIST_DIR/$APP_NAME" "$MAIN_PATH" -fi -if [ $? -ne 0 ]; then - echo -e " ${B_RED}x Build FAILED! Exit code: $?${RESET}" - exit $? -fi -echo -e " ${B_GREEN}+ Build successful:${RESET} ${B_WHITE}${DIST_DIR}/${APP_NAME}${RESET}" - -# 5. Copy Assets -echo -e "${B_PURPLE}[6/6]${RESET} ${P_CYAN}Copying assets...${RESET}" - -if [ -d "web" ]; then - echo -e " ${B_GREEN}+ Copying web folder...${RESET}" - cp -r "web" "$DIST_DIR/web" -fi - -if [ -f "config.yaml" ]; then - echo -e " ${B_GREEN}+ Copying config.yaml...${RESET}" - cp "config.yaml" "$DIST_DIR/" -fi - -if [ -f "banner.txt" ]; then - echo -e " ${B_GREEN}+ Copying banner.txt...${RESET}" - cp "banner.txt" "$DIST_DIR/" -fi - -if [ -f "monitoring_users.db" ]; then - echo -e " ${B_GREEN}+ Copying monitoring_users.db...${RESET}" - cp "monitoring_users.db" "$DIST_DIR/" -fi - -echo "" -echo -e "${GRAY}======================================================================${RESET}" -echo -e " ${B_PURPLE}SUCCESS!${RESET} ${P_GREEN}Build ready at:${RESET} ${UNDERLINE}${B_WHITE}${DIST_DIR}/${RESET}" -echo -e "${GRAY}======================================================================${RESET}" From 486a08e67945118f6a5462a8b5ff37b608d4a871 Mon Sep 17 00:00:00 2001 From: "Gab." 
Date: Tue, 17 Mar 2026 06:44:58 +0700 Subject: [PATCH 05/18] refactor: migrate Docker build scripts to Go-based cross-platform implementation - Replaced shell/batch scripts with Go-based wrapper for better cross-platform compatibility - Added comprehensive Docker containerization with multi-stage builds - Updated project metadata to reflect Stackyard branding - Enhanced build process with distroless production images for security and size optimization --- docs_wiki/blueprint/blueprint.txt | 6 +- go.mod | 2 +- scripts/{ => build}/build.go | 0 scripts/change_package.bat | 82 ----- scripts/change_package.sh | 57 ---- scripts/docker/docker_build.go | 548 ++++++++++++++++++++++++++++++ scripts/docker_build.bat | 335 ------------------ scripts/docker_build.sh | 250 -------------- scripts/onboarding.bat | 344 ------------------- scripts/onboarding.sh | 319 ----------------- versioninfo.json | 2 +- 11 files changed, 555 insertions(+), 1390 deletions(-) rename scripts/{ => build}/build.go (100%) delete mode 100644 scripts/change_package.bat delete mode 100644 scripts/change_package.sh create mode 100644 scripts/docker/docker_build.go delete mode 100644 scripts/docker_build.bat delete mode 100644 scripts/docker_build.sh delete mode 100644 scripts/onboarding.bat delete mode 100644 scripts/onboarding.sh diff --git a/docs_wiki/blueprint/blueprint.txt b/docs_wiki/blueprint/blueprint.txt index a6b5484..05169ef 100644 --- a/docs_wiki/blueprint/blueprint.txt +++ b/docs_wiki/blueprint/blueprint.txt @@ -904,8 +904,9 @@ The project includes comprehensive Docker containerization with multi-stage buil - **Ultra-Production Stage**: Distroless image for maximum security and minimal size **Build Scripts:** -- **Unix/Linux/macOS**: `scripts/docker_build.sh` +- **Unix/Linux/macOS**: `scripts/docker_build.sh` (Go-based wrapper) - **Windows**: `scripts/docker_build.bat` +- **Go Implementation**: `scripts/docker/main.go` (cross-platform core) **Usage:** ```bash @@ -917,6 +918,9 @@ The project 
includes comprehensive Docker containerization with multi-stage buil # Build specific target (prod for production only) ./scripts/docker_build.sh "my-app" "myregistry/myapp" "prod" + +# Direct Go execution +go run scripts/docker/main.go "my-app" "myregistry/myapp" "prod" ``` **Container Configuration:** diff --git a/go.mod b/go.mod index 53aca3a..66043c8 100644 --- a/go.mod +++ b/go.mod @@ -17,6 +17,7 @@ require ( github.com/robfig/cron/v3 v3.0.1 github.com/rs/zerolog v1.34.0 github.com/shirou/gopsutil/v3 v3.24.5 + github.com/spf13/afero v1.15.0 github.com/spf13/viper v1.21.0 go.mongodb.org/mongo-driver v1.17.6 golang.org/x/crypto v0.46.0 @@ -90,7 +91,6 @@ require ( github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect - github.com/spf13/afero v1.15.0 // indirect github.com/spf13/cast v1.10.0 // indirect github.com/spf13/pflag v1.0.10 // indirect github.com/subosito/gotenv v1.6.0 // indirect diff --git a/scripts/build.go b/scripts/build/build.go similarity index 100% rename from scripts/build.go rename to scripts/build/build.go diff --git a/scripts/change_package.bat b/scripts/change_package.bat deleted file mode 100644 index 007546d..0000000 --- a/scripts/change_package.bat +++ /dev/null @@ -1,82 +0,0 @@ -@echo off -setlocal enabledelayedexpansion - -:: Define ANSI Colors -set "RESET=[0m" -set "BOLD=[1m" -set "DIM=[2m" -set "UNDERLINE=[4m" - -:: Fancy Pastel Palette (main color: #8daea5) -set "P_PURPLE=[38;5;108m" -set "B_PURPLE=[1;38;5;108m" -set "P_CYAN=[38;5;117m" -set "B_CYAN=[1;38;5;117m" -set "P_GREEN=[38;5;108m" -set "B_GREEN=[1;38;5;108m" -set "P_YELLOW=[93m" -set "B_YELLOW=[1;93m" -set "P_RED=[91m" -set "B_RED=[1;91m" -set "GRAY=[38;5;242m" -set "WHITE=[97m" -set "B_WHITE=[1;97m" - -:: Script to change the Go module package name -echo. 
-echo %P_PURPLE% /\ %RESET% -echo %P_PURPLE%( )%RESET% %B_PURPLE%Package Name Changer%RESET% %GRAY%by%RESET% %B_WHITE%diameter-tscd%RESET% -echo %P_PURPLE% \/ %RESET% -echo %GRAY%----------------------------------------------------------------------%RESET% - -REM Script to change the Go module package name -REM Usage: change_package.bat - -if "%~1"=="" ( - echo Usage: %0 ^ - echo Example: %0 github.com/user/new-project - exit /b 1 -) - -set NEW_MODULE=%~1 - -REM Get current module name from go.mod -for /f "tokens=2" %%i in ('findstr "^module " go.mod') do set CURRENT_MODULE=%%i - -if "%CURRENT_MODULE%"=="" ( - echo Error: Could not find module declaration in go.mod - exit /b 1 -) - -echo Changing module from '%CURRENT_MODULE%' to '%NEW_MODULE%' - -REM Change module name in go.mod -set "search=module %CURRENT_MODULE%" -set "replace=module %NEW_MODULE%" -set "tempfile=%temp%\go_mod_temp.txt" -(for /f "delims=" %%i in (go.mod) do ( - set "line=%%i" - set "line=!line:%search%=%replace%!" - echo !line! -)) > "%tempfile%" -move "%tempfile%" go.mod >nul - -if %ERRORLEVEL% neq 0 ( - echo Error: Failed to update go.mod - exit /b 1 -) - -REM Update all import paths in .go files -for /r %%f in (*.go) do ( - set "tempfile=%%f.tmp" - (for /f "delims=" %%i in ("%%f") do ( - set "line=%%i" - set "line=!line:%CURRENT_MODULE%=%NEW_MODULE%!" - echo !line! - )) > "!tempfile!" - move "!tempfile!" "%%f" >nul -) - -echo Successfully changed module name and updated imports -echo Note: No backup files created in this version. Make sure to commit changes before running. 
-endlocal diff --git a/scripts/change_package.sh b/scripts/change_package.sh deleted file mode 100644 index f9c726e..0000000 --- a/scripts/change_package.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash - -# Define ANSI Colors -RESET="\033[0m" -BOLD="\033[1m" -DIM="\033[2m" -UNDERLINE="\033[4m" - -# Fancy Pastel Palette (main color: #8daea5) -P_PURPLE="\033[38;5;108m" -B_PURPLE="\033[1;38;5;108m" -P_CYAN="\033[38;5;117m" -B_CYAN="\033[1;38;5;117m" -P_GREEN="\033[38;5;108m" -B_GREEN="\033[1;38;5;108m" -P_YELLOW="\033[93m" -B_YELLOW="\033[1;93m" -P_RED="\033[91m" -B_RED="\033[1;91m" -GRAY="\033[38;5;242m" -WHITE="\033[97m" -B_WHITE="\033[1;97m" - -# Script to change the Go module package name -# Usage: ./change_package.sh - -if [ $# -ne 1 ]; then - echo "Usage: $0 " - echo "Example: $0 github.com/user/new-project" - exit 1 -fi - -NEW_MODULE=$1 - -# Get current module name from go.mod -CURRENT_MODULE=$(grep '^module ' go.mod | awk '{print $2}') - -if [ -z "$CURRENT_MODULE" ]; then - echo "Error: Could not find module declaration in go.mod" - exit 1 -fi - -echo "Changing module from '$CURRENT_MODULE' to '$NEW_MODULE'" - -# Change module name in go.mod -sed -i.bak "s|^module $CURRENT_MODULE|module $NEW_MODULE|" go.mod - -if [ $? -ne 0 ]; then - echo "Error: Failed to update go.mod" - exit 1 -fi - -# Update all import paths in .go files -find . -name "*.go" -type f -exec sed -i.bak "s|$CURRENT_MODULE|$NEW_MODULE|g" {} + - -echo "Successfully changed module name and updated imports" -echo "Note: Backup files (*.bak) have been created. You can remove them if everything looks good." 
diff --git a/scripts/docker/docker_build.go b/scripts/docker/docker_build.go new file mode 100644 index 0000000..e32803c --- /dev/null +++ b/scripts/docker/docker_build.go @@ -0,0 +1,548 @@ +package main + +import ( + "context" + "flag" + "fmt" + "os" + "os/exec" + "os/signal" + "path/filepath" + "strings" + "syscall" +) + +// Configuration constants +const ( + DEFAULT_APP_NAME = "stackyard" + DEFAULT_IMAGE_NAME = "myapp" + DEFAULT_TARGET = "all" +) + +// ANSI Colors +const ( + RESET = "\033[0m" + BOLD = "\033[1m" + DIM = "\033[2m" + UNDERLINE = "\033[4m" + + // Pastel Palette (main color: #8daea5) + P_PURPLE = "\033[38;5;108m" + B_PURPLE = "\033[1;38;5;108m" + P_CYAN = "\033[38;5;117m" + B_CYAN = "\033[1;38;5;117m" + P_GREEN = "\033[38;5;108m" + B_GREEN = "\033[1;38;5;108m" + P_YELLOW = "\033[93m" + B_YELLOW = "\033[1;93m" + P_RED = "\033[91m" + B_RED = "\033[1;91m" + GRAY = "\033[38;5;242m" + WHITE = "\033[97m" + B_WHITE = "\033[1;97m" +) + +// Docker build configuration +type DockerBuildConfig struct { + AppName string + ImageName string + Target string + Verbose bool +} + +// Docker build context +type DockerBuildContext struct { + Config DockerBuildConfig + ProjectDir string + Step int + TotalSteps int +} + +// Docker logger for structured output +type DockerLogger struct { + verbose bool +} + +func (l *DockerLogger) Info(msg string, args ...interface{}) { + fmt.Printf("%s[INFO]%s %s\n", B_CYAN, RESET, fmt.Sprintf(msg, args...)) +} + +func (l *DockerLogger) Warn(msg string, args ...interface{}) { + fmt.Printf("%s[WARN]%s %s\n", B_YELLOW, RESET, fmt.Sprintf(msg, args...)) +} + +func (l *DockerLogger) Error(msg string, args ...interface{}) { + fmt.Printf("%s[ERROR]%s %s\n", B_RED, RESET, fmt.Sprintf(msg, args...)) +} + +func (l *DockerLogger) Debug(msg string, args ...interface{}) { + if l.verbose { + fmt.Printf("%s[DEBUG]%s %s\n", GRAY, RESET, fmt.Sprintf(msg, args...)) + } +} + +func (l *DockerLogger) Success(msg string, args ...interface{}) { + 
fmt.Printf("%s[SUCCESS]%s %s\n", B_GREEN, RESET, fmt.Sprintf(msg, args...)) +} + +func (l *DockerLogger) Step(stepNum, totalSteps int, msg string, args ...interface{}) { + fmt.Printf("%s[%d/%d]%s %s%s%s\n", B_PURPLE, stepNum, totalSteps, RESET, P_CYAN, fmt.Sprintf(msg, args...), RESET) +} + +// NewDockerLogger creates a new logger +func NewDockerLogger(verbose bool) *DockerLogger { + return &DockerLogger{verbose: verbose} +} + +// printDockerBanner prints the Docker build banner +func printDockerBanner(appName, imageName, target string) { + fmt.Println("") + fmt.Println(" " + P_PURPLE + " /\\ " + RESET) + fmt.Println(" " + P_PURPLE + "( )" + RESET + " " + B_PURPLE + "Docker Builder" + RESET + " " + GRAY + "by" + RESET + " " + B_WHITE + "diameter-tscd" + RESET) + fmt.Println(" " + P_PURPLE + " \\/ " + RESET) + fmt.Println(GRAY + "----------------------------------------------------------------------" + RESET) + fmt.Println(" " + B_CYAN + "App Name:" + RESET + " " + B_WHITE + appName + RESET) + fmt.Println(" " + B_CYAN + "Image Name:" + RESET + " " + B_WHITE + imageName + RESET) + fmt.Println(" " + B_CYAN + "Target:" + RESET + " " + B_WHITE + target + RESET) + fmt.Println(GRAY + "----------------------------------------------------------------------" + RESET) +} + +// printDockerSuccess prints the Docker build success message +func printDockerSuccess(logger *DockerLogger, imageName, target string) { + fmt.Println("") + fmt.Println(GRAY + "======================================================================" + RESET) + fmt.Println(" " + B_PURPLE + "SUCCESS!" 
+ RESET + " " + P_GREEN + "Docker images ready:" + RESET) + + // Show only the images that were actually built + if target == "test" || target == "all" || target == "ultra-test" || target == "ultra-all" { + fmt.Println(" " + B_WHITE + imageName + ":test" + RESET + " " + GRAY + "(testing)" + RESET) + } + if target == "dev" || target == "all" || target == "ultra-dev" || target == "ultra-all" { + fmt.Println(" " + B_WHITE + imageName + ":dev" + RESET + " " + GRAY + "(development)" + RESET) + } + if target == "prod" || target == "all" { + fmt.Println(" " + B_WHITE + imageName + ":latest" + RESET + " " + GRAY + "(production)" + RESET) + } + if target == "prod-slim" { + fmt.Println(" " + B_WHITE + imageName + ":slim" + RESET + " " + GRAY + "(slim-production)" + RESET) + } + if target == "prod-minimal" { + fmt.Println(" " + B_WHITE + imageName + ":minimal" + RESET + " " + GRAY + "(minimal-production)" + RESET) + } + if target == "ultra-prod" || target == "ultra-all" { + fmt.Println(" " + B_WHITE + imageName + ":ultra" + RESET + " " + GRAY + "(ultra-production)" + RESET) + } + + fmt.Println(GRAY + "======================================================================" + RESET) + fmt.Println("") + fmt.Println(B_CYAN + "Usage examples:" + RESET) + + // Show relevant usage examples based on what was built + if target == "dev" || target == "all" { + fmt.Println(" " + GRAY + "# Run development container" + RESET) + fmt.Println(" " + B_WHITE + "docker run -p 8080:8080 -p 9090:9090 " + imageName + ":dev" + RESET) + fmt.Println("") + } + + if target == "prod" || target == "all" { + fmt.Println(" " + GRAY + "# Run production container" + RESET) + fmt.Println(" " + B_WHITE + "docker run -p 8080:8080 -p 9090:9090 " + imageName + ":latest" + RESET) + fmt.Println("") + } + + if target == "test" || target == "all" { + fmt.Println(" " + GRAY + "# Run tests" + RESET) + fmt.Println(" " + B_WHITE + "docker run --rm " + imageName + ":test" + RESET) + } +} + +// validateTarget validates the 
build target +func validateTarget(target string) error { + validTargets := []string{ + "all", "test", "dev", "prod", "prod-slim", "prod-minimal", + "ultra-prod", "ultra-all", "ultra-dev", "ultra-test", + } + + for _, valid := range validTargets { + if target == valid { + return nil + } + } + + return fmt.Errorf("invalid target: %s. Valid targets: %s", target, strings.Join(validTargets, ", ")) +} + +// calculateTotalSteps calculates the total number of steps based on target +func calculateTotalSteps(target string) int { + switch target { + case "all", "ultra-all": + return 4 + case "test", "ultra-test": + return 2 + case "dev", "ultra-dev", "prod", "ultra-prod": + return 1 + default: + return 1 + } +} + +// checkDockerfile checks if Dockerfile exists +func (ctx *DockerBuildContext) checkDockerfile(logger *DockerLogger) error { + dockerfilePath := filepath.Join(ctx.ProjectDir, "Dockerfile") + if _, err := os.Stat(dockerfilePath); os.IsNotExist(err) { + logger.Error("Dockerfile not found in current directory") + return err + } + return nil +} + +// checkDocker checks if Docker is available +func (ctx *DockerBuildContext) checkDocker(logger *DockerLogger) error { + cmd := exec.Command("docker", "version", "--format", "{{.Server.Os}}") + if err := cmd.Run(); err != nil { + logger.Error("Docker is not installed or not in PATH") + return err + } + return nil +} + +// buildTestStage builds the test stage +func (ctx *DockerBuildContext) buildTestStage(logger *DockerLogger, imageName string) error { + ctx.Step++ + logger.Step(ctx.Step, ctx.TotalSteps, "Building test image...") + + cmd := exec.Command("docker", "build", "--target", "test", "-t", imageName+":test", ".") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + logger.Error("Test build failed") + return err + } + + logger.Success("Test image built: %s", imageName+":test") + return nil +} + +// runTests runs the tests in the test container +func (ctx *DockerBuildContext) 
runTests(logger *DockerLogger, imageName string) error { + ctx.Step++ + logger.Step(ctx.Step, ctx.TotalSteps, "Running tests...") + + cmd := exec.Command("docker", "run", "--rm", imageName+":test") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + logger.Error("Tests failed") + return err + } + + logger.Success("Tests passed") + return nil +} + +// buildDevStage builds the development stage +func (ctx *DockerBuildContext) buildDevStage(logger *DockerLogger, imageName string) error { + ctx.Step++ + logger.Step(ctx.Step, ctx.TotalSteps, "Building development image...") + + cmd := exec.Command("docker", "build", "--target", "dev", "-t", imageName+":dev", ".") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + logger.Error("Development build failed") + return err + } + + logger.Success("Development image built: %s", imageName+":dev") + return nil +} + +// buildUltraDevStage builds the ultra development stage +func (ctx *DockerBuildContext) buildUltraDevStage(logger *DockerLogger, imageName string) error { + ctx.Step++ + logger.Step(ctx.Step, ctx.TotalSteps, "Building ultra development image...") + + cmd := exec.Command("docker", "build", "--target", "ultra-dev", "-t", imageName+":dev", ".") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + logger.Error("Ultra development build failed") + return err + } + + logger.Success("Ultra development image built: %s", imageName+":dev") + return nil +} + +// buildProdStage builds the production stage +func (ctx *DockerBuildContext) buildProdStage(logger *DockerLogger, imageName string) error { + ctx.Step++ + logger.Step(ctx.Step, ctx.TotalSteps, "Building production image...") + + cmd := exec.Command("docker", "build", "--target", "prod", "-t", imageName+":latest", ".") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + logger.Error("Production build failed") + return err + } 
+ + logger.Success("Production image built: %s", imageName+":latest") + return nil +} + +// buildSlimProdStage builds the slim production stage +func (ctx *DockerBuildContext) buildSlimProdStage(logger *DockerLogger, imageName string) error { + ctx.Step++ + logger.Step(ctx.Step, ctx.TotalSteps, "Building slim production image...") + + cmd := exec.Command("docker", "build", "--target", "prod-slim", "-t", imageName+":slim", ".") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + logger.Error("Slim production build failed") + return err + } + + logger.Success("Slim production image built: %s", imageName+":slim") + return nil +} + +// buildMinimalProdStage builds the minimal production stage +func (ctx *DockerBuildContext) buildMinimalProdStage(logger *DockerLogger, imageName string) error { + ctx.Step++ + logger.Step(ctx.Step, ctx.TotalSteps, "Building minimal production image...") + + cmd := exec.Command("docker", "build", "--target", "prod-minimal", "-t", imageName+":minimal", ".") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + logger.Error("Minimal production build failed") + return err + } + + logger.Success("Minimal production image built: %s", imageName+":minimal") + return nil +} + +// buildUltraProdStage builds the ultra production stage +func (ctx *DockerBuildContext) buildUltraProdStage(logger *DockerLogger, imageName string) error { + ctx.Step++ + logger.Step(ctx.Step, ctx.TotalSteps, "Building ultra production image...") + + cmd := exec.Command("docker", "build", "--target", "ultra-prod", "-t", imageName+":ultra", ".") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + logger.Error("Ultra production build failed") + return err + } + + logger.Success("Ultra production image built: %s", imageName+":ultra") + return nil +} + +// cleanupDanglingImages cleans up intermediate images +func (ctx *DockerBuildContext) cleanupDanglingImages(logger 
*DockerLogger) error { + logger.Step(ctx.Step, ctx.TotalSteps, "Cleaning up dangling images...") + + cmd := exec.Command("docker", "image", "prune", "-f") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + logger.Debug("Cleanup skipped: %v", err) + return nil + } + + logger.Success("Cleanup completed") + return nil +} + +// setupSignalHandler sets up graceful shutdown on interrupt +func setupSignalHandler(cancel context.CancelFunc) { + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + + go func() { + <-sigChan + fmt.Println("\nReceived interrupt signal. Exiting...") + cancel() + os.Exit(1) + }() +} + +// main function +func main() { + // Parse command line flags + var verbose = flag.Bool("verbose", false, "Enable verbose logging") + flag.Parse() + + // Parse arguments + args := flag.Args() + appName := DEFAULT_APP_NAME + imageName := DEFAULT_IMAGE_NAME + target := DEFAULT_TARGET + + if len(args) > 0 { + appName = args[0] + } + if len(args) > 1 { + imageName = args[1] + } + if len(args) > 2 { + target = args[2] + } + + // Initialize logger + logger := NewDockerLogger(*verbose) + + // Clear the terminal screen + fmt.Print("\033[H\033[2J") + + // Get project directory + projectDir, err := os.Getwd() + if err != nil { + logger.Error("Failed to get current directory: %v", err) + os.Exit(1) + } + + // Create build context + ctx := &DockerBuildContext{ + Config: DockerBuildConfig{ + AppName: appName, + ImageName: imageName, + Target: target, + Verbose: *verbose, + }, + ProjectDir: projectDir, + Step: 0, + } + + // Print banner + printDockerBanner(appName, imageName, target) + + // Create context with cancellation for graceful shutdown + _, cancel := context.WithCancel(context.Background()) + setupSignalHandler(cancel) + + // Validate target + if err := validateTarget(target); err != nil { + logger.Error("%v", err) + os.Exit(1) + } + + // Set total steps + ctx.TotalSteps = 
calculateTotalSteps(target) + + // Execute build steps + steps := []struct { + name string + fn func(*DockerLogger, string) error + }{ + // Test stage (always needed for test target or all) + {"Building test image", func(l *DockerLogger, img string) error { + if target == "test" || target == "all" || target == "ultra-test" || target == "ultra-all" { + return ctx.buildTestStage(l, img) + } + return nil + }}, + + // Run tests (only for test target or all) + {"Running tests", func(l *DockerLogger, img string) error { + if target == "test" || target == "all" || target == "ultra-test" || target == "ultra-all" { + return ctx.runTests(l, img) + } + return nil + }}, + + // Development stage + {"Building development image", func(l *DockerLogger, img string) error { + if target == "dev" || target == "all" { + return ctx.buildDevStage(l, img) + } + return nil + }}, + + // Ultra development stage + {"Building ultra development image", func(l *DockerLogger, img string) error { + if target == "ultra-dev" || target == "ultra-all" { + return ctx.buildUltraDevStage(l, img) + } + return nil + }}, + + // Production stage + {"Building production image", func(l *DockerLogger, img string) error { + if target == "prod" || target == "all" { + return ctx.buildProdStage(l, img) + } + return nil + }}, + + // Slim production stage + {"Building slim production image", func(l *DockerLogger, img string) error { + if target == "prod-slim" { + return ctx.buildSlimProdStage(l, img) + } + return nil + }}, + + // Minimal production stage + {"Building minimal production image", func(l *DockerLogger, img string) error { + if target == "prod-minimal" { + return ctx.buildMinimalProdStage(l, img) + } + return nil + }}, + + // Ultra production stage (for ultra-all) + {"Building ultra production image", func(l *DockerLogger, img string) error { + if target == "ultra-all" { + return ctx.buildUltraProdStage(l, img) + } + return nil + }}, + + // Ultra production stage (ultra slim) + {"Building ultra-production 
image", func(l *DockerLogger, img string) error { + if target == "ultra-prod" { + return ctx.buildUltraProdStage(l, img) + } + return nil + }}, + + // Cleanup + {"Cleaning up dangling images", func(l *DockerLogger, img string) error { + return ctx.cleanupDanglingImages(l) + }}, + } + + // Execute validation steps first + if err := ctx.checkDockerfile(logger); err != nil { + os.Exit(1) + } + + if err := ctx.checkDocker(logger); err != nil { + os.Exit(1) + } + + // Execute build steps + for _, step := range steps { + if err := step.fn(logger, imageName); err != nil { + logger.Error("Step failed: %v", err) + os.Exit(1) + } + } + + // Print success message + printDockerSuccess(logger, imageName, target) +} diff --git a/scripts/docker_build.bat b/scripts/docker_build.bat deleted file mode 100644 index 2730018..0000000 --- a/scripts/docker_build.bat +++ /dev/null @@ -1,335 +0,0 @@ -@echo off -cls - -setlocal EnableDelayedExpansion -set "DEFAULT_APP_NAME=stackyard" -set "DEFAULT_IMAGE_NAME=myapp" -set "DEFAULT_TARGET=all" - -REM Configuration from parameters or defaults -if "%~1"=="" ( - set "APP_NAME=%DEFAULT_APP_NAME%" -) else ( - set "APP_NAME=%~1" -) - -if "%~2"=="" ( - set "IMAGE_NAME=%DEFAULT_IMAGE_NAME%" -) else ( - set "IMAGE_NAME=%~2" -) - -if "%~3"=="" ( - set "TARGET=%DEFAULT_TARGET%" -) else ( - set "TARGET=%~3" -) - -:: Define ANSI Colors -for /F "tokens=1,2 delims=#" %%a in ('"prompt #$H#$E# & echo on & for %%b in (1) do rem"') do ( - set "ESC=%%b" -) -set "RESET=%ESC%[0m" -set "BOLD=%ESC%[1m" -set "DIM=%ESC%[2m" -set "UNDERLINE=%ESC%[4m" - -:: Fancy Pastel Palette (main color: #8daea5) -set "P_PURPLE=%ESC%[38;5;108m" -set "B_PURPLE=%ESC%[1;38;5;108m" -set "P_CYAN=%ESC%[38;5;117m" -set "B_CYAN=%ESC%[1;38;5;117m" -set "P_GREEN=%ESC%[38;5;108m" -set "B_GREEN=%ESC%[1;38;5;108m" -set "P_YELLOW=%ESC%[93m" -set "B_YELLOW=%ESC%[1;93m" -set "P_RED=%ESC%[91m" -set "B_RED=%ESC%[1;91m" -set "GRAY=%ESC%[38;5;242m" -set "WHITE=%ESC%[97m" -set "B_WHITE=%ESC%[1;97m" - -:: 
Robustly switch to project root -cd /d "%~dp0.." - -echo. -echo %P_PURPLE% /\ %RESET% -echo %P_PURPLE%( )%RESET% %B_PURPLE%Docker Builder%RESET% %GRAY%by%RESET% %B_WHITE%diameter-tscd%RESET% -echo %P_PURPLE% \/ %RESET% -echo %GRAY%----------------------------------------------------------------------%RESET% -echo %B_CYAN%App Name:%RESET% %B_WHITE%%APP_NAME%%RESET% -echo %B_CYAN%Image Name:%RESET% %B_WHITE%%IMAGE_NAME%%RESET% -echo %B_CYAN%Target:%RESET% %B_WHITE%%TARGET%%RESET% -echo %GRAY%----------------------------------------------------------------------%RESET% - -REM Check if Dockerfile exists -if not exist "Dockerfile" ( - echo %B_RED%x Dockerfile not found in current directory%RESET% - exit /b 1 -) - -REM Check if docker is available -docker --version >nul 2>&1 -if %ERRORLEVEL% neq 0 ( - echo %B_RED%x Docker is not installed or not in PATH%RESET% - exit /b 1 -) - -REM Validate target -if "%TARGET%"=="all" goto :valid_target -if "%TARGET%"=="test" goto :valid_target -if "%TARGET%"=="dev" goto :valid_target -if "%TARGET%"=="prod" goto :valid_target -if "%TARGET%"=="prod-slim" goto :valid_target -if "%TARGET%"=="prod-minimal" goto :valid_target -if "%TARGET%"=="ultra-prod" goto :valid_target -if "%TARGET%"=="ultra-all" goto :valid_target -if "%TARGET%"=="ultra-dev" goto :valid_target -if "%TARGET%"=="ultra-test" goto :valid_target -echo %B_RED%x Invalid target: %TARGET%%RESET% -echo %B_CYAN%Valid targets: all, test, dev, prod, prod-slim, prod-minimal, ultra-prod, ultra-all, ultra-dev, ultra-test%RESET% -exit /b 1 - -:valid_target - -REM Initialize step counter -set STEP=1 - -REM Calculate total steps -if "%TARGET%"=="all" ( - set TOTAL_STEPS=4 -) -if "%TARGET%"=="test" ( - set TOTAL_STEPS=2 -) -if "%TARGET%"=="dev" ( - set TOTAL_STEPS=1 -) -if "%TARGET%"=="prod" ( - set TOTAL_STEPS=1 -) -if "%TARGET%"=="ultra-prod" ( - set TOTAL_STEPS=1 -) -if "%TARGET%"=="ultra-all" ( - set TOTAL_STEPS=4 -) -if "%TARGET%"=="ultra-dev" ( - set TOTAL_STEPS=1 -) -if 
"%TARGET%"=="ultra-test" ( - set TOTAL_STEPS=2 -) - -REM Build Test Stage (always needed for test target or all) -if "%TARGET%"=="test" goto :build_test -if "%TARGET%"=="all" goto :build_test -if "%TARGET%"=="ultra-test" goto :build_test -if "%TARGET%"=="ultra-all" goto :build_test -goto :skip_test - -:build_test -echo %B_PURPLE%[%STEP%/%TOTAL_STEPS%]%RESET% %P_CYAN%Building test image...%RESET% -docker build --target test -t "%IMAGE_NAME%:test" . -if %ERRORLEVEL% equ 0 ( - echo %B_GREEN%+ Test image built:%RESET% %B_WHITE%%IMAGE_NAME%:test%RESET% -) else ( - echo %B_RED%x Test build failed%RESET% - exit /b 1 -) -set /a STEP+=1 - -REM Run Tests (only for test target or all) -if "%TARGET%"=="test" goto :run_tests -if "%TARGET%"=="all" goto :run_tests -if "%TARGET%"=="ultra-test" goto :run_tests -if "%TARGET%"=="ultra-all" goto :run_tests -goto :skip_tests - -:run_tests -echo %B_PURPLE%[%STEP%/%TOTAL_STEPS%]%RESET% %P_CYAN%Running tests...%RESET% -docker run --rm "%IMAGE_NAME%:test" -if %ERRORLEVEL% equ 0 ( - echo %B_GREEN%+ Tests passed%RESET% -) else ( - echo %B_RED%x Tests failed%RESET% - exit /b 1 -) -set /a STEP+=1 - -:skip_tests -:skip_test - -REM Build Development Stage -if "%TARGET%"=="dev" goto :build_dev -if "%TARGET%"=="all" goto :build_dev -goto :skip_dev - -:build_dev -echo %B_PURPLE%[%STEP%/%TOTAL_STEPS%]%RESET% %P_CYAN%Building development image...%RESET% -docker build --target dev -t "%IMAGE_NAME%:dev" . -if %ERRORLEVEL% equ 0 ( - echo %B_GREEN%+ Development image built:%RESET% %B_WHITE%%IMAGE_NAME%:dev%RESET% -) else ( - echo %B_RED%x Development build failed%RESET% - exit /b 1 -) -set /a STEP+=1 - -:skip_dev - -REM Build Ultra Development Stage -if "%TARGET%"=="ultra-dev" goto :build_ultra_dev -if "%TARGET%"=="ultra-all" goto :build_ultra_dev -goto :skip_ultra_dev - -:build_ultra_dev -echo %B_PURPLE%[%STEP%/%TOTAL_STEPS%]%RESET% %P_CYAN%Building ultra development image...%RESET% -docker build --target ultra-dev -t "%IMAGE_NAME%:dev" . 
-if %ERRORLEVEL% equ 0 ( - echo %B_GREEN%+ Ultra development image built:%RESET% %B_WHITE%%IMAGE_NAME%:dev%RESET% -) else ( - echo %B_RED%x Ultra development build failed%RESET% - exit /b 1 -) -set /a STEP+=1 - -:skip_ultra_dev - -REM Build Production Stage -if "%TARGET%"=="prod" goto :build_prod -if "%TARGET%"=="all" goto :build_prod -goto :skip_prod - -:build_prod -echo %B_PURPLE%[%STEP%/%TOTAL_STEPS%]%RESET% %P_CYAN%Building production image...%RESET% -docker build --target prod -t "%IMAGE_NAME%:latest" . -if %ERRORLEVEL% equ 0 ( - echo %B_GREEN%+ Production image built:%RESET% %B_WHITE%%IMAGE_NAME%:latest%RESET% -) else ( - echo %B_RED%x Production build failed%RESET% - exit /b 1 -) -set /a STEP+=1 - -:skip_prod - -REM Build Slim Production Stage -if "%TARGET%"=="prod-slim" goto :build_prod_slim -goto :skip_prod_slim - -:build_prod_slim -echo %B_PURPLE%[%STEP%/%TOTAL_STEPS%]%RESET% %P_CYAN%Building slim production image...%RESET% -docker build --target prod-slim -t "%IMAGE_NAME%:slim" . -if %ERRORLEVEL% equ 0 ( - echo %B_GREEN%+ Slim production image built:%RESET% %B_WHITE%%IMAGE_NAME%:slim%RESET% -) else ( - echo %B_RED%x Slim production build failed%RESET% - exit /b 1 -) -set /a STEP+=1 - -:skip_prod_slim - -REM Build Minimal Production Stage -if "%TARGET%"=="prod-minimal" goto :build_prod_minimal -goto :skip_prod_minimal - -:build_prod_minimal -echo %B_PURPLE%[%STEP%/%TOTAL_STEPS%]%RESET% %P_CYAN%Building minimal production image...%RESET% -docker build --target prod-minimal -t "%IMAGE_NAME%:minimal" . 
-if %ERRORLEVEL% equ 0 ( - echo %B_GREEN%+ Minimal production image built:%RESET% %B_WHITE%%IMAGE_NAME%:minimal%RESET% -) else ( - echo %B_RED%x Minimal production build failed%RESET% - exit /b 1 -) -set /a STEP+=1 - -:skip_prod_minimal - -REM Build Ultra Production Stage (for ultra-all and ultra-prod) -if "%TARGET%"=="ultra-all" goto :build_ultra_prod -if "%TARGET%"=="ultra-prod" goto :build_ultra_prod -goto :skip_ultra_prod - -:build_ultra_prod -echo %B_PURPLE%[%STEP%/%TOTAL_STEPS%]%RESET% %P_CYAN%Building ultra production image...%RESET% -docker build --target ultra-prod -t "%IMAGE_NAME%:ultra" . -if %ERRORLEVEL% equ 0 ( - echo %B_GREEN%+ Ultra production image built:%RESET% %B_WHITE%%IMAGE_NAME%:ultra%RESET% -) else ( - echo %B_RED%x Ultra production build failed%RESET% - exit /b 1 -) -set /a STEP+=1 - -:skip_ultra_prod - -REM Optional: Clean up intermediate images -echo %B_PURPLE%[Bonus]%RESET% %P_CYAN%Cleaning up dangling images...%RESET% -docker image prune -f >nul 2>&1 -if %ERRORLEVEL% equ 0 ( - echo %B_GREEN%+ Cleanup completed%RESET% -) else ( - echo %GRAY%- Cleanup skipped%RESET% -) - -echo. 
-echo %GRAY%======================================================================%RESET% -echo %B_PURPLE%SUCCESS!%RESET% %P_GREEN%Docker images ready:%RESET% - -REM Show only the images that were actually built -if "%TARGET%"=="test" echo %B_WHITE%%IMAGE_NAME%:test%RESET% %GRAY%(testing)%RESET% -if "%TARGET%"=="all" echo %B_WHITE%%IMAGE_NAME%:test%RESET% %GRAY%(testing)%RESET% -if "%TARGET%"=="ultra-test" echo %B_WHITE%%IMAGE_NAME%:test%RESET% %GRAY%(testing)%RESET% -if "%TARGET%"=="ultra-all" echo %B_WHITE%%IMAGE_NAME%:test%RESET% %GRAY%(testing)%RESET% -if "%TARGET%"=="dev" echo %B_WHITE%%IMAGE_NAME%:dev%RESET% %GRAY%(development)%RESET% -if "%TARGET%"=="all" echo %B_WHITE%%IMAGE_NAME%:dev%RESET% %GRAY%(development)%RESET% -if "%TARGET%"=="ultra-dev" echo %B_WHITE%%IMAGE_NAME%:dev%RESET% %GRAY%(development)%RESET% -if "%TARGET%"=="ultra-all" echo %B_WHITE%%IMAGE_NAME%:dev%RESET% %GRAY%(development)%RESET% -if "%TARGET%"=="prod" echo %B_WHITE%%IMAGE_NAME%:latest%RESET% %GRAY%(production)%RESET% -if "%TARGET%"=="all" echo %B_WHITE%%IMAGE_NAME%:latest%RESET% %GRAY%(production)%RESET% -if "%TARGET%"=="prod-slim" echo %B_WHITE%%IMAGE_NAME%:slim%RESET% %GRAY%(slim-production)%RESET% -if "%TARGET%"=="prod-minimal" echo %B_WHITE%%IMAGE_NAME%:minimal%RESET% %GRAY%(minimal-production)%RESET% -if "%TARGET%"=="ultra-prod" echo %B_WHITE%%IMAGE_NAME%:ultra%RESET% %GRAY%(ultra-production)%RESET% -if "%TARGET%"=="ultra-all" echo %B_WHITE%%IMAGE_NAME%:ultra%RESET% %GRAY%(ultra-production)%RESET% - -echo %GRAY%======================================================================%RESET% -echo. -echo %B_CYAN%Usage examples:%RESET% - -REM Show relevant usage examples based on what was built -if "%TARGET%"=="dev" echo %GRAY%# Run development container%RESET% -if "%TARGET%"=="dev" echo %B_WHITE%docker run -p 8080:8080 -p 9090:9090 %IMAGE_NAME%:dev%RESET% -if "%TARGET%"=="dev" echo. 
-if "%TARGET%"=="all" echo %GRAY%# Run development container%RESET% -if "%TARGET%"=="all" echo %B_WHITE%docker run -p 8080:8080 -p 9090:9090 %IMAGE_NAME%:dev%RESET% -if "%TARGET%"=="all" echo. -if "%TARGET%"=="ultra-dev" echo %GRAY%# Run development container%RESET% -if "%TARGET%"=="ultra-dev" echo %B_WHITE%docker run -p 8080:8080 -p 9090:9090 %IMAGE_NAME%:dev%RESET% -if "%TARGET%"=="ultra-dev" echo. -if "%TARGET%"=="ultra-all" echo %GRAY%# Run development container%RESET% -if "%TARGET%"=="ultra-all" echo %B_WHITE%docker run -p 8080:8080 -p 9090:9090 %IMAGE_NAME%:dev%RESET% -if "%TARGET%"=="ultra-all" echo. -if "%TARGET%"=="prod" echo %GRAY%# Run production container%RESET% -if "%TARGET%"=="prod" echo %B_WHITE%docker run -p 8080:8080 -p 9090:9090 %IMAGE_NAME%:latest%RESET% -if "%TARGET%"=="prod" echo. -if "%TARGET%"=="all" echo %GRAY%# Run production container%RESET% -if "%TARGET%"=="all" echo %B_WHITE%docker run -p 8080:8080 -p 9090:9090 %IMAGE_NAME%:latest%RESET% -if "%TARGET%"=="all" echo. -if "%TARGET%"=="ultra-prod" echo %GRAY%# Run ultra production container%RESET% -if "%TARGET%"=="ultra-prod" echo %B_WHITE%docker run -p 8080:8080 -p 9090:9090 %IMAGE_NAME%:ultra%RESET% -if "%TARGET%"=="ultra-prod" echo. -if "%TARGET%"=="ultra-all" echo %GRAY%# Run ultra production container%RESET% -if "%TARGET%"=="ultra-all" echo %B_WHITE%docker run -p 8080:8080 -p 9090:9090 %IMAGE_NAME%:ultra%RESET% -if "%TARGET%"=="ultra-all" echo. 
-if "%TARGET%"=="test" echo %GRAY%# Run tests%RESET% -if "%TARGET%"=="test" echo %B_WHITE%docker run --rm %IMAGE_NAME%:test%RESET% -if "%TARGET%"=="all" echo %GRAY%# Run tests%RESET% -if "%TARGET%"=="all" echo %B_WHITE%docker run --rm %IMAGE_NAME%:test%RESET% -if "%TARGET%"=="ultra-test" echo %GRAY%# Run tests%RESET% -if "%TARGET%"=="ultra-test" echo %B_WHITE%docker run --rm %IMAGE_NAME%:test%RESET% -if "%TARGET%"=="ultra-all" echo %GRAY%# Run tests%RESET% -if "%TARGET%"=="ultra-all" echo %B_WHITE%docker run --rm %IMAGE_NAME%:test%RESET% -endlocal diff --git a/scripts/docker_build.sh b/scripts/docker_build.sh deleted file mode 100644 index f47e22b..0000000 --- a/scripts/docker_build.sh +++ /dev/null @@ -1,250 +0,0 @@ -#!/bin/bash - -# Clear the terminal screen -clear - -# Default configuration -DEFAULT_APP_NAME="stackyard" -DEFAULT_IMAGE_NAME="myapp" -DEFAULT_TARGET="all" - -# Configuration from parameters or defaults -APP_NAME="${1:-$DEFAULT_APP_NAME}" -IMAGE_NAME="${2:-$DEFAULT_IMAGE_NAME}" -TARGET="${3:-$DEFAULT_TARGET}" - -# Define ANSI Colors -RESET="\033[0m" -BOLD="\033[1m" -DIM="\033[2m" -UNDERLINE="\033[4m" - -# Fancy Pastel Palette (main color: #8daea5) -P_PURPLE="\033[38;5;108m" -B_PURPLE="\033[1;38;5;108m" -P_CYAN="\033[38;5;117m" -B_CYAN="\033[1;38;5;117m" -P_GREEN="\033[38;5;108m" -B_GREEN="\033[1;38;5;108m" -P_YELLOW="\033[93m" -B_YELLOW="\033[1;93m" -P_RED="\033[91m" -B_RED="\033[1;91m" -GRAY="\033[38;5;242m" -WHITE="\033[97m" -B_WHITE="\033[1;97m" - -# Robustly switch to project root (one level up from this script) -cd "$(dirname "$0")/.." 
|| exit 1 - -echo "" -echo -e " ${P_PURPLE} /\ ${RESET}" -echo -e " ${P_PURPLE}( )${RESET} ${B_PURPLE}Docker Builder${RESET} ${GRAY}by${RESET} ${B_WHITE}diameter-tscd${RESET}" -echo -e " ${P_PURPLE} \/ ${RESET}" -echo -e "${GRAY}----------------------------------------------------------------------${RESET}" -echo -e " ${B_CYAN}App Name:${RESET} ${B_WHITE}${APP_NAME}${RESET}" -echo -e " ${B_CYAN}Image Name:${RESET} ${B_WHITE}${IMAGE_NAME}${RESET}" -echo -e " ${B_CYAN}Target:${RESET} ${B_WHITE}${TARGET}${RESET}" -echo -e "${GRAY}----------------------------------------------------------------------${RESET}" - -# Check if Dockerfile exists -if [ ! -f "Dockerfile" ]; then - echo -e " ${B_RED}x Dockerfile not found in current directory${RESET}" - exit 1 -fi - -# Check if docker is available -if ! command -v docker &> /dev/null; then - echo -e " ${B_RED}x Docker is not installed or not in PATH${RESET}" - exit 1 -fi - -# Validate target -case "$TARGET" in - "all"|"test"|"dev"|"prod"|"prod-slim"|"prod-minimal"|"ultra-prod"|"ultra-all"|"ultra-dev"|"ultra-test") - ;; - *) - echo -e " ${B_RED}x Invalid target: ${TARGET}${RESET}" - echo -e " ${B_CYAN}Valid targets: all, test, dev, prod, prod-slim, prod-minimal, ultra-prod, ultra-all, ultra-dev, ultra-test${RESET}" - exit 1 - ;; -esac - -STEP=1 -TOTAL_STEPS=1 - -# Calculate total steps -if [ "$TARGET" = "all" ] || [ "$TARGET" = "ultra-all" ]; then - TOTAL_STEPS=4 -elif [ "$TARGET" = "test" ] || [ "$TARGET" = "ultra-test" ]; then - TOTAL_STEPS=2 -elif [ "$TARGET" = "dev" ] || [ "$TARGET" = "ultra-dev" ]; then - TOTAL_STEPS=1 -elif [ "$TARGET" = "prod" ] || [ "$TARGET" = "ultra-prod" ]; then - TOTAL_STEPS=1 -fi - -# 1. 
Build Test Stage (always needed for test target or all) -if [ "$TARGET" = "test" ] || [ "$TARGET" = "all" ] || [ "$TARGET" = "ultra-test" ] || [ "$TARGET" = "ultra-all" ]; then - echo -e "${B_PURPLE}[$STEP/$TOTAL_STEPS]${RESET} ${P_CYAN}Building test image...${RESET}" - if docker build --target test -t "${IMAGE_NAME}:test" .; then - echo -e " ${B_GREEN}+ Test image built:${RESET} ${B_WHITE}${IMAGE_NAME}:test${RESET}" - else - echo -e " ${B_RED}x Test build failed${RESET}" - exit 1 - fi - STEP=$((STEP + 1)) -fi - -# 2. Run Tests (only for test target or all) -if [ "$TARGET" = "test" ] || [ "$TARGET" = "all" ] || [ "$TARGET" = "ultra-test" ] || [ "$TARGET" = "ultra-all" ]; then - echo -e "${B_PURPLE}[$STEP/$TOTAL_STEPS]${RESET} ${P_CYAN}Running tests...${RESET}" - if docker run --rm "${IMAGE_NAME}:test"; then - echo -e " ${B_GREEN}+ Tests passed${RESET}" - else - echo -e " ${B_RED}x Tests failed${RESET}" - exit 1 - fi - STEP=$((STEP + 1)) -fi - -# 3. Build Development Stage -if [ "$TARGET" = "dev" ] || [ "$TARGET" = "all" ]; then - echo -e "${B_PURPLE}[$STEP/$TOTAL_STEPS]${RESET} ${P_CYAN}Building development image...${RESET}" - if docker build --target dev -t "${IMAGE_NAME}:dev" .; then - echo -e " ${B_GREEN}+ Development image built:${RESET} ${B_WHITE}${IMAGE_NAME}:dev${RESET}" - else - echo -e " ${B_RED}x Development build failed${RESET}" - exit 1 - fi - STEP=$((STEP + 1)) -fi - -# 3. Build Ultra Development Stage -if [ "$TARGET" = "ultra-dev" ] || [ "$TARGET" = "ultra-all" ]; then - echo -e "${B_PURPLE}[$STEP/$TOTAL_STEPS]${RESET} ${P_CYAN}Building ultra development image...${RESET}" - if docker build --target ultra-dev -t "${IMAGE_NAME}:dev" .; then - echo -e " ${B_GREEN}+ Ultra development image built:${RESET} ${B_WHITE}${IMAGE_NAME}:dev${RESET}" - else - echo -e " ${B_RED}x Ultra development build failed${RESET}" - exit 1 - fi - STEP=$((STEP + 1)) -fi - -# 4. 
Build Production Stage -if [ "$TARGET" = "prod" ] || [ "$TARGET" = "all" ]; then - echo -e "${B_PURPLE}[$STEP/$TOTAL_STEPS]${RESET} ${P_CYAN}Building production image...${RESET}" - if docker build --target prod -t "${IMAGE_NAME}:latest" .; then - echo -e " ${B_GREEN}+ Production image built:${RESET} ${B_WHITE}${IMAGE_NAME}:latest${RESET}" - else - echo -e " ${B_RED}x Production build failed${RESET}" - exit 1 - fi - STEP=$((STEP + 1)) -fi - -# 4. Build Slim Production Stage -if [ "$TARGET" = "prod-slim" ]; then - echo -e "${B_PURPLE}[$STEP/$TOTAL_STEPS]${RESET} ${P_CYAN}Building slim production image...${RESET}" - if docker build --target prod-slim -t "${IMAGE_NAME}:slim" .; then - echo -e " ${B_GREEN}+ Slim production image built:${RESET} ${B_WHITE}${IMAGE_NAME}:slim${RESET}" - else - echo -e " ${B_RED}x Slim production build failed${RESET}" - exit 1 - fi - STEP=$((STEP + 1)) -fi - -# 4. Build Minimal Production Stage -if [ "$TARGET" = "prod-minimal" ]; then - echo -e "${B_PURPLE}[$STEP/$TOTAL_STEPS]${RESET} ${P_CYAN}Building minimal production image...${RESET}" - if docker build --target prod-minimal -t "${IMAGE_NAME}:minimal" .; then - echo -e " ${B_GREEN}+ Minimal production image built:${RESET} ${B_WHITE}${IMAGE_NAME}:minimal${RESET}" - else - echo -e " ${B_RED}x Minimal production build failed${RESET}" - exit 1 - fi - STEP=$((STEP + 1)) -fi - -# 4. Build Ultra Production Stage (for ultra-all) -if [ "$TARGET" = "ultra-all" ]; then - echo -e "${B_PURPLE}[$STEP/$TOTAL_STEPS]${RESET} ${P_CYAN}Building ultra production image...${RESET}" - if docker build --target ultra-prod -t "${IMAGE_NAME}:ultra" .; then - echo -e " ${B_GREEN}+ Ultra production image built:${RESET} ${B_WHITE}${IMAGE_NAME}:ultra${RESET}" - else - echo -e " ${B_RED}x Ultra production build failed${RESET}" - exit 1 - fi - STEP=$((STEP + 1)) -fi - -# 5. 
Build Ultra-Production Stage (ultra slim) -if [ "$TARGET" = "ultra-prod" ]; then - echo -e "${B_PURPLE}[$STEP/$TOTAL_STEPS]${RESET} ${P_CYAN}Building ultra-production image...${RESET}" - if docker build --target ultra-prod -t "${IMAGE_NAME}:ultra" .; then - echo -e " ${B_GREEN}+ Ultra-production image built:${RESET} ${B_WHITE}${IMAGE_NAME}:ultra${RESET}" - else - echo -e " ${B_RED}x Ultra-production build failed${RESET}" - exit 1 - fi - STEP=$((STEP + 1)) -fi - -# Optional: Clean up intermediate images -echo -e "${B_PURPLE}[Bonus]${RESET} ${P_CYAN}Cleaning up dangling images...${RESET}" -if docker image prune -f &>/dev/null; then - echo -e " ${B_GREEN}+ Cleanup completed${RESET}" -else - echo -e " ${GRAY}- Cleanup skipped${RESET}" -fi - -echo "" -echo -e "${GRAY}======================================================================${RESET}" -echo -e " ${B_PURPLE}SUCCESS!${RESET} ${P_GREEN}Docker images ready:${RESET}" - -# Show only the images that were actually built -if [ "$TARGET" = "test" ] || [ "$TARGET" = "all" ] || [ "$TARGET" = "ultra-test" ] || [ "$TARGET" = "ultra-all" ]; then - echo -e " ${B_WHITE}${IMAGE_NAME}:test${RESET} ${GRAY}(testing)${RESET}" -fi -if [ "$TARGET" = "dev" ] || [ "$TARGET" = "all" ] || [ "$TARGET" = "ultra-dev" ] || [ "$TARGET" = "ultra-all" ]; then - echo -e " ${B_WHITE}${IMAGE_NAME}:dev${RESET} ${GRAY}(development)${RESET}" -fi -if [ "$TARGET" = "prod" ] || [ "$TARGET" = "all" ]; then - echo -e " ${B_WHITE}${IMAGE_NAME}:latest${RESET} ${GRAY}(production)${RESET}" -fi -if [ "$TARGET" = "prod-slim" ]; then - echo -e " ${B_WHITE}${IMAGE_NAME}:slim${RESET} ${GRAY}(slim-production)${RESET}" -fi -if [ "$TARGET" = "prod-minimal" ]; then - echo -e " ${B_WHITE}${IMAGE_NAME}:minimal${RESET} ${GRAY}(minimal-production)${RESET}" -fi -if [ "$TARGET" = "ultra-prod" ]; then - echo -e " ${B_WHITE}${IMAGE_NAME}:ultra${RESET} ${GRAY}(ultra-production)${RESET}" -fi -if [ "$TARGET" = "ultra-all" ]; then - echo -e " 
${B_WHITE}${IMAGE_NAME}:ultra${RESET} ${GRAY}(ultra-production)${RESET}" -fi - -echo -e "${GRAY}======================================================================${RESET}" -echo "" -echo -e "${B_CYAN}Usage examples:${RESET}" - -# Show relevant usage examples based on what was built -if [ "$TARGET" = "dev" ] || [ "$TARGET" = "all" ]; then - echo -e " ${GRAY}# Run development container${RESET}" - echo -e " ${B_WHITE}docker run -p 8080:8080 -p 9090:9090 ${IMAGE_NAME}:dev${RESET}" - echo "" -fi - -if [ "$TARGET" = "prod" ] || [ "$TARGET" = "all" ]; then - echo -e " ${GRAY}# Run production container${RESET}" - echo -e " ${B_WHITE}docker run -p 8080:8080 -p 9090:9090 ${IMAGE_NAME}:latest${RESET}" - echo "" -fi - -if [ "$TARGET" = "test" ] || [ "$TARGET" = "all" ]; then - echo -e " ${GRAY}# Run tests${RESET}" - echo -e " ${B_WHITE}docker run --rm ${IMAGE_NAME}:test${RESET}" -fi diff --git a/scripts/onboarding.bat b/scripts/onboarding.bat deleted file mode 100644 index 7e56cae..0000000 --- a/scripts/onboarding.bat +++ /dev/null @@ -1,344 +0,0 @@ -@echo off -cls - -setlocal EnableDelayedExpansion - -:: Robustly switch to project root (one level up from this script) -cd /d "%~dp0.." 
- -set "CONFIG_FILE=config.yaml" -set "BACKUP_FILE=config.yaml.backup" - -:: Define ANSI Colors (limited support in Windows CMD) -:: Fancy Pastel Palette (matching TUI colors from pkg/tui/live.go) -set "RESET=[0m" -set "BOLD=[1m" -set "P_PURPLE=[38;5;219m" -set "B_PURPLE=[1;38;5;219m" -set "P_CYAN=[38;5;117m" -set "B_CYAN=[1;38;5;117m" -set "P_GREEN=[38;5;108m" -set "B_GREEN=[1;38;5;108m" -set "P_YELLOW=[93m" -set "B_YELLOW=[1;93m" -set "P_RED=[91m" -set "B_RED=[1;91m" -set "GRAY=[38;5;242m" -set "WHITE=[97m" -set "B_WHITE=[1;97m" - -:: Check if build script exists -if not exist "scripts\build.bat" ( - echo Error: build.bat not found in scripts\ directory - pause - exit /b 1 -) - -:: Check if Go is installed -go version >nul 2>&1 -if %ERRORLEVEL% NEQ 0 ( - echo Error: Go is not installed or not in PATH - echo Please install Go from https://golang.org/dl/ - pause - exit /b 1 -) - -:: Check Go version (minimum 1.24) -for /f "tokens=3" %%i in ('go version') do set go_ver=%%i -set go_ver=%go_ver:go=% -for /f "tokens=1,2 delims=." %%a in ("%go_ver%") do ( - set major=%%a - set minor=%%b -) -if %major% gtr 1 goto version_ok -if %major% equ 1 if %minor% geq 24 goto version_ok -echo Error: Go version %go_ver% is too old. Minimum required is 1.24 -echo Please install Go from https://golang.org/dl/ -pause -exit /b 1 -:version_ok - -:: Function to read user input with default value -:read_input -setlocal EnableDelayedExpansion -set "prompt=%~1" -set "default=%~2" -set "input=" - -echo !prompt! -if "!default!" NEQ "" echo Default: !default! -set /p "input=Enter value: " -if "!input!"=="" if "!default!" NEQ "" set "input=!default!" -endlocal & set "input=%input%" -goto :eof - -:: Function to read yes/no with default -:read_yes_no -setlocal EnableDelayedExpansion -set "prompt=%~1" -set "default=%~2" -set "input=" - -echo !prompt! -if "!default!"=="y" ( - echo Default: Yes -) else ( - echo Default: No -) -set /p "input=Choice (y/n): " -if "!input!"=="" set "input=!default!" 
- -if /i "!input!"=="y" ( - echo true -) else if /i "!input!"=="yes" ( - echo true -) else ( - echo false -) -endlocal -goto :eof - -:: Function to update config value (simple implementation) -:update_config -setlocal -set "key=%~1" -set "value=%~2" - -:: Escape quotes in value -set "value=%value:"=""%" - -:: Use CMD to update -if exist "%CONFIG_FILE%.tmp" del "%CONFIG_FILE%.tmp" -for /f "tokens=*" %%i in (%CONFIG_FILE%) do ( - echo %%i | findstr /b "%key%:" >nul 2>&1 - if !errorlevel! neq 0 ( - echo %%i >> "%CONFIG_FILE%.tmp" - ) else ( - echo %key%: "%value%" >> "%CONFIG_FILE%.tmp" - ) -) -if exist "%CONFIG_FILE%.tmp" ( - move /y "%CONFIG_FILE%.tmp" "%CONFIG_FILE%" >nul 2>&1 - echo Configuration updated: %key% -) else ( - echo Warning: Could not update %key% in %CONFIG_FILE% -) -goto :eof - -:: Function to update boolean config value -:update_config_bool -setlocal -set "key=%~1" -set "value=%~2" - -:: Use CMD to update -if exist "%CONFIG_FILE%.tmp" del "%CONFIG_FILE%.tmp" -for /f "tokens=*" %%i in (%CONFIG_FILE%) do ( - echo %%i | findstr /b "%key%:" >nul 2>&1 - if !errorlevel! neq 0 ( - echo %%i >> "%CONFIG_FILE%.tmp" - ) else ( - echo %key%: %value% >> "%CONFIG_FILE%.tmp" - ) -) -if exist "%CONFIG_FILE%.tmp" ( - move /y "%CONFIG_FILE%.tmp" "%CONFIG_FILE%" >nul 2>&1 - echo Configuration updated: %key% -) else ( - echo Warning: Could not update %key% in %CONFIG_FILE% -) -goto :eof - -:: Function to show warning -:show_warning -echo. -echo WARNING: %~1 -echo. -goto :eof - -:: Function to show info -:show_info -echo. -echo INFO: %~1 -echo. -goto :eof - -:: Function to show success -:show_success -echo. -echo SUCCESS: %~1 -echo. -goto :eof - -:: Check if config.yaml exists -if not exist "%CONFIG_FILE%" ( - echo Error: %CONFIG_FILE% not found in current directory - echo Please run this script from the project root directory. - pause - exit /b 1 -) - -:: Backup original config -copy "%CONFIG_FILE%" "%BACKUP_FILE%" >nul - -echo. 
-echo /\ -echo ( ) stackyard Onboarding by diameter-tscd -echo \/ -echo ------------------------------------------------------------------------------ -echo Welcome to the stackyard onboarding setup! -echo This script will help you configure your application. -echo ------------------------------------------------------------------------------ - -:: Basic Application Configuration -echo BASIC APPLICATION CONFIGURATION - -call :read_input "Enter application name" "My Fancy Go App" -set "APP_NAME=%errorlevel%"% - -call :read_input "Enter application version" "1.0.0" -set "APP_VERSION=%errorlevel%"% - -call :read_input "Enter server port" "8080" -set "SERVER_PORT=%errorlevel%"% - -call :read_input "Enter monitoring port" "9090" -set "MONITORING_PORT=%errorlevel%"% - -echo ENVIRONMENT SETTINGS - -call :read_yes_no "Enable debug mode?" "y" -set "DEBUG_MODE=%errorlevel%"% - -call :read_yes_no "Enable TUI (Terminal User Interface)?" "y" -set "TUI_MODE=%errorlevel%"% - -call :read_yes_no "Quiet startup (suppress console logs)?" "n" -set "QUIET_STARTUP=%errorlevel%"% - -echo SERVICE CONFIGURATION - -call :read_yes_no "Enable monitoring dashboard?" "y" -set "ENABLE_MONITORING=%errorlevel%"% - -call :read_yes_no "Enable API encryption?" "n" -set "ENABLE_ENCRYPTION=%errorlevel%"% - -echo INFRASTRUCTURE CONFIGURATION - -call :read_yes_no "Enable Redis?" "n" -set "ENABLE_REDIS=%errorlevel%"% - -set /p "ENABLE_POSTGRES=Enable PostgreSQL? (single/multi/none) [single]: " -if "%ENABLE_POSTGRES%"=="" set "ENABLE_POSTGRES=single" - -call :read_yes_no "Enable Kafka?" "n" -set "ENABLE_KAFKA=%errorlevel%"% - -call :read_yes_no "Enable MinIO (Object Storage)?" 
"n" -set "ENABLE_MINIO=%errorlevel%"% - -echo APPLYING CONFIGURATION - -:: Update basic config -call :update_config "app.name" "%APP_NAME%" -call :update_config "app.version" "%APP_VERSION%" -call :update_config "server.port" "%SERVER_PORT%" -call :update_config "monitoring.port" "%MONITORING_PORT%" - -:: Update boolean configs -call :update_config_bool "app.debug" "%DEBUG_MODE%" -call :update_config_bool "app.enable_tui" "%TUI_MODE%" -call :update_config_bool "app.quiet_startup" "%QUIET_STARTUP%" -call :update_config_bool "monitoring.enabled" "%ENABLE_MONITORING%" -call :update_config_bool "encryption.enabled" "%ENABLE_ENCRYPTION%" -call :update_config_bool "redis.enabled" "%ENABLE_REDIS%" -call :update_config_bool "kafka.enabled" "%ENABLE_KAFKA%" - -:: Handle PostgreSQL configuration -if "%ENABLE_POSTGRES%"=="single" ( - call :update_config_bool "postgres.enabled" "true" -) else if "%ENABLE_POSTGRES%"=="multi" ( - call :update_config_bool "postgres.enabled" "true" -) else ( - call :update_config_bool "postgres.enabled" "false" -) - -:: Handle MinIO -call :update_config_bool "monitoring.minio.enabled" "%ENABLE_MINIO%" - -call :show_success "Configuration updated successfully!" - -:: Security Warnings -echo. -echo SECURITY WARNINGS - -call :show_warning "Default credentials are configured. You MUST change these before production use:" -echo • PostgreSQL password: 'Mypostgres01' -echo • Monitoring password: 'admin' -echo • MinIO credentials: 'minioadmin/minioadmin' -echo • API secret key: 'super-secret-key' - -call :show_warning "API obfuscation is enabled. This adds security through obscurity but is not encryption." - -if "%ENABLE_ENCRYPTION%"=="true" ( - call :show_warning "Encryption is enabled but no key is set. You need to configure 'encryption.key' in config.yaml" -) - -:: Next Steps -echo. -echo NEXT STEPS - -call :show_info "1. Review and customize config.yaml with your specific settings" -call :show_info "2. 
Update all default passwords and secrets" -call :show_info "3. Set up your infrastructure (PostgreSQL, Redis, etc.)" -call :show_info "4. Run 'go mod tidy' to ensure dependencies are correct" -call :show_info "5. Build the application using 'scripts\build.bat'" -call :show_info "6. Test the application with 'go run cmd\app\main.go'" - -:: Offer to run additional setup -echo. -echo Would you like to run additional setup commands? -echo This will run 'go mod tidy' and check for build issues. -call :read_yes_no "Run setup commands?" "y" -set "RUN_SETUP=%errorlevel%"% - -if "%RUN_SETUP%"=="true" ( - echo. - echo RUNNING SETUP COMMANDS - - echo. - echo Running 'go mod tidy'... - go mod tidy - if %ERRORLEVEL% EQU 0 ( - call :show_success "Dependencies updated successfully" - ) else ( - call :show_warning "Failed to update dependencies - you may need to check your Go installation" - ) - - echo. - echo Checking build... - go build -o temp_build.exe .\cmd\app\main.go - if %ERRORLEVEL% EQU 0 ( - call :show_success "Build test successful" - if exist "temp_build.exe" del "temp_build.exe" - ) else ( - call :show_warning "Build failed - check your configuration and dependencies" - ) - - echo. -) - -:: Final message -echo ====================================================================== -echo ONBOARDING COMPLETE! Your app is ready to go! -echo ====================================================================== -echo. -echo Backup created: %BACKUP_FILE% -echo Configuration: %CONFIG_FILE% -echo. -echo Happy coding! -echo. - -pause -endlocal diff --git a/scripts/onboarding.sh b/scripts/onboarding.sh deleted file mode 100644 index 59c2bfb..0000000 --- a/scripts/onboarding.sh +++ /dev/null @@ -1,319 +0,0 @@ -#!/bin/bash - -# Onboarding Script for stackyard -# This script helps set up the application for the first time - -# Clear the terminal screen -clear - -# Check if build script exists -if [ ! 
-f "./scripts/build.sh" ]; then - echo -e "${B_RED}Error: build.sh not found in scripts/ directory${RESET}" - exit 1 -fi - -# Check if Go is installed -if ! command -v go >/dev/null 2>&1; then - echo -e "${B_RED}Error: Go is not installed or not in PATH${RESET}" - echo -e "${WHITE}Please install Go from https://golang.org/dl/${RESET}" - exit 1 -fi - -# Check Go version (minimum 1.24) -go_version=$(go version | awk '{print $3}' | sed 's/go//') -if [ "$(printf '%s\n' "$go_version" "1.24" | sort -V | head -n1)" = "$go_version" ]; then - echo -e "${B_RED}Error: Go version $go_version is too old. Minimum required is 1.24${RESET}" - echo -e "${WHITE}Please upgrade Go from https://golang.org/dl/${RESET}" - exit 1 -fi - -# Define ANSI Colors -RESET="\033[0m" -BOLD="\033[1m" -DIM="\033[2m" -UNDERLINE="\033[4m" - -# Fancy Pastel Palette (main color: #8daea5) -P_PURPLE="\033[38;5;108m" -B_PURPLE="\033[1;38;5;108m" -P_CYAN="\033[38;5;117m" -B_CYAN="\033[1;38;5;117m" -P_GREEN="\033[38;5;108m" -B_GREEN="\033[1;38;5;108m" -P_YELLOW="\033[93m" -B_YELLOW="\033[1;93m" -P_RED="\033[91m" -B_RED="\033[1;91m" -GRAY="\033[38;5;242m" -WHITE="\033[97m" -B_WHITE="\033[1;97m" - -# Function to read user input with default value -read_input() { - local prompt="$1" - local default="$2" - local input - - echo -e "${P_CYAN}$prompt${RESET}" - if [ -n "$default" ]; then - echo -e "${GRAY}Default: $default${RESET}" - fi - echo -ne "${B_WHITE}> ${RESET}" - read input - - if [ -z "$input" ] && [ -n "$default" ]; then - input="$default" - fi - - echo "$input" -} - -# Function to read yes/no with default -read_yes_no() { - local prompt="$1" - local default="$2" - local input - - echo -e "${P_CYAN}$prompt${RESET}" - if [ "$default" = "y" ]; then - echo -e "${GRAY}Default: Yes${RESET}" - else - echo -e "${GRAY}Default: No${RESET}" - fi - echo -ne "${B_WHITE}(y/n) > ${RESET}" - read input - - if [ -z "$input" ]; then - input="$default" - fi - - case "$input" in - y|Y|yes|Yes|YES) echo "true" ;; - *) echo 
"false" ;; - esac -} - -# Function to update config value -update_config() { - local key="$1" - local value="$2" - - # Escape special characters for sed - value=$(printf '%s\n' "$value" | sed 's/[[\.*^$(){}?+|/]/\\&/g') - - if grep -q "^$key:" config.yaml; then - # Update existing key - sed -i.bak "s|^$key:.*|$key: \"$value\"|" config.yaml - else - # Add new key (this is a simple implementation - might need adjustment for complex YAML) - echo "Warning: Could not find $key in config.yaml - you may need to add it manually" - fi -} - -# Function to update boolean config value -update_config_bool() { - local key="$1" - local value="$2" - - if grep -q "^$key:" config.yaml; then - # Update existing key - sed -i.bak "s|^$key:.*|$key: $value|" config.yaml - else - echo "Warning: Could not find $key in config.yaml - you may need to add it manually" - fi -} - -# Function to show warning -show_warning() { - local message="$1" - echo -e "${B_YELLOW}WARNING:${RESET} ${B_WHITE}$message${RESET}" - echo "" -} - -# Function to show info -show_info() { - local message="$1" - echo -e "${B_CYAN}INFO:${RESET} ${WHITE}$message${RESET}" - echo "" -} - -# Function to show success -show_success() { - local message="$1" - echo -e "${B_GREEN}SUCCESS:${RESET} ${WHITE}$message${RESET}" - echo "" -} - -# Check if config.yaml exists -if [ ! 
-f "config.yaml" ]; then - echo -e "${B_RED}Error: config.yaml not found in current directory${RESET}" - echo -e "${WHITE}Please run this script from the project root directory.${RESET}" - exit 1 -fi - -# Backup original config -cp config.yaml config.yaml.backup - -echo "" -echo -e " ${P_PURPLE} /\ ${RESET}" -echo -e " ${P_PURPLE}( )${RESET} ${B_PURPLE}stackyard Onboarding${RESET} ${GRAY}by${RESET} ${B_WHITE}diameter-tscd${RESET}" -echo -e " ${P_PURPLE} \/ ${RESET}" -echo -e "${GRAY}----------------------------------------------------------------------${RESET}" -echo -e "${B_CYAN}Welcome to the stackyard onboarding setup!${RESET}" -echo -e "${GRAY}This script will help you configure your application.${RESET}" -echo -e "${GRAY}----------------------------------------------------------------------${RESET}" -echo "" - -# Basic Application Configuration -echo -e "${B_PURPLE}BASIC APPLICATION CONFIGURATION${RESET}" -echo "" - -APP_NAME=$(read_input "Enter application name" "My Fancy Go App") -APP_VERSION=$(read_input "Enter application version" "1.0.0") -SERVER_PORT=$(read_input "Enter server port" "8080") -MONITORING_PORT=$(read_input "Enter monitoring port" "9090") - -echo "" - -# Environment Settings -echo -e "${B_PURPLE}ENVIRONMENT SETTINGS${RESET}" -echo "" - -DEBUG_MODE=$(read_yes_no "Enable debug mode?" "y") -TUI_MODE=$(read_yes_no "Enable TUI (Terminal User Interface)?" "y") -QUIET_STARTUP=$(read_yes_no "Quiet startup (suppress console logs)?" "n") - -echo "" - -# Service Configuration -echo -e "${B_PURPLE}SERVICE CONFIGURATION${RESET}" -echo "" - -ENABLE_MONITORING=$(read_yes_no "Enable monitoring dashboard?" "y") -ENABLE_ENCRYPTION=$(read_yes_no "Enable API encryption?" "n") - -echo "" - -# Infrastructure Configuration -echo -e "${B_PURPLE}INFRASTRUCTURE CONFIGURATION${RESET}" -echo "" - -ENABLE_REDIS=$(read_yes_no "Enable Redis?" "n") -ENABLE_POSTGRES=$(read_input "Enable PostgreSQL? (single/multi/none)" "single") -ENABLE_KAFKA=$(read_yes_no "Enable Kafka?" 
"n") -ENABLE_MINIO=$(read_yes_no "Enable MinIO (Object Storage)?" "n") - -echo "" - -# Apply Configuration -echo -e "${B_PURPLE}APPLYING CONFIGURATION${RESET}" -echo "" - -# Update basic config -update_config "app.name" "$APP_NAME" -update_config "app.version" "$APP_VERSION" -update_config "server.port" "$SERVER_PORT" -update_config "monitoring.port" "$MONITORING_PORT" - -# Update boolean configs -update_config_bool "app.debug" "$DEBUG_MODE" -update_config_bool "app.enable_tui" "$TUI_MODE" -update_config_bool "app.quiet_startup" "$QUIET_STARTUP" -update_config_bool "monitoring.enabled" "$ENABLE_MONITORING" -update_config_bool "encryption.enabled" "$ENABLE_ENCRYPTION" -update_config_bool "redis.enabled" "$ENABLE_REDIS" -update_config_bool "kafka.enabled" "$ENABLE_KAFKA" - -# Handle PostgreSQL configuration -if [ "$ENABLE_POSTGRES" = "single" ]; then - update_config_bool "postgres.enabled" "true" - # Note: Single connection config would need manual setup -elif [ "$ENABLE_POSTGRES" = "multi" ]; then - update_config_bool "postgres.enabled" "true" - # Multi-connection is already configured in the template -else - update_config_bool "postgres.enabled" "false" -fi - -# Handle MinIO -if [ "$ENABLE_MINIO" = "true" ]; then - update_config_bool "monitoring.minio.enabled" "true" -else - update_config_bool "monitoring.minio.enabled" "false" -fi - -show_success "Configuration updated successfully!" - -# Security Warnings -echo -e "${B_PURPLE}SECURITY WARNINGS${RESET}" -echo "" - -show_warning "Default credentials are configured. You MUST change these before production use:" -echo -e "${B_RED}• PostgreSQL password: 'Mypostgres01'${RESET}" -echo -e "${B_RED}• Monitoring password: 'admin'${RESET}" -echo -e "${B_RED}• MinIO credentials: 'minioadmin/minioadmin'${RESET}" -echo -e "${B_RED}• API secret key: 'super-secret-key'${RESET}" -echo "" - -show_warning "API obfuscation is enabled. This adds security through obscurity but is not encryption." 
-echo "" - -if [ "$ENABLE_ENCRYPTION" = "true" ]; then - show_warning "Encryption is enabled but no key is set. You need to configure 'encryption.key' in config.yaml" - echo "" -fi - -# Next Steps -echo -e "${B_PURPLE}NEXT STEPS${RESET}" -echo "" - -show_info "1. Review and customize config.yaml with your specific settings" -show_info "2. Update all default passwords and secrets" -show_info "3. Set up your infrastructure (PostgreSQL, Redis, etc.)" -show_info "4. Run 'go mod tidy' to ensure dependencies are correct" -show_info "5. Build the application using './scripts/build.sh'" -show_info "6. Test the application with 'go run cmd/app/main.go'" - -echo "" - -# Offer to run additional setup -echo -e "${P_CYAN}Would you like to run additional setup commands?${RESET}" -echo -e "${GRAY}This will run 'go mod tidy' and check for build issues.${RESET}" -RUN_SETUP=$(read_yes_no "Run setup commands?" "y") - -if [ "$RUN_SETUP" = "true" ]; then - echo "" - echo -e "${B_PURPLE}RUNNING SETUP COMMANDS${RESET}" - echo "" - - echo -e "${P_CYAN}Running 'go mod tidy'...${RESET}" - if go mod tidy; then - show_success "Dependencies updated successfully" - else - show_warning "Failed to update dependencies - you may need to check your Go installation" - fi - - echo -e "${P_CYAN}Checking build...${RESET}" - if go build -o /tmp/test-build ./cmd/app/main.go; then - show_success "Build test successful" - rm /tmp/test-build 2>/dev/null - else - show_warning "Build failed - check your configuration and dependencies" - fi - - echo "" -fi - -# Final message -echo -e "${GRAY}======================================================================${RESET}" -echo -e " ${B_PURPLE}ONBOARDING COMPLETE!${RESET} ${P_GREEN}Your app is ready to go!${RESET}" -echo -e "${GRAY}======================================================================${RESET}" -echo "" -echo -e "${B_CYAN}Backup created:${RESET} ${B_WHITE}config.yaml.backup${RESET}" -echo -e "${B_CYAN}Configuration:${RESET} 
${B_WHITE}config.yaml${RESET}" -echo "" -echo -e "${B_GREEN}Happy coding!${RESET}" -echo "" - -# Restore backup on error (if something went wrong) -trap 'echo ""; echo -e "${B_RED}An error occurred. Restoring backup...${RESET}"; cp config.yaml.backup config.yaml; exit 1' ERR diff --git a/versioninfo.json b/versioninfo.json index 457e617..d835c55 100644 --- a/versioninfo.json +++ b/versioninfo.json @@ -15,7 +15,7 @@ }, "StringFileInfo": { "CompanyName": "John Doe Company", - "FileDescription": "Bp Go Project", + "FileDescription": "Stackyard Project", "ProductName": "Stackyard", "InternalName": "Stackyard", "OriginalFilename": "stackyard.exe" From 428dddb0f3b1048a70c9fdeafc3a444215489a87 Mon Sep 17 00:00:00 2001 From: "Gab." Date: Tue, 17 Mar 2026 07:46:52 +0700 Subject: [PATCH 06/18] refactor: reorganize build script structure and improve project root detection - Converted configuration constants to variables for better flexibility - Added comprehensive project root detection that searches up directory tree for go.mod - Implemented proper path validation and directory navigation - Moved dist directory creation to project root detection function - Added structured logging for directory changes and project validation - Reorganized build steps to include path checking as first step - Improved error handling and user feedback during build process --- .github/workflows/jekyll.yml | 74 --------------------------------- scripts/build/build.go | 80 ++++++++++++++++++++++++++++++++---- 2 files changed, 72 insertions(+), 82 deletions(-) delete mode 100644 .github/workflows/jekyll.yml diff --git a/.github/workflows/jekyll.yml b/.github/workflows/jekyll.yml deleted file mode 100644 index 57bf6b2..0000000 --- a/.github/workflows/jekyll.yml +++ /dev/null @@ -1,74 +0,0 @@ -name: Deploy Jekyll site to Pages - -on: - # Runs on pushes targeting the default branch - push: - branches: ["main"] - paths: - - 'docs/**' - - '.github/workflows/jekyll.yml' - - # Allows you to run this 
workflow manually from the Actions tab - workflow_dispatch: - -# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages -permissions: - contents: read - pages: write - id-token: write - -# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. -# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. -concurrency: - group: "pages" - cancel-in-progress: false - -jobs: - # Build job - build: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup Ruby - uses: ruby/setup-ruby@v1 - with: - ruby-version: '3.1' - bundler-cache: false - - - name: Setup Pages - id: pages - uses: actions/configure-pages@v4 - - - name: Install dependencies - run: | - cd docs - gem install bundler - bundle install - env: - BUNDLE_GEMFILE: docs/Gemfile - - - name: Build with Jekyll - run: | - cd docs - bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}" - env: - JEKYLL_ENV: production - - - name: Upload artifact - uses: actions/upload-pages-artifact@v3 - with: - path: docs/_site - - # Deployment job - deploy: - environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - runs-on: ubuntu-latest - needs: build - steps: - - name: Deploy to GitHub Pages - id: deployment - uses: actions/deploy-pages@v4 diff --git a/scripts/build/build.go b/scripts/build/build.go index 3eb8137..3dac66c 100644 --- a/scripts/build/build.go +++ b/scripts/build/build.go @@ -17,8 +17,8 @@ import ( "time" ) -// Configuration constants -const ( +// Configuration variables +var ( DIST_DIR = "dist" APP_NAME = "stackyard" MAIN_PATH = "./cmd/app/main.go" @@ -100,6 +100,75 @@ func NewLogger(verbose bool) *Logger { return &Logger{verbose: verbose} } +// checkPath checks the path folder and ensures we're in the project root +func (ctx *BuildContext) checkPath(logger *Logger) error { + return ctx.ensureProjectRoot(logger) +} + +// 
ensureProjectRoot finds the project root and changes to it if needed +func (ctx *BuildContext) ensureProjectRoot(logger *Logger) error { + currentDir, err := os.Getwd() + if err != nil { + return fmt.Errorf("failed to get current directory: %w", err) + } + + logger.Info("Starting from: %s", currentDir) + + // Find project root by looking for go.mod + projectRoot, err := findProjectRoot(currentDir) + if err != nil { + return fmt.Errorf("failed to find project root: %w", err) + } + + if projectRoot != currentDir { + logger.Info("Changing to project root: %s", projectRoot) + if err := os.Chdir(projectRoot); err != nil { + return fmt.Errorf("failed to change directory to %s: %w", projectRoot, err) + } + + // Update context with new working directory + ctx.ProjectDir = projectRoot + ctx.DistPath = filepath.Join(projectRoot, DIST_DIR) + + logger.Success("Now in project root") + } else { + logger.Info("Already in project root") + } + + // Ensure dist directory exists + if err := os.MkdirAll(ctx.DistPath, 0755); err != nil { + logger.Error("Failed to create dist directory: %v", err) + os.Exit(1) + } + + return nil +} + +// findProjectRoot searches up the directory tree for go.mod +func findProjectRoot(startDir string) (string, error) { + current := startDir + + for { + // Check if go.mod exists in current directory + goModPath := filepath.Join(current, "go.mod") + if _, err := os.Stat(goModPath); err == nil { + return current, nil + } + + // Move up one directory + parent := filepath.Dir(current) + + // If we've reached the root directory, stop + if parent == current { + break + } + + current = parent + } + + return "", fmt.Errorf("go.mod not found in directory tree") +} + // checkRequiredTools checks if required tools are available func (ctx *BuildContext) checkRequiredTools(logger *Logger) error { logger.Info("Checking required tools...") @@ -645,17 +714,12 @@ func main() { _, cancel := context.WithCancel(context.Background()) setupSignalHandler(cancel) - // Ensure dist 
directory exists - if err := os.MkdirAll(ctx.DistPath, 0755); err != nil { - logger.Error("Failed to create dist directory: %v", err) - os.Exit(1) - } - // Execute build steps steps := []struct { name string fn func(*Logger) error }{ + {"Checking Project Path", ctx.checkPath}, {"Checking required tools", ctx.checkRequiredTools}, {"Asking user about garble", ctx.askUserAboutGarble}, {"Stopping running process", ctx.stopRunningProcess}, From 7960f3b7c113f64bfa0bf765a2cb3537ca6b113a Mon Sep 17 00:00:00 2001 From: "Gab." Date: Wed, 18 Mar 2026 14:38:58 +0700 Subject: [PATCH 07/18] refactor: remove unused module import and fix comment typo --- cmd/app/main.go | 3 --- internal/middleware/middleware.go | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/cmd/app/main.go b/cmd/app/main.go index a94dd6e..4adf784 100644 --- a/cmd/app/main.go +++ b/cmd/app/main.go @@ -16,9 +16,6 @@ import ( "stackyard/pkg/utils" "syscall" "time" - - // load modules init - _ "stackyard/internal/services/modules" ) func main() { diff --git a/internal/middleware/middleware.go b/internal/middleware/middleware.go index d4c5421..cc3954c 100644 --- a/internal/middleware/middleware.go +++ b/internal/middleware/middleware.go @@ -67,7 +67,7 @@ func Logger(l *logger.Logger) echo.MiddlewareFunc { } } -// PermissionCheck enforces "allow accept permission kecuali delete data" +// PermissionCheck enforces "allow accept permission except data deletion" func PermissionCheck(l *logger.Logger) echo.MiddlewareFunc { return func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { From 91fd30100efd1fee92edc6f44591b20d05bba962 Mon Sep 17 00:00:00 2001 From: "Gab." Date: Thu, 19 Mar 2026 15:23:46 +0700 Subject: [PATCH 08/18] refactor: add WireName and Get methods to service interfaces Added WireName() and Get() methods to all service implementations to support dependency injection framework integration. 
This change standardizes the service interface pattern across the codebase, enabling better service discovery and wiring capabilities. --- internal/services/modules/broadcast_service.go | 6 ++++-- internal/services/modules/cache_service.go | 2 ++ internal/services/modules/encryption_service.go | 6 ++++-- internal/services/modules/grafana_service.go | 2 ++ internal/services/modules/mongodb_service.go | 6 ++++-- internal/services/modules/multi_tenant_service.go | 6 ++++-- internal/services/modules/products_service.go | 2 ++ internal/services/modules/tasks_service.go | 5 ++++- internal/services/modules/users_service.go | 2 ++ pkg/interfaces/service.go | 10 +++++++++- pkg/registry/registry.go | 9 +++++++++ 11 files changed, 46 insertions(+), 10 deletions(-) diff --git a/internal/services/modules/broadcast_service.go b/internal/services/modules/broadcast_service.go index 35d7916..2c323f8 100644 --- a/internal/services/modules/broadcast_service.go +++ b/internal/services/modules/broadcast_service.go @@ -119,8 +119,10 @@ func NewBroadcastService(enabled bool, logger *logger.Logger) *BroadcastService return service } -func (s *BroadcastService) Name() string { return "Broadcast Service" } -func (s *BroadcastService) Enabled() bool { return s.enabled } +func (s *BroadcastService) Name() string { return "Broadcast Service" } +func (s *BroadcastService) WireName() string { return "broadcast-service" } +func (s *BroadcastService) Enabled() bool { return s.enabled } +func (s *BroadcastService) Get() interface{} { return s } func (s *BroadcastService) Endpoints() []string { return []string{"/events/stream/{stream_id}", "/events/broadcast", "/events/streams"} } diff --git a/internal/services/modules/cache_service.go b/internal/services/modules/cache_service.go index 8579608..e68d7d8 100644 --- a/internal/services/modules/cache_service.go +++ b/internal/services/modules/cache_service.go @@ -26,7 +26,9 @@ func NewCacheService(enabled bool) *CacheService { } func (s *CacheService) 
Name() string { return "Cache Service" } +func (s *CacheService) WireName() string { return "cache-service" } func (s *CacheService) Enabled() bool { return s.enabled } +func (s *CacheService) Get() interface{} { return s } func (s *CacheService) Endpoints() []string { return []string{"/cache"} } type CacheRequest struct { diff --git a/internal/services/modules/encryption_service.go b/internal/services/modules/encryption_service.go index edb48a5..105c48b 100644 --- a/internal/services/modules/encryption_service.go +++ b/internal/services/modules/encryption_service.go @@ -62,8 +62,10 @@ func NewEncryptionService(enabled bool, config map[string]interface{}) *Encrypti } } -func (s *EncryptionService) Name() string { return "Encryption Service" } -func (s *EncryptionService) Enabled() bool { return s.enabled } +func (s *EncryptionService) Name() string { return "Encryption Service" } +func (s *EncryptionService) WireName() string { return "encryption-service" } +func (s *EncryptionService) Enabled() bool { return s.enabled } +func (s *EncryptionService) Get() interface{} { return s } func (s *EncryptionService) Endpoints() []string { return []string{"/encryption/encrypt", "/encryption/decrypt", "/encryption/status", "/encryption/key-rotate"} } diff --git a/internal/services/modules/grafana_service.go b/internal/services/modules/grafana_service.go index 1e6154c..63c4085 100644 --- a/internal/services/modules/grafana_service.go +++ b/internal/services/modules/grafana_service.go @@ -28,8 +28,10 @@ func NewGrafanaService(grafanaManager *infrastructure.GrafanaManager, enabled bo } func (s *GrafanaService) Name() string { return "Grafana Service" } +func (s *GrafanaService) WireName() string { return "grafana-service" } func (s *GrafanaService) Enabled() bool { return s.enabled && s.grafanaManager != nil } func (s *GrafanaService) Endpoints() []string { return []string{"/grafana"} } +func (s *GrafanaService) Get() interface{} { return s } func (s *GrafanaService) 
RegisterRoutes(g *echo.Group) { sub := g.Group("/grafana") diff --git a/internal/services/modules/mongodb_service.go b/internal/services/modules/mongodb_service.go index bb6dce6..ce06ce7 100644 --- a/internal/services/modules/mongodb_service.go +++ b/internal/services/modules/mongodb_service.go @@ -45,11 +45,13 @@ func NewMongoDBService( } } -func (s *MongoDBService) Name() string { return "MongoDB Service" } -func (s *MongoDBService) Enabled() bool { return s.enabled } +func (s *MongoDBService) Name() string { return "MongoDB Service" } +func (s *MongoDBService) WireName() string { return "mongodb-service" } +func (s *MongoDBService) Enabled() bool { return s.enabled } func (s *MongoDBService) Endpoints() []string { return []string{"/products/{tenant}", "/products/{tenant}/{id}"} } +func (s *MongoDBService) Get() interface{} { return s } func (s *MongoDBService) RegisterRoutes(g *echo.Group) { sub := g.Group("/products") diff --git a/internal/services/modules/multi_tenant_service.go b/internal/services/modules/multi_tenant_service.go index 032009c..b6f9781 100644 --- a/internal/services/modules/multi_tenant_service.go +++ b/internal/services/modules/multi_tenant_service.go @@ -57,11 +57,13 @@ func NewMultiTenantService( } } -func (s *MultiTenantService) Name() string { return "Multi-Tenant Service" } -func (s *MultiTenantService) Enabled() bool { return s.enabled } +func (s *MultiTenantService) Name() string { return "Multi-Tenant Service" } +func (s *MultiTenantService) WireName() string { return "multitenant-service" } +func (s *MultiTenantService) Enabled() bool { return s.enabled } func (s *MultiTenantService) Endpoints() []string { return []string{"/orders/{tenant}", "/orders/{tenant}/{id}"} } +func (s *MultiTenantService) Get() interface{} { return s } func (s *MultiTenantService) RegisterRoutes(g *echo.Group) { sub := g.Group("/orders") diff --git a/internal/services/modules/products_service.go b/internal/services/modules/products_service.go index 
2604fd1..73446a2 100644 --- a/internal/services/modules/products_service.go +++ b/internal/services/modules/products_service.go @@ -19,8 +19,10 @@ func NewProductsService(enabled bool) *ProductsService { } func (s *ProductsService) Name() string { return "Products Service" } +func (s *ProductsService) WireName() string { return "products-service" } func (s *ProductsService) Enabled() bool { return s.enabled } func (s *ProductsService) Endpoints() []string { return []string{"/products"} } +func (s *ProductsService) Get() interface{} { return s } func (s *ProductsService) RegisterRoutes(g *echo.Group) { sub := g.Group("/products") diff --git a/internal/services/modules/tasks_service.go b/internal/services/modules/tasks_service.go index b084490..57cbda1 100644 --- a/internal/services/modules/tasks_service.go +++ b/internal/services/modules/tasks_service.go @@ -42,13 +42,16 @@ func NewTasksService(db *infrastructure.PostgresManager, enabled bool, logger *l } } -func (s *TasksService) Name() string { return "Tasks Service" } +func (s *TasksService) Name() string { return "Tasks Service" } +func (s *TasksService) WireName() string { return "tasks-service" } func (s *TasksService) Enabled() bool { // Service is enabled only if configured AND DB is available return s.enabled && s.db != nil && s.db.ORM != nil } +func (s *TasksService) Get() interface{} { return s } + func (s *TasksService) Endpoints() []string { return []string{"/tasks"} } func (s *TasksService) RegisterRoutes(g *echo.Group) { diff --git a/internal/services/modules/users_service.go b/internal/services/modules/users_service.go index fa3f962..0ed20c9 100644 --- a/internal/services/modules/users_service.go +++ b/internal/services/modules/users_service.go @@ -21,8 +21,10 @@ func NewUsersService(enabled bool) *UsersService { } func (s *UsersService) Name() string { return "Users Service" } +func (s *UsersService) WireName() string { return "users-service" } func (s *UsersService) Enabled() bool { return 
s.enabled } func (s *UsersService) Endpoints() []string { return []string{"/users", "/users/:id"} } +func (s *UsersService) Get() interface{} { return s } func (s *UsersService) RegisterRoutes(g *echo.Group) { sub := g.Group("/users") diff --git a/pkg/interfaces/service.go b/pkg/interfaces/service.go index 0349e00..1257954 100644 --- a/pkg/interfaces/service.go +++ b/pkg/interfaces/service.go @@ -1,12 +1,17 @@ package interfaces -import "github.com/labstack/echo/v4" +import ( + "github.com/labstack/echo/v4" +) // Service defines the interface that all services must implement type Service interface { // Name returns the human-readable name of the service Name() string + // Alias Name for dependency injection + WireName() string + // Enabled returns whether the service is enabled Enabled() bool @@ -15,4 +20,7 @@ type Service interface { // RegisterRoutes registers the service's routes with the Echo router RegisterRoutes(g *echo.Group) + + // Get service + Get() interface{} } diff --git a/pkg/registry/registry.go b/pkg/registry/registry.go index d9f65b1..4fde4bd 100644 --- a/pkg/registry/registry.go +++ b/pkg/registry/registry.go @@ -15,6 +15,9 @@ type ServiceFactory func(config *config.Config, logger *logger.Logger, deps *Dep // Global registry of service factories var serviceFactories = make(map[string]ServiceFactory) +// Global registry of discovered service +var serviceDiscovered = make(map[string]interface{}) + // RegisterService registers a service factory for automatic discovery func RegisterService(name string, factory ServiceFactory) { serviceFactories[name] = factory @@ -34,6 +37,8 @@ func AutoDiscoverServices( if service := factory(config, logger, deps); service != nil { services = append(services, service) logger.Info("Auto-registered service", "service", name) + + serviceDiscovered[service.Name()] = service.Get() } else { logger.Warn("Service factory returned nil", "service", name) } @@ -64,6 +69,10 @@ func GetServiceFactories() map[string]ServiceFactory 
{ return serviceFactories } +func GetService(name string) interface{} { + return serviceDiscovered[name] +} + // Register adds a service to the registry func (r *ServiceRegistry) Register(s interfaces.Service) { r.services = append(r.services, s) From e6420ac7f2126477ce287cc9cb0e96580c307518 Mon Sep 17 00:00:00 2001 From: "Gab." Date: Thu, 26 Mar 2026 13:52:20 +0700 Subject: [PATCH 09/18] refactor: restructure app initialization with step-based pattern --- cmd/app/main.go | 173 ++++++++++++++++++++++++++++++++------ pkg/logger/logger.go | 186 +++++++++++++++++++++++++++++++---------- scripts/build/build.go | 19 +++++ 3 files changed, 311 insertions(+), 67 deletions(-) diff --git a/cmd/app/main.go b/cmd/app/main.go index 4adf784..ef13a05 100644 --- a/cmd/app/main.go +++ b/cmd/app/main.go @@ -18,40 +18,62 @@ import ( "time" ) +// AppContext holds the application state throughout initialization +type AppContext struct { + Config *config.Config + Logger *logger.Logger + Broadcaster *monitoring.LogBroadcaster + BannerText string + Timestamp string + ConfigURL string // Store the parsed config URL +} + +// AppStep represents a single step in the application initialization process +type AppStep struct { + Name string + Fn func(*AppContext) error +} + +// executeSteps executes the provided steps in sequence with error handling +func executeSteps(ctx *AppContext, steps []AppStep) error { + for i, step := range steps { + stepNum := fmt.Sprintf("%d/%d", i+1, len(steps)) + fmt.Printf("[%s] %s\n", stepNum, step.Name) + + if err := step.Fn(ctx); err != nil { + return fmt.Errorf("step failed: %w", err) + } + } + return nil +} + func main() { // Clear the terminal screen for a fresh start utils.ClearScreen() - // Parse command line flags + // Parse flags once at the beginning configURL := parseFlags() - // Load configuration - cfg := loadConfig(configURL) - - // Check if "web" folder exists, if not, disable web monitoring - if _, err := os.Stat("web"); os.IsNotExist(err) { - 
fmt.Println("\033[33m 'web' folder not found, disabling web monitoring\033[0m") - cfg.Monitoring.Enabled = false + // Create app context + ctx := &AppContext{ + Timestamp: time.Now().Format("20060102_150405"), + ConfigURL: configURL, } - // Load banner text - bannerText := loadBanner(cfg) - - // Check port availability - if err := utils.CheckPortAvailability(cfg.Server.Port, cfg.Monitoring.Port, cfg.Monitoring.Enabled); err != nil { - fmt.Printf("\033[31m Port Error: %s\033[0m\n", err.Error()) - fmt.Println("\033[33mPlease stop the conflicting service or change the port in config.yaml\033[0m") - os.Exit(1) + // Execute initialization steps + steps := []AppStep{ + {"Loading configuration", loadConfigStep}, + {"Validating configuration", validateConfigStep}, + {"Loading banner", loadBannerStep}, + {"Checking port availability", checkPortStep}, + {"Initializing logger", initLoggerStep}, + {"Initializing broadcaster", initBroadcasterStep}, + {"Starting application", startAppStep}, } - // Initialize broadcaster for monitoring - broadcaster := monitoring.NewLogBroadcaster() - - // Start application based on TUI mode - if cfg.App.EnableTUI { - runWithTUI(cfg, bannerText, broadcaster) - } else { - runWithConsole(cfg, bannerText, broadcaster) + if err := executeSteps(ctx, steps); err != nil { + fmt.Printf("Fatal error: %v\n", err) + os.Exit(1) } } @@ -394,3 +416,106 @@ func logServiceStatus(l *logger.Logger, name string, enabled bool) { l.Debug("Service skipped", "service", name, "status", "disabled") } } + +// Step functions for the initialization process + +// parseConfigStep parses command line flags +func parseConfigStep(ctx *AppContext) error { + var configURL string + flag.StringVar(&configURL, "c", "", "URL to load configuration from (YAML format)") + flag.Parse() + + // Validate URL if provided + if configURL != "" { + if _, err := url.ParseRequestURI(configURL); err != nil { + return fmt.Errorf("invalid config URL format: %v", err) + } + } + + // Store config URL 
in context for later use + // We'll need to modify the context to store this, but for now we'll handle it in loadConfigStep + return nil +} + +// loadConfigStep loads configuration from local file or URL +func loadConfigStep(ctx *AppContext) error { + // Use the config URL that was parsed in main and stored in context + configURL := ctx.ConfigURL + + cfg := loadConfig(configURL) + ctx.Config = cfg + return nil +} + +// validateConfigStep validates the loaded configuration +func validateConfigStep(ctx *AppContext) error { + cfg := ctx.Config + + // Check if "web" folder exists, if not, disable web monitoring + if _, err := os.Stat("web"); os.IsNotExist(err) { + fmt.Println("\033[33m 'web' folder not found, disabling web monitoring\033[0m") + cfg.Monitoring.Enabled = false + } + + // Additional validation can be added here + return nil +} + +// loadBannerStep loads banner text from file if configured +func loadBannerStep(ctx *AppContext) error { + cfg := ctx.Config + bannerText := loadBanner(cfg) + ctx.BannerText = bannerText + return nil +} + +// checkPortStep checks port availability +func checkPortStep(ctx *AppContext) error { + cfg := ctx.Config + if err := utils.CheckPortAvailability(cfg.Server.Port, cfg.Monitoring.Port, cfg.Monitoring.Enabled); err != nil { + return fmt.Errorf("port error: %s", err.Error()) + } + return nil +} + +// initLoggerStep initializes the logger +func initLoggerStep(ctx *AppContext) error { + cfg := ctx.Config + + // Initialize logger based on TUI mode + if cfg.App.EnableTUI { + // For TUI mode, we'll create a logger that writes to both TUI and broadcaster + // This will be handled in startAppStep when we have the broadcaster + ctx.Logger = nil // Will be initialized later + } else { + // For console mode, create a regular logger + ctx.Logger = logger.New(cfg.App.Debug, nil) + ctx.Logger.Info("Starting Application", "name", cfg.App.Name, "env", cfg.App.Env) + ctx.Logger.Info("TUI mode disabled, using traditional console logging") + 
ctx.Logger.Info("Initializing services...") + } + + return nil +} + +// initBroadcasterStep initializes the log broadcaster +func initBroadcasterStep(ctx *AppContext) error { + ctx.Broadcaster = monitoring.NewLogBroadcaster() + return nil +} + +// startAppStep starts the application based on TUI mode +func startAppStep(ctx *AppContext) error { + cfg := ctx.Config + broadcaster := ctx.Broadcaster + bannerText := ctx.BannerText + + // Start application based on TUI mode + if cfg.App.EnableTUI { + runWithTUI(cfg, bannerText, broadcaster) + } else { + runWithConsole(cfg, bannerText, broadcaster) + } + + return nil +} diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 16d75e3..b6415f6 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -10,10 +10,24 @@ import ( "github.com/rs/zerolog" ) -// Logger wraps the zerolog logger -type Logger struct { - z zerolog.Logger - quiet bool +// OutputConfig defines the output formatting configuration +type OutputConfig struct { + ConsoleEnabled bool + ConsoleFormat string // "fancy", "simple", "json" + Colors bool + TimestampFormat string + NoColor bool +} + +// DefaultOutputConfig returns a default output configuration +func DefaultOutputConfig() OutputConfig { + return OutputConfig{ + ConsoleEnabled: true, + ConsoleFormat: "fancy", + Colors: true, + TimestampFormat: "15:04:05", + NoColor: false, + } } // LoggerConfig contains configuration for the logger @@ -21,33 +35,111 @@ type LoggerConfig struct { Debug bool Quiet bool // suppress console output (logs still go to broadcaster) Broadcaster io.Writer + Output OutputConfig +} + +// DefaultLoggerConfig returns a default logger configuration +func DefaultLoggerConfig() LoggerConfig { + return LoggerConfig{ + Debug: false, + Quiet: false, + Broadcaster: nil, + Output: DefaultOutputConfig(), + } +} + +// Logger wraps the zerolog logger with modular configuration +type Logger struct { + z zerolog.Logger + quiet bool + config LoggerConfig } // New creates a new 
fancy logger func New(debug bool, broadcaster io.Writer) *Logger { - return NewWithConfig(LoggerConfig{ - Debug: debug, - Quiet: false, - Broadcaster: broadcaster, - }) + cfg := DefaultLoggerConfig() + cfg.Debug = debug + cfg.Broadcaster = broadcaster + cfg.Quiet = false + return NewWithConfig(cfg) } // NewQuiet creates a new logger with console output suppressed func NewQuiet(debug bool, broadcaster io.Writer) *Logger { - return NewWithConfig(LoggerConfig{ - Debug: debug, - Quiet: true, - Broadcaster: broadcaster, - }) + cfg := DefaultLoggerConfig() + cfg.Debug = debug + cfg.Broadcaster = broadcaster + cfg.Quiet = true + return NewWithConfig(cfg) } // NewWithConfig creates a new logger with full configuration func NewWithConfig(cfg LoggerConfig) *Logger { zerolog.TimeFieldFormat = time.RFC3339 - // Console Output (Fancy) - consoleOutput := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: "15:04:05"} - consoleOutput.FormatLevel = func(i interface{}) string { + // Create console output based on configuration + var consoleOutput zerolog.ConsoleWriter + if cfg.Output.ConsoleEnabled { + consoleOutput = zerolog.ConsoleWriter{ + Out: os.Stdout, + TimeFormat: cfg.Output.TimestampFormat, + FormatLevel: getLevelFormatter(cfg.Output), + FormatMessage: getMessageFormatter(cfg.Output), + NoColor: !cfg.Output.Colors || cfg.Output.NoColor, + } + } else { + // Console disabled, use discard writer + consoleOutput = zerolog.ConsoleWriter{Out: io.Discard} + } + + var multi zerolog.LevelWriter + + if cfg.Quiet { + // Quiet mode: only write to broadcaster (if available), not to console + if cfg.Broadcaster != nil { + // Create a simple console writer for the broadcaster (without stdout) + broadcasterOutput := zerolog.ConsoleWriter{ + Out: cfg.Broadcaster, + TimeFormat: cfg.Output.TimestampFormat, + NoColor: true, + } + multi = zerolog.MultiLevelWriter(broadcasterOutput) + } else { + // No broadcaster and quiet mode = discard all logs + multi = 
zerolog.MultiLevelWriter(zerolog.ConsoleWriter{Out: io.Discard}) + } + } else { + // Normal mode: write to console and broadcaster + if cfg.Broadcaster != nil { + multi = zerolog.MultiLevelWriter(consoleOutput, cfg.Broadcaster) + } else { + multi = zerolog.MultiLevelWriter(consoleOutput) + } + } + + logLevel := zerolog.InfoLevel + if cfg.Debug { + logLevel = zerolog.DebugLevel + } + + z := zerolog.New(multi).Level(logLevel).With().Timestamp().Logger() + + return &Logger{z: z, quiet: cfg.Quiet, config: cfg} +} + +// getLevelFormatter returns the appropriate level formatter based on output configuration +func getLevelFormatter(output OutputConfig) func(interface{}) string { + if !output.Colors || output.NoColor { + return func(i interface{}) string { + if ll, ok := i.(string); ok { + return strings.ToUpper(ll) + } + return strings.ToUpper(fmt.Sprintf("%s", i)) + } + } + + // Pastel color formatter + return func(i interface{}) string { var l string if ll, ok := i.(string); ok { switch ll { @@ -75,39 +167,47 @@ func NewWithConfig(cfg LoggerConfig) *Logger { } return l } - consoleOutput.FormatMessage = func(i interface{}) string { - return fmt.Sprintf("\x1b[1m%s\x1b[0m", i) - } - - var multi zerolog.LevelWriter +} - if cfg.Quiet { - // Quiet mode: only write to broadcaster (if available), not to console - if cfg.Broadcaster != nil { - // Create a simple console writer for the broadcaster (without stdout) - broadcasterOutput := zerolog.ConsoleWriter{Out: cfg.Broadcaster, TimeFormat: "15:04:05", NoColor: true} - multi = zerolog.MultiLevelWriter(broadcasterOutput) - } else { - // No broadcaster and quiet mode = discard all logs - multi = zerolog.MultiLevelWriter(zerolog.ConsoleWriter{Out: io.Discard}) - } - } else { - // Normal mode: write to console and broadcaster - if cfg.Broadcaster != nil { - multi = zerolog.MultiLevelWriter(consoleOutput, cfg.Broadcaster) - } else { - multi = zerolog.MultiLevelWriter(consoleOutput) +// getMessageFormatter returns the appropriate 
message formatter based on output configuration +func getMessageFormatter(output OutputConfig) func(interface{}) string { + if !output.Colors || output.NoColor { + return func(i interface{}) string { + return fmt.Sprintf("%s", i) } } - logLevel := zerolog.InfoLevel - if cfg.Debug { - logLevel = zerolog.DebugLevel + return func(i interface{}) string { + return fmt.Sprintf("\x1b[1m%s\x1b[0m", i) } +} - z := zerolog.New(multi).Level(logLevel).With().Timestamp().Logger() +// New creates a new logger with the same configuration as the current logger but with different debug and broadcaster settings +func (l *Logger) New(debug bool, broadcaster io.Writer) *Logger { + cfg := l.config + cfg.Debug = debug + cfg.Broadcaster = broadcaster + cfg.Quiet = false + return NewWithConfig(cfg) +} + +// WithOutput returns a new logger with modified output configuration +func (l *Logger) WithOutput(output OutputConfig) *Logger { + cfg := l.config + cfg.Output = output + return NewWithConfig(cfg) +} + +// WithQuiet returns a new logger with quiet mode enabled/disabled +func (l *Logger) WithQuiet(quiet bool) *Logger { + cfg := l.config + cfg.Quiet = quiet + return NewWithConfig(cfg) +} - return &Logger{z: z, quiet: cfg.Quiet} +// GetConfig returns the current logger configuration +func (l *Logger) GetConfig() LoggerConfig { + return l.config } // IsQuiet returns whether the logger is in quiet mode diff --git a/scripts/build/build.go b/scripts/build/build.go index 3dac66c..860d325 100644 --- a/scripts/build/build.go +++ b/scripts/build/build.go @@ -105,6 +105,23 @@ func (ctx *BuildContext) checkPath(logger *Logger) error { return ctx.ensureProjectRoot(logger) } +// clear console screen +func ClearScreen() { + var cmd *exec.Cmd + + switch runtime.GOOS { + case "windows": + // Windows: use cmd /c cls + cmd = exec.Command("cmd", "/c", "cls") + default: + // Linux, macOS, and others: use clear command + cmd = exec.Command("clear") + } + + cmd.Stdout = os.Stdout + cmd.Run() +} + // 
ensureProjectRoot finds the project root and changes to it if needed func (ctx *BuildContext) ensureProjectRoot(logger *Logger) error { currentDir, err := os.Getwd() @@ -679,6 +696,8 @@ func setupSignalHandler(cancel context.CancelFunc) { // main function func main() { + ClearScreen() + // Parse command line flags var ( timeoutSeconds = flag.Int("timeout", 10, "Timeout for user prompts in seconds") From 83ad7992817b448890a5afcfc6226ffa57736d9c Mon Sep 17 00:00:00 2001 From: "Gab." Date: Thu, 26 Mar 2026 17:25:14 +0700 Subject: [PATCH 10/18] refactor: remove duplicate blueprint documentation sections Removed duplicate "2.4 Step-by-Step Application Execution" section from blueprint documentation. The section was repeated verbatim, causing redundancy and potential confusion for developers referencing the architecture documentation. --- docs_wiki/blueprint/blueprint.txt | 145 +++++++++++++++++++ internal/server/server.go | 233 ++++++++++++------------------ 2 files changed, 235 insertions(+), 143 deletions(-) diff --git a/docs_wiki/blueprint/blueprint.txt b/docs_wiki/blueprint/blueprint.txt index 05169ef..e44a855 100644 --- a/docs_wiki/blueprint/blueprint.txt +++ b/docs_wiki/blueprint/blueprint.txt @@ -36,6 +36,7 @@ Client → Handler → Request Binding → Validation → Business Logic → Res - **modules/**: Individual service implementations - **pkg/**: Reusable packages (public) - **request/**: Request handling and validation + - **logger/**: Modular logging utilities with separated configuration - **response/**: Standardized API responses - **tui/**: Terminal User Interface - **infrastructure/**: External service integrations @@ -52,6 +53,46 @@ Client → Handler → Request Binding → Validation → Business Logic → Res 5. **Logic** → response.Success or response.Error 6. **Response** → JSON Response to Client +### 2.4 Step-by-Step Application Execution + +The application follows a structured step-by-step execution pattern inspired by the build system: + +**Execution Flow:** +1. 
**Configuration Loading** → Load and validate application configuration +2. **Banner Display** → Show application banner and version +3. **Port Validation** → Check service and monitoring port availability +4. **Logger Initialization** → Initialize modular logging system +5. **Infrastructure Setup** → Initialize async infrastructure components +6. **Service Registration** → Register and initialize enabled services +7. **Server Startup** → Start HTTP servers and begin serving requests + +**Key Features:** +- **Modular Steps**: Each initialization phase is a separate, testable function +- **Error Handling**: Graceful failure with detailed error messages at each step +- **Progress Feedback**: Clear step-by-step progress indication +- **Configuration-Driven**: All steps respect configuration settings (TUI enable/disable) +- **Graceful Shutdown**: Proper cleanup of all resources on termination + +### 2.4 Step-by-Step Application Execution + +The application follows a structured step-by-step execution pattern inspired by the build system: + +**Execution Flow:** +1. **Configuration Loading** → Load and validate application configuration +2. **Banner Display** → Show application banner and version +3. **Port Validation** → Check service and monitoring port availability +4. **Logger Initialization** → Initialize modular logging system +5. **Infrastructure Setup** → Initialize async infrastructure components +6. **Service Registration** → Register and initialize enabled services +7. **Server Startup** → Start HTTP servers and begin serving requests + +**Key Features:** +- **Modular Steps**: Each initialization phase is a separate, testable function +- **Error Handling**: Graceful failure with detailed error messages at each step +- **Progress Feedback**: Clear step-by-step progress indication +- **Configuration-Driven**: All steps respect configuration settings (TUI enable/disable) +- **Graceful Shutdown**: Proper cleanup of all resources on termination + ## 3. 
API STRUCTURE ### 3.1 Standardized Response Format @@ -993,6 +1034,110 @@ scripts\change_package.bat github.com/new-org/new-project - **Backup Files**: Remove `.bak` files after verifying changes (Unix/Linux/macOS) - **IDE Restart**: May need to restart IDE/editor after module name changes +### 2.5 Generic Stepping Utility + +**Purpose:** Provides a reusable, generic stepping pattern for step-by-step initialization across the entire application. + +**Location:** `pkg/utils/stepper.go` + +**Key Features:** +- **Generic Design**: Works with any context type through `StepContext` interface +- **Type Safety**: Provides type-safe wrapper functions for specific use cases +- **Consistent Logging**: Uniform progress tracking and error handling +- **Reusable Pattern**: Can be used by any component requiring step-by-step initialization +- **Error Handling**: Each step can fail independently with clear error messages + +**Core Types:** +```go +// Generic context interface +type StepContext interface{} + +// Step function type +type StepFunc func(StepContext) error + +// Step definition +type Step struct { + Name string + Fn StepFunc +} + +// Generic execution function +func ExecuteSteps(ctx StepContext, steps []Step) error +``` + +**Type-Safe Wrappers:** +```go +// For application steps +func ExecuteAppSteps(ctx interface{}, steps interface{}) error + +// For server steps +func ExecuteServerSteps(ctx interface{}, steps interface{}) error +``` + +**Usage Examples:** + +**Application Startup (cmd/app/main.go):** +```go +type AppContext struct { + Config *config.Config + Logger *logger.Logger + Broadcaster *monitoring.LogBroadcaster +} + +type AppStep struct { + Name string + Fn func(*AppContext) error +} + +steps := []AppStep{ + {"Loading configuration", loadConfigStep}, + {"Validating configuration", validateConfigStep}, + {"Starting application", startAppStep}, +} + +if err := utils.ExecuteAppSteps(ctx, steps); err != nil { + return err +} +``` + +**Server Startup 
(internal/server/server.go):** +```go +type ServerContext struct { + Server *Server + Config *config.Config + Logger *logger.Logger + Dependencies *registry.Dependencies + InfraInitManager *infrastructure.InfraInitManager +} + +type ServerStep struct { + Name string + Fn func(*ServerContext) error +} + +steps := []ServerStep{ + {"Initializing Infrastructure", initInfrastructureStep}, + {"Creating Dependencies", createDependenciesStep}, + {"Starting HTTP Server", startHTTPServerStep}, +} + +if err := utils.ExecuteServerSteps(ctx, steps); err != nil { + return err +} +``` + +**Benefits:** +- **Code Reusability**: Same pattern used across different components +- **Consistency**: Uniform initialization approach throughout the application +- **Maintainability**: Single implementation to maintain and improve +- **Debugging**: Clear step-by-step progress tracking during startup +- **Error Isolation**: Each step fails independently with specific error messages + +**Integration Points:** +- **Application Startup**: `cmd/app/main.go` uses `ExecuteAppSteps` +- **Server Startup**: `internal/server/server.go` uses `ExecuteServerSteps` +- **Future Extensions**: Any component can use `ExecuteSteps` directly + ### 8.2 Infrastructure Integration #### 8.2.1 Global Singleton Afero Manager diff --git a/internal/server/server.go b/internal/server/server.go index f1ee5a9..67017e0 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -35,20 +35,15 @@ func New(cfg *config.Config, l *logger.Logger, b *monitoring.LogBroadcaster) *Se e := echo.New() e.HideBanner = true e.HidePort = true - - // Enable GZIP compression for all responses e.Use(echoMiddleware.Gzip()) - // Custom HTTP Error Handler for JSON responses e.HTTPErrorHandler = func(err error, c echo.Context) { l.Error("HTTP Error", err) - // Handle HTTP errors with JSON response if he, ok := err.(*echo.HTTPError); ok { var message string code := he.Code - // Custom message for 404 Not Found if code == 404 { message 
= "Endpoint not found. This incident will be reported." response.Error(c, code, "ENDPOINT_NOT_FOUND", message, map[string]interface{}{ @@ -58,7 +53,6 @@ func New(cfg *config.Config, l *logger.Logger, b *monitoring.LogBroadcaster) *Se return } - // For other HTTP errors, use the original message if it's a string if msg, ok := he.Message.(string); ok { message = msg } else { @@ -68,7 +62,6 @@ func New(cfg *config.Config, l *logger.Logger, b *monitoring.LogBroadcaster) *Se return } - // For non-HTTP errors, return internal server error response.InternalServerError(c, "An unexpected error occurred") } @@ -81,86 +74,35 @@ func New(cfg *config.Config, l *logger.Logger, b *monitoring.LogBroadcaster) *Se } func (s *Server) Start() error { - // Initialize async infrastructure manager s.infraInitManager = infrastructure.NewInfraInitManager(s.logger) - // 1. Start Async Infrastructure Initialization (doesn't block) s.logger.Info("Starting async infrastructure initialization...") redisManager, kafkaManager, _, postgresConnectionManager, mongoConnectionManager, grafanaManager, cronManager := s.infraInitManager.StartAsyncInitialization(s.config, s.logger) - // Create dependencies container s.dependencies = registry.NewDependencies( - redisManager, - kafkaManager, - nil, // Will be set from connection manager - postgresConnectionManager, - nil, // Will be set from connection manager - mongoConnectionManager, - grafanaManager, - cronManager, + redisManager, kafkaManager, nil, postgresConnectionManager, nil, mongoConnectionManager, grafanaManager, cronManager, ) - // Set default connections for backward compatibility - if postgresConnectionManager != nil { - if defaultConn, exists := postgresConnectionManager.GetDefaultConnection(); exists { - s.dependencies.PostgresManager = defaultConn - } - } - if mongoConnectionManager != nil { - if defaultConn, exists := mongoConnectionManager.GetDefaultConnection(); exists { - s.dependencies.MongoManager = defaultConn - } - } + 
s.setConnectionDefaults(postgresConnectionManager, mongoConnectionManager) - // 2. Init Middleware (synchronous, lightweight) s.logger.Info("Initializing Middleware...") middleware.InitMiddlewares(s.echo, middleware.Config{ AuthType: s.config.Auth.Type, Logger: s.logger, }) - // Add encryption middleware if enabled if s.config.Encryption.Enabled { s.logger.Info("Initializing Encryption Middleware...") s.echo.Use(middleware.EncryptionMiddleware(s.config, s.logger)) } - // 3. Init Services (phased: independent first, then infrastructure-dependent) s.logger.Info("Booting Services...") serviceRegistry := registry.NewServiceRegistry(s.logger) - // Health Check Endpoint with infrastructure status - s.echo.GET("/health", func(c echo.Context) error { - health := map[string]interface{}{ - "status": "ok", - "server_ready": true, - "infrastructure": s.infraInitManager.GetStatus(), - "initialization_progress": s.infraInitManager.GetInitializationProgress(), - } - return response.Success(c, health) - }) - - // Infrastructure status endpoint - s.echo.GET("/health/infrastructure", func(c echo.Context) error { - status := s.infraInitManager.GetStatus() - return response.Success(c, status) - }) - - // Restart Endpoint (Maintenance) - s.echo.POST("/restart", func(c echo.Context) error { - go func() { - time.Sleep(500 * time.Millisecond) - os.Exit(1) - }() - return response.Success(c, map[string]string{"status": "restarting", "message": "Service is restarting..."}) - }) - - // Auto-discover and register all services - s.logger.Info("Auto-discovering services...") + s.registerHealthEndpoints() services := registry.AutoDiscoverServices(s.config, s.logger, s.dependencies) - // Register services with the registry for _, service := range services { serviceRegistry.Register(service) } @@ -169,33 +111,15 @@ func (s *Server) Start() error { s.logger.Warn("No services registered!") } - // Boot all services serviceRegistry.Boot(s.echo) s.logger.Info("All services boot successfully, ready to 
start monitoring") - // 4. Start Monitoring (if enabled) - after all services are registered if s.config.Monitoring.Enabled { - // Dynamic Service List Generation - var servicesList []monitoring.ServiceInfo - for _, srv := range serviceRegistry.GetServices() { - // Prepend /api/v1 to endpoints - var fullEndpoints []string - for _, endp := range srv.Endpoints() { - fullEndpoints = append(fullEndpoints, "/api/v1"+endp) - } - - servicesList = append(servicesList, monitoring.ServiceInfo{ - Name: srv.Name(), - StructName: reflect.TypeOf(srv).Elem().String(), - Active: srv.Enabled(), - Endpoints: fullEndpoints, - }) - } + servicesList := s.buildServicesList(serviceRegistry) go monitoring.Start(s.config.Monitoring, s.config, s, s.broadcaster, redisManager, s.dependencies.PostgresManager, postgresConnectionManager, s.dependencies.MongoManager, mongoConnectionManager, kafkaManager, cronManager, servicesList, s.logger) s.logger.Info("Monitoring interface started", "port", s.config.Monitoring.Port, "services_count", len(servicesList)) } - // 5. 
Start HTTP Server immediately (doesn't wait for infrastructure) port := s.config.Server.Port s.logger.Info("HTTP server starting immediately", "port", port, "env", s.config.App.Env) s.logger.Info("Infrastructure components initializing in background...") @@ -203,23 +127,81 @@ func (s *Server) Start() error { return s.echo.Start(":" + port) } +func (s *Server) setConnectionDefaults(postgresConnectionManager *infrastructure.PostgresConnectionManager, mongoConnectionManager *infrastructure.MongoConnectionManager) { + if postgresConnectionManager != nil { + if defaultConn, exists := postgresConnectionManager.GetDefaultConnection(); exists { + s.dependencies.PostgresManager = defaultConn + } + } + if mongoConnectionManager != nil { + if defaultConn, exists := mongoConnectionManager.GetDefaultConnection(); exists { + s.dependencies.MongoManager = defaultConn + } + } +} + +func (s *Server) registerHealthEndpoints() { + s.echo.GET("/health", func(c echo.Context) error { + return response.Success(c, map[string]interface{}{ + "status": "ok", + "server_ready": true, + "infrastructure": s.infraInitManager.GetStatus(), + "initialization_progress": s.infraInitManager.GetInitializationProgress(), + }) + }) + + s.echo.GET("/health/infrastructure", func(c echo.Context) error { + return response.Success(c, s.infraInitManager.GetStatus()) + }) + + s.echo.POST("/restart", func(c echo.Context) error { + go func() { + time.Sleep(500 * time.Millisecond) + os.Exit(1) + }() + return response.Success(c, map[string]string{"status": "restarting", "message": "Service is restarting..."}) + }) +} + +func (s *Server) buildServicesList(serviceRegistry *registry.ServiceRegistry) []monitoring.ServiceInfo { + var servicesList []monitoring.ServiceInfo + for _, srv := range serviceRegistry.GetServices() { + var fullEndpoints []string + for _, endp := range srv.Endpoints() { + fullEndpoints = append(fullEndpoints, "/api/v1"+endp) + } + + servicesList = append(servicesList, monitoring.ServiceInfo{ + 
Name: srv.Name(), + StructName: reflect.TypeOf(srv).Elem().String(), + Active: srv.Enabled(), + Endpoints: fullEndpoints, + }) + } + return servicesList +} + // GetStatus satisfies monitoring.StatusProvider func (s *Server) GetStatus() map[string]interface{} { diskStats, _ := utils.GetDiskUsage() netStats, _ := utils.GetNetworkInfo() + checkEnabled := func(enabled bool, manager interface{}) bool { + return enabled && s.dependencies != nil && manager != nil + } + infra := map[string]bool{ - "redis": s.config.Redis.Enabled && s.dependencies != nil && s.dependencies.RedisManager != nil, - "kafka": s.config.Kafka.Enabled && s.dependencies != nil && s.dependencies.KafkaManager != nil, - "postgres": (s.config.Postgres.Enabled || s.config.PostgresMultiConfig.Enabled) && (s.dependencies != nil && s.dependencies.PostgresManager != nil), - "mongo": (s.config.Mongo.Enabled || s.config.MongoMultiConfig.Enabled) && (s.dependencies != nil && s.dependencies.MongoManager != nil), - "grafana": s.config.Grafana.Enabled && s.dependencies != nil && s.dependencies.GrafanaManager != nil, - "cron": s.config.Cron.Enabled && s.dependencies != nil && s.dependencies.CronManager != nil, + "redis": checkEnabled(s.config.Redis.Enabled, s.dependencies.RedisManager), + "kafka": checkEnabled(s.config.Kafka.Enabled, s.dependencies.KafkaManager), + "postgres": checkEnabled(s.config.Postgres.Enabled || s.config.PostgresMultiConfig.Enabled, s.dependencies.PostgresManager), + "mongo": checkEnabled(s.config.Mongo.Enabled || s.config.MongoMultiConfig.Enabled, s.dependencies.MongoManager), + "grafana": checkEnabled(s.config.Grafana.Enabled, s.dependencies.GrafanaManager), + "cron": checkEnabled(s.config.Cron.Enabled, s.dependencies.CronManager), } return map[string]interface{}{ "version": "1.0.0", - "services": s.config.Services, // Dynamic map from config + "services": s.config.Services, "infrastructure": infra, "system": map[string]interface{}{ "disk": diskStats, @@ -232,85 +214,50 @@ func (s *Server) 
GetStatus() map[string]interface{} { func (s *Server) Shutdown(ctx context.Context, logger *logger.Logger) error { logger.Info("Starting graceful shutdown of infrastructure...") - // Force shutdown when more 10s go func() { - warnTimeout := "Maximum shutdown time is 20s, force shutdown when timeout." - warnForce := "Graceful shutdown timed out, force shutdown." - duration := 10 * time.Second - + time.Sleep(10 * time.Second) if logger != nil { - logger.Warn(warnTimeout) - time.Sleep(duration) - logger.Fatal(warnForce, nil) + logger.Warn("Maximum shutdown time is 20s, force shutdown when timeout.") + logger.Fatal("Graceful shutdown timed out, force shutdown.", nil) } - - fmt.Println(warnTimeout) - time.Sleep(duration) + fmt.Println("Maximum shutdown time is 20s, force shutdown when timeout.") os.Exit(1) - }() - // Stop async initialization manager if s.infraInitManager != nil { logger.Info("Stopping async infrastructure initialization manager...") - // Note: InfraInitManager doesn't have a Close method, but we can signal completion } - // Shutdown infrastructure components in reverse order of initialization var shutdownErrors []error - // 1. 
Cron Manager - if s.dependencies != nil && s.dependencies.CronManager != nil { - logger.Info("Shutting down Cron Manager...") - if err := s.dependencies.CronManager.Close(); err != nil { - shutdownErrors = append(shutdownErrors, fmt.Errorf("cron manager shutdown error: %w", err)) - logger.Error("Error shutting down Cron Manager", err) - } else { - logger.Info("Cron Manager shut down successfully") + shutdownComponent := func(name string, closer interface{}) { + if closer == nil { + return + } + logger.Info("Shutting down " + name + "...") + if closerCloser, ok := closer.(interface{ Close() error }); ok { + if err := closerCloser.Close(); err != nil { + shutdownErrors = append(shutdownErrors, fmt.Errorf("%s shutdown error: %w", name, err)) + logger.Error("Error shutting down "+name, err) + } else { + logger.Info(name + " shut down successfully") + } } } - // 2. MongoDB connections - need to get from connection manager - // Note: We don't have direct access to connection managers anymore, - // but they should be closed by the infra init manager + shutdownComponent("Cron Manager", s.dependencies.CronManager) logger.Info("MongoDB connections will be closed by infrastructure manager") - - // 3. PostgreSQL connections - need to get from connection manager - // Note: We don't have direct access to connection managers anymore, - // but they should be closed by the infra init manager logger.Info("PostgreSQL connections will be closed by infrastructure manager") + shutdownComponent("Kafka Manager", s.dependencies.KafkaManager) + shutdownComponent("Redis Manager", s.dependencies.RedisManager) - // 4. 
Kafka Manager - if s.dependencies != nil && s.dependencies.KafkaManager != nil { - logger.Info("Shutting down Kafka Manager...") - if err := s.dependencies.KafkaManager.Close(); err != nil { - shutdownErrors = append(shutdownErrors, fmt.Errorf("kafka manager shutdown error: %w", err)) - logger.Error("Error shutting down Kafka Manager", err) - } else { - logger.Info("Kafka Manager shut down successfully") - } - } - - // 5. Redis Manager - if s.dependencies != nil && s.dependencies.RedisManager != nil { - logger.Info("Shutting down Redis Manager...") - if err := s.dependencies.RedisManager.Close(); err != nil { - shutdownErrors = append(shutdownErrors, fmt.Errorf("redis manager shutdown error: %w", err)) - logger.Error("Error shutting down Redis Manager", err) - } else { - logger.Info("Redis Manager shut down successfully") - } - } - - // Log shutdown summary if len(shutdownErrors) > 0 { logger.Warn("Graceful shutdown completed with errors", "error_count", len(shutdownErrors)) for _, err := range shutdownErrors { logger.Error("Shutdown error", err) } return fmt.Errorf("shutdown completed with %d errors", len(shutdownErrors)) - } else { - logger.Info("Graceful shutdown completed successfully") - return nil } + logger.Info("Graceful shutdown completed successfully") + return nil } From f26f120495bc595949072b70828331a935a5c828 Mon Sep 17 00:00:00 2001 From: "Gab." Date: Thu, 26 Mar 2026 19:40:15 +0700 Subject: [PATCH 11/18] refactor: simplify main function and implement dependency injection pattern Reorganized the application initialization to use a cleaner dependency injection approach. Removed the complex step-by-step initialization pattern and replaced it with a more straightforward structure using ConfigManager and Application types. This improves code readability and maintainability while preserving all existing functionality. 
--- cmd/app/application.go | 315 +++++++++++++++++++ cmd/app/config_manager.go | 177 +++++++++++ cmd/app/constants.go | 160 ++++++++++ cmd/app/main.go | 493 +----------------------------- docs_wiki/blueprint/blueprint.txt | 204 ++++++++++--- scripts/build/build.go | 2 +- 6 files changed, 822 insertions(+), 529 deletions(-) create mode 100644 cmd/app/application.go create mode 100644 cmd/app/config_manager.go create mode 100644 cmd/app/constants.go diff --git a/cmd/app/application.go b/cmd/app/application.go new file mode 100644 index 0000000..1ea319f --- /dev/null +++ b/cmd/app/application.go @@ -0,0 +1,315 @@ +package main + +import ( + "context" + "fmt" + "io" + "os" + "os/signal" + "stackyard/config" + "stackyard/internal/monitoring" + "stackyard/internal/server" + "stackyard/pkg/logger" + "stackyard/pkg/tui" + "stackyard/pkg/utils" + "syscall" + "time" +) + +// Application represents the main application with all its dependencies +type Application struct { + configManager *ConfigManager + config *config.Config + logger *logger.Logger + broadcaster *monitoring.LogBroadcaster + bannerText string +} + +// NewApplication creates a new application instance +func NewApplication(configManager *ConfigManager) *Application { + return &Application{ + configManager: configManager, + } +} + +// Run executes the application lifecycle +func (app *Application) Run() error { + // Clear the terminal screen for a fresh start + utils.ClearScreen() + + // Execute initialization steps + steps := []AppStep{ + {"Loading configuration", app.loadConfigStep}, + {"Validating configuration", app.validateConfigStep}, + {"Loading banner", app.loadBannerStep}, + {"Checking port availability", app.checkPortStep}, + {"Initializing logger", app.initLoggerStep}, + {"Initializing broadcaster", app.initBroadcasterStep}, + {"Starting application", app.startAppStep}, + } + + ctx := &AppContext{ + Timestamp: time.Now().Format("20060102_150405"), + ConfigURL: app.configManager.configURL, + } + + if 
err := executeSteps(ctx, steps); err != nil { + return fmt.Errorf("%s: %w", ErrStepFailed, err) + } + + return nil +} + +// executeSteps executes the provided steps in sequence with error handling +func executeSteps(ctx *AppContext, steps []AppStep) error { + for i, step := range steps { + stepNum := fmt.Sprintf("%d/%d", i+1, len(steps)) + fmt.Printf("[%s] %s\n", stepNum, step.Name) + + if err := step.Fn(ctx); err != nil { + return fmt.Errorf("step failed: %w", err) + } + } + return nil +} + +// Step functions for the initialization process + +// loadConfigStep loads configuration from local file or URL +func (app *Application) loadConfigStep(ctx *AppContext) error { + cfg, err := app.configManager.LoadConfig() + if err != nil { + return err + } + app.config = cfg + return nil +} + +// validateConfigStep validates the loaded configuration +func (app *Application) validateConfigStep(ctx *AppContext) error { + return app.configManager.ValidateConfig(app.config) +} + +// loadBannerStep loads banner text from file if configured +func (app *Application) loadBannerStep(ctx *AppContext) error { + bannerText, err := app.configManager.LoadBanner(app.config) + if err != nil { + return err + } + app.bannerText = bannerText + return nil +} + +// checkPortStep checks port availability +func (app *Application) checkPortStep(ctx *AppContext) error { + return utils.CheckPortAvailability(app.config.Server.Port, app.config.Monitoring.Port, app.config.Monitoring.Enabled) +} + +// initLoggerStep initializes the logger +func (app *Application) initLoggerStep(ctx *AppContext) error { + if app.config.App.EnableTUI { + // For TUI mode, logger will be initialized later when we have the broadcaster + return nil + } + + // For console mode, create a regular logger + app.logger = logger.New(app.config.App.Debug, nil) + app.logger.Info("Starting Application", "name", app.config.App.Name, "env", app.config.App.Env) + app.logger.Info("TUI mode disabled, using traditional console logging") + 
app.logger.Info("Initializing services...") + + return nil +} + +// initBroadcasterStep initializes the log broadcaster +func (app *Application) initBroadcasterStep(ctx *AppContext) error { + app.broadcaster = monitoring.NewLogBroadcaster() + return nil +} + +// startAppStep starts the application based on TUI mode +func (app *Application) startAppStep(ctx *AppContext) error { + if app.config.App.EnableTUI { + app.runWithTUI() + } else { + app.runWithConsole() + } + return nil +} + +// runWithTUI runs the application with fancy TUI interface +func (app *Application) runWithTUI() { + // Configure monitoring port for TUI + if !app.config.Monitoring.Enabled { + app.config.Monitoring.Port = "disabled" + } + + // Setup TUI configuration + tuiConfig := tui.StartupConfig{ + AppName: app.config.App.Name, + AppVersion: app.config.App.Version, + Banner: app.bannerText, + Port: app.config.Server.Port, + MonitorPort: app.config.Monitoring.Port, + Env: app.config.App.Env, + IdleSeconds: app.config.App.StartupDelay, + } + + // Create service initialization queue + initQueue := app.configManager.CreateServiceQueue(app.config) + + // Convert to tui.ServiceInit + tuiInitQueue := make([]tui.ServiceInit, len(initQueue)) + for i, svc := range initQueue { + tuiInitQueue[i] = tui.ServiceInit{ + Name: svc.Name, + Enabled: svc.Enabled, + InitFunc: svc.InitFunc, + } + } + + // Run the boot sequence TUI + _, _ = tui.RunBootSequence(tuiConfig, tuiInitQueue) + + // Create and start Live TUI + liveTUI := app.createLiveTUI() + liveTUI.Start() + + // Initialize logger with TUI output + multiWriter := io.MultiWriter(liveTUI, app.broadcaster) + app.logger = logger.NewQuiet(app.config.App.Debug, multiWriter) + + // Add initial logs + liveTUI.AddLog(LogLevelInfo, "Server starting on port "+app.config.Server.Port) + liveTUI.AddLog(LogLevelInfo, "Environment: "+app.config.App.Env) + + // Start server + srv := server.New(app.config, app.logger, app.broadcaster) + go func() { + 
liveTUI.AddLog(LogLevelInfo, "HTTP server listening...") + if err := srv.Start(); err != nil { + liveTUI.AddLog(LogLevelFatal, "Server error: "+err.Error()) + } + }() + + // Wait for server to start + time.Sleep(StartupDelay) + liveTUI.AddLog(LogLevelInfo, "Server ready at http://localhost:"+app.config.Server.Port) + if app.config.Monitoring.Enabled { + liveTUI.AddLog(LogLevelInfo, "Monitoring at http://localhost:"+app.config.Monitoring.Port) + } + + // Handle shutdown + app.handleShutdown(liveTUI, srv) +} + +// runWithConsole runs the application with traditional console logging +func (app *Application) runWithConsole() { + // Print banner to console + if app.bannerText != "" { + fmt.Print(ColorPurple) + fmt.Println(app.bannerText) + fmt.Print(ColorReset) + } + + // Initialize logger + app.logger = logger.New(app.config.App.Debug, app.broadcaster) + + // Log startup information + app.logger.Info("Starting Application", "name", app.config.App.Name, "env", app.config.App.Env) + app.logger.Info("TUI mode disabled, using traditional console logging") + app.logger.Info("Initializing services...") + + // Log all services + app.logAllServices() + + // Start server + srv := server.New(app.config, app.logger, app.broadcaster) + go func() { + app.logger.Info("HTTP server listening", "port", app.config.Server.Port) + if err := srv.Start(); err != nil { + app.logger.Fatal("Server error", err) + } + }() + + // Wait for server to start + time.Sleep(StartupDelay) + app.logger.Info("Server ready", "url", "http://localhost:"+app.config.Server.Port) + if app.config.Monitoring.Enabled { + time.Sleep(StartupDelay) + app.logger.Info("Monitoring dashboard", "url", "http://localhost:"+app.config.Monitoring.Port) + } + + // Handle shutdown + app.handleConsoleShutdown(srv) +} + +// createLiveTUI creates and configures the Live TUI +func (app *Application) createLiveTUI() *tui.LiveTUI { + return tui.NewLiveTUI(tui.LiveConfig{ + AppName: app.config.App.Name, + AppVersion: 
app.config.App.Version, + Banner: app.bannerText, + Port: app.config.Server.Port, + MonitorPort: app.config.Monitoring.Port, + Env: app.config.App.Env, + OnShutdown: utils.TriggerShutdown, + }) +} + +// handleShutdown handles graceful shutdown for TUI mode +func (app *Application) handleShutdown(liveTUI *tui.LiveTUI, srv *server.Server) { + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + + select { + case <-sigChan: + liveTUI.AddLog(LogLevelWarn, "Shutting down...") + srv.Shutdown(context.Background(), app.logger) + case <-utils.ShutdownChan: + liveTUI.AddLog(LogLevelWarn, "Shutting down...") + srv.Shutdown(context.Background(), app.logger) + } + + liveTUI.Stop() + time.Sleep(ShutdownDelay) + os.Exit(0) +} + +// handleConsoleShutdown handles graceful shutdown for console mode +func (app *Application) handleConsoleShutdown(srv *server.Server) { + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + <-sigChan + + app.logger.Warn("Shutting down...") + srv.Shutdown(context.Background(), app.logger) + time.Sleep(ShutdownDelay) + os.Exit(0) +} + +// logAllServices logs the status of all services +func (app *Application) logAllServices() { + // Log infrastructure services + serviceConfigs := app.configManager.GetServiceConfigs(app.config) + for _, svc := range serviceConfigs { + app.logServiceStatus(svc.Name, svc.Enabled) + } + + // Log application services + for name, enabled := range app.config.Services { + app.logServiceStatus("Service: "+name, enabled) + } + + // Log monitoring + app.logServiceStatus(ServiceMonitoringName, app.config.Monitoring.Enabled) +} + +// logServiceStatus logs whether a service is enabled or skipped +func (app *Application) logServiceStatus(name string, enabled bool) { + if enabled { + app.logger.Info("Service initialized", "service", name, "status", ServiceStatusEnabled.String()) + } else { + app.logger.Debug("Service skipped", "service", name, "status", 
ServiceStatusDisabled.String()) + } +} diff --git a/cmd/app/config_manager.go b/cmd/app/config_manager.go new file mode 100644 index 0000000..cb2adbd --- /dev/null +++ b/cmd/app/config_manager.go @@ -0,0 +1,177 @@ +package main + +import ( + "fmt" + "net/url" + "os" + "path/filepath" + "stackyard/config" + "stackyard/pkg/utils" +) + +// ConfigManager handles all configuration loading and validation +type ConfigManager struct { + configURL string +} + +// NewConfigManager creates a new configuration manager +func NewConfigManager(configURL string) *ConfigManager { + return &ConfigManager{ + configURL: configURL, + } +} + +// LoadConfig loads configuration from local file or URL +func (cm *ConfigManager) LoadConfig() (*config.Config, error) { + if cm.configURL != "" { + return cm.loadConfigFromURL(cm.configURL) + } + return cm.loadConfigFromFile() +} + +// loadConfigFromURL loads configuration from a URL +func (cm *ConfigManager) loadConfigFromURL(configURL string) (*config.Config, error) { + fmt.Printf("Loading config from URL: %s\n", configURL) + + // Validate URL format + if _, err := url.ParseRequestURI(configURL); err != nil { + return nil, fmt.Errorf("%s: %w", ErrInvalidConfigURLFormat, err) + } + + // Load config from URL + if err := utils.LoadConfigFromURL(configURL); err != nil { + return nil, fmt.Errorf("failed to load config from URL: %w", err) + } + + // Parse the loaded configuration + cfg, err := config.LoadConfigWithURL(configURL) + if err != nil { + return nil, fmt.Errorf("failed to parse config from URL: %w", err) + } + + return cfg, nil +} + +// loadConfigFromFile loads configuration from local file +func (cm *ConfigManager) loadConfigFromFile() (*config.Config, error) { + cfg, err := config.LoadConfig() + if err != nil { + return nil, fmt.Errorf("failed to load config: %w", err) + } + return cfg, nil +} + +// ValidateConfig validates the loaded configuration +func (cm *ConfigManager) ValidateConfig(cfg *config.Config) error { + // Check if web 
folder exists, if not, disable web monitoring + if _, err := os.Stat(WebFolderPath); os.IsNotExist(err) { + fmt.Printf("%s %s%s\n", ColorYellow, ErrWebFolderNotFound, ColorReset) + cfg.Monitoring.Enabled = false + } + + // Validate port availability + if err := utils.CheckPortAvailability(cfg.Server.Port, cfg.Monitoring.Port, cfg.Monitoring.Enabled); err != nil { + return fmt.Errorf("%s: %w", ErrPortError, err) + } + + return nil +} + +// LoadBanner loads banner text from file if configured +func (cm *ConfigManager) LoadBanner(cfg *config.Config) (string, error) { + if cfg.App.BannerPath == "" { + return "", nil + } + + bannerPath := cfg.App.BannerPath + if !filepath.IsAbs(bannerPath) { + bannerPath = filepath.Join(".", bannerPath) + } + + banner, err := os.ReadFile(bannerPath) + if err != nil { + // Return empty string if banner file doesn't exist or can't be read + return "", nil + } + + return string(banner), nil +} + +// GetServiceConfigs returns a unified list of all service configurations +func (cm *ConfigManager) GetServiceConfigs(cfg *config.Config) []ServiceConfig { + return []ServiceConfig{ + {Name: ServiceGrafanaName, Enabled: cfg.Grafana.Enabled}, + {Name: ServiceMinIOName, Enabled: cfg.Monitoring.MinIO.Enabled}, + {Name: ServiceRedisCacheName, Enabled: cfg.Redis.Enabled}, + {Name: ServiceKafkaName, Enabled: cfg.Kafka.Enabled}, + {Name: ServicePostgreSQLName, Enabled: cfg.Postgres.Enabled}, + {Name: ServiceMongoDBName, Enabled: cfg.Mongo.Enabled}, + {Name: ServiceCronName, Enabled: cfg.Cron.Enabled}, + {Name: ServiceExternalName, Enabled: len(cfg.Monitoring.External.Services) > 0}, + } +} + +// CreateServiceQueue creates the service initialization queue for TUI +func (cm *ConfigManager) CreateServiceQueue(cfg *config.Config) []ServiceInit { + serviceConfigs := cm.GetServiceConfigs(cfg) + + initQueue := []ServiceInit{ + {Name: ServiceConfigName, Enabled: true, InitFunc: nil}, + } + + // Add infrastructure services + for _, svc := range serviceConfigs { + 
initQueue = append(initQueue, ServiceInit{ + Name: svc.Name, Enabled: svc.Enabled, InitFunc: nil, + }) + } + + initQueue = append(initQueue, ServiceInit{Name: ServiceMiddlewareName, Enabled: true, InitFunc: nil}) + + // Add application services + for name, enabled := range cfg.Services { + initQueue = append(initQueue, ServiceInit{Name: "Service: " + name, Enabled: enabled, InitFunc: nil}) + } + + // Add monitoring last + initQueue = append(initQueue, ServiceInit{Name: ServiceMonitoringName, Enabled: cfg.Monitoring.Enabled, InitFunc: nil}) + + return initQueue +} + +// ValidateStartupDelay validates the startup delay configuration +func (cm *ConfigManager) ValidateStartupDelay(delay int) error { + if delay < MinStartupDelay || delay > MaxStartupDelay { + return fmt.Errorf("startup delay must be between %d and %d seconds", MinStartupDelay, MaxStartupDelay) + } + return nil +} + +// ValidatePort validates a port number +func (cm *ConfigManager) ValidatePort(port string) error { + // Basic validation - port should be numeric and within valid range + // This is a simple validation; more comprehensive validation could be added + if port == "" { + return fmt.Errorf("port cannot be empty") + } + return nil +} + +// GetDefaultConfig returns a default configuration +func (cm *ConfigManager) GetDefaultConfig() *config.Config { + return &config.Config{ + App: config.AppConfig{ + Name: DefaultAppName, + Version: DefaultVersion, + Env: DefaultEnv, + BannerPath: DefaultBannerPath, + StartupDelay: DefaultStartupDelay, + }, + Server: config.ServerConfig{ + Port: DefaultServerPort, + }, + Monitoring: config.MonitoringConfig{ + Port: DefaultMonitoringPort, + }, + } +} diff --git a/cmd/app/constants.go b/cmd/app/constants.go new file mode 100644 index 0000000..ad96e51 --- /dev/null +++ b/cmd/app/constants.go @@ -0,0 +1,160 @@ +package main + +import ( + "time" +) + +// Forward declarations to avoid circular imports +type Config struct{} +type Logger struct{} +type LogBroadcaster 
struct{} + +// Application constants +const ( + AppName = "stackyard" + DefaultAppName = "Golang App" + DefaultVersion = "1.0.0" + DefaultEnv = "development" + + // Default configuration values + DefaultServerPort = "8080" + DefaultMonitoringPort = "8081" + DefaultStartupDelay = 15 // seconds + DefaultBannerPath = "banner.txt" + + // File paths + WebFolderPath = "web" + + // Service names for logging and initialization + ServiceConfigName = "Configuration" + ServiceMiddlewareName = "Middleware" + ServiceMonitoringName = "Monitoring" + ServiceGrafanaName = "Grafana" + ServiceMinIOName = "MinIO" + ServiceRedisCacheName = "Redis Cache" + ServiceKafkaName = "Kafka Messaging" + ServicePostgreSQLName = "PostgreSQL" + ServiceMongoDBName = "MongoDB" + ServiceCronName = "Cron Scheduler" + ServiceExternalName = "External Services" + + // Color codes for TUI output + ColorPurple = "\033[35m" + ColorReset = "\033[0m" + ColorYellow = "\033[33m" + + // Error messages + ErrInvalidConfigURLFormat = "invalid config URL format" + ErrPortError = "port error" + ErrStepFailed = "step failed" + ErrWebFolderNotFound = "web folder not found, disabling web monitoring" + + // Configuration keys + ConfigKeyWebFolder = "web" +) + +// ServiceInit represents a service in the initialization queue +type ServiceInit struct { + Name string + Enabled bool + InitFunc func() error +} + +// ServiceConfig represents a service with its name and enabled status +type ServiceConfig struct { + Name string + Enabled bool +} + +// AppContext holds the application state throughout initialization +type AppContext struct { + Config *Config + Logger *Logger + Broadcaster *LogBroadcaster + BannerText string + Timestamp string + ConfigURL string +} + +// AppStep represents a single step in the application initialization process +type AppStep struct { + Name string + Fn func(*AppContext) error +} + +// OutputMode represents the output mode for the application +type OutputMode int + +const ( + OutputModeTUI OutputMode 
= iota + OutputModeConsole +) + +// String returns the string representation of the output mode +func (m OutputMode) String() string { + switch m { + case OutputModeTUI: + return "TUI" + case OutputModeConsole: + return "Console" + default: + return "Unknown" + } +} + +// ServiceStatus represents the status of a service +type ServiceStatus int + +const ( + ServiceStatusEnabled ServiceStatus = iota + ServiceStatusDisabled + ServiceStatusSkipped +) + +// String returns the string representation of the service status +func (s ServiceStatus) String() string { + switch s { + case ServiceStatusEnabled: + return "enabled" + case ServiceStatusDisabled: + return "disabled" + case ServiceStatusSkipped: + return "skipped" + default: + return "unknown" + } +} + +// Duration constants for timeouts and delays +const ( + StartupDelay = 500 * time.Millisecond + ShutdownDelay = 100 * time.Millisecond + PortCheckTimeout = 5 * time.Second + GracefulShutdownTimeout = 30 * time.Second +) + +// Log levels for structured logging +const ( + LogLevelDebug = "debug" + LogLevelInfo = "info" + LogLevelWarn = "warn" + LogLevelError = "error" + LogLevelFatal = "fatal" +) + +// Service types for categorization +const ( + ServiceTypeInfrastructure = "infrastructure" + ServiceTypeApplication = "application" + ServiceTypeMonitoring = "monitoring" +) + +// Configuration validation constants +const ( + MinStartupDelay = 0 + MaxStartupDelay = 300 // 5 minutes + MinPortNumber = 1 + MaxPortNumber = 65535 + MaxPhotoSizeMB = 10 + DefaultPhotoSizeMB = 5 +) diff --git a/cmd/app/main.go b/cmd/app/main.go index ef13a05..ea1824d 100644 --- a/cmd/app/main.go +++ b/cmd/app/main.go @@ -1,264 +1,30 @@ package main import ( - "context" "flag" "fmt" - "io" "net/url" "os" - "os/signal" - "stackyard/config" - "stackyard/internal/monitoring" - "stackyard/internal/server" - "stackyard/pkg/logger" - "stackyard/pkg/tui" - "stackyard/pkg/utils" - "syscall" - "time" ) -// AppContext holds the application state throughout 
initialization -type AppContext struct { - Config *config.Config - Logger *logger.Logger - Broadcaster *monitoring.LogBroadcaster - BannerText string - Timestamp string - ConfigURL string // Store the parsed config URL -} - -// AppStep represents a single step in the application initialization process -type AppStep struct { - Name string - Fn func(*AppContext) error -} - -// executeSteps executes the provided steps in sequence with error handling -func executeSteps(ctx *AppContext, steps []AppStep) error { - for i, step := range steps { - stepNum := fmt.Sprintf("%d/%d", i+1, len(steps)) - fmt.Printf("[%s] %s\n", stepNum, step.Name) - - if err := step.Fn(ctx); err != nil { - return fmt.Errorf("step failed: %w", err) - } - } - return nil -} - +// main is the entry point of the application func main() { - // Clear the terminal screen for a fresh start - utils.ClearScreen() - - // Parse flags once at the beginning + // Parse command line flags configURL := parseFlags() - // Create app context - ctx := &AppContext{ - Timestamp: time.Now().Format("20060102_150405"), - ConfigURL: configURL, - } + // Create configuration manager + configManager := NewConfigManager(configURL) - // Execute initialization steps - steps := []AppStep{ - {"Loading configuration", loadConfigStep}, - {"Validating configuration", validateConfigStep}, - {"Loading banner", loadBannerStep}, - {"Checking port availability", checkPortStep}, - {"Initializing logger", initLoggerStep}, - {"Initializing broadcaster", initBroadcasterStep}, - {"Starting application", startAppStep}, - } + // Create application with dependency injection + app := NewApplication(configManager) - if err := executeSteps(ctx, steps); err != nil { + // Run application with error handling + if err := app.Run(); err != nil { fmt.Printf("Fatal error: %v\n", err) os.Exit(1) } } -// runWithTUI runs the application with fancy TUI interface -func runWithTUI(cfg *config.Config, bannerText string, broadcaster *monitoring.LogBroadcaster) { - 
// Configure monitoring port for TUI - if !cfg.Monitoring.Enabled { - cfg.Monitoring.Port = "disabled" - } - - // Setup TUI configuration - tuiConfig := tui.StartupConfig{ - AppName: cfg.App.Name, - AppVersion: cfg.App.Version, - Banner: bannerText, - Port: cfg.Server.Port, - MonitorPort: cfg.Monitoring.Port, - Env: cfg.App.Env, - IdleSeconds: cfg.App.StartupDelay, - } - - // Create service initialization queue - initQueue := createServiceQueue(cfg) - - // Run the boot sequence TUI - _, _ = tui.RunBootSequence(tuiConfig, initQueue) - - // Create and start Live TUI - liveTUI := createLiveTUI(cfg, bannerText) - liveTUI.Start() - - // Initialize logger with TUI output - multiWriter := io.MultiWriter(liveTUI, broadcaster) - l := logger.NewQuiet(cfg.App.Debug, multiWriter) - - // Add initial logs - liveTUI.AddLog("info", "Server starting on port "+cfg.Server.Port) - liveTUI.AddLog("info", "Environment: "+cfg.App.Env) - - // Start server - srv := server.New(cfg, l, broadcaster) - go func() { - liveTUI.AddLog("info", "HTTP server listening...") - if err := srv.Start(); err != nil { - liveTUI.AddLog("fatal", "Server error: "+err.Error()) - } - }() - - // Wait for server to start - time.Sleep(500 * time.Millisecond) - liveTUI.AddLog("info", "Server ready at http://localhost:"+cfg.Server.Port) - if cfg.Monitoring.Enabled { - liveTUI.AddLog("info", "Monitoring at http://localhost:"+cfg.Monitoring.Port) - } - - // Handle shutdown - handleShutdown(liveTUI, srv, l) -} - -// exampleAferoUsage demonstrates how to use the Global Singleton Afero Manager -// This function is commented out as it's for demonstration purposes only -/* -func exampleAferoUsage() { - fmt.Println("=== Global Singleton Afero Manager Example ===") - - // Mock alias configuration - aliasMap := map[string]string{ - "config": "all:config.yaml", - "banner": "all:banner.txt", - "readme": "all:README.md", - "web-app": "all:web/monitoring/index.html", - } - - fmt.Println("Initializing Afero Manager...") - - 
fmt.Println("✓ Afero Manager initialized") - fmt.Println("✓ Development mode: CopyOnWriteFs (embed.FS + OS overrides)") - fmt.Println("✓ Production mode: ReadOnlyFs (embed.FS only)") - fmt.Println() - - // Show available aliases - fmt.Println("Available aliases:") - for alias, path := range aliasMap { - fmt.Printf(" - %s -> %s\n", alias, path) - } - fmt.Println() - - // Example of checking if files exist - fmt.Println("Checking file existence:") - for alias := range aliasMap { - exists := infrastructure.Exists(alias) - fmt.Printf(" - %s: %v\n", alias, exists) - } - fmt.Println() - - // Example of reading a file - fmt.Println("Reading banner file:") - if content, err := infrastructure.Read("banner"); err == nil { - fmt.Printf(" Content length: %d bytes\n", len(content)) - if len(content) > 100 { - fmt.Printf(" Preview: %s...\n", string(content[:100])) - } else { - fmt.Printf(" Content: %s\n", string(content)) - } - } else { - fmt.Printf(" Error reading file: %v\n", err) - } - fmt.Println() - - // Example of streaming a file - fmt.Println("Streaming README file:") - if stream, err := infrastructure.Stream("readme"); err == nil { - defer stream.Close() - content := make([]byte, 200) - n, err := stream.Read(content) - if err == nil || err == io.EOF { - fmt.Printf(" Read %d bytes from stream\n", n) - fmt.Printf(" Preview: %s...\n", string(content[:n])) - } - } else { - fmt.Printf(" Error streaming file: %v\n", err) - } - fmt.Println() - - // Show all configured aliases - fmt.Println("All configured aliases:") - aliases := infrastructure.GetAliases() - for alias, path := range aliases { - fmt.Printf(" - %s -> %s\n", alias, path) - } - fmt.Println() - - fmt.Println("=== Afero Manager Example Complete ===") - fmt.Println() -} -*/ - -// runWithConsole runs the application with traditional console logging -func runWithConsole(cfg *config.Config, bannerText string, broadcaster *monitoring.LogBroadcaster) { - // Print banner to console - if bannerText != "" { - 
fmt.Print("\033[35m") // Purple color - fmt.Println(bannerText) - fmt.Print("\033[0m") // Reset color - } - - // Initialize logger - l := logger.New(cfg.App.Debug, broadcaster) - - // Log startup information - l.Info("Starting Application", "name", cfg.App.Name, "env", cfg.App.Env) - l.Info("TUI mode disabled, using traditional console logging") - l.Info("Initializing services...") - - // Log all services - logAllServices(l, cfg) - - // Start server - srv := server.New(cfg, l, broadcaster) - go func() { - l.Info("HTTP server listening", "port", cfg.Server.Port) - if err := srv.Start(); err != nil { - l.Fatal("Server error", err) - } - }() - - // Wait for server to start - time.Sleep(500 * time.Millisecond) - l.Info("Server ready", "url", "http://localhost:"+cfg.Server.Port) - if cfg.Monitoring.Enabled { - time.Sleep(500 * time.Millisecond) - l.Info("Monitoring dashboard", "url", "http://localhost:"+cfg.Monitoring.Port) - } - - // Handle shutdown - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - <-sigChan - - l.Warn("Shutting down...") - srv.Shutdown(context.Background(), l) - time.Sleep(100 * time.Millisecond) - os.Exit(0) -} - // parseFlags parses command line flags using standard Go flag package func parseFlags() string { var configURL string @@ -276,246 +42,3 @@ func parseFlags() string { return configURL } - -// loadConfig loads configuration from local file or URL -func loadConfig(configURL string) *config.Config { - if configURL != "" { - fmt.Printf("Loading config from URL: %s\n", configURL) - if err := utils.LoadConfigFromURL(configURL); err != nil { - fmt.Printf("Failed to load config from URL: %s\n", err.Error()) - os.Exit(1) - } - - cfg, err := config.LoadConfigWithURL(configURL) - if err != nil { - panic("Failed to parse config from URL: " + err.Error()) - } - return cfg - } - - cfg, err := config.LoadConfig() - if err != nil { - panic("Failed to load config: " + err.Error()) - } - return cfg -} - -// 
loadBanner loads banner text from file if configured -func loadBanner(cfg *config.Config) string { - if cfg.App.BannerPath != "" { - banner, err := os.ReadFile(cfg.App.BannerPath) - if err == nil { - return string(banner) - } - } - return "" -} - -// createServiceQueue creates the service initialization queue for TUI -func createServiceQueue(cfg *config.Config) []tui.ServiceInit { - serviceConfigs := getServiceConfigs(cfg) - - initQueue := []tui.ServiceInit{ - {Name: "Configuration", Enabled: true, InitFunc: nil}, - } - - // Add infrastructure services - for _, svc := range serviceConfigs { - initQueue = append(initQueue, tui.ServiceInit{ - Name: svc.Name, Enabled: svc.Enabled, InitFunc: nil, - }) - } - - initQueue = append(initQueue, tui.ServiceInit{Name: "Middleware", Enabled: true, InitFunc: nil}) - - // Add application services - for name, enabled := range cfg.Services { - initQueue = append(initQueue, tui.ServiceInit{Name: "Service: " + name, Enabled: enabled, InitFunc: nil}) - } - - // Add monitoring last - initQueue = append(initQueue, tui.ServiceInit{Name: "Monitoring", Enabled: cfg.Monitoring.Enabled, InitFunc: nil}) - - return initQueue -} - -// createLiveTUI creates and configures the Live TUI -func createLiveTUI(cfg *config.Config, bannerText string) *tui.LiveTUI { - return tui.NewLiveTUI(tui.LiveConfig{ - AppName: cfg.App.Name, - AppVersion: cfg.App.Version, - Banner: bannerText, - Port: cfg.Server.Port, - MonitorPort: cfg.Monitoring.Port, - Env: cfg.App.Env, - OnShutdown: utils.TriggerShutdown, - }) -} - -// handleShutdown handles graceful shutdown for TUI mode -func handleShutdown(liveTUI *tui.LiveTUI, srv *server.Server, l *logger.Logger) { - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - - select { - case <-sigChan: - liveTUI.AddLog("warn", "Shutting down...") - srv.Shutdown(context.Background(), l) - case <-utils.ShutdownChan: - liveTUI.AddLog("warn", "Shutting down...") - 
srv.Shutdown(context.Background(), l) - } - - liveTUI.Stop() - time.Sleep(100 * time.Millisecond) - os.Exit(0) -} - -// logAllServices logs the status of all services -func logAllServices(l *logger.Logger, cfg *config.Config) { - // Log infrastructure services - serviceConfigs := getServiceConfigs(cfg) - for _, svc := range serviceConfigs { - logServiceStatus(l, svc.Name, svc.Enabled) - } - - // Log application services - for name, enabled := range cfg.Services { - logServiceStatus(l, "Service: "+name, enabled) - } - - // Log monitoring - logServiceStatus(l, "Monitoring", cfg.Monitoring.Enabled) -} - -// ServiceConfig represents a service with its name and enabled status -type ServiceConfig struct { - Name string - Enabled bool -} - -// getServiceConfigs returns a unified list of all service configurations -func getServiceConfigs(cfg *config.Config) []ServiceConfig { - return []ServiceConfig{ - {Name: "Grafana", Enabled: cfg.Grafana.Enabled}, - {Name: "MinIO", Enabled: cfg.Monitoring.MinIO.Enabled}, - {Name: "Redis Cache", Enabled: cfg.Redis.Enabled}, - {Name: "Kafka Messaging", Enabled: cfg.Kafka.Enabled}, - {Name: "PostgreSQL", Enabled: cfg.Postgres.Enabled}, - {Name: "MongoDB", Enabled: cfg.Mongo.Enabled}, - {Name: "Cron Scheduler", Enabled: cfg.Cron.Enabled}, - {Name: "External Services", Enabled: (len(cfg.Monitoring.External.Services) > 0)}, - } -} - -// logServiceStatus logs whether a service is enabled or skipped -func logServiceStatus(l *logger.Logger, name string, enabled bool) { - if enabled { - l.Info("Service initialized", "service", name, "status", "enabled") - } else { - l.Debug("Service skipped", "service", name, "status", "disabled") - } -} - -// Step functions for the initialization process - -// parseConfigStep parses command line flags -func parseConfigStep(ctx *AppContext) error { - var configURL string - flag.StringVar(&configURL, "c", "", "URL to load configuration from (YAML format)") - flag.Parse() - - // Validate URL if provided - if 
configURL != "" { - if _, err := url.ParseRequestURI(configURL); err != nil { - return fmt.Errorf("invalid config URL format: %v", err) - } - } - - // Store config URL in context for later use - // We'll need to modify the context to store this, but for now we'll handle it in loadConfigStep - return nil -} - -// loadConfigStep loads configuration from local file or URL -func loadConfigStep(ctx *AppContext) error { - // Use the config URL that was parsed in main and stored in context - configURL := ctx.ConfigURL - - cfg := loadConfig(configURL) - ctx.Config = cfg - return nil -} - -// validateConfigStep validates the loaded configuration -func validateConfigStep(ctx *AppContext) error { - cfg := ctx.Config - - // Check if "web" folder exists, if not, disable web monitoring - if _, err := os.Stat("web"); os.IsNotExist(err) { - fmt.Println("\033[33m 'web' folder not found, disabling web monitoring\033[0m") - cfg.Monitoring.Enabled = false - } - - // Additional validation can be added here - return nil -} - -// loadBannerStep loads banner text from file if configured -func loadBannerStep(ctx *AppContext) error { - cfg := ctx.Config - bannerText := loadBanner(cfg) - ctx.BannerText = bannerText - return nil -} - -// checkPortStep checks port availability -func checkPortStep(ctx *AppContext) error { - cfg := ctx.Config - if err := utils.CheckPortAvailability(cfg.Server.Port, cfg.Monitoring.Port, cfg.Monitoring.Enabled); err != nil { - return fmt.Errorf("port error: %s", err.Error()) - } - return nil -} - -// initLoggerStep initializes the logger -func initLoggerStep(ctx *AppContext) error { - cfg := ctx.Config - - // Initialize logger based on TUI mode - if cfg.App.EnableTUI { - // For TUI mode, we'll create a logger that writes to both TUI and broadcaster - // This will be handled in startAppStep when we have the broadcaster - ctx.Logger = nil // Will be initialized later - } else { - // For console mode, create a regular logger - ctx.Logger = logger.New(cfg.App.Debug, 
nil) - ctx.Logger.Info("Starting Application", "name", cfg.App.Name, "env", cfg.App.Env) - ctx.Logger.Info("TUI mode disabled, using traditional console logging") - ctx.Logger.Info("Initializing services...") - } - - return nil -} - -// initBroadcasterStep initializes the log broadcaster -func initBroadcasterStep(ctx *AppContext) error { - ctx.Broadcaster = monitoring.NewLogBroadcaster() - return nil -} - -// startAppStep starts the application based on TUI mode -func startAppStep(ctx *AppContext) error { - cfg := ctx.Config - broadcaster := ctx.Broadcaster - bannerText := ctx.BannerText - - // Start application based on TUI mode - if cfg.App.EnableTUI { - runWithTUI(cfg, bannerText, broadcaster) - } else { - runWithConsole(cfg, bannerText, broadcaster) - } - - return nil -} diff --git a/docs_wiki/blueprint/blueprint.txt b/docs_wiki/blueprint/blueprint.txt index e44a855..c26a481 100644 --- a/docs_wiki/blueprint/blueprint.txt +++ b/docs_wiki/blueprint/blueprint.txt @@ -27,6 +27,11 @@ Client → Handler → Request Binding → Validation → Business Logic → Res ### 2.2 Package Organization - **cmd/**: Application entry points + - **app/**: Main application with clean code structure + - **main.go**: Simplified entry point (30 lines) + - **constants.go**: Centralized constants and type definitions + - **config_manager.go**: Configuration loading and validation + - **application.go**: Application orchestration and lifecycle management - **config/**: Configuration management - **internal/**: Core application logic (private) - **middleware/**: HTTP middleware @@ -40,7 +45,6 @@ Client → Handler → Request Binding → Validation → Business Logic → Res - **response/**: Standardized API responses - **tui/**: Terminal User Interface - **infrastructure/**: External service integrations - - **logger/**: Logging utilities - **utils/**: Utility functions - **parameter.go**: Customizable parameter parsing system for command-line flags @@ -55,7 +59,7 @@ Client → Handler → Request Binding → 
Validation → Business Logic → Res ### 2.4 Step-by-Step Application Execution -The application follows a structured step-by-step execution pattern inspired by the build system: +The application follows a structured step-by-step execution pattern with clean code principles: **Execution Flow:** 1. **Configuration Loading** → Load and validate application configuration @@ -1411,20 +1415,36 @@ id, err := cron.AddJob("database_backup", "0 3 * * *", func() { }) ``` -## 9. BEST PRACTICES +## 9. CLEAN CODE PRINCIPLES + +### 9.1 Application Structure + +The application follows clean code principles with clear separation of concerns: -### 9.1 API Development +1. **Single Responsibility Principle** - Each component has one clear purpose +2. **Dependency Injection** - All dependencies injected through constructors +3. **Separation of Concerns** - Configuration, application logic, and entry point separated +4. **Consistent Error Handling** - Proper error propagation throughout +5. **Type Safety** - Strong typing with custom types and constants +6. **Testability** - Each component can be unit tested independently +7. **Maintainability** - Clear structure makes code easy to understand and modify -1. **Use response helpers** - Never manually construct responses -2. **Validate all requests** - Use built-in and custom validators -3. **Standardize error codes** - Make errors machine-readable -4. **Include meaningful messages** - Help developers debug -5. **Use pagination** - For all list endpoints -6. **Return appropriate status codes** - Follow HTTP standards -7. **Include timestamps** - All responses have Unix timestamps -8. 
**Keep responses consistent** - Same structure across endpoints +### 9.2 File Organization -### 9.2 Service Development +**cmd/app/**: Clean application structure +- **main.go**: Simplified entry point (30 lines) - creates application and runs it +- **constants.go**: Centralized constants and type definitions +- **config_manager.go**: Configuration loading and validation +- **application.go**: Application orchestration and lifecycle management + +**Key Benefits:** +- **Main Function**: Single responsibility - create application and run it +- **Configuration**: Centralized in ConfigManager with validation +- **Application Logic**: Encapsulated in Application struct +- **Constants**: All magic numbers and strings centralized +- **Types**: Clear type definitions for better type safety + +### 9.3 Service Development 1. **Implement Service interface** - Name, RegisterRoutes, Enabled, Endpoints 2. **Use dependency injection** - For infrastructure components @@ -1433,7 +1453,7 @@ id, err := cron.AddJob("database_backup", "0 3 * * *", func() { 5. **Handle errors gracefully** - Use response helpers 6. **Validate inputs** - Use request.Bind and validation tags -### 9.3 Configuration Management +### 9.4 Configuration Management 1. **Use config.yaml** - For all runtime configuration 2. 
**Default to enabled** - Services enabled by default if not specified @@ -1457,30 +1477,72 @@ docs_wiki/ ``` stackyard/ ├── Dockerfile # Multi-stage Docker configuration +├── .dockerignore # Docker ignore file +├── .gitignore # Git ignore file +├── banner.txt # Application banner +├── config.yaml # Application configuration +├── go.mod # Go module definition +├── go.sum # Go module checksums +├── LICENSE # Apache License 2.0 +├── README.md # Project documentation +├── resource_windows_386.syso # Windows 32-bit resources +├── resource_windows_amd64.syso # Windows 64-bit resources +├── resource_windows_arm.syso # Windows ARM resources +├── resource_windows_arm64.syso # Windows ARM64 resources +├── versioninfo.json # Windows version info ├── cmd/ -│ └── app/ -│ └── main.go # Application entry point +│ └── app/ # Clean application structure +│ ├── main.go # Simplified entry point (30 lines) +│ ├── constants.go # Centralized constants and types +│ ├── config_manager.go # Configuration loading and validation +│ └── application.go # Application orchestration and lifecycle ├── config/ -│ ├── config.go # Configuration management -│ └── config.yaml # Application configuration +│ └── config.go # Configuration management ├── docs_wiki/ # Simplified project documentation -│ ├── GETTING_STARTED.md # Quick start guide -│ ├── DEVELOPMENT.md # Development guide -│ ├── ARCHITECTURE.md # Technical overview -│ ├── REFERENCE.md # Complete technical reference -│ └── blueprint/ # Internal blueprint files -│ └── blueprint.txt # This comprehensive blueprint -├── internal/ +│ ├── ARCHITECTURE.md # Technical overview and design decisions +│ ├── DEVELOPMENT.md # Development guide for extending the app +│ ├── GETTING_STARTED.md # Quick start guide for new users +│ ├── REFERENCE.md # Complete technical reference (config, APIs, advanced) +│ ├── blueprint/ # Internal blueprint files +│ │ └── blueprint.txt # This comprehensive blueprint +│ └── examples/ # Code examples +│ └── response_examples.go 
# API response examples +├── internal/ # Core application logic (private) │ ├── middleware/ # HTTP middleware +│ │ ├── encryption.go # API encryption middleware +│ │ └── middleware.go # General middleware │ ├── monitoring/ # Monitoring system +│ │ ├── auth_handlers.go # Authentication handlers +│ │ ├── broadcaster.go # Log broadcasting +│ │ ├── handlers.go # Monitoring API handlers +│ │ ├── server.go # Monitoring HTTP server +│ │ ├── user_handlers.go # User management handlers +│ │ ├── database/ # Monitoring database +│ │ │ ├── db.go # Database connection +│ │ │ ├── models.go # Database models +│ │ │ └── repository.go # Database repository +│ │ ├── middleware/ # Monitoring middleware +│ │ │ └── obfuscator.go # API obfuscation middleware +│ │ └── session/ # Session management +│ │ ├── middleware.go # Session middleware +│ │ └── session.go # Session implementation │ ├── server/ # HTTP server +│ │ └── server.go # Main server implementation │ └── services/ # Business services -│ ├── services.go # Service interface -│ └── modules/ # Service implementations -├── pkg/ -│ ├── request/ # Request handling -│ ├── response/ # Standardized API responses -│ ├── tui/ # Terminal UI +│ ├── modules/ # Service implementations +│ │ ├── broadcast_service.go # Event streaming service +│ │ ├── cache_service.go # Cache service +│ │ ├── encryption_service.go # Encryption service +│ │ ├── grafana_service.go # Grafana integration service +│ │ ├── mongodb_service.go # MongoDB multi-tenant service +│ │ ├── multi_tenant_service.go # Multi-tenant service +│ │ ├── products_service.go # Products service +│ │ ├── tasks_service.go # Tasks service +│ │ └── users_service.go # Users service +│ └── services.go # Service interface +├── pkg/ # Reusable packages (public) +│ ├── cache/ # Caching utilities +│ │ └── cache.go # Cache implementation │ ├── infrastructure/ # External integrations │ │ ├── afero.go # Global Singleton Afero Manager │ │ ├── afero_test.go # Afero Manager tests @@ -1494,24 +1556,69 @@ 
stackyard/ │ │ ├── mongo.go # MongoDB multi-tenant support │ │ ├── postgres.go # PostgreSQL multi-tenant support │ │ ├── redis.go # Redis caching -│ │ └── system_monitor.go # System monitoring -│ ├── logger/ # Logging +│ │ ├── system_monitor.go # System monitoring +│ │ └── testdata/ # Test data +│ │ ├── config.yaml # Test configuration +│ │ ├── README.md # Test data documentation +│ │ └── test.txt # Test file +│ ├── interfaces/ # Interface definitions +│ │ └── service.go # Service interface +│ ├── logger/ # Logging utilities +│ │ └── logger.go # Logger implementation +│ ├── registry/ # Service registry +│ │ ├── dependencies.go # Dependency injection +│ │ └── registry.go # Service registration +│ ├── request/ # Request handling +│ │ └── request.go # Request validation and binding +│ ├── response/ # Standardized API responses +│ │ └── response.go # Response helpers +│ ├── tui/ # Terminal User Interface +│ │ ├── boot.go # Boot sequence TUI +│ │ ├── dashboard.go # Dashboard TUI +│ │ ├── live.go # Live logs TUI +│ │ ├── simple.go # Simple TUI +│ │ ├── startup.go # Startup TUI +│ │ ├── styles.go # TUI styling +│ │ └── template/ # TUI templates +│ │ └── dialog.go # Dialog components │ └── utils/ # Utility functions +│ ├── broadcast.go # Event broadcasting +│ ├── date.go # Date utilities +│ ├── io.go # I/O utilities +│ ├── numeric.go # Numeric utilities +│ ├── parameter.go # Parameter parsing +│ ├── strings.go # String utilities +│ └── system.go # System utilities ├── scripts/ # Build and utility scripts -│ ├── build.sh # Go binary build (Unix/Linux/macOS) -│ ├── build.bat # Go binary build (Windows) -│ ├── change_package.sh # Package name change (Unix/Linux/macOS) -│ ├── change_package.bat # Package name change (Windows) -│ ├── docker_build.sh # Docker image build (Unix/Linux/macOS) -│ └── docker_build.bat # Docker image build (Windows) -└── web/ # Web assets +│ ├── build/ # Build scripts +│ │ └── build.go # Go-based build script +│ └── docker/ # Docker scripts +│ └── 
docker_build.go # Docker build script +└── web/ # Web assets └── monitoring/ # Monitoring UI + ├── index.html # Main dashboard + ├── login.html # Login page + ├── assets/ # Static assets + │ ├── css/ # Stylesheets + │ │ └── style.css # Main stylesheet + │ ├── favicon/ # Favicon files + │ │ ├── apple-touch-icon.png + │ │ ├── favicon-96x96.png + │ │ ├── favicon.ico + │ │ ├── favicon.svg + │ │ ├── site.webmanifest + │ │ ├── web-app-manifest-192x192.png + │ │ └── web-app-manifest-512x512.png + │ └── js/ # JavaScript files + │ └── app.js # Main application script + └── uploads/ # Upload directory ``` ## 11. KEY FEATURES SUMMARY ### 11.1 Core Features +- **Clean Code Architecture**: Single responsibility, dependency injection, separation of concerns - **Modular Architecture**: Services can be enabled/disabled via config - **Multi-Tenant Database Support**: Multiple PostgreSQL connections with dynamic switching - **ORM Integration**: GORM with auto-migration for database operations @@ -1532,7 +1639,7 @@ stackyard/ - **GORM**: ORM for database operations - **Validator**: Request validation library - **Gopsutil**: System monitoring library -- **Clean Architecture**: Separation of concerns +- **Clean Architecture**: Separation of concerns with clean code principles - **Dependency Injection**: For service components - **Configuration Management**: YAML-based with defaults @@ -1545,6 +1652,7 @@ stackyard/ - **Configuration Validation**: At startup - **Security Features**: API obfuscation, proper error handling - **Scalability**: Modular design for horizontal scaling +- **Clean Code**: Maintainable, testable, and extensible codebase ## 12. GETTING STARTED @@ -1590,12 +1698,20 @@ go run cmd/app/main.go - Check logs for errors and debugging - Verify API responses with Postman/curl +5. 
**Clean Code Practices**: + - Follow single responsibility principle + - Use dependency injection for testability + - Keep functions focused and small + - Use meaningful names for variables and functions + - Add proper error handling and logging + ## 13. CONCLUSION -This project represents a sophisticated, production-ready Go boilerplate with comprehensive features for API development, service management, monitoring, and infrastructure integration. The modular architecture, standardized patterns, and extensive documentation make it an excellent foundation for building scalable, maintainable applications. +This project represents a sophisticated, production-ready Go boilerplate with comprehensive features for API development, service management, monitoring, and infrastructure integration. The clean code architecture, modular design, standardized patterns, and extensive documentation make it an excellent foundation for building scalable, maintainable applications. The combination of: -- **Clean architecture** with separation of concerns +- **Clean code principles** with single responsibility and dependency injection +- **Modular architecture** with separation of concerns - **Multi-tenant database support** with dynamic connection switching - **ORM integration** using GORM with auto-migration capabilities - **Comprehensive API structure** with validation and error handling @@ -1605,6 +1721,8 @@ The combination of: makes this boilerplate suitable for a wide range of applications from simple APIs to complex multi-tenant SaaS systems. +The clean code refactoring of `cmd/app/main.go` demonstrates best practices in Go development, showing how to transform complex, monolithic code into maintainable, testable, and extensible components. + ## 14. 
LICENSING ### 14.1 License Information diff --git a/scripts/build/build.go b/scripts/build/build.go index 860d325..7467ed4 100644 --- a/scripts/build/build.go +++ b/scripts/build/build.go @@ -21,7 +21,7 @@ import ( var ( DIST_DIR = "dist" APP_NAME = "stackyard" - MAIN_PATH = "./cmd/app/main.go" + MAIN_PATH = "./cmd/app" CONFIG_YML = "config.yaml" BANNER_TXT = "banner.txt" DB_FILE = "monitoring_users.db" From 13dcca9396107eed3179acd6f0a10dde93479688 Mon Sep 17 00:00:00 2001 From: "Gab." Date: Fri, 27 Mar 2026 10:28:51 +0700 Subject: [PATCH 12/18] refactor: simplify default app name and update architecture documentation --- cmd/app/constants.go | 2 +- docs_wiki/ARCHITECTURE.md | 264 +++++++++-- docs_wiki/DEVELOPMENT.md | 375 ++++++++++++++-- docs_wiki/GETTING_STARTED.md | 10 - docs_wiki/REFERENCE.md | 415 +++++++++++++----- internal/services/modules/products_service.go | 10 +- 6 files changed, 887 insertions(+), 189 deletions(-) diff --git a/cmd/app/constants.go b/cmd/app/constants.go index ad96e51..805b7c0 100644 --- a/cmd/app/constants.go +++ b/cmd/app/constants.go @@ -12,7 +12,7 @@ type LogBroadcaster struct{} // Application constants const ( AppName = "stackyard" - DefaultAppName = "Golang App" + DefaultAppName = "" DefaultVersion = "1.0.0" DefaultEnv = "development" diff --git a/docs_wiki/ARCHITECTURE.md b/docs_wiki/ARCHITECTURE.md index d80a8e9..8e3b53d 100644 --- a/docs_wiki/ARCHITECTURE.md +++ b/docs_wiki/ARCHITECTURE.md @@ -37,7 +37,8 @@ Applications are built as **composable services** that can be enabled/disabled v - **Modularity**: Services encapsulate related functionality - **Independence**: Services can be developed and deployed separately - **Configuration-Driven**: Runtime behavior controlled by `config.yaml` -- **Dependency Injection**: Services receive dependencies through constructors +- **Auto-Discovery**: Services automatically register themselves at startup +- **Dependency Injection**: Services receive dependencies through factory functions ### 
3. Infrastructure Abstraction @@ -62,6 +63,7 @@ type PostgresManager struct { - **Testability**: Easy to mock infrastructure in tests - **Flexibility**: Can swap implementations without changing business logic - **Consistency**: All infrastructure follows the same patterns +- **Multi-Tenant Support**: Built-in support for tenant isolation ## Key Components @@ -99,7 +101,7 @@ stackyard/ ### Service Registration -Services are registered dynamically through a **service registry**: +Services are registered dynamically through an **auto-discovery system**: ```go // Service interface @@ -110,36 +112,66 @@ type Service interface { Endpoints() []string // API endpoints list } -// Registration +// Auto-discovery registration registry := services.NewServiceRegistry() -registry.Register(modules.NewUserService(config)) -registry.Register(modules.NewProductService(config)) -registry.Boot(echoInstance) // Wire up all services + +// Services automatically register themselves via init() functions +// No manual registration required - services self-register at startup + +registry.Boot(echoInstance) // Wire up all enabled services +``` + +**Auto-Discovery Process:** +1. **Service Factory Functions**: Each service implements a factory function +2. **Automatic Registration**: Services register themselves during package initialization +3. **Configuration-Driven**: Service activation controlled by `config.yaml` +4. 
**Dependency Injection**: Services receive infrastructure dependencies through factories + +**Service Factory Pattern:** +```go +// Service factory function +func NewUserServiceFactory() services.ServiceFactory { + return func(deps services.Dependencies) (services.Service, error) { + return &UserService{ + db: deps.Postgres, + redis: deps.Redis, + logger: deps.Logger, + }, nil + } +} + +// Automatic registration via init() +func init() { + services.RegisterService("users_service", NewUserServiceFactory) +} ``` ## Infrastructure Managers ### Database Managers -Stackyard supports multiple database types through abstracted managers: +Stackyard supports multiple database types through abstracted managers with **multi-tenant architecture**: #### PostgreSQL Manager -- **Multi-tenant support**: Dynamic database switching +- **Multi-tenant support**: Dynamic database switching per tenant - **GORM integration**: Full ORM capabilities with auto-migration -- **Connection pooling**: Efficient connection management -- **Async operations**: Non-blocking database operations +- **Connection pooling**: Efficient connection management per database +- **Async operations**: Non-blocking database operations via worker pools +- **Tenant isolation**: Automatic tenant ID injection and validation #### MongoDB Manager -- **Document database**: NoSQL capabilities -- **Multi-tenant**: Database-level isolation -- **Aggregation pipelines**: Complex data processing -- **Async operations**: Worker pool-based execution +- **Document database**: NoSQL capabilities with BSON support +- **Multi-tenant**: Database-level isolation with tenant-specific databases +- **Aggregation pipelines**: Complex data processing and analytics +- **Async operations**: Worker pool-based execution with connection pooling +- **Schema validation**: Built-in document validation and indexing #### Redis Manager -- **Caching**: High-performance key-value storage -- **Pub/Sub**: Message broadcasting capabilities -- **Batch 
operations**: Efficient bulk operations -- **Async execution**: Worker pool processing +- **Caching**: High-performance key-value storage with TTL support +- **Pub/Sub**: Message broadcasting capabilities for real-time features +- **Batch operations**: Efficient bulk operations and pipelines +- **Async execution**: Worker pool processing with connection pooling +- **Data structures**: Support for strings, hashes, lists, sets, and sorted sets ### Message Queue Managers @@ -219,26 +251,56 @@ if result.IsDone() { ### Hierarchical Configuration -Configuration is managed through a **hierarchical YAML structure**: +Configuration is managed through a **hierarchical YAML structure** with **multi-tenant support**: ```yaml app: # Application-level settings - name: "MyApp" + name: "Stackyard" debug: true + env: "development" server: # HTTP server configuration port: "8080" services: # Service enable/disable flags - user_service: true - product_service: false - -postgres: # Infrastructure-specific config + users_service: true + broadcast_service: false + cache_service: true + mongodb_service: true + multi_tenant_service: true + products_service: true + +postgres: # Multi-tenant PostgreSQL configuration enabled: true connections: - name: "primary" + enabled: true + host: "localhost" + port: 5432 + user: "postgres" + password: "Mypostgres01" + dbname: "postgres" + sslmode: "disable" + - name: "secondary" + enabled: true host: "localhost" - database: "myapp" + port: 5433 + user: "postgres" + password: "Mypostgres01" + dbname: "postgres" + sslmode: "disable" + +mongo: # Multi-tenant MongoDB configuration + enabled: true + connections: + - name: "primary" + enabled: true + uri: "mongodb://localhost:27017" + database: "primary_db" + - name: "secondary" + enabled: true + uri: "mongodb://localhost:27018" + database: "secondary_db" ``` ### Environment Override @@ -246,26 +308,61 @@ postgres: # Infrastructure-specific config Configuration can be overridden with **environment 
variables**: ```bash +# Application settings export APP_DEBUG=false +export APP_ENV=production + +# Server settings export SERVER_PORT=3000 -export POSTGRES_PASSWORD=prod-password + +# Database settings +export POSTGRES_HOST=prod-db.example.com +export POSTGRES_PASSWORD=secure-password + +# Service settings +export SERVICES_USERS_SERVICE=true +export SERVICES_CACHE_SERVICE=false ``` ### Validation & Defaults -Configuration is **validated at startup** with sensible defaults: +Configuration is **validated at startup** with sensible defaults and **multi-tenant validation**: ```go type Config struct { - App AppConfig `yaml:"app"` - Server ServerConfig `yaml:"server"` + App AppConfig `yaml:"app"` + Server ServerConfig `yaml:"server"` + Services ServiceConfig `yaml:"services"` Postgres PostgresConfig `yaml:"postgres" validate:"required_if=Enabled true"` + Mongo MongoConfig `yaml:"mongo" validate:"required_if=Enabled true"` } func (c *Config) Validate() error { - // Custom validation logic + // Multi-tenant validation + if err := c.validateMultiTenantConfig(); err != nil { + return err + } + + // Service validation + if err := c.validateServices(); err != nil { + return err + } + return validate.Struct(c) } + +func (c *Config) validateMultiTenantConfig() error { + // Ensure at least one connection is enabled for each enabled service + if c.Postgres.Enabled && len(c.Postgres.Connections) == 0 { + return errors.New("at least one PostgreSQL connection must be configured") + } + + if c.Mongo.Enabled && len(c.Mongo.Connections) == 0 { + return errors.New("at least one MongoDB connection must be configured") + } + + return nil +} ``` ## API Design Patterns @@ -286,15 +383,22 @@ All API responses follow a **consistent JSON structure**: ### Request Validation -Requests are validated using **struct tags** with automatic error formatting: +Requests are validated using **struct tags** with automatic error formatting and **custom validators**: ```go type CreateUserRequest struct { - 
Name string `json:"name" validate:"required,min=2,max=50"` - Email string `json:"email" validate:"required,email"` - Age int `json:"age" validate:"gte=18,lte=120"` + Username string `json:"username" validate:"required,username"` + Email string `json:"email" validate:"required,email"` + FullName string `json:"full_name" validate:"max=100"` } +// Custom username validator +validate.RegisterValidation("username", func(fl validator.FieldLevel) bool { + username := fl.Field().String() + matched, _ := regexp.MatchString(`^[a-zA-Z0-9]{3,20}$`, username) + return matched +}) + func (h *Handler) createUser(c echo.Context) error { var req CreateUserRequest if err := request.Bind(c, &req); err != nil { @@ -306,34 +410,104 @@ func (h *Handler) createUser(c echo.Context) error { ### Error Handling -Errors are handled consistently with **standardized error codes**: +Errors are handled consistently with **standardized error codes** and **structured responses**: ```go -// Automatic error responses +// Automatic error responses with structured format return response.NotFound(c, "User not found") return response.BadRequest(c, "Invalid input") return response.InternalServerError(c, "Database error") + +// Custom error with details +return response.Error(c, response.ErrorResponse{ + Code: "VALIDATION_ERROR", + Message: "Request validation failed", + Details: map[string]interface{}{ + "field": "email", + "reason": "invalid format", + }, +}) +``` + +### Multi-Tenant Request Handling + +Requests automatically include **tenant context** for multi-tenant operations: + +```go +func (h *Handler) getProduct(c echo.Context) error { + tenant := c.Param("tenant") // Extract tenant from URL + productID := c.Param("id") // Extract product ID + + // Use tenant-specific database connection + db, exists := h.mongoManager.GetConnection(tenant) + if !exists { + return response.BadRequest(c, "Invalid tenant") + } + + // Query tenant-specific collection + result := 
db.Collection("products").FindOne(context.Background(), bson.M{ + "_id": productID, + }) + + // Return structured response + return response.Success(c, result, "Product retrieved successfully") +} ``` ## Security Architecture ### Authentication & Authorization -- **API Key authentication**: Simple key-based auth -- **Session management**: Secure session handling -- **Role-based access**: Permission-based authorization +- **API Key authentication**: Simple key-based auth via `X-API-Key` header +- **Session management**: Secure session handling with middleware integration +- **Role-based access**: Permission-based authorization with middleware support ### Data Protection -- **API Obfuscation**: Base64 encoding for data in transit -- **Encryption**: AES-256-GCM encryption for sensitive data -- **Input validation**: Comprehensive request validation +- **API Obfuscation**: Base64 encoding for data in transit (configurable per endpoint) +- **Encryption**: AES-256-GCM encryption for sensitive data (optional) +- **Input validation**: Comprehensive request validation with custom validators +- **Tenant isolation**: Automatic tenant ID validation and data separation ### Infrastructure Security -- **Connection encryption**: TLS for database connections -- **Secure defaults**: Conservative security settings -- **Audit logging**: Comprehensive operation logging +- **Connection encryption**: TLS support for database and external service connections +- **Secure defaults**: Conservative security settings with configurable overrides +- **Audit logging**: Comprehensive operation logging with structured format +- **Request obfuscation**: Automatic API response obfuscation for monitoring dashboard + +### Multi-Tenant Security + +- **Database isolation**: Separate database connections per tenant +- **Tenant validation**: Automatic tenant ID validation in requests +- **Access control**: Tenant-specific data access with validation +- **Resource limits**: Configurable resource limits per 
tenant + +### Security Middleware + +```go +// Authentication middleware +func (m *Middleware) Authenticate(next echo.HandlerFunc) echo.HandlerFunc { + return func(c echo.Context) error { + apiKey := c.Request().Header.Get("X-API-Key") + if apiKey != m.config.Auth.Secret { + return response.Unauthorized(c, "Invalid API key") + } + return next(c) + } +} + +// Tenant validation middleware +func (m *Middleware) ValidateTenant(next echo.HandlerFunc) echo.HandlerFunc { + return func(c echo.Context) error { + tenant := c.Param("tenant") + if !m.tenantManager.IsValidTenant(tenant) { + return response.BadRequest(c, "Invalid tenant") + } + return next(c) + } +} +``` ## Monitoring & Observability diff --git a/docs_wiki/DEVELOPMENT.md b/docs_wiki/DEVELOPMENT.md index 9d77405..5911b74 100644 --- a/docs_wiki/DEVELOPMENT.md +++ b/docs_wiki/DEVELOPMENT.md @@ -14,49 +14,80 @@ Create a new service in `internal/services/modules/service_yourname.go`: package modules import ( - "stackyard/pkg/response" - "github.com/labstack/echo/v4" + "stackyard/config" + "stackyard/pkg/interfaces" + "stackyard/pkg/logger" + "stackyard/pkg/registry" + "stackyard/pkg/request" + "stackyard/pkg/response" + + "github.com/labstack/echo/v4" ) type YourService struct { - enabled bool + enabled bool } func NewYourService(enabled bool) *YourService { - return &YourService{enabled: enabled} + return &YourService{enabled: enabled} } func (s *YourService) Name() string { return "Your Service" } +func (s *YourService) WireName() string { return "your-service" } func (s *YourService) Enabled() bool { return s.enabled } func (s *YourService) Endpoints() []string { return []string{"/your-api"} } +func (s *YourService) Get() interface{} { return s } func (s *YourService) RegisterRoutes(g *echo.Group) { - // Register your API endpoints here - g.GET("/your-api", s.getData) - g.POST("/your-api", s.createData) + // Register your API endpoints here + g.GET("/your-api", s.getData) + g.POST("/your-api", s.createData) } func 
(s *YourService) getData(c echo.Context) error { - // Your business logic here - data := map[string]string{"message": "Hello from your service!"} - return response.Success(c, data, "Data retrieved") + // Your business logic here + data := map[string]string{"message": "Hello from your service!"} + return response.Success(c, data, "Data retrieved") } func (s *YourService) createData(c echo.Context) error { - // Handle POST request - return response.Created(c, nil, "Data created") + // Handle POST request + return response.Created(c, nil, "Data created") +} + +// Auto-registration function - called when package is imported +func init() { + registry.RegisterService("your_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + return NewYourService(config.Services.IsEnabled("your_service")) + }) } ``` -### Register Your Service +### Service Auto-Discovery -Add to `internal/server/server.go`: +Services are automatically discovered and registered when their package is imported. The `init()` function in your service file handles this registration: ```go -// Find the service registration section and add: -registry.Register(modules.NewYourService(s.config.Services.IsEnabled("your_service"))) +// Auto-registration function - called when package is imported +func init() { + registry.RegisterService("your_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + return NewYourService(config.Services.IsEnabled("your_service")) + }) +} ``` +**How it works:** +1. When the application starts, Go automatically calls the `init()` function +2. The service factory is registered with the global registry +3. During boot, the registry automatically discovers and creates enabled services +4. 
No manual registration in `internal/server/server.go` is required + +**Service Factory Function:** +- Takes configuration, logger, and dependencies as parameters +- Returns a new service instance +- Checks if the service is enabled via `config.Services.IsEnabled()` +- Enables dependency injection for infrastructure components + ### Enable in Configuration Add to `config.yaml`: @@ -109,11 +140,16 @@ func (s *YourService) createUser(c echo.Context) error { ```go type CreateUserRequest struct { - Name string `json:"name" validate:"required,min=2,max=50"` + Username string `json:"username" validate:"required,username"` Email string `json:"email" validate:"required,email"` - Age int `json:"age" validate:"required,gte=18,lte=120"` - Phone string `json:"phone" validate:"required,phone"` - Password string `json:"password" validate:"required,min=8"` + FullName string `json:"full_name" validate:"required,min=3,max=100"` +} + +type UpdateUserRequest struct { + Username string `json:"username" validate:"omitempty,username"` + Email string `json:"email" validate:"omitempty,email"` + FullName string `json:"full_name" validate:"omitempty,min=3,max=100"` + Status string `json:"status" validate:"omitempty,oneof=active inactive suspended"` } func (s *YourService) createUser(c echo.Context) error { @@ -128,7 +164,15 @@ func (s *YourService) createUser(c echo.Context) error { } // Request is valid, proceed... 
- return response.Created(c, req, "User created") + user := User{ + ID: "123", + Username: req.Username, + Email: req.Email, + Status: "active", + CreatedAt: time.Now().Unix(), + } + + return response.Created(c, user, "User created successfully") } ``` @@ -153,6 +197,88 @@ validate.RegisterValidation("username", func(fl validator.FieldLevel) bool { }) ``` +### Dependency Injection + +Services receive infrastructure dependencies through constructor injection via the service factory function: + +```go +type UserService struct { + enabled bool + db *infrastructure.PostgresManager + cache *infrastructure.RedisManager + logger *logger.Logger +} + +func NewUserService( + db *infrastructure.PostgresManager, + cache *infrastructure.RedisManager, + logger *logger.Logger, + enabled bool, +) *UserService { + return &UserService{ + enabled: enabled, + db: db, + cache: cache, + logger: logger, + } +} +``` + +**Service Factory with Dependencies:** + +```go +func init() { + registry.RegisterService("user_service", func( + config *config.Config, + logger *logger.Logger, + deps *registry.Dependencies, + ) interfaces.Service { + return NewUserService( + deps.Postgres, + deps.Redis, + logger, + config.Services.IsEnabled("user_service"), + ) + }) +} +``` + +**Available Dependencies:** +- `deps.Postgres` - PostgreSQL database manager +- `deps.Redis` - Redis cache manager +- `deps.MinIO` - Object storage manager +- `deps.Kafka` - Message queue manager +- `deps.MongoDB` - MongoDB database manager +- `deps.Grafana` - Monitoring dashboard manager + +**Using Dependencies in Handlers:** + +```go +func (s *UserService) createUser(c echo.Context) error { + var req CreateUserRequest + if err := request.Bind(c, &req); err != nil { + return response.ValidationError(c, "Validation failed", err.GetFieldErrors()) + } + + // Use database dependency + user := User{ + Username: req.Username, + Email: req.Email, + } + + if err := s.db.Create(&user).Error; err != nil { + s.logger.Error("Failed to create 
user", "error", err) + return response.InternalServerError(c, "Database error") + } + + // Use cache dependency + s.cache.cacheUser(user.ID, user) + + s.logger.Info("User created successfully", "user_id", user.ID) + return response.Created(c, user, "User created successfully") +} +``` + ### Response Types #### Success Responses @@ -451,6 +577,17 @@ func (s *FileService) uploadFile(c echo.Context) error { ## Configuration Management +### Service Configuration + +Services are enabled/disabled through the `services` section in `config.yaml`: + +```yaml +services: + your_service: true + user_service: true + product_service: false +``` + ### Adding New Configuration Options Add to `config/config.go`: @@ -479,6 +616,66 @@ your_service: - "https://backup.example.com" ``` +### Accessing Configuration in Services + +```go +func init() { + registry.RegisterService("your_service", func( + config *config.Config, + logger *logger.Logger, + deps *registry.Dependencies, + ) interfaces.Service { + // Access service-specific configuration + serviceConfig := config.YourService + + return NewYourService( + deps.Postgres, + logger, + serviceConfig, + config.Services.IsEnabled("your_service"), + ) + }) +} + +type YourService struct { + enabled bool + db *infrastructure.PostgresManager + logger *logger.Logger + serviceConfig YourServiceConfig +} + +func NewYourService( + db *infrastructure.PostgresManager, + logger *logger.Logger, + serviceConfig YourServiceConfig, + enabled bool, +) *YourService { + return &YourService{ + enabled: enabled, + db: db, + logger: logger, + serviceConfig: serviceConfig, + } +} +``` + +### Environment Variable Overrides + +Configuration can be overridden using environment variables: + +```bash +export YOUR_SERVICE_API_KEY="production-api-key" +export YOUR_SERVICE_TIMEOUT=120 +export YOUR_SERVICE_ENDPOINTS='["https://prod-api.example.com"]' + +go run cmd/app/main.go +``` + +**Environment Variable Naming:** +- Use uppercase with underscores +- Prefix with the 
service name +- Use JSON format for complex types like slices + ## Testing ### Unit Tests @@ -625,30 +822,158 @@ go run cmd/app/main.go 4. **HTTPS**: Use HTTPS in production 5. **Secrets**: Never commit secrets to version control +## Service Discovery & Auto-Registration + +### How Auto-Discovery Works + +Stackyard uses an automatic service discovery system that eliminates the need for manual service registration: + +1. **Package Import**: When a service package is imported, Go calls its `init()` function +2. **Factory Registration**: The `init()` function registers a service factory with the global registry +3. **Boot Process**: During application startup, the registry discovers all registered factories +4. **Service Creation**: Enabled services are automatically created and registered +5. **Route Registration**: Services register their routes with the Echo router + +### Service Factory Pattern + +The service factory function is the core of the auto-discovery system: + +```go +func init() { + registry.RegisterService("your_service", func( + config *config.Config, + logger *logger.Logger, + deps *registry.Dependencies, + ) interfaces.Service { + // Service creation logic here + return NewYourService(/* dependencies */) + }) +} +``` + +**Factory Function Parameters:** +- `config *config.Config` - Application configuration +- `logger *logger.Logger` - Structured logger instance +- `deps *registry.Dependencies` - Infrastructure dependencies + +**Factory Function Responsibilities:** +- Create service instance with dependencies +- Check if service is enabled via `config.Services.IsEnabled()` +- Return `nil` if service should not be registered + +### Service Lifecycle + +1. **Import Phase**: Service packages are imported, `init()` functions execute +2. **Registration Phase**: Service factories are registered in global registry +3. **Discovery Phase**: Registry scans for all registered factories +4. 
**Creation Phase**: Enabled services are instantiated with dependencies +5. **Boot Phase**: Services register their routes and start operations +6. **Runtime Phase**: Services handle requests and perform business logic + +### Service Dependencies + +Services can declare dependencies on infrastructure components: + +```go +type YourService struct { + enabled bool + db *infrastructure.PostgresManager + cache *infrastructure.RedisManager + logger *logger.Logger +} + +func NewYourService( + db *infrastructure.PostgresManager, + cache *infrastructure.RedisManager, + logger *logger.Logger, + enabled bool, +) *YourService { + return &YourService{ + enabled: enabled, + db: db, + cache: cache, + logger: logger, + } +} +``` + +**Available Dependencies:** +- `deps.Postgres` - PostgreSQL database manager +- `deps.Redis` - Redis cache manager +- `deps.MinIO` - Object storage manager +- `deps.Kafka` - Message queue manager +- `deps.MongoDB` - MongoDB database manager +- `deps.Grafana` - Monitoring dashboard manager + +### Service Interface Requirements + +All services must implement the `interfaces.Service` interface: + +```go +type Service interface { + Name() string // Human-readable service name + WireName() string // Dependency injection name + Enabled() bool // Whether service is enabled + Endpoints() []string // List of endpoint patterns + RegisterRoutes(g *echo.Group) // Register API routes + Get() interface{} // Return service instance +} +``` + +### Service Naming Conventions + +- **Service Name**: Human-readable name (e.g., "User Service") +- **Wire Name**: Dependency injection identifier (e.g., "user-service") +- **Config Key**: YAML configuration key (e.g., "user_service") +- **Package Name**: Go package name (e.g., "users_service") + +### Debugging Service Discovery + +If a service isn't registering properly: + +1. **Check Package Import**: Ensure the service package is imported +2. 
**Verify init() Function**: Confirm the `init()` function exists and calls `registry.RegisterService()` +3. **Check Configuration**: Verify the service is enabled in `config.yaml` +4. **Review Dependencies**: Ensure all required dependencies are available +5. **Examine Logs**: Check application logs for service registration messages + +### Service Registration Order + +Services are registered in the order they are discovered, which depends on: +- Package import order +- `init()` function execution order +- Dependency availability + +**Best Practice**: Design services to be independent of registration order when possible. + ## Troubleshooting ### Common Development Issues **Service not registering:** -- Check that the service is added to `internal/server/server.go` -- Verify the config key matches in `config.yaml` -- Check for compilation errors +- Check that the service package is imported (no manual registration needed) +- Verify the `init()` function calls `registry.RegisterService()` +- Ensure the service is enabled in `config.yaml` +- Check for compilation errors in the service package **Database connection errors:** - Verify database credentials - Check network connectivity - Ensure database server is running +- Confirm dependency injection is working correctly **API validation errors:** - Check request JSON structure - Verify validation tags on struct fields - Test with valid/invalid data +- Review custom validator implementations **Performance issues:** - Add database indexes - Implement caching - Check for N+1 query problems - Monitor memory usage +- Review dependency injection overhead ## Next Steps @@ -656,6 +981,4 @@ Now that you understand how to develop with Stackyard, explore: - **[Architecture Overview](ARCHITECTURE.md)** - Deep dive into the technical design - **[API Reference](REFERENCE.md)** - Complete technical documentation -- **Built-in Services** - Study `service_a.go`, `service_b.go` for examples -Happy developing! 
🎯 diff --git a/docs_wiki/GETTING_STARTED.md b/docs_wiki/GETTING_STARTED.md index 8930c50..3cb1d42 100644 --- a/docs_wiki/GETTING_STARTED.md +++ b/docs_wiki/GETTING_STARTED.md @@ -217,9 +217,6 @@ go clean -modcache go mod download ``` -## Next Steps - -🎉 **Congratulations!** You have a running Stackyard application. ### What to Explore Next: @@ -242,10 +239,3 @@ docker-compose up # Run with full stack go test ./... # Run all tests ``` -## Getting Help - -- **Documentation**: Check the [docs_wiki](.) folder -- **Issues**: Report bugs on GitHub -- **Community**: Join discussions for questions - -Happy coding! 🚀 diff --git a/docs_wiki/REFERENCE.md b/docs_wiki/REFERENCE.md index 0c0be09..51799a5 100644 --- a/docs_wiki/REFERENCE.md +++ b/docs_wiki/REFERENCE.md @@ -9,35 +9,35 @@ This comprehensive reference covers all configuration options, API specification ```yaml # Application Configuration app: - name: "Stackyard App" # Application display name - version: "1.0.0" # Application version - debug: true # Enable debug logging - env: "development" # Environment (development, staging, production) - banner_path: "banner.txt" # Path to startup banner file - startup_delay: 3 # Seconds to display boot screen (0 to skip) - quiet_startup: false # Suppress console logs during startup - enable_tui: true # Enable Terminal User Interface + name: "Stackyard" # Application display name + version: "1.0.0" # Application version + debug: true # Enable debug logging + env: "development" # Environment (development, staging, production) + banner_path: "banner.txt" # Path to startup banner file + startup_delay: 3 # Seconds to display boot screen (0 to skip) + quiet_startup: true # Suppress console logs during startup (TUI only) + enable_tui: true # Enable Terminal User Interface # Server Configuration server: - port: "8080" # HTTP server port + port: "8080" # HTTP server port # Service Configuration services: - service_a: true # Basic CRUD service example - service_b: false # Additional 
service - service_c: true # Another service - service_d: false # Disabled service - service_e: false # Event streaming service - service_f: false # Multi-tenant service - service_g: false # MongoDB service - service_h: false # Broadcast utility demo - service_i: false # Grafana integration + users_service: true # User management service + broadcast_service: false # Event broadcasting service + cache_service: true # Redis caching service + encryption_service: false # API encryption service + grafana_service: false # Grafana integration service + mongodb_service: true # MongoDB multi-tenant service + multi_tenant_service: true # Multi-tenant PostgreSQL service + products_service: true # Product catalog service + tasks_service: true # Task management service # Authentication auth: type: "apikey" # Authentication type (apikey, basic, none) - secret: "your-secret-key" # API key for authentication + secret: "super-secret-key" # API key for authentication # Redis Configuration redis: @@ -51,20 +51,8 @@ kafka: enabled: false # Enable Kafka brokers: # List of Kafka brokers - "localhost:9092" - topic: "stackyard-events" # Default topic - group_id: "stackyard" # Consumer group ID - -# PostgreSQL Configuration (Single Connection) -postgres: - enabled: true # Enable PostgreSQL - host: "localhost" # Database host - port: 5432 # Database port - user: "postgres" # Database user - password: "password" # Database password - dbname: "stackyard" # Database name - sslmode: "disable" # SSL mode (disable, require, verify-ca, verify-full) - max_open_conns: 10 # Maximum open connections - max_idle_conns: 5 # Maximum idle connections + topic: "my-topic" # Default topic + group_id: "my-group" # Consumer group ID # PostgreSQL Multi-Connection Configuration postgres: @@ -75,72 +63,83 @@ postgres: host: "localhost" port: 5432 user: "postgres" - password: "password" - dbname: "primary_db" + password: "Mypostgres01" + dbname: "postgres" sslmode: "disable" - name: "secondary" enabled: true host: 
"localhost" port: 5433 user: "postgres" - password: "password" - dbname: "secondary_db" + password: "Mypostgres01" + dbname: "postgres" sslmode: "disable" -# MongoDB Configuration +# MongoDB Multi-Connection Configuration mongo: - enabled: false + enabled: true connections: - name: "primary" enabled: true uri: "mongodb://localhost:27017" database: "primary_db" - - name: "analytics" - enabled: false - uri: "mongodb://analytics.example.com:27017" - database: "analytics_db" + - name: "secondary" + enabled: true + uri: "mongodb://localhost:27018" + database: "secondary_db" # Monitoring Configuration monitoring: - enabled: true # Enable web monitoring dashboard - port: "9090" # Monitoring dashboard port - password: "admin" # Dashboard login password - obfuscate_api: false # Enable API response obfuscation - title: "Stackyard Admin" # Dashboard title + enabled: true # Enable web monitoring dashboard + port: "9090" # Monitoring dashboard port + password: "admin" # Dashboard login password + obfuscate_api: true # Enable API response obfuscation + title: "Stackyard" # Dashboard title subtitle: "Monitoring Dashboard" # Dashboard subtitle + max_photo_size_mb: 2 # Maximum photo upload size + upload_dir: "web/monitoring/uploads" # Upload directory -# MinIO Configuration +# MinIO Configuration (Nested under monitoring) monitoring: minio: - enabled: false # Enable MinIO integration - endpoint: "localhost:9000" # MinIO server endpoint - access_key: "minioadmin" # MinIO access key - secret_key: "minioadmin" # MinIO secret key - use_ssl: false # Use SSL for MinIO connection - bucket: "stackyard" # Default bucket name + enabled: true # Enable MinIO integration + endpoint: "localhost:9003" # MinIO server endpoint + access_key_id: "minioadmin" # MinIO access key + secret_access_key: "minioadmin" # MinIO secret key + use_ssl: false # Use SSL for MinIO connection + bucket_name: "main" # Default bucket name + +# External Services Configuration (Nested under monitoring) +monitoring: + 
external: + services: # List of external services to monitor + - name: "Google" + url: "https://google.com" + - name: "Local API" + url: "http://localhost:8080/health" # Cron Jobs Configuration cron: - enabled: false # Enable scheduled jobs + enabled: true # Enable scheduled jobs jobs: - cleanup: "0 0 * * *" # Daily cleanup at midnight - health_check: "*/5 * * * *" # Health check every 5 minutes + log_cleanup: "0 0 * * *" # Daily log cleanup at midnight + health_check: "*/10 * * * *" # Health check every 10 minutes # Encryption Configuration encryption: - enabled: false # Enable API encryption - algorithm: "aes-256-gcm" # Encryption algorithm - key: "" # 32-byte encryption key (base64 encoded) - rotate_keys: false # Enable automatic key rotation + enabled: false # Enable API encryption + algorithm: "aes-256-gcm" # Encryption algorithm + key: "" # 32-byte encryption key (base64 encoded) + rotate_keys: false # Enable automatic key rotation key_rotation_interval: "24h" # Key rotation interval # Grafana Integration grafana: - enabled: false # Enable Grafana integration + enabled: true # Enable Grafana integration url: "http://localhost:3000" # Grafana server URL - api_key: "" # Grafana API key - username: "admin" # Grafana username (alternative to API key) - password: "admin" # Grafana password (alternative to API key) + api_key: "your-grafana-api-key" # Grafana API key + username: "admin" # Grafana username (alternative to API key) + password: "admin" # Grafana password (alternative to API key) ``` ## API Specifications @@ -198,7 +197,7 @@ All API responses follow this standardized structure: ## API Endpoints Reference -### Service A (CRUD Example) - `/api/v1/users` +### Users Service - `/api/v1/users` #### GET `/api/v1/users` List users with pagination.
"success": true, "data": [ { - "id": 1, - "name": "John Doe", + "id": "1", + "username": "john_doe", "email": "john@example.com", - "created_at": "2024-01-01T00:00:00Z" + "status": "active", + "created_at": 1704067200 } ], "meta": { @@ -234,9 +234,9 @@ Create a new user. **Request Body:** ```json { - "name": "Jane Doe", + "username": "jane_doe", "email": "jane@example.com", - "age": 25 + "full_name": "Jane Doe" } ``` @@ -245,11 +245,11 @@ Create a new user. { "success": true, "data": { - "id": 2, - "name": "Jane Doe", + "id": "123", + "username": "jane_doe", "email": "jane@example.com", - "age": 25, - "created_at": "2024-01-01T00:00:00Z" + "status": "active", + "created_at": 1704067200 } } ``` @@ -262,9 +262,11 @@ Get a specific user. { "success": true, "data": { - "id": 1, - "name": "John Doe", - "email": "john@example.com" + "id": "1", + "username": "john_doe", + "email": "john@example.com", + "status": "active", + "created_at": 1704067200 } } ``` @@ -275,8 +277,9 @@ Update a user. **Request Body:** ```json { - "name": "John Smith", - "email": "johnsmith@example.com" + "username": "john_smith", + "email": "johnsmith@example.com", + "status": "inactive" } ``` @@ -291,33 +294,116 @@ Delete a user. } ``` -### Service G (MongoDB) - `/api/v1/products` +### Products Service - `/api/v1/products` + +#### GET `/api/v1/products` +Get product catalog information. + +**Response:** +```json +{ + "success": true, + "data": { + "message": "Hello from Service B - Products" + } +} +``` + +### MongoDB Service - `/api/v1/products/{tenant}` #### GET `/api/v1/products/{tenant}` -List products for a tenant. +List products for a specific tenant database. 
**Path Parameters:** -- `tenant` (string): Tenant identifier +- `tenant` (string): Tenant identifier (maps to MongoDB database name) + +**Response:** +```json +{ + "success": true, + "data": [ + { + "_id": "507f1f77bcf86cd799439011", + "name": "Laptop", + "description": "Gaming laptop", + "price": 1299.99, + "category": "electronics", + "in_stock": true, + "quantity": 5, + "tags": ["gaming", "laptop"] + } + ] +} +``` #### POST `/api/v1/products/{tenant}` -Create a product. +Create a product in tenant database. **Request Body:** ```json { - "name": "Laptop", - "price": 999.99, - "category": "electronics" + "name": "Smartphone", + "description": "Latest smartphone", + "price": 899.99, + "category": "electronics", + "quantity": 10, + "tags": ["mobile", "phone"] +} +``` + +#### GET `/api/v1/products/{tenant}/{id}` +Get specific product by ID. + +#### PUT `/api/v1/products/{tenant}/{id}` +Update product. + +#### DELETE `/api/v1/products/{tenant}/{id}` +Delete product. + +#### GET `/api/v1/products/{tenant}/search` +Search products with filters. + +**Query Parameters:** +- `name` (string): Product name filter +- `category` (string): Category filter +- `in_stock` (boolean): Stock status filter +- `min_price` (number): Minimum price +- `max_price` (number): Maximum price +- `tags` (string): Comma-separated tags + +#### GET `/api/v1/products/{tenant}/analytics` +Get product analytics by category. + +**Response:** +```json +{ + "success": true, + "data": { + "total_products": 150, + "in_stock_products": 120, + "out_of_stock": 30, + "category_breakdown": [ + { + "_id": "electronics", + "total_products": 80, + "avg_price": 450.50, + "min_price": 50.00, + "max_price": 2000.00, + "total_quantity": 500, + "in_stock_count": 450 + } + ] + } } ``` -### Service H (Event Streaming) - `/api/v1/events` +### Broadcast Service - `/api/v1/events` #### GET `/api/v1/events/stream/{stream_id}` Subscribe to event stream (Server-Sent Events). 
#### POST `/api/v1/events/broadcast` -Broadcast an event. +Broadcast an event to all subscribers. **Request Body:** ```json @@ -325,15 +411,34 @@ Broadcast an event. "type": "user_action", "message": "User logged in", "data": { - "user_id": 123 + "user_id": "123" } } ``` #### GET `/api/v1/events/streams` -Get stream information. +Get active stream information. + +### Cache Service - `/api/v1/cache` -### Service I (Grafana) - `/api/v1/grafana` +#### GET `/api/v1/cache/{key}` +Get cached value. + +#### POST `/api/v1/cache/{key}` +Set cached value. + +**Request Body:** +```json +{ + "value": "cached data", + "ttl": 3600 +} +``` + +#### DELETE `/api/v1/cache/{key}` +Delete cached value. + +### Grafana Service - `/api/v1/grafana` #### POST `/api/v1/grafana/dashboards` Create a Grafana dashboard. @@ -365,6 +470,9 @@ Application health check. #### GET `/health/infrastructure` Detailed infrastructure health. +#### GET `/health/services` +Service-specific health status. + ## Request Validation ### Built-in Validators @@ -409,9 +517,10 @@ Usage: `validate:"username"` ```sql CREATE TABLE users ( id SERIAL PRIMARY KEY, - name VARCHAR(255) NOT NULL, + username VARCHAR(255) NOT NULL, email VARCHAR(255) UNIQUE NOT NULL, - age INTEGER, + full_name VARCHAR(255), + status VARCHAR(50) DEFAULT 'active', created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), deleted_at TIMESTAMP WITH TIME ZONE @@ -423,15 +532,41 @@ CREATE TABLE users ( #### Products Collection ```javascript { - "_id": ObjectId("..."), - "tenant_id": "tenant_a", + "_id": ObjectId("507f1f77bcf86cd799439011"), "name": "Laptop", - "price": 999.99, + "description": "Gaming laptop", + "price": 1299.99, "category": "electronics", + "in_stock": true, + "quantity": 5, + "tags": ["gaming", "laptop"], "created_at": ISODate("2024-01-01T00:00:00Z") } ``` +#### Analytics Aggregation Pipeline +```javascript +// Product analytics by category +[ + { + "$group": { + "_id": "$category", + 
"total_products": {"$sum": 1}, + "avg_price": {"$avg": "$price"}, + "min_price": {"$min": "$price"}, + "max_price": {"$max": "$price"}, + "total_quantity": {"$sum": "$quantity"}, + "in_stock_count": { + "$sum": {"$cond": ["$in_stock", 1, 0]} + } + } + }, + { + "$sort": {"total_products": -1} + } +] +``` + ## Infrastructure Managers ### AsyncResult Types @@ -464,20 +599,92 @@ func (ar *AsyncResult[T]) IsDone() bool ### Connection Pool Settings -#### PostgreSQL +#### PostgreSQL Multi-Connection ```yaml postgres: - max_open_conns: 10 # Maximum open connections - max_idle_conns: 5 # Maximum idle connections - conn_max_lifetime: "1h" # Connection max lifetime + connections: + - name: "primary" + max_open_conns: 10 # Maximum open connections + max_idle_conns: 5 # Maximum idle connections + conn_max_lifetime: "1h" # Connection max lifetime + - name: "secondary" + max_open_conns: 8 # Secondary connection pool + max_idle_conns: 3 + conn_max_lifetime: "1h" ``` #### Redis ```yaml redis: - pool_size: 10 # Connection pool size - min_idle_conns: 2 # Minimum idle connections + pool_size: 10 # Connection pool size + min_idle_conns: 2 # Minimum idle connections conn_max_lifetime: "1h" # Connection max lifetime + idle_timeout: "10m" # Idle connection timeout +``` + +#### MongoDB +```yaml +mongo: + connections: + - name: "primary" + max_pool_size: 100 # Maximum connection pool size + min_pool_size: 10 # Minimum connection pool size + max_idle_time: "10m" # Maximum idle time + connect_timeout: "30s" # Connection timeout + - name: "secondary" + max_pool_size: 50 + min_pool_size: 5 + max_idle_time: "10m" + connect_timeout: "30s" +``` + +### Infrastructure Connection Management + +#### PostgreSQL Connection Manager +```go +// Get tenant-specific connection +conn, exists := postgresManager.GetConnection("tenant_a") +if exists { + // Use tenant_a database + result := conn.ORM.Where("tenant_id = ?", "tenant_a").Find(&data) +} + +// List all available connections +connections := 
postgresManager.ListConnections() +for name, conn := range connections { + fmt.Printf("Connection %s: %s\n", name, conn.Status()) +} +``` + +#### MongoDB Connection Manager +```go +// Get tenant-specific database +db, exists := mongoManager.GetConnection("tenant_b") +if exists { + // Use tenant_b database + cursor, err := db.Collection("products").Find(context.Background(), bson.M{}) +} + +// List all available databases +databases := mongoManager.ListConnections() +for name, db := range databases { + fmt.Printf("Database %s: %s\n", name, db.Name()) +} +``` + +#### Redis Connection Manager +```go +// Basic operations +err := redisManager.Set(ctx, "key", "value", time.Hour) +value, err := redisManager.Get(ctx, "key") + +// Hash operations +err = redisManager.HSet(ctx, "user:123", "name", "John") +name, err := redisManager.HGet(ctx, "user:123", "name") + +// List operations +err = redisManager.LPush(ctx, "queue", "item1") +item, err := redisManager.LPop(ctx, "queue") ``` ## Security Features diff --git a/internal/services/modules/products_service.go b/internal/services/modules/products_service.go index 73446a2..11884ad 100644 --- a/internal/services/modules/products_service.go +++ b/internal/services/modules/products_service.go @@ -10,6 +10,10 @@ import ( "github.com/labstack/echo/v4" ) +const ( + SERVICE_NAME = "products-service" +) + type ProductsService struct { enabled bool } @@ -19,7 +23,7 @@ func NewProductsService(enabled bool) *ProductsService { } func (s *ProductsService) Name() string { return "Products Service" } -func (s *ProductsService) WireName() string { return "products-service" } +func (s *ProductsService) WireName() string { return SERVICE_NAME } func (s *ProductsService) Enabled() bool { return s.enabled } func (s *ProductsService) Endpoints() []string { return []string{"/products"} } func (s *ProductsService) Get() interface{} { return s } @@ -33,7 +37,7 @@ func (s *ProductsService) RegisterRoutes(g *echo.Group) { // Auto-registration function - 
called when package is imported func init() { - registry.RegisterService("products_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { - return NewProductsService(config.Services.IsEnabled("products_service")) + registry.RegisterService(SERVICE_NAME, func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + return NewProductsService(config.Services.IsEnabled(SERVICE_NAME)) }) } From d4d1b116bf27c92e9101b7fdefd62afe1ecf5c83 Mon Sep 17 00:00:00 2001 From: "Gab." Date: Fri, 27 Mar 2026 17:29:40 +0700 Subject: [PATCH 13/18] docs: add testing infrastructure and metrics documentation Update go build command to build package directory instead of single file. Add comprehensive documentation for: - Testing helpers and mock implementations - Test file organization and coverage - Metrics system (HTTP, cache, resilience, WebSocket, batch) - Project structure updates with new packages --- .github/workflows/go-build.yml | 2 +- deployments/kubernetes/deployment.yaml | 68 + docker-compose.yaml | 156 +++ docs/swagger.yaml | 280 ++++ docs_wiki/blueprint/blueprint.txt | 1140 +++++++++++++++++ go.mod | 10 + go.sum | 32 +- internal/middleware/audit.go | 153 +++ internal/middleware/cors.go | 142 ++ internal/middleware/jwt.go | 246 ++++ internal/middleware/ratelimit.go | 179 +++ internal/middleware/security.go | 116 ++ internal/services/modules/products_service.go | 8 + internal/services/modules/users_service.go | 59 +- pkg/batch/operations.go | 248 ++++ pkg/infrastructure/afero.go | 12 + pkg/infrastructure/afero_test.go | 166 --- pkg/infrastructure/testdata/README.md | 3 - pkg/infrastructure/testdata/config.yaml | 2 - pkg/infrastructure/testdata/test.txt | 1 - pkg/logging/rotation.go | 210 +++ pkg/logging/sampler.go | 312 +++++ pkg/logging/structured.go | 255 ++++ pkg/metrics/prometheus.go | 237 ++++ pkg/pagination/cursor.go | 227 ++++ pkg/resilience/circuit_breaker.go | 264 ++++ 
pkg/resilience/health.go | 262 ++++ pkg/resilience/retry.go | 238 ++++ pkg/resilience/timeout.go | 115 ++ pkg/testing/helpers.go | 131 ++ pkg/testing/mocks.go | 341 +++++ pkg/tui/charts.go | 337 +++++ pkg/webhook/handler.go | 258 ++++ pkg/websocket/handler.go | 224 ++++ tests/infrastructure/afero_test.go | 190 +++ tests/infrastructure/testdata/README.md | 1 + tests/infrastructure/testdata/config.yaml | 1 + tests/infrastructure/testdata/test.txt | 1 + tests/services/products_service_test.go | 69 + tests/services/users_service_test.go | 281 ++++ 40 files changed, 6795 insertions(+), 182 deletions(-) create mode 100644 deployments/kubernetes/deployment.yaml create mode 100644 docker-compose.yaml create mode 100644 docs/swagger.yaml create mode 100644 internal/middleware/audit.go create mode 100644 internal/middleware/cors.go create mode 100644 internal/middleware/jwt.go create mode 100644 internal/middleware/ratelimit.go create mode 100644 internal/middleware/security.go create mode 100644 pkg/batch/operations.go delete mode 100644 pkg/infrastructure/afero_test.go delete mode 100644 pkg/infrastructure/testdata/README.md delete mode 100644 pkg/infrastructure/testdata/config.yaml delete mode 100644 pkg/infrastructure/testdata/test.txt create mode 100644 pkg/logging/rotation.go create mode 100644 pkg/logging/sampler.go create mode 100644 pkg/logging/structured.go create mode 100644 pkg/metrics/prometheus.go create mode 100644 pkg/pagination/cursor.go create mode 100644 pkg/resilience/circuit_breaker.go create mode 100644 pkg/resilience/health.go create mode 100644 pkg/resilience/retry.go create mode 100644 pkg/resilience/timeout.go create mode 100644 pkg/testing/helpers.go create mode 100644 pkg/testing/mocks.go create mode 100644 pkg/tui/charts.go create mode 100644 pkg/webhook/handler.go create mode 100644 pkg/websocket/handler.go create mode 100644 tests/infrastructure/afero_test.go create mode 100644 tests/infrastructure/testdata/README.md create mode 100644 
tests/infrastructure/testdata/config.yaml create mode 100644 tests/infrastructure/testdata/test.txt create mode 100644 tests/services/products_service_test.go create mode 100644 tests/services/users_service_test.go diff --git a/.github/workflows/go-build.yml b/.github/workflows/go-build.yml index f2241a4..60bea17 100644 --- a/.github/workflows/go-build.yml +++ b/.github/workflows/go-build.yml @@ -19,7 +19,7 @@ jobs: go-version: '1.21' - name: Build - run: go build -v ./cmd/app/main.go + run: go build -v ./cmd/app/ - name: Test run: go test -v ./... diff --git a/deployments/kubernetes/deployment.yaml b/deployments/kubernetes/deployment.yaml new file mode 100644 index 0000000..87cfe39 --- /dev/null +++ b/deployments/kubernetes/deployment.yaml @@ -0,0 +1,68 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: stackyard + namespace: default + labels: + app: stackyard + version: "1.0.0" +spec: + replicas: 3 + selector: + matchLabels: + app: stackyard + template: + metadata: + labels: + app: stackyard + version: "1.0.0" + spec: + containers: + - name: stackyard + image: stackyard:latest + ports: + - name: http + containerPort: 8080 + protocol: TCP + - name: monitoring + containerPort: 9090 + protocol: TCP + env: + - name: APP_ENV + value: "production" + - name: APP_PORT + value: "8080" + - name: MONITORING_PORT + value: "9090" + resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + livenessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 3 + volumeMounts: + - name: config + mountPath: /app/config.yaml + subPath: config.yaml + readOnly: true + volumes: + - name: config + configMap: + name: stackyard-config \ No newline at end of file diff --git a/docker-compose.yaml b/docker-compose.yaml new file 
mode 100644 index 0000000..31933ba --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,156 @@ +version: '3.8' + +services: + app: + build: + context: . + dockerfile: Dockerfile + target: production + ports: + - "8080:8080" + - "9090:9090" + environment: + - APP_ENV=development + - APP_PORT=8080 + - MONITORING_PORT=9090 + - REDIS_ENABLED=true + - REDIS_HOST=redis + - REDIS_PORT=6379 + - POSTGRES_ENABLED=true + - POSTGRES_HOST=postgres + - POSTGRES_PORT=5432 + - POSTGRES_USER=stackyard + - POSTGRES_PASSWORD=stackyard123 + - POSTGRES_DB=stackyard + - KAFKA_ENABLED=true + - KAFKA_BROKERS=kafka:9092 + - MONGO_ENABLED=true + - MONGO_URI=mongodb://mongo:27017 + - MONGO_DATABASE=stackyard + volumes: + - ./config.yaml:/app/config.yaml:ro + depends_on: + - redis + - postgres + - kafka + - mongo + restart: unless-stopped + healthcheck: + test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + + redis: + image: redis:7-alpine + ports: + - "6379:6379" + volumes: + - redis_data:/data + restart: unless-stopped + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 5s + retries: 5 + + postgres: + image: postgres:15-alpine + ports: + - "5432:5432" + environment: + - POSTGRES_USER=stackyard + - POSTGRES_PASSWORD=stackyard123 + - POSTGRES_DB=stackyard + volumes: + - postgres_data:/var/lib/postgresql/data + restart: unless-stopped + healthcheck: + test: ["CMD-SHELL", "pg_isready -U stackyard"] + interval: 10s + timeout: 5s + retries: 5 + + kafka: + image: confluentinc/cp-kafka:7.5.0 + ports: + - "9092:9092" + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092 + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 + KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 + depends_on: + - zookeeper + restart: unless-stopped + healthcheck: + test: ["CMD", 
"kafka-broker-api-versions", "--bootstrap-server", "localhost:9092"] + interval: 30s + timeout: 10s + retries: 5 + + zookeeper: + image: confluentinc/cp-zookeeper:7.5.0 + ports: + - "2181:2181" + environment: + ZOOKEEPER_CLIENT_PORT: 2181 + ZOOKEEPER_TICK_TIME: 2000 + restart: unless-stopped + healthcheck: + test: ["CMD-SHELL", "echo ruok | nc localhost 2181"] + interval: 10s + timeout: 5s + retries: 5 + + mongo: + image: mongo:7 + ports: + - "27017:27017" + volumes: + - mongo_data:/data/db + restart: unless-stopped + healthcheck: + test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"] + interval: 10s + timeout: 5s + retries: 5 + + grafana: + image: grafana/grafana:10.0.0 + ports: + - "3000:3000" + environment: + - GF_SECURITY_ADMIN_PASSWORD=admin + - GF_USERS_ALLOW_SIGN_UP=false + volumes: + - grafana_data:/var/lib/grafana + restart: unless-stopped + + minio: + image: minio/minio:latest + ports: + - "9000:9000" + - "9001:9001" + environment: + - MINIO_ROOT_USER=minioadmin + - MINIO_ROOT_PASSWORD=minioadmin + volumes: + - minio_data:/data + command: server /data --console-address ":9001" + restart: unless-stopped + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] + interval: 30s + timeout: 10s + retries: 3 + +volumes: + redis_data: + postgres_data: + mongo_data: + grafana_data: + minio_data: \ No newline at end of file diff --git a/docs/swagger.yaml b/docs/swagger.yaml new file mode 100644 index 0000000..cbf4c6c --- /dev/null +++ b/docs/swagger.yaml @@ -0,0 +1,280 @@ +swagger: "2.0" +info: + description: "Stackyard API Documentation" + version: "1.0.0" + title: "Stackyard API" + contact: + email: "admin@stackyard.com" + license: + name: "Apache 2.0" + url: "http://www.apache.org/licenses/LICENSE-2.0.html" +host: "localhost:8080" +basePath: "/api/v1" +tags: + - name: "users" + description: "User management" + - name: "products" + description: "Product management" +schemes: + - "https" + - "http" +paths: + /users: 
+ get: + tags: + - "users" + summary: "List users with pagination" + description: "Get a paginated list of users" + produces: + - "application/json" + parameters: + - name: "page" + in: "query" + description: "Page number" + type: "integer" + default: 1 + - name: "per_page" + in: "query" + description: "Items per page" + type: "integer" + default: 10 + responses: + 200: + description: "Success" + schema: + type: "object" + properties: + success: + type: "boolean" + status: + type: "integer" + message: + type: "string" + data: + type: "array" + items: + $ref: "#/definitions/User" + meta: + $ref: "#/definitions/Meta" + 400: + description: "Bad request" + schema: + $ref: "#/definitions/Response" + post: + tags: + - "users" + summary: "Create user" + description: "Create a new user" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "body" + required: true + schema: + $ref: "#/definitions/CreateUserRequest" + responses: + 201: + description: "Created" + schema: + type: "object" + properties: + success: + type: "boolean" + status: + type: "integer" + message: + type: "string" + data: + $ref: "#/definitions/User" + 400: + description: "Bad request" + schema: + $ref: "#/definitions/Response" + 422: + description: "Validation error" + schema: + $ref: "#/definitions/Response" + /users/{id}: + get: + tags: + - "users" + summary: "Get single user" + description: "Get a specific user by ID" + produces: + - "application/json" + parameters: + - name: "id" + in: "path" + description: "User ID" + required: true + type: "string" + responses: + 200: + description: "Success" + schema: + type: "object" + properties: + success: + type: "boolean" + status: + type: "integer" + message: + type: "string" + data: + $ref: "#/definitions/User" + 404: + description: "Not found" + schema: + $ref: "#/definitions/Response" + put: + tags: + - "users" + summary: "Update user" + description: "Update an existing user" + consumes: + - 
"application/json" + produces: + - "application/json" + parameters: + - name: "id" + in: "path" + description: "User ID" + required: true + type: "string" + - in: "body" + name: "body" + required: true + schema: + $ref: "#/definitions/UpdateUserRequest" + responses: + 200: + description: "Success" + schema: + type: "object" + properties: + success: + type: "boolean" + status: + type: "integer" + message: + type: "string" + data: + $ref: "#/definitions/User" + 400: + description: "Bad request" + schema: + $ref: "#/definitions/Response" + 422: + description: "Validation error" + schema: + $ref: "#/definitions/Response" + delete: + tags: + - "users" + summary: "Delete user" + description: "Delete a user by ID" + produces: + - "application/json" + parameters: + - name: "id" + in: "path" + description: "User ID" + required: true + type: "string" + responses: + 204: + description: "No content" + 404: + description: "Not found" + schema: + $ref: "#/definitions/Response" + /products: + get: + tags: + - "products" + summary: "Get products" + description: "Get a list of products" + produces: + - "application/json" + responses: + 200: + description: "Success" + schema: + $ref: "#/definitions/Response" +definitions: + User: + type: "object" + properties: + id: + type: "string" + username: + type: "string" + email: + type: "string" + status: + type: "string" + created_at: + type: "integer" + CreateUserRequest: + type: "object" + required: + - "username" + - "email" + - "full_name" + properties: + username: + type: "string" + email: + type: "string" + full_name: + type: "string" + UpdateUserRequest: + type: "object" + properties: + username: + type: "string" + email: + type: "string" + full_name: + type: "string" + status: + type: "string" + enum: + - "active" + - "inactive" + - "suspended" + Response: + type: "object" + properties: + success: + type: "boolean" + status: + type: "integer" + message: + type: "string" + data: + type: "object" + error: + type: "object" + meta: + 
$ref: "#/definitions/Meta" + timestamp: + type: "integer" + datetime: + type: "string" + correlation_id: + type: "string" + Meta: + type: "object" + properties: + page: + type: "integer" + per_page: + type: "integer" + total: + type: "integer" + total_pages: + type: "integer" \ No newline at end of file diff --git a/docs_wiki/blueprint/blueprint.txt b/docs_wiki/blueprint/blueprint.txt index c26a481..8a878eb 100644 --- a/docs_wiki/blueprint/blueprint.txt +++ b/docs_wiki/blueprint/blueprint.txt @@ -1862,3 +1862,1143 @@ cat config.yaml | grep -A 10 "services:" ### 14.9 Commercial Support While the software is provided under Apache License 2.0, commercial support, consulting, and custom development services may be available through the project maintainer. + +## 15. TESTING INFRASTRUCTURE + +### 15.1 Test Helpers Package + +**Location:** `pkg/testing/` + +The project includes a comprehensive testing infrastructure package providing reusable test utilities and mock implementations. + +**Test Helpers (`pkg/testing/helpers.go`):** +- `NewTestContext()` - Creates echo.Context for unit testing with request body +- `NewTestContextWithQuery()` - Creates test context with query parameters +- `NewTestContextWithParams()` - Creates test context with path parameters +- `ParseResponse()` - Parses HTTP response body into struct +- `AssertStatus()` - Asserts HTTP status code +- `AssertJSON()` - Asserts JSON response fields +- `NewTestEcho()` - Creates Echo instance for testing routes + +**Mock Implementations (`pkg/testing/mocks.go`):** +- `MockService` - Service interface mock for testing +- `MockLogger` - Logger mock with log entry capture +- `MockRedisManager` - Redis operations mock with in-memory storage +- `MockPostgresManager` - PostgreSQL operations mock +- `MockMongoManager` - MongoDB operations mock +- `MockKafkaManager` - Kafka messaging mock with message capture +- `MockCronManager` - Cron job scheduling mock +- `MockFileReader` - File system operations mock +- 
`MockConfig` - Configuration mock with service toggles +- `TestSuite` - Complete test suite with all mocks initialized + +**Usage Example:** +```go +func TestUserService(t *testing.T) { + suite := testhelpers.NewTestSuite() + defer suite.Cleanup() + + service := modules.NewUsersService(true) + c, rec := testhelpers.NewTestContext(http.MethodGet, "/users", nil) + + err := service.GetUsers(c) + assert.NoError(t, err) + testhelpers.AssertStatus(t, rec, http.StatusOK) +} +``` + +### 15.2 Test File Organization + +Service test files are located in `tests/services/` directory, separated from service implementations: + +``` +tests/ +└── services/ + ├── users_service_test.go + └── products_service_test.go +``` + +**Test Coverage:** +- Unit tests for all service methods +- Validation testing for request inputs +- Error handling verification +- Benchmark tests for performance monitoring +- Struct serialization/deserialization tests + +### 15.3 Running Tests + +```bash +# Run all tests +go test ./... + +# Run service tests only +go test ./tests/services/... -v + +# Run with coverage +go test ./... -cover + +# Run benchmarks +go test -bench=. ./tests/services/ +``` + +## 16. API DOCUMENTATION + +### 16.1 Swagger Integration + +**Configuration:** `docs/swagger.yaml` + +The project includes OpenAPI 2.0 (Swagger) documentation with annotations on service handlers. 
+ +**Annotated Endpoints:** + +**Users Service:** +- `GET /api/v1/users` - List users with pagination +- `GET /api/v1/users/{id}` - Get user by ID +- `POST /api/v1/users` - Create user +- `PUT /api/v1/users/{id}` - Update user +- `DELETE /api/v1/users/{id}` - Delete user + +**Products Service:** +- `GET /api/v1/products` - Get products list + +**Swagger Annotations Format:** +```go +// GetUsers godoc +// @Summary List users with pagination +// @Description Get a paginated list of users +// @Tags users +// @Accept json +// @Produce json +// @Param page query int false "Page number" default(1) +// @Param per_page query int false "Items per page" default(10) +// @Success 200 {object} response.Response{data=[]User} "Success" +// @Failure 400 {object} response.Response "Bad request" +// @Router /users [get] +func (s *UsersService) GetUsers(c echo.Context) error { + // Implementation +} +``` + +### 16.2 Documentation Definitions + +The swagger.yaml includes definitions for: +- `User` - User model with all fields +- `CreateUserRequest` - User creation request schema +- `UpdateUserRequest` - User update request schema +- `Response` - Standard API response wrapper +- `Meta` - Pagination metadata + +## 17. SECURITY ENHANCEMENTS + +### 17.1 Rate Limiting Middleware + +**Location:** `internal/middleware/ratelimit.go` + +Provides configurable request rate limiting per IP or user. 
+ +**Features:** +- Token bucket algorithm implementation +- Configurable request limits and time windows +- Per-IP or per-user rate limiting +- Automatic cleanup of expired entries +- Rate limit headers in responses (`X-RateLimit-Limit`, `X-RateLimit-Remaining`, `X-RateLimit-Reset`) + +**Configuration:** +```go +// Default: 60 requests per minute per IP +middleware.RateLimit() + +// Custom configuration +middleware.RateLimitWithConfig(100, time.Minute) + +// Per-user rate limiting +middleware.RateLimitPerUser(100, time.Minute) +``` + +**Response on Limit Exceeded:** +```json +{ + "success": false, + "status": 429, + "error": { + "code": "RATE_LIMIT_EXCEEDED", + "message": "Rate limit exceeded. Please try again later.", + "details": { + "retry_after": 1672531260 + } + } +} +``` + +### 17.2 CORS Middleware + +**Location:** `internal/middleware/cors.go` + +Cross-Origin Resource Sharing support with wildcard subdomain matching. + +**Features:** +- Configurable allowed origins, methods, and headers +- Wildcard subdomain support (e.g., `*.example.com`) +- Preflight request handling +- Credentials support +- Max-Age caching + +**Configuration:** +```go +// Allow all origins +middleware.CORSAllowAll() + +// Specific origins +middleware.CORSWithConfig([]string{"https://example.com", "https://app.example.com"}) + +// Full configuration +middleware.CORS(middleware.CORSConfig{ + AllowOrigins: []string{"https://example.com"}, + AllowMethods: []string{http.MethodGet, http.MethodPost}, + AllowHeaders: []string{echo.HeaderContentType, echo.HeaderAuthorization}, + AllowCredentials: true, + MaxAge: 86400, +}) +``` + +### 17.3 JWT Authentication Middleware + +**Location:** `internal/middleware/jwt.go` + +JSON Web Token authentication with role-based access control. 
+ +**Features:** +- Token validation from header, query, or cookie +- Configurable signing key and token lookup +- User context extraction (user_id, username, email, role) +- Role-based access control helpers +- Optional authentication for public endpoints + +**Token Claims:** +```go +type JWTClaims struct { + UserID string `json:"user_id"` + Username string `json:"username"` + Email string `json:"email"` + Role string `json:"role"` + jwt.RegisteredClaims +} +``` + +**Usage:** +```go +// Require authentication +e.Use(middleware.JWTRequired("secret-key")) + +// Optional authentication +e.Use(middleware.JWTOptional("secret-key")) + +// Require admin role +e.GET("/admin", handler, middleware.RequireAdmin()) + +// Require specific roles +e.GET("/dashboard", handler, middleware.RequireRole("user", "admin")) +``` + +**Token Generation:** +```go +token, err := middleware.GenerateToken( + "user-123", + "johndoe", + "john@example.com", + "user", + "secret-key", + 24*time.Hour, +) +``` + +### 17.4 Security Headers Middleware + +**Location:** `internal/middleware/security.go` + +Adds security headers to HTTP responses. + +**Headers Included:** +- `Content-Security-Policy` - XSS protection +- `X-Content-Type-Options: nosniff` - Prevent MIME sniffing +- `X-Frame-Options: DENY` - Clickjacking protection +- `X-XSS-Protection: 1; mode=block` - XSS filter +- `Referrer-Policy: strict-origin-when-cross-origin` - Referrer control +- `Permissions-Policy` - Feature restrictions +- `Strict-Transport-Security` - HSTS with configurable max-age + +**Configuration:** +```go +// Default strict security +middleware.Security() + +// Custom CSP +middleware.SecurityWithConfig("default-src 'self'") + +// Permissive for development +middleware.SecurityPermissive() +``` + +### 17.5 Audit Logging Middleware + +**Location:** `internal/middleware/audit.go` + +Logs all API requests for security auditing and compliance. 
 + +**Logged Information:** +- Timestamp, method, path, query parameters +- HTTP status code and response latency +- User ID and username (if authenticated) +- Client IP address and user agent +- Request ID for correlation + +**Configuration:** +```go +// Basic audit logging +middleware.AuditWithConfig(logger) + +// Skip health check endpoints +middleware.AuditSkipHealthCheck(logger) + +// Custom configuration +middleware.Audit(middleware.AuditConfig{ + Logger: logger, + LogRequestBody: false, + LogHeaders: false, + SensitiveHeaders: []string{"Authorization", "Cookie"}, +}) +``` + +## 18. RESILIENCE PATTERNS + +### 18.1 Circuit Breaker + +**Location:** `pkg/resilience/circuit_breaker.go` + +Implements the circuit breaker pattern to prevent cascading failures. + +**States:** +- **Closed**: Normal operation, requests pass through +- **Open**: Failure threshold exceeded, requests fail fast +- **Half-Open**: Testing recovery, limited requests allowed + +**Configuration:** +```go +cb := resilience.NewCircuitBreaker(resilience.CircuitBreakerConfig{ + Name: "redis", + MaxFailures: 5, + ResetTimeout: 30 * time.Second, + HalfOpenMaxRequests: 1, + OnStateChange: func(name string, from, to resilience.State) { + logger.Info("Circuit breaker state changed", "name", name, "from", from, "to", to) + }, +}) +``` + +**Usage:** +```go +// Execute with circuit breaker protection +err := cb.Execute(func() error { + return redisManager.Set(ctx, "key", "value", time.Minute) +}) + +// Execute with fallback +err := cb.ExecuteWithFallback( + func() error { _, err := redisManager.Get(ctx, "key"); return err }, + func() error { return nil }, // fallback: serve a default value instead +) +``` + +**Circuit Breaker Manager:** +```go +manager := resilience.NewCircuitBreakerManager() +cb := manager.GetOrCreate(config) +stats := cb.GetStats() // Returns state, failures, successes +``` + +### 18.2 Retry with Exponential Backoff + +**Location:** `pkg/resilience/retry.go` + +Automatic retry with exponential backoff and jitter. 
+ +**Features:** +- Configurable max attempts and delays +- Exponential backoff with optional jitter +- Context cancellation support +- Retry condition filtering +- Generic result types + +**Configuration:** +```go +config := resilience.RetryConfig{ + MaxAttempts: 3, + InitialDelay: 100 * time.Millisecond, + MaxDelay: 10 * time.Second, + BackoffFactor: 2.0, + Jitter: true, + RetryIf: resilience.RetryIfRetryable(), + OnRetry: func(attempt int, err error) { + logger.Warn("Retrying operation", "attempt", attempt, "error", err) + }, +} +``` + +**Usage:** +```go +// Basic retry +err := resilience.Retry(func() error { + return kafkaManager.Publish("topic", message) +}, config) + +// Retry with context +err := resilience.RetryWithContext(ctx, func() error { + return dbManager.Query(ctx, "SELECT * FROM users") +}, config) + +// Retry with result +result, err := resilience.RetryWithResult(func() (string, error) { + return redisManager.Get(ctx, "key") +}, config) +``` + +**Retryable Errors:** +```go +// Mark error as retryable +return resilience.NewRetryableError(err) + +// Check if error is retryable +if resilience.IsRetryable(err) { + // Handle retryable error +} +``` + +### 18.3 Timeout Management + +**Location:** `pkg/resilience/timeout.go` + +Context-based timeout management for operations. 
+ +**Features:** +- Configurable timeout durations +- Context cancellation propagation +- Generic result types +- Timeout error handling + +**Usage:** +```go +// Execute with timeout +err := resilience.WithTimeout(func() error { + return slowOperation() +}, 5*time.Second) + +// Execute with context +ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) +defer cancel() + +err := resilience.WithContext(ctx, func() error { + return operationWithContext(ctx) +}) + +// Execute with result and timeout +result, err := resilience.WithTimeoutResult(func() ([]User, error) { + return dbManager.QueryUsers() +}, 30*time.Second) +``` + +### 18.4 Deep Health Checks + +**Location:** `pkg/resilience/health.go` + +Comprehensive health checking system for all dependencies. + +**Health Statuses:** +- `healthy` - All checks passing +- `degraded` - Non-critical checks failing +- `unhealthy` - Critical checks failing + +**Features:** +- Concurrent health check execution +- Critical vs non-critical checks +- Timeout per check +- Health report generation +- Individual check inspection + +**Registration:** +```go +checker := resilience.NewHealthChecker() + +// Simple check +checker.RegisterSimpleCheck("redis", func() error { + return redisManager.Ping(ctx) +}) + +// Critical check with custom timeout +checker.RegisterCriticalCheck("postgres", func(ctx context.Context) error { + return postgresManager.Ping(ctx) +}) + +// Check with context +checker.RegisterCheck(&resilience.HealthCheck{ + Name: "kafka", + Check: func(ctx context.Context) error { return kafkaManager.Ping(ctx) }, + Timeout: 5 * time.Second, + Critical: false, +}) +``` + +**Usage:** +```go +// Get full health report +report := checker.Check(ctx) +// report.Status: "healthy", "degraded", or "unhealthy" +// report.Checks: map of individual check results + +// Check if healthy +if checker.IsHealthy(ctx) { + // All systems operational +} + +// Check if critical systems healthy +if checker.IsCriticalHealthy(ctx) { 
+ // Core systems operational, non-critical may be degraded +} +``` + +**Health Report Structure:** +```go +type HealthReport struct { + Status HealthStatus `json:"status"` + Checks map[string]*HealthResult `json:"checks"` + Timestamp time.Time `json:"timestamp"` + Duration time.Duration `json:"duration"` +} +``` + +## 19. TUI ENHANCEMENTS + +### 19.1 ASCII Art Charts + +**Location:** `pkg/tui/charts.go` + +Terminal-based chart rendering for monitoring dashboards. + +**Available Charts:** + +**Bar Chart:** +```go +chart := tui.NewBarChart("Request Distribution", 40) +chart.AddItem("Users", 150, "") +chart.AddItem("Products", 85, "") +chart.AddItem("Orders", 200, "") +fmt.Println(chart.Render()) +``` + +**Sparkline:** +```go +sparkline := tui.NewSparkline("CPU Usage", 20) +sparkline.SetValues([]float64{45, 52, 48, 61, 55, 70, 65}) +fmt.Println(sparkline.Render()) +// Output: ▃▅▅▇▅█▇ +``` + +**Gauge:** +```go +gauge := tui.NewGauge("Memory", 20) +gauge.SetValue(75) +gauge.SetMax(100) +fmt.Println(gauge.Render()) +// Output: Memory: [██████████████░░░░░░] 75.0% +``` + +**Table:** +```go +table := tui.NewTable("Service Status", 80) +table.SetHeaders("Service", "Status", "Uptime") +table.AddRow("Redis", "Healthy", "99.9%") +table.AddRow("PostgreSQL", "Healthy", "99.8%") +fmt.Println(table.Render()) +``` + +## 20. DEPLOYMENT AND DEVOPS + +### 20.1 Kubernetes Templates + +**Location:** `deployments/kubernetes/deployment.yaml` + +Production-ready Kubernetes deployment configuration. 
+ +**Features:** +- Replica management (default: 3 replicas) +- Resource requests and limits +- Liveness and readiness probes +- ConfigMap volume mounting +- Health check endpoints + +**Resource Configuration:** +```yaml +resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" +``` + +**Health Probes:** +```yaml +livenessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 30 + periodSeconds: 10 + +readinessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 5 + periodSeconds: 5 +``` + +### 20.2 Helm Chart + +**Location:** `deployments/helm/stackyard/` + +Helm chart for simplified Kubernetes deployments. + +**Chart.yaml:** +```yaml +apiVersion: v2 +name: stackyard +description: A Helm chart for Stackyard API +type: application +version: 0.1.0 +appVersion: "1.0.0" +``` + +**Installation:** +```bash +helm install stackyard ./deployments/helm/stackyard +helm upgrade stackyard ./deployments/helm/stackyard +helm uninstall stackyard +``` + +### 20.3 GitHub Actions CI/CD + +**Location:** `.github/workflows/ci.yaml` + +Automated continuous integration and deployment pipeline. + +**Jobs:** +1. **test** - Run unit tests with race detection and coverage +2. **lint** - Code quality checks with golangci-lint +3. **build** - Compile application + +**Features:** +- Go module caching for faster builds +- Coverage reporting to Codecov +- Parallel job execution +- Branch-based triggers (main, develop) + +**Workflow Triggers:** +- Push to main or develop branches +- Pull requests to main branch + +### 20.4 Enhanced Docker Compose + +**Location:** `docker-compose.yaml` + +Complete development stack with infrastructure services. 
+ +**Services Included:** +- **app** - Main application with health checks +- **redis** - Redis cache (Alpine image) +- **postgres** - PostgreSQL database (Alpine image) +- **kafka** - Kafka message broker +- **zookeeper** - Kafka coordination +- **mongo** - MongoDB document database +- **grafana** - Monitoring dashboards +- **minio** - S3-compatible object storage + +**Features:** +- Health checks for all services +- Volume persistence for data +- Automatic restart policies +- Environment variable configuration +- Port mappings for local development + +**Usage:** +```bash +# Start all services +docker-compose up -d + +# View logs +docker-compose logs -f app + +# Stop all services +docker-compose down + +# Rebuild and start +docker-compose up -d --build +``` + +## 21. API ENHANCEMENTS + +### 21.1 WebSocket Support + +**Location:** `pkg/websocket/handler.go` + +Real-time bidirectional communication via WebSocket connections. + +**Features:** +- Hub pattern for connection management +- Client registration and unregistration +- Broadcasting to all connected clients +- Direct messaging to specific clients +- Connection statistics + +**Usage:** +```go +hub := websocket.NewHub() +go hub.Run() + +// Register WebSocket endpoint +e.GET("/ws", websocket.HandleWebSocket(hub)) + +// Broadcast message +websocket.BroadcastMessage(hub, "notification", map[string]interface{}{ + "message": "System update available", +}) + +// Send to specific client +hub.SendToClient("client-123", []byte("Hello")) +``` + +**Client Connection:** +```javascript +const ws = new WebSocket('ws://localhost:8080/ws?client_id=client-123'); +ws.onmessage = (event) => { + const data = JSON.parse(event.data); + console.log('Received:', data); +}; +``` + +### 21.2 Cursor-Based Pagination + +**Location:** `pkg/pagination/cursor.go` + +Efficient pagination for large datasets using cursor-based navigation. 
+ +**Advantages over Offset Pagination:** +- Consistent results when data changes +- Better performance for large datasets +- No skipped or duplicate items + +**Usage:** +```go +// Create pagination from query parameters +pagination, err := pagination.NewCursorPagination( + c.QueryParam("first"), // Number of items + c.QueryParam("last"), // Last N items + c.QueryParam("after"), // Cursor for forward pagination + c.QueryParam("before"), // Cursor for backward pagination +) + +// Create cursor from item +cursor, _ := pagination.CreateCursor(item.ID, item.CreatedAt, "") + +// Create edge with cursor +edge, _ := pagination.CreateEdge(item, cursor) + +// Create page response +page := pagination.CreatePage(edges, hasNextPage, hasPreviousPage, totalCount) +``` + +**GraphQL-Style Response:** +```json +{ + "edges": [ + {"node": {"id": "1", "name": "Item 1"}, "cursor": "eyJpZCI6IjEifQ=="}, + {"node": {"id": "2", "name": "Item 2"}, "cursor": "eyJpZCI6IjIifQ=="} + ], + "page_info": { + "has_next_page": true, + "has_previous_page": false, + "start_cursor": "eyJpZCI6IjEifQ==", + "end_cursor": "eyJpZCI6IjIifQ==" + }, + "total_count": 100 +} +``` + +### 21.3 Batch Operations + +**Location:** `pkg/batch/operations.go` + +Efficient processing of multiple items in batches. + +**Features:** +- Configurable batch size and workers +- Parallel processing with worker pool +- Retry support per item +- Timeout management +- Progress tracking + +**Batch Processor:** +```go +processor := batch.NewBatchProcessor(batch.BatchConfig{ + BatchSize: 100, + Workers: 4, + Timeout: 30 * time.Second, + RetryAttempts: 3, + RetryDelay: 100 * time.Millisecond, +}, func(ctx context.Context, item User) error { + return dbManager.CreateUser(ctx, item) +}) + +result, err := processor.Process(ctx, users) +// result.Successful: 95 +// result.Failed: 5 +// result.Errors: [error1, error2, ...] 
+// result.Duration: 2.5s +``` + +**Batch Writer:** +```go +writer := batch.NewBatchWriter(batch.DefaultBatchConfig(), func(ctx context.Context, items []LogEntry) error { + return dbManager.BulkInsertLogs(ctx, items) +}) + +for _, log := range logs { + writer.Add(ctx, log) +} +writer.Flush(ctx) // Flush remaining items +``` + +**Batch Reader:** +```go +reader := batch.NewBatchReader(batch.DefaultBatchConfig(), func(ctx context.Context, offset, limit int) ([]User, error) { + return dbManager.GetUsers(ctx, offset, limit) +}) + +allUsers, err := reader.ReadAll(ctx) +``` + +### 21.4 Webhook Support + +**Location:** `pkg/webhook/handler.go` + +Outbound webhook delivery with retry and signature verification. + +**Features:** +- HMAC-SHA256 signature verification +- Configurable retry with backoff +- Event type routing +- Timeout management +- Webhook statistics + +**Configuration:** +```go +manager := webhook.NewWebhookManager(webhook.WebhookConfig{ + URL: "https://example.com/webhook", + Secret: "webhook-secret", + Timeout: 30 * time.Second, + MaxRetries: 3, + RetryDelay: 1 * time.Second, + Headers: map[string]string{"X-Custom": "value"}, + Enabled: true, +}) +``` + +**Sending Webhooks:** +```go +event := webhook.WebhookEvent{ + ID: "evt-123", + Type: "user.created", + Timestamp: time.Now(), + Data: map[string]interface{}{ + "user_id": "user-456", + "email": "john@example.com", + }, +} + +resp, err := manager.Send(ctx, event) +``` + +**Receiving Webhooks:** +```go +handler := webhook.NewWebhookHandler(manager) +e.POST("/webhooks/incoming", echo.WrapHandler(http.HandlerFunc(handler.Handle))) +``` + +**Signature Verification:** +```go +// Verify webhook signature +isValid := webhook.VerifySignature(payload, signature, secret) +``` + +## 22. LOGGING AND OBSERVABILITY + +### 22.1 Structured JSON Logging + +**Location:** `pkg/logging/structured.go` + +Production-ready structured logging with JSON output. 
+ +**Features:** +- JSON-formatted log entries +- Multiple log levels (DEBUG, INFO, WARN, ERROR, FATAL) +- Context-aware logging (request ID, user ID, trace ID) +- Caller information capture +- Stack traces for errors +- Service metadata + +**Log Entry Structure:** +```json +{ + "timestamp": "2026-03-27T15:30:00Z", + "level": "INFO", + "message": "Request processed", + "caller": "/app/internal/server/server.go:123", + "request_id": "req-abc123", + "user_id": "user-456", + "service_name": "stackyard", + "version": "1.0.0", + "environment": "production", + "fields": { + "method": "GET", + "path": "/api/v1/users", + "status": 200, + "duration": "45ms" + } +} +``` + +**Usage:** +```go +logger := logging.NewStructuredLogger( + os.Stdout, + logging.INFO, + "stackyard", + "1.0.0", + "production", +) + +// Basic logging +logger.Info("Server started", map[string]interface{}{ + "port": 8080, +}) + +// Context-aware logging +ctxLogger := logger.WithContext(ctx) +ctxLogger.Error("Database connection failed", map[string]interface{}{ + "error": err.Error(), +}) + +// With fields +fieldLogger := logger.WithFields(map[string]interface{}{ + "component": "auth", +}) +fieldLogger.Debug("Token validated") +``` + +### 22.2 Log Sampling + +**Location:** `pkg/logging/sampler.go` + +Intelligent log sampling to reduce volume in high-traffic scenarios. 
+ +**Sampling Strategies:** + +**Rate-Based Sampling:** +```go +sampler := logging.NewLogSampler(logging.SampleByRate, 0.1, 0, 0) // Sample 10% +``` + +**Count-Based Sampling:** +```go +sampler := logging.NewLogSampler(logging.SampleByCount, 0, 100, 0) // Every 100th log +``` + +**Time-Based Sampling:** +```go +sampler := logging.NewLogSampler(logging.SampleByTime, 0, 0, time.Minute) // One per minute +``` + +**Adaptive Sampling:** +```go +sampler := logging.NewAdaptiveSampler(0.5, 0.01, 1.0, time.Minute) +// Adjusts rate based on log volume +``` + +**Usage:** +```go +samplingLogger := logging.NewSamplingLogger(logger, sampler) +samplingLogger.Info("High-frequency log message") // Only sampled entries logged +``` + +### 22.3 Log Rotation + +**Location:** `pkg/logging/rotation.go` + +Automatic log file rotation based on size and age. + +**Features:** +- Size-based rotation +- Age-based cleanup +- Configurable backup count +- Optional compression +- Thread-safe operations + +**Configuration:** +```go +writer, err := logging.NewRotatingWriter("/var/log/stackyard/app.log", logging.RotationConfig{ + MaxSize: 100 * 1024 * 1024, // 100MB + MaxAge: 7 * 24 * time.Hour, // 7 days + MaxBackups: 10, + Compress: true, +}) +``` + +**File Naming:** +``` +app.log +app.log.2026-03-27T15-30-00 +app.log.2026-03-26T15-30-00 +... +``` + +### 22.4 Prometheus Metrics + +**Location:** `pkg/metrics/prometheus.go` + +Application metrics collection for monitoring and alerting. 
+ +**Metrics Included:** + +**HTTP Metrics:** +- `http_requests_total` - Total HTTP requests by method, path, status +- `http_request_duration_seconds` - Request duration histogram +- `http_request_size_bytes` - Request size histogram +- `http_response_size_bytes` - Response size histogram + +**Infrastructure Metrics:** +- `active_connections` - Active connection count +- `database_connections` - Database connection pool by state +- `cache_hits_total` - Cache hit counter +- `cache_misses_total` - Cache miss counter + +**Resilience Metrics:** +- `circuit_breaker_state` - Circuit breaker state (0=closed, 1=half-open, 2=open) +- `circuit_breaker_trips_total` - Circuit breaker trip counter + +**WebSocket Metrics:** +- `websocket_connections` - Active WebSocket connections + +**Batch Operation Metrics:** +- `batch_operations_total` - Batch operation counter +- `batch_duration_seconds` - Batch operation duration + +**Logging Metrics:** +- `log_entries_total` - Log entry counter by level +- `errors_total` - Error counter by type + +**Usage:** +```go +metrics := metrics.NewMetrics() + +// Record HTTP request +metrics.RecordHTTPRequest("GET", "/api/users", 200, 45*time.Millisecond, 1024, 2048) + +// Record cache hit +metrics.RecordCacheHit("redis", "get") + +// Set circuit breaker state +metrics.SetCircuitBreakerState("redis", 0) // 0=closed + +// Record webhook event +metrics.RecordWebhookEvent("user.created", "success", 150*time.Millisecond) + +// Set WebSocket connections +metrics.SetWebSocketConnections(42) +``` + +**Metrics Endpoint:** +```go +e.GET("/metrics", echo.WrapHandler(metrics.Handler())) +``` + +## 23. 
PROJECT STRUCTURE UPDATES + +### 23.1 Package Additions + +The following packages have been added to the project: + +``` +pkg/ +├── batch/ # Batch operations +│ └── operations.go +├── logging/ # Logging utilities +│ ├── structured.go +│ ├── sampler.go +│ └── rotation.go +├── metrics/ # Prometheus metrics +│ └── prometheus.go +├── pagination/ # Cursor-based pagination +│ └── cursor.go +├── resilience/ # Resilience patterns +│ ├── circuit_breaker.go +│ ├── retry.go +│ ├── timeout.go +│ └── health.go +├── testing/ # Test utilities +│ ├── helpers.go +│ └── mocks.go +├── webhook/ # Webhook support +│ └── handler.go +└── websocket/ # WebSocket support + └── handler.go +``` + +### 23.2 Middleware Additions + +``` +internal/middleware/ +├── audit.go # Audit logging +├── cors.go # CORS support +├── encryption.go # API encryption +├── jwt.go # JWT authentication +├── middleware.go # General middleware +├── ratelimit.go # Rate limiting +└── security.go # Security headers +``` + +### 23.3 Deployment Additions + +``` +deployments/ +├── helm/ +│ └── stackyard/ +│ └── Chart.yaml # Helm chart +└── kubernetes/ + └── deployment.yaml # Kubernetes deployment +``` + +### 23.4 Test Structure + +``` +tests/ +└── services/ # Service unit tests + ├── users_service_test.go + └── products_service_test.go +``` + +### 23.5 Configuration Additions + +**New Dependencies in go.mod:** +- `github.com/gorilla/websocket` - WebSocket support +- `github.com/golang-jwt/jwt/v5` - JWT authentication +- `github.com/prometheus/client_golang` - Prometheus metrics diff --git a/go.mod b/go.mod index 66043c8..cbc884c 100644 --- a/go.mod +++ b/go.mod @@ -8,11 +8,14 @@ require ( github.com/charmbracelet/bubbletea v1.3.10 github.com/charmbracelet/lipgloss v1.1.0 github.com/go-playground/validator/v10 v10.28.0 + github.com/golang-jwt/jwt/v5 v5.3.1 github.com/google/uuid v1.6.0 + github.com/gorilla/websocket v1.5.3 github.com/hashicorp/go-retryablehttp v0.7.8 github.com/jackc/pgx/v5 v5.7.6 
github.com/labstack/echo/v4 v4.13.4 github.com/minio/minio-go/v7 v7.0.97 + github.com/prometheus/client_golang v1.23.2 github.com/redis/go-redis/v9 v9.17.2 github.com/robfig/cron/v3 v3.0.1 github.com/rs/zerolog v1.34.0 @@ -29,6 +32,7 @@ require ( require ( github.com/atotto/clipboard v0.1.4 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect github.com/charmbracelet/harmonica v0.2.0 // indirect @@ -79,11 +83,15 @@ require ( github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect github.com/muesli/cancelreader v0.2.2 // indirect github.com/muesli/termenv v0.16.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/ncruces/go-strftime v0.1.9 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/philhofer/fwd v1.2.0 // indirect github.com/pierrec/lz4/v4 v4.1.22 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.16.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.4.7 // indirect @@ -105,6 +113,7 @@ require ( github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect golang.org/x/net v0.47.0 // indirect @@ -112,6 +121,7 @@ require ( golang.org/x/sys v0.39.0 // indirect golang.org/x/text v0.32.0 // indirect 
golang.org/x/time v0.11.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect modernc.org/libc v1.66.10 // indirect modernc.org/mathutil v1.7.1 // indirect diff --git a/go.sum b/go.sum index a7d3a83..0aa8e94 100644 --- a/go.sum +++ b/go.sum @@ -4,6 +4,8 @@ github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= @@ -67,17 +69,21 @@ github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3 github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY= +github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp 
v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= @@ -122,6 +128,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/labstack/echo/v4 v4.13.4 h1:oTZZW+T3s9gAu5L8vmzihV7/lkXGZuITzTQkTEhcXEA= github.com/labstack/echo/v4 v4.13.4/go.mod h1:g63b33BZ5vZzcIUF8AtRH40DrTlXnx4UMC8rBdndmjQ= github.com/labstack/gommon v0.4.2 
h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0= @@ -157,6 +165,8 @@ github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELU github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= @@ -170,6 +180,14 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= 
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg= github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/redis/go-redis/v9 v9.17.2 h1:P2EGsA4qVIM3Pp+aPocCJ7DguDHhqrXNhVcEp4ViluI= @@ -181,8 +199,8 @@ github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= @@ -243,6 +261,10 @@ github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod 
h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -302,6 +324,8 @@ golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/internal/middleware/audit.go b/internal/middleware/audit.go new file mode 100644 index 0000000..25f9fb2 --- /dev/null +++ b/internal/middleware/audit.go @@ -0,0 +1,153 @@ +package middleware + +import ( + "time" + + "stackyard/pkg/logger" + + "github.com/labstack/echo/v4" +) + +// AuditConfig holds audit logging configuration +type AuditConfig struct { + Logger *logger.Logger + Skipper func(c echo.Context) bool + LogRequestBody bool + LogHeaders bool + SensitiveHeaders []string +} + +// DefaultAuditConfig returns default audit configuration +func DefaultAuditConfig(log *logger.Logger) AuditConfig { + return AuditConfig{ + Logger: log, + LogRequestBody: false, + LogHeaders: false, + SensitiveHeaders: []string{ + "Authorization", + "Cookie", + "X-Api-Key", + }, + } +} + +// 
AuditLog represents an audit log entry +type AuditLog struct { + Timestamp time.Time `json:"timestamp"` + Method string `json:"method"` + Path string `json:"path"` + Query string `json:"query,omitempty"` + StatusCode int `json:"status_code"` + Latency time.Duration `json:"latency"` + UserID string `json:"user_id,omitempty"` + Username string `json:"username,omitempty"` + IP string `json:"ip"` + UserAgent string `json:"user_agent"` + RequestID string `json:"request_id,omitempty"` + RequestBody interface{} `json:"request_body,omitempty"` + Headers map[string]string `json:"headers,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +// Audit returns audit logging middleware +func Audit(config ...AuditConfig) echo.MiddlewareFunc { + var cfg AuditConfig + if len(config) > 0 { + cfg = config[0] + } else { + cfg = DefaultAuditConfig(nil) + } + + return func(next echo.HandlerFunc) echo.HandlerFunc { + return func(c echo.Context) error { + if cfg.Skipper != nil && cfg.Skipper(c) { + return next(c) + } + + start := time.Now() + req := c.Request() + + err := next(c) + + latency := time.Since(start) + + auditLog := AuditLog{ + Timestamp: start, + Method: req.Method, + Path: req.URL.Path, + Query: req.URL.RawQuery, + StatusCode: c.Response().Status, + Latency: latency, + IP: c.RealIP(), + UserAgent: req.UserAgent(), + RequestID: c.Response().Header().Get(echo.HeaderXRequestID), + } + + if userID := c.Get("user_id"); userID != nil { + auditLog.UserID = userID.(string) + } + + if username := c.Get("username"); username != nil { + auditLog.Username = username.(string) + } + + if cfg.LogHeaders { + headers := make(map[string]string) + for key, values := range req.Header { + skip := false + for _, sensitive := range cfg.SensitiveHeaders { + if key == sensitive { + skip = true + break + } + } + if !skip && len(values) > 0 { + headers[key] = values[0] + } + } + auditLog.Headers = headers + } + + if cfg.Logger != nil { + logMsg := "Audit log" + logFields := 
[]interface{}{ + "method", auditLog.Method, + "path", auditLog.Path, + "status", auditLog.StatusCode, + "latency", auditLog.Latency.String(), + "ip", auditLog.IP, + } + + if auditLog.UserID != "" { + logFields = append(logFields, "user_id", auditLog.UserID) + } + + if auditLog.Username != "" { + logFields = append(logFields, "username", auditLog.Username) + } + + if auditLog.StatusCode >= 400 { + cfg.Logger.Warn(logMsg, logFields...) + } else { + cfg.Logger.Info(logMsg, logFields...) + } + } + + return err + } + } +} + +// AuditWithConfig returns audit middleware with custom config +func AuditWithConfig(log *logger.Logger) echo.MiddlewareFunc { + return Audit(DefaultAuditConfig(log)) +} + +// AuditSkipHealthCheck returns audit middleware that skips health check endpoints +func AuditSkipHealthCheck(log *logger.Logger) echo.MiddlewareFunc { + cfg := DefaultAuditConfig(log) + cfg.Skipper = func(c echo.Context) bool { + return c.Request().URL.Path == "/health" || c.Request().URL.Path == "/health/infrastructure" + } + return Audit(cfg) +} diff --git a/internal/middleware/cors.go b/internal/middleware/cors.go new file mode 100644 index 0000000..faf2cd4 --- /dev/null +++ b/internal/middleware/cors.go @@ -0,0 +1,142 @@ +package middleware + +import ( + "net/http" + "strings" + + "github.com/labstack/echo/v4" +) + +// CORSConfig holds CORS configuration +type CORSConfig struct { + AllowOrigins []string + AllowMethods []string + AllowHeaders []string + ExposeHeaders []string + AllowCredentials bool + MaxAge int +} + +// DefaultCORSConfig returns default CORS configuration +func DefaultCORSConfig() CORSConfig { + return CORSConfig{ + AllowOrigins: []string{"*"}, + AllowMethods: []string{ + http.MethodGet, + http.MethodHead, + http.MethodPut, + http.MethodPatch, + http.MethodPost, + http.MethodDelete, + }, + AllowHeaders: []string{ + echo.HeaderOrigin, + echo.HeaderContentType, + echo.HeaderAccept, + echo.HeaderAuthorization, + }, + ExposeHeaders: []string{}, + 
AllowCredentials: false, + MaxAge: 86400, + } +} + +// CORS returns CORS middleware +func CORS(config ...CORSConfig) echo.MiddlewareFunc { + var cfg CORSConfig + if len(config) > 0 { + cfg = config[0] + } else { + cfg = DefaultCORSConfig() + } + + return func(next echo.HandlerFunc) echo.HandlerFunc { + return func(c echo.Context) error { + req := c.Request() + res := c.Response() + + origin := req.Header.Get(echo.HeaderOrigin) + + allowOrigin := "" + for _, o := range cfg.AllowOrigins { + if o == "*" { + allowOrigin = "*" + break + } + if o == origin { + allowOrigin = origin + break + } + if strings.HasPrefix(o, "*.") { + domain := strings.TrimPrefix(o, "*") + if strings.HasSuffix(origin, domain) { + allowOrigin = origin + break + } + } + } + + if allowOrigin != "" { + res.Header().Set(echo.HeaderAccessControlAllowOrigin, allowOrigin) + } + + if req.Method == http.MethodOptions { + res.Header().Set(echo.HeaderAccessControlAllowMethods, strings.Join(cfg.AllowMethods, ",")) + res.Header().Set(echo.HeaderAccessControlAllowHeaders, strings.Join(cfg.AllowHeaders, ",")) + + if len(cfg.ExposeHeaders) > 0 { + res.Header().Set(echo.HeaderAccessControlExposeHeaders, strings.Join(cfg.ExposeHeaders, ",")) + } + + if cfg.AllowCredentials { + res.Header().Set(echo.HeaderAccessControlAllowCredentials, "true") + } + + if cfg.MaxAge > 0 { + res.Header().Set(echo.HeaderAccessControlMaxAge, string(rune(cfg.MaxAge))) + } + + return c.NoContent(http.StatusNoContent) + } + + if len(cfg.ExposeHeaders) > 0 { + res.Header().Set(echo.HeaderAccessControlExposeHeaders, strings.Join(cfg.ExposeHeaders, ",")) + } + + if cfg.AllowCredentials { + res.Header().Set(echo.HeaderAccessControlAllowCredentials, "true") + } + + return next(c) + } + } +} + +// CORSWithConfig returns CORS middleware with custom config +func CORSWithConfig(allowOrigins []string) echo.MiddlewareFunc { + return CORS(CORSConfig{ + AllowOrigins: allowOrigins, + AllowMethods: []string{ + http.MethodGet, + http.MethodHead, + 
http.MethodPut, + http.MethodPatch, + http.MethodPost, + http.MethodDelete, + }, + AllowHeaders: []string{ + echo.HeaderOrigin, + echo.HeaderContentType, + echo.HeaderAccept, + echo.HeaderAuthorization, + }, + ExposeHeaders: []string{}, + AllowCredentials: false, + MaxAge: 86400, + }) +} + +// CORSAllowAll returns CORS middleware that allows all origins +func CORSAllowAll() echo.MiddlewareFunc { + return CORS(DefaultCORSConfig()) +} diff --git a/internal/middleware/jwt.go b/internal/middleware/jwt.go new file mode 100644 index 0000000..2dceb38 --- /dev/null +++ b/internal/middleware/jwt.go @@ -0,0 +1,246 @@ +package middleware + +import ( + "errors" + "strings" + "time" + + "stackyard/pkg/response" + + "github.com/golang-jwt/jwt/v5" + "github.com/labstack/echo/v4" +) + +// JWTConfig holds JWT configuration +type JWTConfig struct { + SigningKey string + TokenLookup string + AuthScheme string + Skipper func(c echo.Context) bool + TokenValidator func(token string) (jwt.Claims, error) +} + +// DefaultJWTConfig returns default JWT configuration +func DefaultJWTConfig(signingKey string) JWTConfig { + return JWTConfig{ + SigningKey: signingKey, + TokenLookup: "header:Authorization", + AuthScheme: "Bearer", + Skipper: nil, + } +} + +// JWTClaims represents JWT claims +type JWTClaims struct { + UserID string `json:"user_id"` + Username string `json:"username"` + Email string `json:"email"` + Role string `json:"role"` + jwt.RegisteredClaims +} + +// JWT returns JWT authentication middleware +func JWT(config ...JWTConfig) echo.MiddlewareFunc { + var cfg JWTConfig + if len(config) > 0 { + cfg = config[0] + } else { + cfg = DefaultJWTConfig("") + } + + return func(next echo.HandlerFunc) echo.HandlerFunc { + return func(c echo.Context) error { + if cfg.Skipper != nil && cfg.Skipper(c) { + return next(c) + } + + token, err := extractToken(c, cfg) + if err != nil { + return response.Unauthorized(c, "Missing or invalid token") + } + + claims, err := validateToken(token, 
cfg.SigningKey) + if err != nil { + return response.Unauthorized(c, "Invalid token") + } + + c.Set("user_id", claims.UserID) + c.Set("username", claims.Username) + c.Set("email", claims.Email) + c.Set("role", claims.Role) + c.Set("claims", claims) + + return next(c) + } + } +} + +// JWTWithConfig returns JWT middleware with custom config +func JWTWithConfig(signingKey string) echo.MiddlewareFunc { + return JWT(DefaultJWTConfig(signingKey)) +} + +// JWTRequired returns JWT middleware that requires authentication +func JWTRequired(signingKey string) echo.MiddlewareFunc { + return JWT(DefaultJWTConfig(signingKey)) +} + +// JWTOptional returns JWT middleware that allows optional authentication +func JWTOptional(signingKey string) echo.MiddlewareFunc { + cfg := DefaultJWTConfig(signingKey) + cfg.Skipper = func(c echo.Context) bool { + auth := c.Request().Header.Get(echo.HeaderAuthorization) + if auth == "" { + return true + } + return false + } + return JWT(cfg) +} + +// extractToken extracts token from request +func extractToken(c echo.Context, cfg JWTConfig) (string, error) { + parts := strings.Split(cfg.TokenLookup, ":") + if len(parts) != 2 { + return "", errors.New("invalid token lookup format") + } + + source := parts[0] + key := parts[1] + + var token string + switch source { + case "header": + auth := c.Request().Header.Get(key) + if auth == "" { + return "", errors.New("missing authorization header") + } + + if cfg.AuthScheme != "" { + parts := strings.Split(auth, " ") + if len(parts) != 2 || parts[0] != cfg.AuthScheme { + return "", errors.New("invalid authorization scheme") + } + token = parts[1] + } else { + token = auth + } + case "query": + token = c.QueryParam(key) + case "cookie": + cookie, err := c.Cookie(key) + if err != nil { + return "", err + } + token = cookie.Value + } + + if token == "" { + return "", errors.New("token not found") + } + + return token, nil +} + +// validateToken validates JWT token and returns claims +func 
validateToken(tokenString, signingKey string) (*JWTClaims, error) { + token, err := jwt.ParseWithClaims(tokenString, &JWTClaims{}, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, errors.New("unexpected signing method") + } + return []byte(signingKey), nil + }) + + if err != nil { + return nil, err + } + + if claims, ok := token.Claims.(*JWTClaims); ok && token.Valid { + return claims, nil + } + + return nil, errors.New("invalid token") +} + +// GenerateToken generates a new JWT token +func GenerateToken(userID, username, email, role, signingKey string, expiration time.Duration) (string, error) { + claims := JWTClaims{ + UserID: userID, + Username: username, + Email: email, + Role: role, + RegisteredClaims: jwt.RegisteredClaims{ + ExpiresAt: jwt.NewNumericDate(time.Now().Add(expiration)), + IssuedAt: jwt.NewNumericDate(time.Now()), + NotBefore: jwt.NewNumericDate(time.Now()), + }, + } + + token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + return token.SignedString([]byte(signingKey)) +} + +// GenerateTokenWithClaims generates a JWT token with custom claims +func GenerateTokenWithClaims(claims *JWTClaims, signingKey string) (string, error) { + token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + return token.SignedString([]byte(signingKey)) +} + +// GetUserID extracts user ID from context +func GetUserID(c echo.Context) string { + if userID := c.Get("user_id"); userID != nil { + return userID.(string) + } + return "" +} + +// GetUsername extracts username from context +func GetUsername(c echo.Context) string { + if username := c.Get("username"); username != nil { + return username.(string) + } + return "" +} + +// GetUserEmail extracts user email from context +func GetUserEmail(c echo.Context) string { + if email := c.Get("email"); email != nil { + return email.(string) + } + return "" +} + +// GetUserRole extracts user role from context +func GetUserRole(c echo.Context) string { 
+	if role := c.Get("role"); role != nil {
+		return role.(string)
+	}
+	return ""
+}
+
+// RequireRole returns middleware that requires specific role
+func RequireRole(roles ...string) echo.MiddlewareFunc {
+	return func(next echo.HandlerFunc) echo.HandlerFunc {
+		return func(c echo.Context) error {
+			userRole := GetUserRole(c)
+
+			for _, role := range roles {
+				if userRole == role {
+					return next(c)
+				}
+			}
+
+			return response.Forbidden(c, "Insufficient permissions")
+		}
+	}
+}
+
+// RequireAdmin returns middleware that requires admin role
+func RequireAdmin() echo.MiddlewareFunc {
+	return RequireRole("admin")
+}
+
+// RequireUser returns middleware that requires user role
+func RequireUser() echo.MiddlewareFunc {
+	return RequireRole("user", "admin")
+}
diff --git a/internal/middleware/ratelimit.go b/internal/middleware/ratelimit.go
new file mode 100644
index 0000000..b369419
--- /dev/null
+++ b/internal/middleware/ratelimit.go
@@ -0,0 +1,180 @@
+package middleware
+
+import (
+	"net/http"
+	"strconv"
+	"sync"
+	"time"
+
+	"stackyard/pkg/response"
+
+	"github.com/labstack/echo/v4"
+)
+
+// RateLimiterConfig holds rate limiter configuration
+type RateLimiterConfig struct {
+	// Requests per time window
+	Requests int
+	// Time window duration
+	Window time.Duration
+	// Key function to identify clients (default: IP address)
+	KeyFunc func(c echo.Context) string
+}
+
+// DefaultRateLimiterConfig returns default rate limiter configuration
+func DefaultRateLimiterConfig() RateLimiterConfig {
+	return RateLimiterConfig{
+		Requests: 60,
+		Window:   time.Minute,
+		KeyFunc:  DefaultKeyFunc,
+	}
+}
+
+// DefaultKeyFunc uses client IP address as the key
+func DefaultKeyFunc(c echo.Context) string {
+	return c.RealIP()
+}
+
+// rateLimitEntry tracks requests for a client
+type rateLimitEntry struct {
+	count   int
+	resetAt time.Time
+}
+
+// RateLimiter implements a token bucket rate limiter
+type RateLimiter struct {
+	config  RateLimiterConfig
+	clients map[string]*rateLimitEntry
+	mu      sync.RWMutex
+	cleanup *time.Ticker
+}
+
+// NewRateLimiter creates a new rate limiter
+func NewRateLimiter(config RateLimiterConfig) *RateLimiter {
+	rl := &RateLimiter{
+		config:  config,
+		clients: make(map[string]*rateLimitEntry),
+		cleanup: time.NewTicker(time.Minute),
+	}
+
+	// Start cleanup goroutine
+	go rl.cleanupExpired()
+
+	return rl
+}
+
+// cleanupExpired removes expired entries
+func (rl *RateLimiter) cleanupExpired() {
+	for range rl.cleanup.C {
+		rl.mu.Lock()
+		now := time.Now()
+		for key, entry := range rl.clients {
+			if now.After(entry.resetAt) {
+				delete(rl.clients, key)
+			}
+		}
+		rl.mu.Unlock()
+	}
+}
+
+// Allow checks if a request is allowed
+func (rl *RateLimiter) Allow(key string) (bool, int, time.Time) {
+	rl.mu.Lock()
+	defer rl.mu.Unlock()
+
+	now := time.Now()
+	entry, exists := rl.clients[key]
+
+	if !exists || now.After(entry.resetAt) {
+		// Create new entry
+		rl.clients[key] = &rateLimitEntry{
+			count:   1,
+			resetAt: now.Add(rl.config.Window),
+		}
+		return true, rl.config.Requests - 1, now.Add(rl.config.Window)
+	}
+
+	if entry.count >= rl.config.Requests {
+		// Rate limit exceeded
+		return false, 0, entry.resetAt
+	}
+
+	// Increment counter
+	entry.count++
+	return true, rl.config.Requests - entry.count, entry.resetAt
+}
+
+// Stop stops the cleanup goroutine
+func (rl *RateLimiter) Stop() {
+	rl.cleanup.Stop()
+}
+
+// RateLimit returns rate limiting middleware
+func RateLimit(config ...RateLimiterConfig) echo.MiddlewareFunc {
+	var cfg RateLimiterConfig
+	if len(config) > 0 {
+		cfg = config[0]
+	} else {
+		cfg = DefaultRateLimiterConfig()
+	}
+
+	if cfg.KeyFunc == nil {
+		cfg.KeyFunc = DefaultKeyFunc
+	}
+
+	limiter := NewRateLimiter(cfg)
+
+	return func(next echo.HandlerFunc) echo.HandlerFunc {
+		return func(c echo.Context) error {
+			key := cfg.KeyFunc(c)
+			allowed, remaining, resetAt := limiter.Allow(key)
+
+			// Set rate limit headers (decimal strings; string(rune(n+'0'))
+			// only works for single digits and is wrong for n >= 10)
+			c.Response().Header().Set("X-RateLimit-Limit", strconv.Itoa(cfg.Requests))
+			c.Response().Header().Set("X-RateLimit-Remaining", strconv.Itoa(remaining))
+			c.Response().Header().Set("X-RateLimit-Reset", resetAt.Format(time.RFC3339))
+
+			if !allowed {
+				return response.Error(c, http.StatusTooManyRequests, "RATE_LIMIT_EXCEEDED", "Rate limit exceeded. Please try again later.", map[string]interface{}{
+					"retry_after": resetAt.Unix(),
+				})
+			}
+
+			return next(c)
+		}
+	}
+}
+
+// RateLimitWithConfig returns rate limiting middleware with custom config
+func RateLimitWithConfig(requests int, window time.Duration) echo.MiddlewareFunc {
+	return RateLimit(RateLimiterConfig{
+		Requests: requests,
+		Window:   window,
+		KeyFunc:  DefaultKeyFunc,
+	})
+}
+
+// RateLimitPerIP returns rate limiting middleware per IP
+func RateLimitPerIP(requests int, window time.Duration) echo.MiddlewareFunc {
+	return RateLimit(RateLimiterConfig{
+		Requests: requests,
+		Window:   window,
+		KeyFunc:  DefaultKeyFunc,
+	})
+}
+
+// RateLimitPerUser returns rate limiting middleware per user (requires auth)
+func RateLimitPerUser(requests int, window time.Duration) echo.MiddlewareFunc {
+	return RateLimit(RateLimiterConfig{
+		Requests: requests,
+		Window:   window,
+		KeyFunc: func(c echo.Context) string {
+			// Try to get user ID from context (set by auth middleware)
+			if userID := c.Get("user_id"); userID != nil {
+				return "user:" + userID.(string)
+			}
+			// Fallback to IP
+			return c.RealIP()
+		},
+	})
+}
diff --git a/internal/middleware/security.go b/internal/middleware/security.go
new file mode 100644
index 0000000..2a91065
--- /dev/null
+++ b/internal/middleware/security.go
@@ -0,0 +1,118 @@
+package middleware
+
+import (
+	"strconv"
+
+	"github.com/labstack/echo/v4"
+)
+
+// SecurityConfig holds security headers configuration
+type SecurityConfig struct {
+	ContentSecurityPolicy string
+	XContentTypeOptions   string
+	XFrameOptions         string
+	XXSSProtection        string
+	ReferrerPolicy        string
+	PermissionsPolicy     string
+	HSTSMaxAge            int
+	HSTSIncludeSubdomains bool
+	HSTSPreload           bool
+}
+
+// DefaultSecurityConfig returns default security headers configuration
+func DefaultSecurityConfig() SecurityConfig {
+	return SecurityConfig{
+		ContentSecurityPolicy: "default-src 'self'",
+		XContentTypeOptions:   "nosniff",
+		XFrameOptions:         "DENY",
+		XXSSProtection:        "1; mode=block",
+		ReferrerPolicy:        "strict-origin-when-cross-origin",
+		PermissionsPolicy:     "camera=(), microphone=(), geolocation=()",
+		HSTSMaxAge:            31536000,
+		HSTSIncludeSubdomains: true,
+		HSTSPreload:           false,
+	}
+}
+
+// Security returns security headers middleware
+func Security(config ...SecurityConfig) echo.MiddlewareFunc {
+	var cfg SecurityConfig
+	if len(config) > 0 {
+		cfg = config[0]
+	} else {
+		cfg = DefaultSecurityConfig()
+	}
+
+	return func(next echo.HandlerFunc) echo.HandlerFunc {
+		return func(c echo.Context) error {
+			res := c.Response()
+
+			if cfg.ContentSecurityPolicy != "" {
+				res.Header().Set("Content-Security-Policy", cfg.ContentSecurityPolicy)
+			}
+
+			if cfg.XContentTypeOptions != "" {
+				res.Header().Set("X-Content-Type-Options", cfg.XContentTypeOptions)
+			}
+
+			if cfg.XFrameOptions != "" {
+				res.Header().Set("X-Frame-Options", cfg.XFrameOptions)
+			}
+
+			if cfg.XXSSProtection != "" {
+				res.Header().Set("X-XSS-Protection", cfg.XXSSProtection)
+			}
+
+			if cfg.ReferrerPolicy != "" {
+				res.Header().Set("Referrer-Policy", cfg.ReferrerPolicy)
+			}
+
+			if cfg.PermissionsPolicy != "" {
+				res.Header().Set("Permissions-Policy", cfg.PermissionsPolicy)
+			}
+
+			if cfg.HSTSMaxAge > 0 {
+				// strconv.Itoa: string(rune(31536000)) would emit an invalid
+				// Unicode code point, not the decimal max-age value
+				hsts := "max-age=" + strconv.Itoa(cfg.HSTSMaxAge)
+				if cfg.HSTSIncludeSubdomains {
+					hsts += "; includeSubDomains"
+				}
+				if cfg.HSTSPreload {
+					hsts += "; preload"
+				}
+				res.Header().Set("Strict-Transport-Security", hsts)
+			}
+
+			return next(c)
+		}
+	}
+}
+
+// SecurityWithConfig returns security headers middleware with custom config
+func SecurityWithConfig(csp string) echo.MiddlewareFunc {
+	return Security(SecurityConfig{
+		ContentSecurityPolicy: csp,
+		XContentTypeOptions:   "nosniff",
+		XFrameOptions:         "DENY",
+ XXSSProtection: "1; mode=block", + ReferrerPolicy: "strict-origin-when-cross-origin", + PermissionsPolicy: "camera=(), microphone=(), geolocation=()", + HSTSMaxAge: 31536000, + HSTSIncludeSubdomains: true, + HSTSPreload: false, + }) +} + +// SecurityPermissive returns security headers middleware with permissive settings +func SecurityPermissive() echo.MiddlewareFunc { + return Security(SecurityConfig{ + ContentSecurityPolicy: "default-src 'self' 'unsafe-inline' 'unsafe-eval'", + XContentTypeOptions: "nosniff", + XFrameOptions: "SAMEORIGIN", + XXSSProtection: "1; mode=block", + ReferrerPolicy: "strict-origin-when-cross-origin", + PermissionsPolicy: "camera=(), microphone=(), geolocation=()", + HSTSMaxAge: 31536000, + HSTSIncludeSubdomains: true, + HSTSPreload: false, + }) +} diff --git a/internal/services/modules/products_service.go b/internal/services/modules/products_service.go index 11884ad..58cdc14 100644 --- a/internal/services/modules/products_service.go +++ b/internal/services/modules/products_service.go @@ -28,6 +28,14 @@ func (s *ProductsService) Enabled() bool { return s.enabled } func (s *ProductsService) Endpoints() []string { return []string{"/products"} } func (s *ProductsService) Get() interface{} { return s } +// GetProducts godoc +// @Summary Get products +// @Description Get a list of products +// @Tags products +// @Accept json +// @Produce json +// @Success 200 {object} response.Response "Success" +// @Router /products [get] func (s *ProductsService) RegisterRoutes(g *echo.Group) { sub := g.Group("/products") sub.GET("", func(c echo.Context) error { diff --git a/internal/services/modules/users_service.go b/internal/services/modules/users_service.go index 0ed20c9..54af4c8 100644 --- a/internal/services/modules/users_service.go +++ b/internal/services/modules/users_service.go @@ -29,19 +29,68 @@ func (s *UsersService) Get() interface{} { return s } func (s *UsersService) RegisterRoutes(g *echo.Group) { sub := g.Group("/users") - // List users 
with pagination + // GetUsers godoc + // @Summary List users with pagination + // @Description Get a paginated list of users + // @Tags users + // @Accept json + // @Produce json + // @Param page query int false "Page number" default(1) + // @Param per_page query int false "Items per page" default(10) + // @Success 200 {object} response.Response{data=[]User} "Success" + // @Failure 400 {object} response.Response "Bad request" + // @Router /users [get] sub.GET("", s.GetUsers) - // Get single user + // GetUser godoc + // @Summary Get single user + // @Description Get a specific user by ID + // @Tags users + // @Accept json + // @Produce json + // @Param id path string true "User ID" + // @Success 200 {object} response.Response{data=User} "Success" + // @Failure 404 {object} response.Response "Not found" + // @Router /users/{id} [get] sub.GET("/:id", s.GetUser) - // Create user + // CreateUser godoc + // @Summary Create user + // @Description Create a new user + // @Tags users + // @Accept json + // @Produce json + // @Param request body CreateUserRequest true "Create user request" + // @Success 201 {object} response.Response{data=User} "Created" + // @Failure 400 {object} response.Response "Bad request" + // @Failure 422 {object} response.Response "Validation error" + // @Router /users [post] sub.POST("", s.CreateUser) - // Update user + // UpdateUser godoc + // @Summary Update user + // @Description Update an existing user + // @Tags users + // @Accept json + // @Produce json + // @Param id path string true "User ID" + // @Param request body UpdateUserRequest true "Update user request" + // @Success 200 {object} response.Response{data=User} "Success" + // @Failure 400 {object} response.Response "Bad request" + // @Failure 422 {object} response.Response "Validation error" + // @Router /users/{id} [put] sub.PUT("/:id", s.UpdateUser) - // Delete user + // DeleteUser godoc + // @Summary Delete user + // @Description Delete a user by ID + // @Tags users + // @Accept json 
+	// @Produce json
+	// @Param id path string true "User ID"
+	// @Success 204 "No content"
+	// @Failure 404 {object} response.Response "Not found"
+	// @Router /users/{id} [delete]
 	sub.DELETE("/:id", s.DeleteUser)
 }
diff --git a/pkg/batch/operations.go b/pkg/batch/operations.go
new file mode 100644
index 0000000..207ca98
--- /dev/null
+++ b/pkg/batch/operations.go
@@ -0,0 +1,246 @@
+package batch
+
+import (
+	"context"
+	"sync"
+	"time"
+)
+
+// BatchConfig holds batch operation configuration
+type BatchConfig struct {
+	BatchSize     int
+	Workers       int
+	Timeout       time.Duration
+	RetryAttempts int
+	RetryDelay    time.Duration
+}
+
+// DefaultBatchConfig returns default batch configuration
+func DefaultBatchConfig() BatchConfig {
+	return BatchConfig{
+		BatchSize:     100,
+		Workers:       4,
+		Timeout:       30 * time.Second,
+		RetryAttempts: 3,
+		RetryDelay:    100 * time.Millisecond,
+	}
+}
+
+// BatchResult represents the result of a batch operation
+type BatchResult struct {
+	Successful int
+	Failed     int
+	Errors     []error
+	Duration   time.Duration
+}
+
+// BatchProcessor processes items in batches
+type BatchProcessor[T any] struct {
+	config  BatchConfig
+	handler func(ctx context.Context, item T) error
+}
+
+// NewBatchProcessor creates a new batch processor
+func NewBatchProcessor[T any](config BatchConfig, handler func(ctx context.Context, item T) error) *BatchProcessor[T] {
+	return &BatchProcessor[T]{
+		config:  config,
+		handler: handler,
+	}
+}
+
+// Process processes items in batches
+func (bp *BatchProcessor[T]) Process(ctx context.Context, items []T) (*BatchResult, error) {
+	start := time.Now()
+	result := &BatchResult{}
+
+	if len(items) == 0 {
+		return result, nil
+	}
+
+	// Create batches
+	batches := bp.createBatches(items)
+
+	// Process batches with workers; per-item errors are carried inside
+	// each BatchResult, so no separate error channel is needed
+	resultChan := make(chan *BatchResult, len(batches))
+
+	// Create worker pool
+	workerCtx, cancel := context.WithTimeout(ctx, bp.config.Timeout)
+	defer cancel()
+
+	var wg sync.WaitGroup
+	semaphore := make(chan struct{}, bp.config.Workers)
+
+	for i, batch := range batches {
+		wg.Add(1)
+		go func(batchIndex int, batchItems []T) {
+			defer wg.Done()
+
+			semaphore <- struct{}{}
+			defer func() { <-semaphore }()
+
+			batchResult := bp.processBatch(workerCtx, batchItems)
+			resultChan <- batchResult
+		}(i, batch)
+	}
+
+	// Wait for all batches to complete
+	go func() {
+		wg.Wait()
+		close(resultChan)
+	}()
+
+	// Collect results
+	for batchResult := range resultChan {
+		result.Successful += batchResult.Successful
+		result.Failed += batchResult.Failed
+		result.Errors = append(result.Errors, batchResult.Errors...)
+	}
+
+	result.Duration = time.Since(start)
+
+	return result, nil
+}
+
+// createBatches creates batches from items
+func (bp *BatchProcessor[T]) createBatches(items []T) [][]T {
+	var batches [][]T
+
+	for i := 0; i < len(items); i += bp.config.BatchSize {
+		end := i + bp.config.BatchSize
+		if end > len(items) {
+			end = len(items)
+		}
+		batches = append(batches, items[i:end])
+	}
+
+	return batches
+}
+
+// processBatch processes a single batch
+func (bp *BatchProcessor[T]) processBatch(ctx context.Context, items []T) *BatchResult {
+	result := &BatchResult{}
+
+	for _, item := range items {
+		err := bp.processItemWithRetry(ctx, item)
+		if err != nil {
+			result.Failed++
+			result.Errors = append(result.Errors, err)
+		} else {
+			result.Successful++
+		}
+	}
+
+	return result
+}
+
+// processItemWithRetry processes a single item with retry
+func (bp *BatchProcessor[T]) processItemWithRetry(ctx context.Context, item T) error {
+	var lastErr error
+
+	for attempt := 0; attempt <= bp.config.RetryAttempts; attempt++ {
+		if attempt > 0 {
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			case <-time.After(bp.config.RetryDelay):
+			}
+		}
+
+		err := bp.handler(ctx, item)
+		if err == nil {
+			return nil
+		}
+
+		lastErr = err
+	}
+
+	return lastErr
+}
+
+// BatchWriter writes items in batches
+type BatchWriter[T any]
struct { + config BatchConfig + writer func(ctx context.Context, items []T) error + items []T + mu sync.Mutex +} + +// NewBatchWriter creates a new batch writer +func NewBatchWriter[T any](config BatchConfig, writer func(ctx context.Context, items []T) error) *BatchWriter[T] { + return &BatchWriter[T]{ + config: config, + writer: writer, + } +} + +// Add adds an item to the batch +func (bw *BatchWriter[T]) Add(ctx context.Context, item T) error { + bw.mu.Lock() + bw.items = append(bw.items, item) + shouldFlush := len(bw.items) >= bw.config.BatchSize + bw.mu.Unlock() + + if shouldFlush { + return bw.Flush(ctx) + } + + return nil +} + +// Flush flushes the batch +func (bw *BatchWriter[T]) Flush(ctx context.Context) error { + bw.mu.Lock() + if len(bw.items) == 0 { + bw.mu.Unlock() + return nil + } + + items := make([]T, len(bw.items)) + copy(items, bw.items) + bw.items = bw.items[:0] + bw.mu.Unlock() + + return bw.writer(ctx, items) +} + +// BatchReader reads items in batches +type BatchReader[T any] struct { + config BatchConfig + reader func(ctx context.Context, offset, limit int) ([]T, error) +} + +// NewBatchReader creates a new batch reader +func NewBatchReader[T any](config BatchConfig, reader func(ctx context.Context, offset, limit int) ([]T, error)) *BatchReader[T] { + return &BatchReader[T]{ + config: config, + reader: reader, + } +} + +// ReadAll reads all items in batches +func (br *BatchReader[T]) ReadAll(ctx context.Context) ([]T, error) { + var allItems []T + offset := 0 + + for { + items, err := br.reader(ctx, offset, br.config.BatchSize) + if err != nil { + return nil, err + } + + allItems = append(allItems, items...) 
+ + if len(items) < br.config.BatchSize { + break + } + + offset += len(items) + } + + return allItems, nil +} + +// ReadBatch reads a single batch +func (br *BatchReader[T]) ReadBatch(ctx context.Context, offset int) ([]T, error) { + return br.reader(ctx, offset, br.config.BatchSize) +} diff --git a/pkg/infrastructure/afero.go b/pkg/infrastructure/afero.go index 41b8150..9766f0d 100644 --- a/pkg/infrastructure/afero.go +++ b/pkg/infrastructure/afero.go @@ -306,6 +306,11 @@ func Exists(alias string) bool { return false } + // Handle "all:" prefix if present + if filepath.HasPrefix(physicalPath, "all:") { + physicalPath = physicalPath[4:] // Remove "all:" prefix + } + // Check if file exists in filesystem _, err := instance.fs.Stat(physicalPath) return err == nil @@ -358,3 +363,10 @@ func GetFileSystem() afero.Fs { return instance.fs } + +// ResetForTesting resets the singleton for testing purposes +// This function should only be used in tests +func ResetForTesting() { + instance = nil + once = sync.Once{} +} diff --git a/pkg/infrastructure/afero_test.go b/pkg/infrastructure/afero_test.go deleted file mode 100644 index 871021e..0000000 --- a/pkg/infrastructure/afero_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package infrastructure - -import ( - "embed" - "strings" - "sync" - "testing" -) - -//go:embed testdata/* -var testFS embed.FS - -func TestAferoManager(t *testing.T) { - // Reset the singleton for testing - instance = nil - - // Test alias configuration - aliasMap := map[string]string{ - "config": "all:testdata/config.yaml", - "readme": "all:testdata/README.md", - "test": "all:testdata/test.txt", - } - - // Test initialization - t.Run("Init", func(t *testing.T) { - Init(testFS, aliasMap, true) - - if instance == nil { - t.Fatal("Expected instance to be initialized") - } - - if instance.fs == nil { - t.Fatal("Expected filesystem to be initialized") - } - - if len(instance.aliases) != 3 { - t.Errorf("Expected 3 aliases, got %d", len(instance.aliases)) - } - }) - - 
// Test Exists function - t.Run("Exists", func(t *testing.T) { - // Test non-existing alias - if Exists("nonexistent") { - t.Error("Expected 'nonexistent' alias to not exist") - } - - // Test existing alias but non-existing file - aliasMap := map[string]string{ - "missing": "all:testdata/missing.txt", - } - Init(testFS, aliasMap, true) - if Exists("missing") { - t.Error("Expected 'missing' alias to not exist (file doesn't exist)") - } - }) - - // Test GetAliases function - t.Run("GetAliases", func(t *testing.T) { - aliases := GetAliases() - if len(aliases) != 3 { - t.Errorf("Expected 3 aliases, got %d. Aliases: %v", len(aliases), aliases) - } - - if aliases["config"] != "all:testdata/config.yaml" { - t.Errorf("Expected config alias to be 'all:testdata/config.yaml', got %s", aliases["config"]) - } - - if aliases["readme"] != "all:testdata/README.md" { - t.Errorf("Expected readme alias to be 'all:testdata/README.md', got %s", aliases["readme"]) - } - - if aliases["test"] != "all:testdata/test.txt" { - t.Errorf("Expected test alias to be 'all:testdata/test.txt', got %s", aliases["test"]) - } - }) - - // Test GetFileSystem function - t.Run("GetFileSystem", func(t *testing.T) { - fs := GetFileSystem() - if fs == nil { - t.Error("Expected filesystem to be returned") - } - }) - - // Test development mode (CopyOnWriteFs) - t.Run("DevelopmentMode", func(t *testing.T) { - // Should be CopyOnWriteFs in development mode - fs := GetFileSystem() - if fs == nil { - t.Error("Expected filesystem to be initialized") - } - }) - - // Test production mode (ReadOnlyFs) - t.Run("ProductionMode", func(t *testing.T) { - // Create a new test with production mode - // Reset instance for this test - instance = nil - - // Create a new once variable for this test - originalOnce := once - once = sync.Once{} - - aliasMap := map[string]string{ - "test": "all:testdata/test.txt", - } - Init(testFS, aliasMap, false) - - // Should be ReadOnlyFs in production mode - fs := GetFileSystem() - if fs == nil 
{ - t.Error("Expected filesystem to be initialized") - } - - // Restore original once - once = originalOnce - }) - - // Test singleton behavior (multiple Init calls) - t.Run("Singleton", func(t *testing.T) { - aliasMap1 := map[string]string{ - "test1": "all:testdata/test.txt", - } - aliasMap2 := map[string]string{ - "test2": "all:testdata/test.txt", - } - - Init(testFS, aliasMap1, true) - initialInstance := instance - - Init(testFS, aliasMap2, true) // Should be ignored due to singleton - if instance != initialInstance { - t.Error("Expected singleton behavior - instance should not change") - } - }) - - // Test error handling - t.Run("ErrorHandling", func(t *testing.T) { - // Reset instance - instance = nil - - // Test Read without initialization - _, err := Read("test") - if err == nil { - t.Error("Expected error when reading without initialization") - } - if !strings.Contains(err.Error(), "not initialized") { - t.Errorf("Expected 'not initialized' error, got: %v", err) - } - - // Test Stream without initialization - _, err = Stream("test") - if err == nil { - t.Error("Expected error when streaming without initialization") - } - if !strings.Contains(err.Error(), "not initialized") { - t.Errorf("Expected 'not initialized' error, got: %v", err) - } - - // Test Exists without initialization - if Exists("test") { - t.Error("Expected false when checking existence without initialization") - } - }) -} diff --git a/pkg/infrastructure/testdata/README.md b/pkg/infrastructure/testdata/README.md deleted file mode 100644 index e7a5ee8..0000000 --- a/pkg/infrastructure/testdata/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Test README - -This is a test README file for the Afero manager. 
\ No newline at end of file diff --git a/pkg/infrastructure/testdata/config.yaml b/pkg/infrastructure/testdata/config.yaml deleted file mode 100644 index 40aaf41..0000000 --- a/pkg/infrastructure/testdata/config.yaml +++ /dev/null @@ -1,2 +0,0 @@ -test: configuration -value: 123 \ No newline at end of file diff --git a/pkg/infrastructure/testdata/test.txt b/pkg/infrastructure/testdata/test.txt deleted file mode 100644 index 5538279..0000000 --- a/pkg/infrastructure/testdata/test.txt +++ /dev/null @@ -1 +0,0 @@ -This is a test file for Afero manager testing. \ No newline at end of file diff --git a/pkg/logging/rotation.go b/pkg/logging/rotation.go new file mode 100644 index 0000000..69afb52 --- /dev/null +++ b/pkg/logging/rotation.go @@ -0,0 +1,210 @@ +package logging + +import ( + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" +) + +// RotationConfig holds log rotation configuration +type RotationConfig struct { + MaxSize int64 // Maximum size in bytes before rotation + MaxAge time.Duration // Maximum age of log files + MaxBackups int // Maximum number of backup files + Compress bool // Compress rotated files +} + +// DefaultRotationConfig returns default rotation configuration +func DefaultRotationConfig() RotationConfig { + return RotationConfig{ + MaxSize: 100 * 1024 * 1024, // 100MB + MaxAge: 7 * 24 * time.Hour, // 7 days + MaxBackups: 10, + Compress: true, + } +} + +// RotatingWriter wraps a writer with log rotation +type RotatingWriter struct { + config RotationConfig + filename string + file *os.File + size int64 + mu sync.Mutex +} + +// NewRotatingWriter creates a new rotating writer +func NewRotatingWriter(filename string, config RotationConfig) (*RotatingWriter, error) { + rw := &RotatingWriter{ + config: config, + filename: filename, + } + + if err := rw.openFile(); err != nil { + return nil, err + } + + return rw, nil +} + +// Write implements io.Writer interface +func (rw *RotatingWriter) Write(p []byte) (n int, err error) { + 
rw.mu.Lock() + defer rw.mu.Unlock() + + if rw.file == nil { + if err := rw.openFile(); err != nil { + return 0, err + } + } + + // Check if rotation is needed + if rw.size+int64(len(p)) > rw.config.MaxSize { + if err := rw.rotate(); err != nil { + return 0, err + } + } + + n, err = rw.file.Write(p) + rw.size += int64(n) + + return n, err +} + +// openFile opens the log file +func (rw *RotatingWriter) openFile() error { + dir := filepath.Dir(rw.filename) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + + file, err := os.OpenFile(rw.filename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) + if err != nil { + return err + } + + info, err := file.Stat() + if err != nil { + file.Close() + return err + } + + rw.file = file + rw.size = info.Size() + + return nil +} + +// rotate rotates the log file +func (rw *RotatingWriter) rotate() error { + if rw.file != nil { + rw.file.Close() + rw.file = nil + } + + // Generate backup filename + timestamp := time.Now().Format("2006-01-02T15-04-05") + backupName := fmt.Sprintf("%s.%s", rw.filename, timestamp) + + // Rename current file + if err := os.Rename(rw.filename, backupName); err != nil { + // If rename fails, try to remove the file + os.Remove(rw.filename) + } + + // Compress if configured + if rw.config.Compress { + if err := rw.compressFile(backupName); err != nil { + // Log compression error but don't fail + fmt.Fprintf(os.Stderr, "Failed to compress log file: %v\n", err) + } + } + + // Clean up old backups + if err := rw.cleanup(); err != nil { + fmt.Fprintf(os.Stderr, "Failed to cleanup old logs: %v\n", err) + } + + // Open new file + return rw.openFile() +} + +// compressFile compresses a log file +func (rw *RotatingWriter) compressFile(filename string) error { + // Simple compression using gzip would go here + // For now, just return nil + return nil +} + +// cleanup removes old backup files +func (rw *RotatingWriter) cleanup() error { + dir := filepath.Dir(rw.filename) + base := 
filepath.Base(rw.filename) + + entries, err := os.ReadDir(dir) + if err != nil { + return err + } + + var backups []os.FileInfo + for _, entry := range entries { + if entry.IsDir() { + continue + } + + name := entry.Name() + if strings.HasPrefix(name, base+".") { + info, err := entry.Info() + if err != nil { + continue + } + backups = append(backups, info) + } + } + + // Sort by modification time (oldest first) + sort.Slice(backups, func(i, j int) bool { + return backups[i].ModTime().Before(backups[j].ModTime()) + }) + + // Remove old backups + for i := 0; i < len(backups)-rw.config.MaxBackups; i++ { + path := filepath.Join(dir, backups[i].Name()) + if err := os.Remove(path); err != nil { + return err + } + } + + return nil +} + +// Close closes the rotating writer +func (rw *RotatingWriter) Close() error { + rw.mu.Lock() + defer rw.mu.Unlock() + + if rw.file != nil { + return rw.file.Close() + } + + return nil +} + +// GetStats returns rotation statistics +func (rw *RotatingWriter) GetStats() map[string]interface{} { + rw.mu.Lock() + defer rw.mu.Unlock() + + return map[string]interface{}{ + "filename": rw.filename, + "size": rw.size, + "max_size": rw.config.MaxSize, + "max_age": rw.config.MaxAge.String(), + "max_backups": rw.config.MaxBackups, + "compress": rw.config.Compress, + } +} diff --git a/pkg/logging/sampler.go b/pkg/logging/sampler.go new file mode 100644 index 0000000..faf9e80 --- /dev/null +++ b/pkg/logging/sampler.go @@ -0,0 +1,312 @@ +package logging + +import ( + "encoding/json" + "hash/fnv" + "sync" + "time" +) + +// SamplingStrategy represents the sampling strategy +type SamplingStrategy int + +const ( + // SampleByRate samples a percentage of logs + SampleByRate SamplingStrategy = iota + // SampleByCount samples every N logs + SampleByCount + // SampleByTime samples one log per time window + SampleByTime +) + +// LogSampler samples logs based on various strategies +type LogSampler struct { + strategy SamplingStrategy + rate float64 + count int + 
window time.Duration + + mu sync.Mutex + counter int + lastSampled time.Time + sampledCount int +} + +// NewLogSampler creates a new log sampler +func NewLogSampler(strategy SamplingStrategy, rate float64, count int, window time.Duration) *LogSampler { + return &LogSampler{ + strategy: strategy, + rate: rate, + count: count, + window: window, + } +} + +// ShouldSample determines if a log should be sampled +func (ls *LogSampler) ShouldSample(entry LogEntry) bool { + ls.mu.Lock() + defer ls.mu.Unlock() + + switch ls.strategy { + case SampleByRate: + return ls.sampleByRate(entry) + case SampleByCount: + return ls.sampleByCount() + case SampleByTime: + return ls.sampleByTime() + default: + return true + } +} + +// sampleByRate samples based on a rate (0.0 to 1.0) +func (ls *LogSampler) sampleByRate(entry LogEntry) bool { + // Use message hash for deterministic sampling + h := fnv.New32a() + h.Write([]byte(entry.Message)) + hash := h.Sum32() + + // Convert hash to 0-1 range + normalized := float64(hash%10000) / 10000.0 + + return normalized < ls.rate +} + +// sampleByCount samples every N logs +func (ls *LogSampler) sampleByCount() bool { + ls.counter++ + + if ls.counter >= ls.count { + ls.counter = 0 + ls.sampledCount++ + return true + } + + return false +} + +// sampleByTime samples one log per time window +func (ls *LogSampler) sampleByTime() bool { + now := time.Now() + + if now.Sub(ls.lastSampled) >= ls.window { + ls.lastSampled = now + ls.sampledCount++ + return true + } + + return false +} + +// Reset resets the sampler state +func (ls *LogSampler) Reset() { + ls.mu.Lock() + defer ls.mu.Unlock() + + ls.counter = 0 + ls.sampledCount = 0 + ls.lastSampled = time.Time{} +} + +// GetStats returns sampler statistics +func (ls *LogSampler) GetStats() map[string]interface{} { + ls.mu.Lock() + defer ls.mu.Unlock() + + return map[string]interface{}{ + "strategy": ls.strategy, + "rate": ls.rate, + "count": ls.count, + "window": ls.window.String(), + "counter": ls.counter, + 
"sampled_count": ls.sampledCount, + "last_sampled": ls.lastSampled, + } +} + +// SamplingLogger wraps a logger with sampling +type SamplingLogger struct { + logger *StructuredLogger + sampler *LogSampler +} + +// NewSamplingLogger creates a new sampling logger +func NewSamplingLogger(logger *StructuredLogger, sampler *LogSampler) *SamplingLogger { + return &SamplingLogger{ + logger: logger, + sampler: sampler, + } +} + +// Log logs an entry if it passes sampling +func (sl *SamplingLogger) Log(entry LogEntry) { + if sl.sampler.ShouldSample(entry) { + // Write the log entry directly to the underlying logger + data, err := json.Marshal(entry) + if err != nil { + return + } + data = append(data, '\n') + sl.logger.writer.Write(data) + } +} + +// Debug logs a debug message with sampling +func (sl *SamplingLogger) Debug(msg string, fields ...map[string]interface{}) { + if sl.logger.level <= DEBUG { + entry := sl.logger.createEntry(DEBUG, msg, fields...) + if sl.sampler.ShouldSample(entry) { + sl.logger.log(DEBUG, msg, fields...) + } + } +} + +// Info logs an info message with sampling +func (sl *SamplingLogger) Info(msg string, fields ...map[string]interface{}) { + if sl.logger.level <= INFO { + entry := sl.logger.createEntry(INFO, msg, fields...) + if sl.sampler.ShouldSample(entry) { + sl.logger.log(INFO, msg, fields...) + } + } +} + +// Warn logs a warning message with sampling +func (sl *SamplingLogger) Warn(msg string, fields ...map[string]interface{}) { + if sl.logger.level <= WARN { + entry := sl.logger.createEntry(WARN, msg, fields...) + if sl.sampler.ShouldSample(entry) { + sl.logger.log(WARN, msg, fields...) + } + } +} + +// Error logs an error message with sampling +func (sl *SamplingLogger) Error(msg string, fields ...map[string]interface{}) { + if sl.logger.level <= ERROR { + entry := sl.logger.createEntry(ERROR, msg, fields...) + if sl.sampler.ShouldSample(entry) { + sl.logger.log(ERROR, msg, fields...) 
+ } + } +} + +// Fatal logs a fatal message with sampling +func (sl *SamplingLogger) Fatal(msg string, fields ...map[string]interface{}) { + if sl.logger.level <= FATAL { + entry := sl.logger.createEntry(FATAL, msg, fields...) + if sl.sampler.ShouldSample(entry) { + sl.logger.log(FATAL, msg, fields...) + } + } +} + +// createEntry creates a log entry for sampling check +func (sl *StructuredLogger) createEntry(level LogLevel, msg string, fields ...map[string]interface{}) LogEntry { + entry := LogEntry{ + Timestamp: time.Now().UTC(), + Level: level.String(), + Message: msg, + ServiceName: sl.serviceName, + Version: sl.version, + Environment: sl.environment, + Fields: make(map[string]interface{}), + } + + for k, v := range sl.fields { + entry.Fields[k] = v + } + + for _, f := range fields { + for k, v := range f { + entry.Fields[k] = v + } + } + + return entry +} + +// AdaptiveSampler adapts sampling rate based on load +type AdaptiveSampler struct { + mu sync.Mutex + baseRate float64 + currentRate float64 + maxRate float64 + minRate float64 + window time.Duration + logCount int + lastAdjustment time.Time + adjustmentFactor float64 +} + +// NewAdaptiveSampler creates a new adaptive sampler +func NewAdaptiveSampler(baseRate, minRate, maxRate float64, window time.Duration) *AdaptiveSampler { + return &AdaptiveSampler{ + baseRate: baseRate, + currentRate: baseRate, + maxRate: maxRate, + minRate: minRate, + window: window, + lastAdjustment: time.Now(), + adjustmentFactor: 0.1, + } +} + +// ShouldSample determines if a log should be sampled +func (as *AdaptiveSampler) ShouldSample() bool { + as.mu.Lock() + defer as.mu.Unlock() + + as.logCount++ + + // Adjust rate periodically + now := time.Now() + if now.Sub(as.lastAdjustment) >= as.window { + as.adjustRate() + as.lastAdjustment = now + } + + // Sample based on current rate + h := fnv.New32a() + h.Write([]byte(time.Now().String())) + hash := h.Sum32() + + normalized := float64(hash%10000) / 10000.0 + + return normalized < 
as.currentRate +} + +// adjustRate adjusts the sampling rate based on log volume +func (as *AdaptiveSampler) adjustRate() { + // If too many logs, decrease rate + if as.logCount > 1000 { + as.currentRate = as.currentRate * (1 - as.adjustmentFactor) + if as.currentRate < as.minRate { + as.currentRate = as.minRate + } + } else if as.logCount < 100 { + // If too few logs, increase rate + as.currentRate = as.currentRate * (1 + as.adjustmentFactor) + if as.currentRate > as.maxRate { + as.currentRate = as.maxRate + } + } + + as.logCount = 0 +} + +// GetStats returns adaptive sampler statistics +func (as *AdaptiveSampler) GetStats() map[string]interface{} { + as.mu.Lock() + defer as.mu.Unlock() + + return map[string]interface{}{ + "base_rate": as.baseRate, + "current_rate": as.currentRate, + "max_rate": as.maxRate, + "min_rate": as.minRate, + "window": as.window.String(), + "log_count": as.logCount, + "last_adjustment": as.lastAdjustment, + } +} diff --git a/pkg/logging/structured.go b/pkg/logging/structured.go new file mode 100644 index 0000000..80a567e --- /dev/null +++ b/pkg/logging/structured.go @@ -0,0 +1,255 @@ +package logging + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "runtime" + "time" +) + +// LogLevel represents the log level +type LogLevel int + +const ( + DEBUG LogLevel = iota + INFO + WARN + ERROR + FATAL +) + +func (l LogLevel) String() string { + switch l { + case DEBUG: + return "DEBUG" + case INFO: + return "INFO" + case WARN: + return "WARN" + case ERROR: + return "ERROR" + case FATAL: + return "FATAL" + default: + return "UNKNOWN" + } +} + +// LogEntry represents a structured log entry +type LogEntry struct { + Timestamp time.Time `json:"timestamp"` + Level string `json:"level"` + Message string `json:"message"` + Caller string `json:"caller,omitempty"` + Stack string `json:"stack,omitempty"` + Fields map[string]interface{} `json:"fields,omitempty"` + RequestID string `json:"request_id,omitempty"` + UserID string 
`json:"user_id,omitempty"` + TraceID string `json:"trace_id,omitempty"` + SpanID string `json:"span_id,omitempty"` + ServiceName string `json:"service_name"` + Version string `json:"version"` + Environment string `json:"environment"` +} + +// StructuredLogger provides structured JSON logging +type StructuredLogger struct { + writer io.Writer + level LogLevel + serviceName string + version string + environment string + fields map[string]interface{} +} + +// NewStructuredLogger creates a new structured logger +func NewStructuredLogger(writer io.Writer, level LogLevel, serviceName, version, environment string) *StructuredLogger { + if writer == nil { + writer = os.Stdout + } + + return &StructuredLogger{ + writer: writer, + level: level, + serviceName: serviceName, + version: version, + environment: environment, + fields: make(map[string]interface{}), + } +} + +// WithFields creates a logger with additional fields +func (sl *StructuredLogger) WithFields(fields map[string]interface{}) *StructuredLogger { + newLogger := &StructuredLogger{ + writer: sl.writer, + level: sl.level, + serviceName: sl.serviceName, + version: sl.version, + environment: sl.environment, + fields: make(map[string]interface{}), + } + + for k, v := range sl.fields { + newLogger.fields[k] = v + } + + for k, v := range fields { + newLogger.fields[k] = v + } + + return newLogger +} + +// WithContext creates a logger with context information +func (sl *StructuredLogger) WithContext(ctx context.Context) *StructuredLogger { + fields := make(map[string]interface{}) + + if requestID := ctx.Value("request_id"); requestID != nil { + fields["request_id"] = requestID + } + + if userID := ctx.Value("user_id"); userID != nil { + fields["user_id"] = userID + } + + if traceID := ctx.Value("trace_id"); traceID != nil { + fields["trace_id"] = traceID + } + + if spanID := ctx.Value("span_id"); spanID != nil { + fields["span_id"] = spanID + } + + return sl.WithFields(fields) +} + +// Debug logs a debug message +func 
(sl *StructuredLogger) Debug(msg string, fields ...map[string]interface{}) { + if sl.level <= DEBUG { + sl.log(DEBUG, msg, fields...) + } +} + +// Info logs an info message +func (sl *StructuredLogger) Info(msg string, fields ...map[string]interface{}) { + if sl.level <= INFO { + sl.log(INFO, msg, fields...) + } +} + +// Warn logs a warning message +func (sl *StructuredLogger) Warn(msg string, fields ...map[string]interface{}) { + if sl.level <= WARN { + sl.log(WARN, msg, fields...) + } +} + +// Error logs an error message +func (sl *StructuredLogger) Error(msg string, fields ...map[string]interface{}) { + if sl.level <= ERROR { + sl.log(ERROR, msg, fields...) + } +} + +// Fatal logs a fatal message +func (sl *StructuredLogger) Fatal(msg string, fields ...map[string]interface{}) { + if sl.level <= FATAL { + sl.log(FATAL, msg, fields...) + os.Exit(1) + } +} + +// log writes a log entry +func (sl *StructuredLogger) log(level LogLevel, msg string, fields ...map[string]interface{}) { + entry := LogEntry{ + Timestamp: time.Now().UTC(), + Level: level.String(), + Message: msg, + ServiceName: sl.serviceName, + Version: sl.version, + Environment: sl.environment, + Fields: make(map[string]interface{}), + } + + // Add caller information + _, file, line, ok := runtime.Caller(3) + if ok { + entry.Caller = fmt.Sprintf("%s:%d", file, line) + } + + // Add fields + for k, v := range sl.fields { + entry.Fields[k] = v + } + + for _, f := range fields { + for k, v := range f { + entry.Fields[k] = v + } + } + + // Add context fields + if requestID, ok := entry.Fields["request_id"]; ok { + entry.RequestID = fmt.Sprint(requestID) + delete(entry.Fields, "request_id") + } + + if userID, ok := entry.Fields["user_id"]; ok { + entry.UserID = fmt.Sprint(userID) + delete(entry.Fields, "user_id") + } + + if traceID, ok := entry.Fields["trace_id"]; ok { + entry.TraceID = fmt.Sprint(traceID) + delete(entry.Fields, "trace_id") + } + + if spanID, ok := entry.Fields["span_id"]; ok { + entry.SpanID = 
fmt.Sprint(spanID) + delete(entry.Fields, "span_id") + } + + // Add stack trace for errors + if level >= ERROR { + entry.Stack = getStackTrace() + } + + // Write the log entry + data, err := json.Marshal(entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to marshal log entry: %v\n", err) + return + } + + data = append(data, '\n') + sl.writer.Write(data) +} + +// getStackTrace returns a stack trace +func getStackTrace() string { + buf := make([]byte, 4096) + n := runtime.Stack(buf, false) + return string(buf[:n]) +} + +// LogMiddleware creates a middleware that logs requests +func LogMiddleware(logger *StructuredLogger) func(next func()) func() { + return func(next func()) func() { + return func() { + start := time.Now() + logger.Info("Request started", map[string]interface{}{ + "timestamp": start, + }) + + next() + + duration := time.Since(start) + logger.Info("Request completed", map[string]interface{}{ + "duration": duration.String(), + }) + } + } +} diff --git a/pkg/metrics/prometheus.go b/pkg/metrics/prometheus.go new file mode 100644 index 0000000..96a58c1 --- /dev/null +++ b/pkg/metrics/prometheus.go @@ -0,0 +1,237 @@ +package metrics + +import ( + "net/http" + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// Metrics holds all Prometheus metrics +type Metrics struct { + HTTPRequestsTotal *prometheus.CounterVec + HTTPRequestDuration *prometheus.HistogramVec + HTTPRequestSize *prometheus.HistogramVec + HTTPResponseSize *prometheus.HistogramVec + ActiveConnections prometheus.Gauge + DatabaseConnections *prometheus.GaugeVec + CacheHits *prometheus.CounterVec + CacheMisses *prometheus.CounterVec + CircuitBreakerState *prometheus.GaugeVec + CircuitBreakerTrips *prometheus.CounterVec + WebhookEvents *prometheus.CounterVec + WebhookDuration *prometheus.HistogramVec + WebSocketConnections prometheus.Gauge + 
BatchOperations *prometheus.CounterVec + BatchDuration *prometheus.HistogramVec + LogEntries *prometheus.CounterVec + ErrorRate *prometheus.CounterVec +} + +// NewMetrics creates new Prometheus metrics +func NewMetrics() *Metrics { + return &Metrics{ + HTTPRequestsTotal: promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "http_requests_total", + Help: "Total number of HTTP requests", + }, + []string{"method", "path", "status"}, + ), + HTTPRequestDuration: promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "http_request_duration_seconds", + Help: "HTTP request duration in seconds", + Buckets: prometheus.DefBuckets, + }, + []string{"method", "path", "status"}, + ), + HTTPRequestSize: promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "http_request_size_bytes", + Help: "HTTP request size in bytes", + Buckets: prometheus.ExponentialBuckets(100, 10, 8), + }, + []string{"method", "path"}, + ), + HTTPResponseSize: promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "http_response_size_bytes", + Help: "HTTP response size in bytes", + Buckets: prometheus.ExponentialBuckets(100, 10, 8), + }, + []string{"method", "path"}, + ), + ActiveConnections: promauto.NewGauge( + prometheus.GaugeOpts{ + Name: "active_connections", + Help: "Number of active connections", + }, + ), + DatabaseConnections: promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "database_connections", + Help: "Number of database connections", + }, + []string{"database", "state"}, + ), + CacheHits: promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "cache_hits_total", + Help: "Total number of cache hits", + }, + []string{"cache", "operation"}, + ), + CacheMisses: promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "cache_misses_total", + Help: "Total number of cache misses", + }, + []string{"cache", "operation"}, + ), + CircuitBreakerState: promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "circuit_breaker_state", + Help: "Circuit breaker state (0=closed, 
1=half-open, 2=open)", + }, + []string{"name"}, + ), + CircuitBreakerTrips: promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "circuit_breaker_trips_total", + Help: "Total number of circuit breaker trips", + }, + []string{"name"}, + ), + WebhookEvents: promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "webhook_events_total", + Help: "Total number of webhook events", + }, + []string{"event_type", "status"}, + ), + WebhookDuration: promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "webhook_duration_seconds", + Help: "Webhook request duration in seconds", + Buckets: prometheus.DefBuckets, + }, + []string{"event_type"}, + ), + WebSocketConnections: promauto.NewGauge( + prometheus.GaugeOpts{ + Name: "websocket_connections", + Help: "Number of WebSocket connections", + }, + ), + BatchOperations: promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "batch_operations_total", + Help: "Total number of batch operations", + }, + []string{"operation", "status"}, + ), + BatchDuration: promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "batch_duration_seconds", + Help: "Batch operation duration in seconds", + Buckets: prometheus.DefBuckets, + }, + []string{"operation"}, + ), + LogEntries: promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "log_entries_total", + Help: "Total number of log entries", + }, + []string{"level", "service"}, + ), + ErrorRate: promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "errors_total", + Help: "Total number of errors", + }, + []string{"type", "service"}, + ), + } +} + +// RecordHTTPRequest records HTTP request metrics +func (m *Metrics) RecordHTTPRequest(method, path string, status int, duration time.Duration, requestSize, responseSize int64) { + statusStr := strconv.Itoa(status) + + m.HTTPRequestsTotal.WithLabelValues(method, path, statusStr).Inc() + m.HTTPRequestDuration.WithLabelValues(method, path, statusStr).Observe(duration.Seconds()) + m.HTTPRequestSize.WithLabelValues(method, 
path).Observe(float64(requestSize)) + m.HTTPResponseSize.WithLabelValues(method, path).Observe(float64(responseSize)) +} + +// RecordCacheHit records a cache hit +func (m *Metrics) RecordCacheHit(cache, operation string) { + m.CacheHits.WithLabelValues(cache, operation).Inc() +} + +// RecordCacheMiss records a cache miss +func (m *Metrics) RecordCacheMiss(cache, operation string) { + m.CacheMisses.WithLabelValues(cache, operation).Inc() +} + +// SetCircuitBreakerState sets circuit breaker state +func (m *Metrics) SetCircuitBreakerState(name string, state int) { + m.CircuitBreakerState.WithLabelValues(name).Set(float64(state)) +} + +// RecordCircuitBreakerTrip records a circuit breaker trip +func (m *Metrics) RecordCircuitBreakerTrip(name string) { + m.CircuitBreakerTrips.WithLabelValues(name).Inc() +} + +// RecordWebhookEvent records a webhook event +func (m *Metrics) RecordWebhookEvent(eventType, status string, duration time.Duration) { + m.WebhookEvents.WithLabelValues(eventType, status).Inc() + m.WebhookDuration.WithLabelValues(eventType).Observe(duration.Seconds()) +} + +// SetWebSocketConnections sets WebSocket connections count +func (m *Metrics) SetWebSocketConnections(count int) { + m.WebSocketConnections.Set(float64(count)) +} + +// RecordBatchOperation records a batch operation +func (m *Metrics) RecordBatchOperation(operation, status string, duration time.Duration) { + m.BatchOperations.WithLabelValues(operation, status).Inc() + m.BatchDuration.WithLabelValues(operation).Observe(duration.Seconds()) +} + +// RecordLogEntry records a log entry +func (m *Metrics) RecordLogEntry(level, service string) { + m.LogEntries.WithLabelValues(level, service).Inc() +} + +// RecordError records an error +func (m *Metrics) RecordError(errorType, service string) { + m.ErrorRate.WithLabelValues(errorType, service).Inc() +} + +// SetActiveConnections sets active connections count +func (m *Metrics) SetActiveConnections(count int) { + m.ActiveConnections.Set(float64(count)) 
+} + +// SetDatabaseConnections sets database connections count +func (m *Metrics) SetDatabaseConnections(database, state string, count int) { + m.DatabaseConnections.WithLabelValues(database, state).Set(float64(count)) +} + +// Handler returns Prometheus metrics HTTP handler +func (m *Metrics) Handler() http.Handler { + return promhttp.Handler() +} + +// GetMetrics returns the metrics instance +func GetMetrics() *Metrics { + return &Metrics{} +} diff --git a/pkg/pagination/cursor.go b/pkg/pagination/cursor.go new file mode 100644 index 0000000..ca5d1fa --- /dev/null +++ b/pkg/pagination/cursor.go @@ -0,0 +1,227 @@ +package pagination + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + "time" +) + +// Cursor represents a pagination cursor +type Cursor struct { + ID string `json:"id"` + Timestamp time.Time `json:"timestamp"` + Value string `json:"value,omitempty"` +} + +// CursorPagination represents cursor-based pagination parameters +type CursorPagination struct { + First int `json:"first,omitempty"` + After *Cursor `json:"after,omitempty"` + Last int `json:"last,omitempty"` + Before *Cursor `json:"before,omitempty"` +} + +// CursorPage represents a page of results with cursor information +type CursorPage struct { + Edges []Edge `json:"edges"` + PageInfo PageInfo `json:"page_info"` + TotalCount int `json:"total_count"` +} + +// Edge represents an edge in a cursor-based pagination +type Edge struct { + Node interface{} `json:"node"` + Cursor string `json:"cursor"` +} + +// PageInfo represents pagination metadata +type PageInfo struct { + HasNextPage bool `json:"has_next_page"` + HasPreviousPage bool `json:"has_previous_page"` + StartCursor *string `json:"start_cursor,omitempty"` + EndCursor *string `json:"end_cursor,omitempty"` +} + +// NewCursorPagination creates a new cursor pagination from query parameters +func NewCursorPagination(first, last int, after, before string) (*CursorPagination, error) { + pagination := &CursorPagination{ + First: 
first, + Last: last, + } + + if after != "" { + cursor, err := DecodeCursor(after) + if err != nil { + return nil, fmt.Errorf("invalid after cursor: %w", err) + } + pagination.After = cursor + } + + if before != "" { + cursor, err := DecodeCursor(before) + if err != nil { + return nil, fmt.Errorf("invalid before cursor: %w", err) + } + pagination.Before = cursor + } + + if pagination.First < 0 { + pagination.First = 0 + } + if pagination.Last < 0 { + pagination.Last = 0 + } + + if pagination.First == 0 && pagination.Last == 0 { + pagination.First = 10 + } + + return pagination, nil +} + +// EncodeCursor encodes a cursor to a base64 string +func EncodeCursor(cursor *Cursor) (string, error) { + data, err := json.Marshal(cursor) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(data), nil +} + +// DecodeCursor decodes a base64 cursor string +func DecodeCursor(cursorStr string) (*Cursor, error) { + data, err := base64.StdEncoding.DecodeString(cursorStr) + if err != nil { + return nil, err + } + + var cursor Cursor + if err := json.Unmarshal(data, &cursor); err != nil { + return nil, err + } + + return &cursor, nil +} + +// CreateCursor creates a cursor from an item +func CreateCursor(id string, timestamp time.Time, value string) (*Cursor, error) { + return &Cursor{ + ID: id, + Timestamp: timestamp, + Value: value, + }, nil +} + +// CreateEdge creates an edge from a node and cursor +func CreateEdge(node interface{}, cursor *Cursor) (*Edge, error) { + cursorStr, err := EncodeCursor(cursor) + if err != nil { + return nil, err + } + + return &Edge{ + Node: node, + Cursor: cursorStr, + }, nil +} + +// CreatePage creates a cursor page from edges +func CreatePage(edges []Edge, hasNextPage, hasPreviousPage bool, totalCount int) *CursorPage { + page := &CursorPage{ + Edges: edges, + TotalCount: totalCount, + PageInfo: PageInfo{ + HasNextPage: hasNextPage, + HasPreviousPage: hasPreviousPage, + }, + } + + if len(edges) > 0 { + startCursor := 
edges[0].Cursor + endCursor := edges[len(edges)-1].Cursor + page.PageInfo.StartCursor = &startCursor + page.PageInfo.EndCursor = &endCursor + } + + return page +} + +// GetLimit returns the limit for the query +func (p *CursorPagination) GetLimit() int { + if p.First > 0 { + return p.First + 1 + } + if p.Last > 0 { + return p.Last + 1 + } + return 11 +} + +// GetOffset returns the offset for the query (not used in cursor pagination) +func (p *CursorPagination) GetOffset() int { + return 0 +} + +// HasAfterCursor returns true if there's an after cursor +func (p *CursorPagination) HasAfterCursor() bool { + return p.After != nil +} + +// HasBeforeCursor returns true if there's a before cursor +func (p *CursorPagination) HasBeforeCursor() bool { + return p.Before != nil +} + +// IsForwardPagination returns true if forward pagination is requested +func (p *CursorPagination) IsForwardPagination() bool { + return p.First > 0 || (p.First == 0 && p.Last == 0) +} + +// IsBackwardPagination returns true if backward pagination is requested +func (p *CursorPagination) IsBackwardPagination() bool { + return p.Last > 0 +} + +// GetAfterID returns the after cursor ID +func (p *CursorPagination) GetAfterID() string { + if p.After != nil { + return p.After.ID + } + return "" +} + +// GetBeforeID returns the before cursor ID +func (p *CursorPagination) GetBeforeID() string { + if p.Before != nil { + return p.Before.ID + } + return "" +} + +// GetAfterTimestamp returns the after cursor timestamp +func (p *CursorPagination) GetAfterTimestamp() time.Time { + if p.After != nil { + return p.After.Timestamp + } + return time.Time{} +} + +// GetBeforeTimestamp returns the before cursor timestamp +func (p *CursorPagination) GetBeforeTimestamp() time.Time { + if p.Before != nil { + return p.Before.Timestamp + } + return time.Time{} +} + +// StringToInt converts a string to int +func StringToInt(s string) (int, error) { + return strconv.Atoi(s) +} + +// IntToString converts an int to string 
+func IntToString(i int) string { + return strconv.Itoa(i) +} diff --git a/pkg/resilience/circuit_breaker.go b/pkg/resilience/circuit_breaker.go new file mode 100644 index 0000000..d60cbcd --- /dev/null +++ b/pkg/resilience/circuit_breaker.go @@ -0,0 +1,264 @@ +package resilience + +import ( + "errors" + "sync" + "time" +) + +// State represents the circuit breaker state +type State int + +const ( + StateClosed State = iota + StateHalfOpen + StateOpen +) + +func (s State) String() string { + switch s { + case StateClosed: + return "closed" + case StateHalfOpen: + return "half-open" + case StateOpen: + return "open" + default: + return "unknown" + } +} + +// CircuitBreakerConfig holds circuit breaker configuration +type CircuitBreakerConfig struct { + Name string + MaxFailures int + ResetTimeout time.Duration + HalfOpenMaxRequests int + OnStateChange func(name string, from State, to State) +} + +// DefaultCircuitBreakerConfig returns default configuration +func DefaultCircuitBreakerConfig(name string) CircuitBreakerConfig { + return CircuitBreakerConfig{ + Name: name, + MaxFailures: 5, + ResetTimeout: 30 * time.Second, + HalfOpenMaxRequests: 1, + } +} + +// CircuitBreaker implements the circuit breaker pattern +type CircuitBreaker struct { + config CircuitBreakerConfig + state State + failures int + successes int + lastFailureTime time.Time + halfOpenCount int + mu sync.RWMutex +} + +// NewCircuitBreaker creates a new circuit breaker +func NewCircuitBreaker(config CircuitBreakerConfig) *CircuitBreaker { + return &CircuitBreaker{ + config: config, + state: StateClosed, + } +} + +// Execute executes a function with circuit breaker protection +func (cb *CircuitBreaker) Execute(fn func() error) error { + if !cb.AllowRequest() { + return errors.New("circuit breaker is open") + } + + err := fn() + + if err != nil { + cb.RecordFailure() + return err + } + + cb.RecordSuccess() + return nil +} + +// ExecuteWithFallback executes a function with circuit breaker protection and 
fallback +func (cb *CircuitBreaker) ExecuteWithFallback(fn func() error, fallback func() error) error { + if !cb.AllowRequest() { + if fallback != nil { + return fallback() + } + return errors.New("circuit breaker is open") + } + + err := fn() + + if err != nil { + cb.RecordFailure() + if fallback != nil { + return fallback() + } + return err + } + + cb.RecordSuccess() + return nil +} + +// AllowRequest checks if a request is allowed +func (cb *CircuitBreaker) AllowRequest() bool { + cb.mu.RLock() + defer cb.mu.RUnlock() + + switch cb.state { + case StateClosed: + return true + case StateOpen: + if time.Since(cb.lastFailureTime) > cb.config.ResetTimeout { + return true + } + return false + case StateHalfOpen: + return cb.halfOpenCount < cb.config.HalfOpenMaxRequests + default: + return false + } +} + +// RecordSuccess records a successful request +func (cb *CircuitBreaker) RecordSuccess() { + cb.mu.Lock() + defer cb.mu.Unlock() + + cb.successes++ + + if cb.state == StateHalfOpen { + cb.halfOpenCount++ + if cb.halfOpenCount >= cb.config.HalfOpenMaxRequests { + cb.setState(StateClosed) + cb.failures = 0 + cb.halfOpenCount = 0 + } + } else if cb.state == StateClosed { + cb.failures = 0 + } +} + +// RecordFailure records a failed request +func (cb *CircuitBreaker) RecordFailure() { + cb.mu.Lock() + defer cb.mu.Unlock() + + cb.failures++ + cb.lastFailureTime = time.Now() + + if cb.state == StateHalfOpen { + cb.setState(StateOpen) + cb.halfOpenCount = 0 + } else if cb.state == StateClosed && cb.failures >= cb.config.MaxFailures { + cb.setState(StateOpen) + } +} + +// setState changes the circuit breaker state +func (cb *CircuitBreaker) setState(newState State) { + if cb.state != newState { + oldState := cb.state + cb.state = newState + if cb.config.OnStateChange != nil { + go cb.config.OnStateChange(cb.config.Name, oldState, newState) + } + } +} + +// GetState returns the current state +func (cb *CircuitBreaker) GetState() State { + cb.mu.RLock() + defer cb.mu.RUnlock() 
+ return cb.state +} + +// GetStats returns circuit breaker statistics +func (cb *CircuitBreaker) GetStats() map[string]interface{} { + cb.mu.RLock() + defer cb.mu.RUnlock() + + return map[string]interface{}{ + "name": cb.config.Name, + "state": cb.state.String(), + "failures": cb.failures, + "successes": cb.successes, + "last_failure_time": cb.lastFailureTime, + "half_open_count": cb.halfOpenCount, + } +} + +// Reset resets the circuit breaker to closed state +func (cb *CircuitBreaker) Reset() { + cb.mu.Lock() + defer cb.mu.Unlock() + + cb.state = StateClosed + cb.failures = 0 + cb.successes = 0 + cb.halfOpenCount = 0 +} + +// CircuitBreakerManager manages multiple circuit breakers +type CircuitBreakerManager struct { + breakers map[string]*CircuitBreaker + mu sync.RWMutex +} + +// NewCircuitBreakerManager creates a new circuit breaker manager +func NewCircuitBreakerManager() *CircuitBreakerManager { + return &CircuitBreakerManager{ + breakers: make(map[string]*CircuitBreaker), + } +} + +// GetOrCreate gets an existing circuit breaker or creates a new one +func (m *CircuitBreakerManager) GetOrCreate(config CircuitBreakerConfig) *CircuitBreaker { + m.mu.Lock() + defer m.mu.Unlock() + + if cb, exists := m.breakers[config.Name]; exists { + return cb + } + + cb := NewCircuitBreaker(config) + m.breakers[config.Name] = cb + return cb +} + +// Get returns a circuit breaker by name +func (m *CircuitBreakerManager) Get(name string) (*CircuitBreaker, bool) { + m.mu.RLock() + defer m.mu.RUnlock() + + cb, exists := m.breakers[name] + return cb, exists +} + +// GetAll returns all circuit breakers +func (m *CircuitBreakerManager) GetAll() map[string]*CircuitBreaker { + m.mu.RLock() + defer m.mu.RUnlock() + + result := make(map[string]*CircuitBreaker) + for k, v := range m.breakers { + result[k] = v + } + return result +} + +// ResetAll resets all circuit breakers +func (m *CircuitBreakerManager) ResetAll() { + m.mu.RLock() + defer m.mu.RUnlock() + + for _, cb := range 
m.breakers { + cb.Reset() + } +} diff --git a/pkg/resilience/health.go b/pkg/resilience/health.go new file mode 100644 index 0000000..d52ec4b --- /dev/null +++ b/pkg/resilience/health.go @@ -0,0 +1,262 @@ +package resilience + +import ( + "context" + "sync" + "time" +) + +// HealthStatus represents the health status +type HealthStatus string + +const ( + HealthStatusHealthy HealthStatus = "healthy" + HealthStatusDegraded HealthStatus = "degraded" + HealthStatusUnhealthy HealthStatus = "unhealthy" +) + +// HealthCheck represents a health check +type HealthCheck struct { + Name string + Check func(ctx context.Context) error + Timeout time.Duration + Critical bool +} + +// HealthResult represents the result of a health check +type HealthResult struct { + Name string `json:"name"` + Status HealthStatus `json:"status"` + Error string `json:"error,omitempty"` + Duration time.Duration `json:"duration"` + Timestamp time.Time `json:"timestamp"` + Critical bool `json:"critical"` +} + +// HealthReport represents the overall health report +type HealthReport struct { + Status HealthStatus `json:"status"` + Checks map[string]*HealthResult `json:"checks"` + Timestamp time.Time `json:"timestamp"` + Duration time.Duration `json:"duration"` +} + +// HealthChecker manages health checks +type HealthChecker struct { + checks map[string]*HealthCheck + mu sync.RWMutex +} + +// NewHealthChecker creates a new health checker +func NewHealthChecker() *HealthChecker { + return &HealthChecker{ + checks: make(map[string]*HealthCheck), + } +} + +// RegisterCheck registers a health check +func (hc *HealthChecker) RegisterCheck(check *HealthCheck) { + hc.mu.Lock() + defer hc.mu.Unlock() + + if check.Timeout == 0 { + check.Timeout = 5 * time.Second + } + + hc.checks[check.Name] = check +} + +// RegisterSimpleCheck registers a simple health check +func (hc *HealthChecker) RegisterSimpleCheck(name string, check func() error) { + hc.RegisterCheck(&HealthCheck{ + Name: name, + Check: func(ctx 
context.Context) error { return check() }, + Timeout: 5 * time.Second, + }) +} + +// RegisterCriticalCheck registers a critical health check +func (hc *HealthChecker) RegisterCriticalCheck(name string, check func(ctx context.Context) error) { + hc.RegisterCheck(&HealthCheck{ + Name: name, + Check: check, + Timeout: 5 * time.Second, + Critical: true, + }) +} + +// DeregisterCheck deregisters a health check +func (hc *HealthChecker) DeregisterCheck(name string) { + hc.mu.Lock() + defer hc.mu.Unlock() + + delete(hc.checks, name) +} + +// Check runs all health checks and returns a report +func (hc *HealthChecker) Check(ctx context.Context) *HealthReport { + start := time.Now() + + hc.mu.RLock() + checks := make(map[string]*HealthCheck, len(hc.checks)) + for k, v := range hc.checks { + checks[k] = v + } + hc.mu.RUnlock() + + results := make(map[string]*HealthResult) + var wg sync.WaitGroup + var mu sync.Mutex + + for name, check := range checks { + wg.Add(1) + go func(name string, check *HealthCheck) { + defer wg.Done() + + result := hc.runCheck(ctx, check) + + mu.Lock() + results[name] = result + mu.Unlock() + }(name, check) + } + + wg.Wait() + + report := &HealthReport{ + Checks: results, + Timestamp: time.Now(), + Duration: time.Since(start), + } + + report.Status = hc.calculateOverallStatus(results) + + return report +} + +// CheckSingle runs a single health check +func (hc *HealthChecker) CheckSingle(ctx context.Context, name string) *HealthResult { + hc.mu.RLock() + check, exists := hc.checks[name] + hc.mu.RUnlock() + + if !exists { + return &HealthResult{ + Name: name, + Status: HealthStatusUnhealthy, + Error: "check not found", + Timestamp: time.Now(), + } + } + + return hc.runCheck(ctx, check) +} + +// runCheck runs a single health check +func (hc *HealthChecker) runCheck(ctx context.Context, check *HealthCheck) *HealthResult { + start := time.Now() + + checkCtx, cancel := context.WithTimeout(ctx, check.Timeout) + defer cancel() + + result := &HealthResult{ + 
Name: check.Name, + Timestamp: start, + Critical: check.Critical, + } + + errChan := make(chan error, 1) + go func() { + errChan <- check.Check(checkCtx) + }() + + select { + case err := <-errChan: + result.Duration = time.Since(start) + if err != nil { + result.Status = HealthStatusUnhealthy + result.Error = err.Error() + } else { + result.Status = HealthStatusHealthy + } + case <-checkCtx.Done(): + result.Duration = time.Since(start) + result.Status = HealthStatusUnhealthy + result.Error = "health check timed out" + } + + return result +} + +// calculateOverallStatus calculates the overall health status +func (hc *HealthChecker) calculateOverallStatus(results map[string]*HealthResult) HealthStatus { + hasUnhealthy := false + hasDegraded := false + hasCriticalUnhealthy := false + + for _, result := range results { + if result.Status == HealthStatusUnhealthy { + hasUnhealthy = true + if result.Critical { + hasCriticalUnhealthy = true + } + } else if result.Status == HealthStatusDegraded { + hasDegraded = true + } + } + + if hasCriticalUnhealthy { + return HealthStatusUnhealthy + } + + if hasUnhealthy { + return HealthStatusDegraded + } + + if hasDegraded { + return HealthStatusDegraded + } + + return HealthStatusHealthy +} + +// GetCheckNames returns all registered check names +func (hc *HealthChecker) GetCheckNames() []string { + hc.mu.RLock() + defer hc.mu.RUnlock() + + names := make([]string, 0, len(hc.checks)) + for name := range hc.checks { + names = append(names, name) + } + + return names +} + +// GetCheck returns a health check by name +func (hc *HealthChecker) GetCheck(name string) (*HealthCheck, bool) { + hc.mu.RLock() + defer hc.mu.RUnlock() + + check, exists := hc.checks[name] + return check, exists +} + +// IsHealthy returns true if all checks are healthy +func (hc *HealthChecker) IsHealthy(ctx context.Context) bool { + report := hc.Check(ctx) + return report.Status == HealthStatusHealthy +} + +// IsCriticalHealthy returns true if all critical checks 
are healthy +func (hc *HealthChecker) IsCriticalHealthy(ctx context.Context) bool { + report := hc.Check(ctx) + + for _, result := range report.Checks { + if result.Critical && result.Status != HealthStatusHealthy { + return false + } + } + + return true +} diff --git a/pkg/resilience/retry.go b/pkg/resilience/retry.go new file mode 100644 index 0000000..569719c --- /dev/null +++ b/pkg/resilience/retry.go @@ -0,0 +1,238 @@ +package resilience + +import ( + "context" + "errors" + "math" + "math/rand" + "time" +) + +// RetryConfig holds retry configuration +type RetryConfig struct { + MaxAttempts int + InitialDelay time.Duration + MaxDelay time.Duration + BackoffFactor float64 + Jitter bool + RetryIf func(error) bool + OnRetry func(attempt int, err error) +} + +// DefaultRetryConfig returns default retry configuration +func DefaultRetryConfig() RetryConfig { + return RetryConfig{ + MaxAttempts: 3, + InitialDelay: 100 * time.Millisecond, + MaxDelay: 10 * time.Second, + BackoffFactor: 2.0, + Jitter: true, + RetryIf: nil, + OnRetry: nil, + } +} + +// Retry executes a function with retry and exponential backoff +func Retry(fn func() error, config ...RetryConfig) error { + var cfg RetryConfig + if len(config) > 0 { + cfg = config[0] + } else { + cfg = DefaultRetryConfig() + } + + var lastErr error + for attempt := 1; attempt <= cfg.MaxAttempts; attempt++ { + err := fn() + if err == nil { + return nil + } + + lastErr = err + + if cfg.RetryIf != nil && !cfg.RetryIf(err) { + return err + } + + if attempt < cfg.MaxAttempts { + delay := calculateDelay(attempt, cfg) + if cfg.OnRetry != nil { + cfg.OnRetry(attempt, err) + } + time.Sleep(delay) + } + } + + return lastErr +} + +// RetryWithContext executes a function with retry and exponential backoff with context +func RetryWithContext(ctx context.Context, fn func() error, config ...RetryConfig) error { + var cfg RetryConfig + if len(config) > 0 { + cfg = config[0] + } else { + cfg = DefaultRetryConfig() + } + + var lastErr error 
+ for attempt := 1; attempt <= cfg.MaxAttempts; attempt++ { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + err := fn() + if err == nil { + return nil + } + + lastErr = err + + if cfg.RetryIf != nil && !cfg.RetryIf(err) { + return err + } + + if attempt < cfg.MaxAttempts { + delay := calculateDelay(attempt, cfg) + if cfg.OnRetry != nil { + cfg.OnRetry(attempt, err) + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(delay): + } + } + } + + return lastErr +} + +// RetryWithResult executes a function with retry and returns a result +func RetryWithResult[T any](fn func() (T, error), config ...RetryConfig) (T, error) { + var cfg RetryConfig + if len(config) > 0 { + cfg = config[0] + } else { + cfg = DefaultRetryConfig() + } + + var lastErr error + var zero T + for attempt := 1; attempt <= cfg.MaxAttempts; attempt++ { + result, err := fn() + if err == nil { + return result, nil + } + + lastErr = err + + if cfg.RetryIf != nil && !cfg.RetryIf(err) { + return zero, err + } + + if attempt < cfg.MaxAttempts { + delay := calculateDelay(attempt, cfg) + if cfg.OnRetry != nil { + cfg.OnRetry(attempt, err) + } + time.Sleep(delay) + } + } + + return zero, lastErr +} + +// RetryWithResultContext executes a function with retry and returns a result with context +func RetryWithResultContext[T any](ctx context.Context, fn func() (T, error), config ...RetryConfig) (T, error) { + var cfg RetryConfig + if len(config) > 0 { + cfg = config[0] + } else { + cfg = DefaultRetryConfig() + } + + var lastErr error + var zero T + for attempt := 1; attempt <= cfg.MaxAttempts; attempt++ { + select { + case <-ctx.Done(): + return zero, ctx.Err() + default: + } + + result, err := fn() + if err == nil { + return result, nil + } + + lastErr = err + + if cfg.RetryIf != nil && !cfg.RetryIf(err) { + return zero, err + } + + if attempt < cfg.MaxAttempts { + delay := calculateDelay(attempt, cfg) + if cfg.OnRetry != nil { + cfg.OnRetry(attempt, err) + } + select { + 
case <-ctx.Done(): + return zero, ctx.Err() + case <-time.After(delay): + } + } + } + + return zero, lastErr +} + +// calculateDelay calculates the delay for a retry attempt +func calculateDelay(attempt int, config RetryConfig) time.Duration { + delay := float64(config.InitialDelay) * math.Pow(config.BackoffFactor, float64(attempt-1)) + + if delay > float64(config.MaxDelay) { + delay = float64(config.MaxDelay) + } + + if config.Jitter { + jitter := rand.Float64() * 0.5 + delay = delay * (1 + jitter) + } + + return time.Duration(delay) +} + +// RetryableError wraps an error to indicate it's retryable +type RetryableError struct { + Err error +} + +func (e *RetryableError) Error() string { + return e.Err.Error() +} + +func (e *RetryableError) Unwrap() error { + return e.Err +} + +// NewRetryableError creates a new retryable error +func NewRetryableError(err error) *RetryableError { + return &RetryableError{Err: err} +} + +// IsRetryable checks if an error is retryable +func IsRetryable(err error) bool { + var retryableErr *RetryableError + return errors.As(err, &retryableErr) +} + +// RetryIfRetryable returns a RetryIf function that retries only retryable errors +func RetryIfRetryable() func(error) bool { + return func(err error) bool { + return IsRetryable(err) + } +} diff --git a/pkg/resilience/timeout.go b/pkg/resilience/timeout.go new file mode 100644 index 0000000..f365e19 --- /dev/null +++ b/pkg/resilience/timeout.go @@ -0,0 +1,115 @@ +package resilience + +import ( + "context" + "errors" + "time" +) + +var ( + ErrTimeout = errors.New("operation timed out") +) + +// TimeoutConfig holds timeout configuration +type TimeoutConfig struct { + Timeout time.Duration +} + +// DefaultTimeoutConfig returns default timeout configuration +func DefaultTimeoutConfig() TimeoutConfig { + return TimeoutConfig{ + Timeout: 30 * time.Second, + } +} + +// WithTimeout executes a function with a timeout +func WithTimeout(fn func() error, timeout time.Duration) error { + ctx, cancel 
:= context.WithTimeout(context.Background(), timeout) + defer cancel() + + return WithContext(ctx, fn) +} + +// WithContext executes a function with a context +func WithContext(ctx context.Context, fn func() error) error { + errChan := make(chan error, 1) + + go func() { + errChan <- fn() + }() + + select { + case err := <-errChan: + return err + case <-ctx.Done(): + if ctx.Err() == context.DeadlineExceeded { + return ErrTimeout + } + return ctx.Err() + } +} + +// WithTimeoutResult executes a function with a timeout and returns a result +func WithTimeoutResult[T any](fn func() (T, error), timeout time.Duration) (T, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + return WithContextResult(ctx, fn) +} + +// WithContextResult executes a function with a context and returns a result +func WithContextResult[T any](ctx context.Context, fn func() (T, error)) (T, error) { + resultChan := make(chan struct { + result T + err error + }, 1) + + go func() { + result, err := fn() + resultChan <- struct { + result T + err error + }{result, err} + }() + + select { + case res := <-resultChan: + return res.result, res.err + case <-ctx.Done(): + var zero T + if ctx.Err() == context.DeadlineExceeded { + return zero, ErrTimeout + } + return zero, ctx.Err() + } +} + +// TimeoutFunc wraps a function with timeout +type TimeoutFunc func() error + +// WithTimeoutConfig executes a function with timeout configuration +func WithTimeoutConfig(fn func() error, config ...TimeoutConfig) error { + var cfg TimeoutConfig + if len(config) > 0 { + cfg = config[0] + } else { + cfg = DefaultTimeoutConfig() + } + + return WithTimeout(fn, cfg.Timeout) +} + +// TimeoutFuncResult wraps a function with timeout that returns a result +type TimeoutFuncResult[T any] func() (T, error) + +// WithTimeoutConfigResult executes a function with timeout configuration and returns a result +func WithTimeoutConfigResult[T any](fn func() (T, error), config ...TimeoutConfig) (T, 
error) { + var cfg TimeoutConfig + if len(config) > 0 { + cfg = config[0] + } else { + cfg = DefaultTimeoutConfig() + } + + return WithTimeoutResult(fn, cfg.Timeout) +} diff --git a/pkg/testing/helpers.go b/pkg/testing/helpers.go new file mode 100644 index 0000000..3a29ad1 --- /dev/null +++ b/pkg/testing/helpers.go @@ -0,0 +1,131 @@ +package testing + +import ( + "bytes" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/labstack/echo/v4" +) + +// TestContext creates an echo.Context for testing +type TestContext struct { + echo.Context + Request *http.Request + Response *httptest.ResponseRecorder +} + +// NewTestEcho creates a new Echo instance for testing +func NewTestEcho() *echo.Echo { + return echo.New() +} + +// NewTestContext creates a new test context with the given method, path, and body +func NewTestContext(method, path string, body interface{}) (echo.Context, *httptest.ResponseRecorder) { + e := echo.New() + rec := httptest.NewRecorder() + + var req *http.Request + if body != nil { + jsonBody, _ := json.Marshal(body) + req = httptest.NewRequest(method, path, bytes.NewBuffer(jsonBody)) + req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON) + } else { + req = httptest.NewRequest(method, path, nil) + } + + c := e.NewContext(req, rec) + return c, rec +} + +// NewTestContextWithQuery creates a test context with query parameters +func NewTestContextWithQuery(method, path string, queryParams map[string]string) (echo.Context, *httptest.ResponseRecorder) { + e := echo.New() + rec := httptest.NewRecorder() + + req := httptest.NewRequest(method, path, nil) + q := req.URL.Query() + for k, v := range queryParams { + q.Add(k, v) + } + req.URL.RawQuery = q.Encode() + + c := e.NewContext(req, rec) + return c, rec +} + +// NewTestContextWithParams creates a test context with path parameters +func NewTestContextWithParams(method, path string, params map[string]string, body interface{}) (echo.Context, *httptest.ResponseRecorder) { + 
e := echo.New() + rec := httptest.NewRecorder() + + var req *http.Request + if body != nil { + jsonBody, _ := json.Marshal(body) + req = httptest.NewRequest(method, path, bytes.NewBuffer(jsonBody)) + req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON) + } else { + req = httptest.NewRequest(method, path, nil) + } + + c := e.NewContext(req, rec) + c.SetParamNames(getKeys(params)...) + c.SetParamValues(getValues(params)...) + + return c, rec +} + +// ParseResponse parses the response body into the given struct +func ParseResponse(t *testing.T, rec *httptest.ResponseRecorder, v interface{}) { + t.Helper() + if err := json.Unmarshal(rec.Body.Bytes(), v); err != nil { + t.Fatalf("failed to parse response: %v", err) + } +} + +// AssertStatus asserts the response status code +func AssertStatus(t *testing.T, rec *httptest.ResponseRecorder, expected int) { + t.Helper() + if rec.Code != expected { + t.Errorf("expected status %d, got %d", expected, rec.Code) + } +} + +// AssertJSON asserts the response JSON contains the expected fields +func AssertJSON(t *testing.T, rec *httptest.ResponseRecorder, expected map[string]interface{}) { + t.Helper() + var actual map[string]interface{} + if err := json.Unmarshal(rec.Body.Bytes(), &actual); err != nil { + t.Fatalf("failed to parse response JSON: %v", err) + } + + for key, expectedValue := range expected { + actualValue, exists := actual[key] + if !exists { + t.Errorf("expected key %q not found in response", key) + continue + } + if actualValue != expectedValue { + t.Errorf("for key %q: expected %v, got %v", key, expectedValue, actualValue) + } + } +} + +// Helper functions +func getKeys(m map[string]string) []string { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + return keys +} + +func getValues(m map[string]string) []string { + values := make([]string, 0, len(m)) + for _, v := range m { + values = append(values, v) + } + return values +} diff --git a/pkg/testing/mocks.go 
b/pkg/testing/mocks.go new file mode 100644 index 0000000..8edcb8b --- /dev/null +++ b/pkg/testing/mocks.go @@ -0,0 +1,341 @@ +package testing + +import ( + "context" + "io" + "sync" + "time" +) + +// MockService implements a mock service for testing +type MockService struct { + name string + enabled bool + endpoints []string + handler func() error +} + +// NewMockService creates a new mock service +func NewMockService(name string, enabled bool, endpoints []string) *MockService { + return &MockService{ + name: name, + enabled: enabled, + endpoints: endpoints, + } +} + +func (m *MockService) Name() string { return m.name } +func (m *MockService) WireName() string { return "mock-" + m.name } +func (m *MockService) Enabled() bool { return m.enabled } +func (m *MockService) Endpoints() []string { return m.endpoints } +func (m *MockService) Get() interface{} { return m } + +func (m *MockService) RegisterRoutes(g interface{}) { + // Mock implementation - does nothing +} + +// MockLogger implements a mock logger for testing +type MockLogger struct { + mu sync.RWMutex + logs []LogEntry +} + +// LogEntry represents a single log entry +type LogEntry struct { + Level string + Message string + Args []interface{} +} + +// NewMockLogger creates a new mock logger +func NewMockLogger() *MockLogger { + return &MockLogger{ + logs: make([]LogEntry, 0), + } +} + +func (m *MockLogger) Debug(msg string, args ...interface{}) { + m.mu.Lock() + defer m.mu.Unlock() + m.logs = append(m.logs, LogEntry{Level: "DEBUG", Message: msg, Args: args}) +} + +func (m *MockLogger) Info(msg string, args ...interface{}) { + m.mu.Lock() + defer m.mu.Unlock() + m.logs = append(m.logs, LogEntry{Level: "INFO", Message: msg, Args: args}) +} + +func (m *MockLogger) Warn(msg string, args ...interface{}) { + m.mu.Lock() + defer m.mu.Unlock() + m.logs = append(m.logs, LogEntry{Level: "WARN", Message: msg, Args: args}) +} + +func (m *MockLogger) Error(msg string, args ...interface{}) { + m.mu.Lock() + defer 
m.mu.Unlock() + m.logs = append(m.logs, LogEntry{Level: "ERROR", Message: msg, Args: args}) +} + +func (m *MockLogger) Fatal(msg string, args ...interface{}) { + m.mu.Lock() + defer m.mu.Unlock() + m.logs = append(m.logs, LogEntry{Level: "FATAL", Message: msg, Args: args}) +} + +func (m *MockLogger) GetLogs() []LogEntry { + m.mu.RLock() + defer m.mu.RUnlock() + result := make([]LogEntry, len(m.logs)) + copy(result, m.logs) + return result +} + +func (m *MockLogger) Clear() { + m.mu.Lock() + defer m.mu.Unlock() + m.logs = m.logs[:0] +} + +// MockRedisManager implements a mock Redis manager for testing +type MockRedisManager struct { + mu sync.RWMutex + storage map[string]interface{} +} + +// NewMockRedisManager creates a new mock Redis manager +func NewMockRedisManager() *MockRedisManager { + return &MockRedisManager{ + storage: make(map[string]interface{}), + } +} + +func (m *MockRedisManager) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) error { + m.mu.Lock() + defer m.mu.Unlock() + m.storage[key] = value + return nil +} + +func (m *MockRedisManager) Get(ctx context.Context, key string) (string, error) { + m.mu.RLock() + defer m.mu.RUnlock() + if val, ok := m.storage[key]; ok { + if str, ok := val.(string); ok { + return str, nil + } + } + return "", nil +} + +func (m *MockRedisManager) Delete(ctx context.Context, key string) error { + m.mu.Lock() + defer m.mu.Unlock() + delete(m.storage, key) + return nil +} + +func (m *MockRedisManager) Close() error { + return nil +} + +// MockPostgresManager implements a mock PostgreSQL manager for testing +type MockPostgresManager struct { + mu sync.RWMutex + storage map[string]interface{} +} + +// NewMockPostgresManager creates a new mock PostgreSQL manager +func NewMockPostgresManager() *MockPostgresManager { + return &MockPostgresManager{ + storage: make(map[string]interface{}), + } +} + +func (m *MockPostgresManager) Close() error { + return nil +} + +// MockMongoManager implements a 
mock MongoDB manager for testing +type MockMongoManager struct { + mu sync.RWMutex + storage map[string]interface{} +} + +// NewMockMongoManager creates a new mock MongoDB manager +func NewMockMongoManager() *MockMongoManager { + return &MockMongoManager{ + storage: make(map[string]interface{}), + } +} + +func (m *MockMongoManager) Close() error { + return nil +} + +// MockKafkaManager implements a mock Kafka manager for testing +type MockKafkaManager struct { + mu sync.RWMutex + messages []MockMessage +} + +// MockMessage represents a Kafka message +type MockMessage struct { + Topic string + Value []byte +} + +// NewMockKafkaManager creates a new mock Kafka manager +func NewMockKafkaManager() *MockKafkaManager { + return &MockKafkaManager{ + messages: make([]MockMessage, 0), + } +} + +func (m *MockKafkaManager) Publish(topic string, value []byte) error { + m.mu.Lock() + defer m.mu.Unlock() + m.messages = append(m.messages, MockMessage{Topic: topic, Value: value}) + return nil +} + +func (m *MockKafkaManager) GetMessages() []MockMessage { + m.mu.RLock() + defer m.mu.RUnlock() + result := make([]MockMessage, len(m.messages)) + copy(result, m.messages) + return result +} + +func (m *MockKafkaManager) Close() error { + return nil +} + +// MockCronManager implements a mock Cron manager for testing +type MockCronManager struct { + mu sync.RWMutex + jobs map[string]func() +} + +// NewMockCronManager creates a new mock Cron manager +func NewMockCronManager() *MockCronManager { + return &MockCronManager{ + jobs: make(map[string]func()), + } +} + +func (m *MockCronManager) AddJob(name string, schedule string, cmd func()) error { + m.mu.Lock() + defer m.mu.Unlock() + m.jobs[name] = cmd + return nil +} + +func (m *MockCronManager) RemoveJob(name string) error { + m.mu.Lock() + defer m.mu.Unlock() + delete(m.jobs, name) + return nil +} + +func (m *MockCronManager) Close() error { + return nil +} + +// MockFileReader implements a mock file reader for testing +type 
MockFileReader struct { + mu sync.RWMutex + files map[string][]byte +} + +// NewMockFileReader creates a new mock file reader +func NewMockFileReader() *MockFileReader { + return &MockFileReader{ + files: make(map[string][]byte), + } +} + +func (m *MockFileReader) Read(path string) ([]byte, error) { + m.mu.RLock() + defer m.mu.RUnlock() + if data, ok := m.files[path]; ok { + return data, nil + } + return nil, io.EOF +} + +func (m *MockFileReader) AddFile(path string, content []byte) { + m.mu.Lock() + defer m.mu.Unlock() + m.files[path] = content +} + +// MockConfig implements a mock configuration for testing +type MockConfig struct { + Services map[string]bool + Redis struct { + Enabled bool + } + Postgres struct { + Enabled bool + } + Mongo struct { + Enabled bool + } + Kafka struct { + Enabled bool + } + Cron struct { + Enabled bool + } +} + +// NewMockConfig creates a new mock configuration +func NewMockConfig() *MockConfig { + return &MockConfig{ + Services: make(map[string]bool), + } +} + +func (m *MockConfig) IsServiceEnabled(name string) bool { + if enabled, ok := m.Services[name]; ok { + return enabled + } + return false +} + +func (m *MockConfig) SetServiceEnabled(name string, enabled bool) { + m.Services[name] = enabled +} + +// TestSuite provides a comprehensive test suite setup +type TestSuite struct { + Logger *MockLogger + Redis *MockRedisManager + Postgres *MockPostgresManager + Mongo *MockMongoManager + Kafka *MockKafkaManager + Cron *MockCronManager + FileReader *MockFileReader + Config *MockConfig +} + +// NewTestSuite creates a new test suite with all mocks +func NewTestSuite() *TestSuite { + return &TestSuite{ + Logger: NewMockLogger(), + Redis: NewMockRedisManager(), + Postgres: NewMockPostgresManager(), + Mongo: NewMockMongoManager(), + Kafka: NewMockKafkaManager(), + Cron: NewMockCronManager(), + FileReader: NewMockFileReader(), + Config: NewMockConfig(), + } +} + +// Cleanup cleans up all test suite resources +func (ts *TestSuite) Cleanup() 
{ + ts.Logger.Clear() +} diff --git a/pkg/tui/charts.go b/pkg/tui/charts.go new file mode 100644 index 0000000..c2328fd --- /dev/null +++ b/pkg/tui/charts.go @@ -0,0 +1,337 @@ +package tui + +import ( + "fmt" + "math" + "strings" +) + +// BarChart represents a simple ASCII bar chart +type BarChart struct { + Title string + Items []ChartItem + Width int + ShowValues bool +} + +// ChartItem represents an item in a chart +type ChartItem struct { + Label string + Value float64 + Color string +} + +// NewBarChart creates a new bar chart +func NewBarChart(title string, width int) *BarChart { + if width <= 0 { + width = 40 + } + return &BarChart{ + Title: title, + Width: width, + ShowValues: true, + } +} + +// AddItem adds an item to the chart +func (bc *BarChart) AddItem(label string, value float64, color string) { + bc.Items = append(bc.Items, ChartItem{ + Label: label, + Value: value, + Color: color, + }) +} + +// Render renders the bar chart as a string +func (bc *BarChart) Render() string { + if len(bc.Items) == 0 { + return "" + } + + var sb strings.Builder + + if bc.Title != "" { + sb.WriteString(bc.Title + "\n") + sb.WriteString(strings.Repeat("─", bc.Width) + "\n") + } + + maxValue := 0.0 + maxLabelLen := 0 + + for _, item := range bc.Items { + if item.Value > maxValue { + maxValue = item.Value + } + if len(item.Label) > maxLabelLen { + maxLabelLen = len(item.Label) + } + } + + if maxValue == 0 { + maxValue = 1 + } + + for _, item := range bc.Items { + label := fmt.Sprintf("%-*s", maxLabelLen, item.Label) + barLen := int((item.Value / maxValue) * float64(bc.Width-maxLabelLen-10)) + + if barLen < 0 { + barLen = 0 + } + + bar := strings.Repeat("█", barLen) + + if bc.ShowValues { + sb.WriteString(fmt.Sprintf("%s %s %.1f\n", label, bar, item.Value)) + } else { + sb.WriteString(fmt.Sprintf("%s %s\n", label, bar)) + } + } + + return sb.String() +} + +// Sparkline represents a simple ASCII sparkline +type Sparkline struct { + Title string + Values []float64 + Width int 
+ Chars []string +} + +// NewSparkline creates a new sparkline +func NewSparkline(title string, width int) *Sparkline { + if width <= 0 { + width = 20 + } + return &Sparkline{ + Title: title, + Width: width, + Chars: []string{"▁", "▂", "▃", "▄", "▅", "▆", "▇", "█"}, + } +} + +// AddValue adds a value to the sparkline +func (s *Sparkline) AddValue(value float64) { + s.Values = append(s.Values, value) +} + +// SetValues sets multiple values +func (s *Sparkline) SetValues(values []float64) { + s.Values = values +} + +// Render renders the sparkline as a string +func (s *Sparkline) Render() string { + if len(s.Values) == 0 { + return "" + } + + var sb strings.Builder + + if s.Title != "" { + sb.WriteString(s.Title + ": ") + } + + values := s.Values + if len(values) > s.Width { + step := float64(len(values)) / float64(s.Width) + sampled := make([]float64, s.Width) + for i := 0; i < s.Width; i++ { + idx := int(float64(i) * step) + if idx >= len(values) { + idx = len(values) - 1 + } + sampled[i] = values[idx] + } + values = sampled + } + + minVal, maxVal := math.Inf(1), math.Inf(-1) + for _, v := range values { + if v < minVal { + minVal = v + } + if v > maxVal { + maxVal = v + } + } + + rangeVal := maxVal - minVal + if rangeVal == 0 { + rangeVal = 1 + } + + for _, v := range values { + normalized := (v - minVal) / rangeVal + idx := int(normalized * float64(len(s.Chars)-1)) + if idx < 0 { + idx = 0 + } + if idx >= len(s.Chars) { + idx = len(s.Chars) - 1 + } + sb.WriteString(s.Chars[idx]) + } + + return sb.String() +} + +// Gauge represents a simple ASCII gauge +type Gauge struct { + Title string + Value float64 + Max float64 + Width int +} + +// NewGauge creates a new gauge +func NewGauge(title string, width int) *Gauge { + if width <= 0 { + width = 20 + } + return &Gauge{ + Title: title, + Width: width, + Max: 100, + } +} + +// SetValue sets the gauge value +func (g *Gauge) SetValue(value float64) { + g.Value = value +} + +// SetMax sets the gauge max value +func (g 
*Gauge) SetMax(max float64) { + g.Max = max +} + +// Render renders the gauge as a string +func (g *Gauge) Render() string { + var sb strings.Builder + + if g.Title != "" { + sb.WriteString(g.Title + ": ") + } + + percentage := g.Value / g.Max + if percentage < 0 { + percentage = 0 + } + if percentage > 1 { + percentage = 1 + } + + filled := int(percentage * float64(g.Width)) + empty := g.Width - filled + + sb.WriteString("[") + sb.WriteString(strings.Repeat("█", filled)) + sb.WriteString(strings.Repeat("░", empty)) + sb.WriteString("]") + + sb.WriteString(fmt.Sprintf(" %.1f%%", percentage*100)) + + return sb.String() +} + +// Table represents a simple ASCII table +type Table struct { + Title string + Headers []string + Rows [][]string + Width int +} + +// NewTable creates a new table +func NewTable(title string, width int) *Table { + if width <= 0 { + width = 80 + } + return &Table{ + Title: title, + Width: width, + } +} + +// SetHeaders sets the table headers +func (t *Table) SetHeaders(headers ...string) { + t.Headers = headers +} + +// AddRow adds a row to the table +func (t *Table) AddRow(cells ...string) { + t.Rows = append(t.Rows, cells) +} + +// Render renders the table as a string +func (t *Table) Render() string { + var sb strings.Builder + + if t.Title != "" { + sb.WriteString(t.Title + "\n") + } + + if len(t.Headers) == 0 { + return sb.String() + } + + colWidths := make([]int, len(t.Headers)) + for i, h := range t.Headers { + colWidths[i] = len(h) + } + + for _, row := range t.Rows { + for i, cell := range row { + if i < len(colWidths) && len(cell) > colWidths[i] { + colWidths[i] = len(cell) + } + } + } + + sb.WriteString("┌") + for i, w := range colWidths { + sb.WriteString(strings.Repeat("─", w+2)) + if i < len(colWidths)-1 { + sb.WriteString("┬") + } + } + sb.WriteString("┐\n") + + sb.WriteString("│") + for i, h := range t.Headers { + sb.WriteString(fmt.Sprintf(" %-*s │", colWidths[i], h)) + } + sb.WriteString("\n") + + sb.WriteString("├") + for i, w 
:= range colWidths { + sb.WriteString(strings.Repeat("─", w+2)) + if i < len(colWidths)-1 { + sb.WriteString("┼") + } + } + sb.WriteString("┤\n") + + for _, row := range t.Rows { + sb.WriteString("│") + for i, cell := range row { + if i < len(colWidths) { + sb.WriteString(fmt.Sprintf(" %-*s │", colWidths[i], cell)) + } + } + sb.WriteString("\n") + } + + sb.WriteString("└") + for i, w := range colWidths { + sb.WriteString(strings.Repeat("─", w+2)) + if i < len(colWidths)-1 { + sb.WriteString("┴") + } + } + sb.WriteString("┘\n") + + return sb.String() +} diff --git a/pkg/webhook/handler.go b/pkg/webhook/handler.go new file mode 100644 index 0000000..26b16d6 --- /dev/null +++ b/pkg/webhook/handler.go @@ -0,0 +1,258 @@ +package webhook + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "sync" + "time" +) + +// WebhookConfig holds webhook configuration +type WebhookConfig struct { + URL string + Secret string + Timeout time.Duration + MaxRetries int + RetryDelay time.Duration + Headers map[string]string + Enabled bool +} + +// DefaultWebhookConfig returns default webhook configuration +func DefaultWebhookConfig() WebhookConfig { + return WebhookConfig{ + Timeout: 30 * time.Second, + MaxRetries: 3, + RetryDelay: 1 * time.Second, + Headers: make(map[string]string), + Enabled: true, + } +} + +// WebhookEvent represents a webhook event +type WebhookEvent struct { + ID string `json:"id"` + Type string `json:"type"` + Timestamp time.Time `json:"timestamp"` + Data map[string]interface{} `json:"data"` + Signature string `json:"signature,omitempty"` +} + +// WebhookResponse represents a webhook response +type WebhookResponse struct { + StatusCode int + Body string + Headers map[string]string + Duration time.Duration +} + +// WebhookManager manages webhooks +type WebhookManager struct { + config WebhookConfig + client *http.Client + mu sync.RWMutex + handlers map[string][]func(event WebhookEvent) +} 
+ +// NewWebhookManager creates a new webhook manager +func NewWebhookManager(config WebhookConfig) *WebhookManager { + return &WebhookManager{ + config: config, + client: &http.Client{ + Timeout: config.Timeout, + }, + handlers: make(map[string][]func(event WebhookEvent)), + } +} + +// Register registers a webhook handler for an event type +func (wm *WebhookManager) Register(eventType string, handler func(event WebhookEvent)) { + wm.mu.Lock() + defer wm.mu.Unlock() + + wm.handlers[eventType] = append(wm.handlers[eventType], handler) +} + +// Trigger triggers webhook handlers for an event +func (wm *WebhookManager) Trigger(event WebhookEvent) { + wm.mu.RLock() + handlers := wm.handlers[event.Type] + wm.mu.RUnlock() + + for _, handler := range handlers { + go handler(event) + } +} + +// Send sends a webhook event to a URL +func (wm *WebhookManager) Send(ctx context.Context, event WebhookEvent) (*WebhookResponse, error) { + if !wm.config.Enabled { + return nil, fmt.Errorf("webhook is disabled") + } + + payload, err := json.Marshal(event) + if err != nil { + return nil, err + } + + // Sign the payload + if wm.config.Secret != "" { + signature := wm.SignPayload(payload) + event.Signature = signature + payload, _ = json.Marshal(event) + } + + var lastErr error + for attempt := 0; attempt <= wm.config.MaxRetries; attempt++ { + if attempt > 0 { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(wm.config.RetryDelay): + } + } + + resp, err := wm.doRequest(ctx, payload) + if err != nil { + lastErr = err + continue + } + + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + return resp, nil + } + + lastErr = fmt.Errorf("webhook returned status %d", resp.StatusCode) + } + + return nil, lastErr +} + +// doRequest performs the HTTP request +func (wm *WebhookManager) doRequest(ctx context.Context, payload []byte) (*WebhookResponse, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, wm.config.URL, bytes.NewReader(payload)) + if err 
!= nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", "Stackyard-Webhook/1.0") + + for key, value := range wm.config.Headers { + req.Header.Set(key, value) + } + + start := time.Now() + resp, err := wm.client.Do(req) + duration := time.Since(start) + + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + headers := make(map[string]string) + for key, values := range resp.Header { + if len(values) > 0 { + headers[key] = values[0] + } + } + + return &WebhookResponse{ + StatusCode: resp.StatusCode, + Body: string(body), + Headers: headers, + Duration: duration, + }, nil +} + +// SignPayload signs a payload with HMAC-SHA256 +func (wm *WebhookManager) SignPayload(payload []byte) string { + h := hmac.New(sha256.New, []byte(wm.config.Secret)) + h.Write(payload) + return hex.EncodeToString(h.Sum(nil)) +} + +// VerifySignature verifies a webhook signature +func VerifySignature(payload []byte, signature, secret string) bool { + h := hmac.New(sha256.New, []byte(secret)) + h.Write(payload) + expected := hex.EncodeToString(h.Sum(nil)) + return hmac.Equal([]byte(signature), []byte(expected)) +} + +// WebhookHandler handles incoming webhook requests +type WebhookHandler struct { + manager *WebhookManager +} + +// NewWebhookHandler creates a new webhook handler +func NewWebhookHandler(manager *WebhookManager) *WebhookHandler { + return &WebhookHandler{ + manager: manager, + } +} + +// Handle handles an incoming webhook request +func (wh *WebhookHandler) Handle(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "Failed to read body", http.StatusBadRequest) + return + } + + // Verify signature if secret is set + if wh.manager.config.Secret != "" { + 
signature := r.Header.Get("X-Webhook-Signature") + if !VerifySignature(body, signature, wh.manager.config.Secret) { + http.Error(w, "Invalid signature", http.StatusUnauthorized) + return + } + } + + var event WebhookEvent + if err := json.Unmarshal(body, &event); err != nil { + http.Error(w, "Invalid JSON", http.StatusBadRequest) + return + } + + // Trigger handlers + wh.manager.Trigger(event) + + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"status":"ok"}`)) +} + +// GetStats returns webhook statistics +func (wm *WebhookManager) GetStats() map[string]interface{} { + wm.mu.RLock() + defer wm.mu.RUnlock() + + eventTypes := make([]string, 0, len(wm.handlers)) + for eventType := range wm.handlers { + eventTypes = append(eventTypes, eventType) + } + + return map[string]interface{}{ + "enabled": wm.config.Enabled, + "event_types": eventTypes, + "url": wm.config.URL, + } +} diff --git a/pkg/websocket/handler.go b/pkg/websocket/handler.go new file mode 100644 index 0000000..d3c078e --- /dev/null +++ b/pkg/websocket/handler.go @@ -0,0 +1,224 @@ +package websocket + +import ( + "encoding/json" + "log" + "net/http" + "sync" + + "github.com/gorilla/websocket" + "github.com/labstack/echo/v4" +) + +var upgrader = websocket.Upgrader{ + CheckOrigin: func(r *http.Request) bool { + return true + }, +} + +// Client represents a WebSocket client +type Client struct { + ID string + Conn *websocket.Conn + Send chan []byte + Hub *Hub +} + +// Hub manages WebSocket connections +type Hub struct { + clients map[*Client]bool + broadcast chan []byte + register chan *Client + unregister chan *Client + mu sync.RWMutex +} + +// Message represents a WebSocket message +type Message struct { + Type string `json:"type"` + Payload interface{} `json:"payload"` + Room string `json:"room,omitempty"` +} + +// NewHub creates a new WebSocket hub +func NewHub() *Hub { + return &Hub{ + clients: make(map[*Client]bool), + broadcast: make(chan []byte), + register: make(chan *Client), + unregister: make(chan 
*Client), + } +} + +// Run starts the hub +func (h *Hub) Run() { + for { + select { + case client := <-h.register: + h.mu.Lock() + h.clients[client] = true + h.mu.Unlock() + log.Printf("Client connected: %s", client.ID) + + case client := <-h.unregister: + h.mu.Lock() + if _, ok := h.clients[client]; ok { + delete(h.clients, client) + close(client.Send) + } + h.mu.Unlock() + log.Printf("Client disconnected: %s", client.ID) + + case message := <-h.broadcast: + h.mu.RLock() + for client := range h.clients { + select { + case client.Send <- message: + default: + close(client.Send) + delete(h.clients, client) + } + } + h.mu.RUnlock() + } + } +} + +// Broadcast sends a message to all clients +func (h *Hub) Broadcast(message []byte) { + h.broadcast <- message +} + +// SendToClient sends a message to a specific client +func (h *Hub) SendToClient(clientID string, message []byte) { + h.mu.RLock() + defer h.mu.RUnlock() + + for client := range h.clients { + if client.ID == clientID { + select { + case client.Send <- message: + default: + close(client.Send) + delete(h.clients, client) + } + break + } + } +} + +// GetConnectedClients returns the number of connected clients +func (h *Hub) GetConnectedClients() int { + h.mu.RLock() + defer h.mu.RUnlock() + return len(h.clients) +} + +// HandleWebSocket handles WebSocket connections +func HandleWebSocket(hub *Hub) echo.HandlerFunc { + return func(c echo.Context) error { + conn, err := upgrader.Upgrade(c.Response(), c.Request(), nil) + if err != nil { + log.Printf("WebSocket upgrade error: %v", err) + return err + } + + clientID := c.QueryParam("client_id") + if clientID == "" { + clientID = c.RealIP() + } + + client := &Client{ + ID: clientID, + Conn: conn, + Send: make(chan []byte, 256), + Hub: hub, + } + + hub.register <- client + + go client.writePump() + go client.readPump() + + return nil + } +} + +// readPump reads messages from the WebSocket connection +func (c *Client) readPump() { + defer func() { + c.Hub.unregister <- c 
+ c.Conn.Close() + }() + + for { + _, message, err := c.Conn.ReadMessage() + if err != nil { + if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) { + log.Printf("WebSocket read error: %v", err) + } + break + } + + var msg Message + if err := json.Unmarshal(message, &msg); err != nil { + log.Printf("JSON unmarshal error: %v", err) + continue + } + + c.handleMessage(msg) + } +} + +// writePump writes messages to the WebSocket connection +func (c *Client) writePump() { + defer c.Conn.Close() + + for message := range c.Send { + if err := c.Conn.WriteMessage(websocket.TextMessage, message); err != nil { + log.Printf("WebSocket write error: %v", err) + break + } + } +} + +// handleMessage handles incoming messages +func (c *Client) handleMessage(msg Message) { + switch msg.Type { + case "ping": + response := Message{ + Type: "pong", + Payload: "pong", + } + data, _ := json.Marshal(response) + c.Send <- data + + case "broadcast": + data, _ := json.Marshal(msg) + c.Hub.Broadcast(data) + + default: + log.Printf("Unknown message type: %s", msg.Type) + } +} + +// BroadcastMessage broadcasts a message to all connected clients +func BroadcastMessage(hub *Hub, messageType string, payload interface{}) { + msg := Message{ + Type: messageType, + Payload: payload, + } + data, err := json.Marshal(msg) + if err != nil { + log.Printf("JSON marshal error: %v", err) + return + } + hub.Broadcast(data) +} + +// GetHubStats returns hub statistics +func GetHubStats(hub *Hub) map[string]interface{} { + return map[string]interface{}{ + "connected_clients": hub.GetConnectedClients(), + } +} diff --git a/tests/infrastructure/afero_test.go b/tests/infrastructure/afero_test.go new file mode 100644 index 0000000..07875bd --- /dev/null +++ b/tests/infrastructure/afero_test.go @@ -0,0 +1,190 @@ +package infrastructure_test + +import ( + "embed" + "strings" + "testing" + + "stackyard/pkg/infrastructure" +) + +//go:embed testdata/config.yaml 
testdata/README.md testdata/test.txt +var testFS embed.FS + +func TestAferoManager(t *testing.T) { + // Test alias configuration + aliasMap := map[string]string{ + "config": "all:testdata/config.yaml", + "readme": "all:testdata/README.md", + "test": "all:testdata/test.txt", + } + + // Test initialization + t.Run("Init", func(t *testing.T) { + infrastructure.ResetForTesting() + infrastructure.Init(testFS, aliasMap, true) + + if infrastructure.GetFileSystem() == nil { + t.Fatal("Expected filesystem to be initialized") + } + + if len(infrastructure.GetAliases()) != 3 { + t.Errorf("Expected 3 aliases, got %d", len(infrastructure.GetAliases())) + } + }) + + // Test Exists function + t.Run("Exists", func(t *testing.T) { + // Reset and re-initialize for this test to ensure singleton is set + infrastructure.ResetForTesting() + infrastructure.Init(testFS, aliasMap, true) + + // Debug: Check if aliases are set + aliases := infrastructure.GetAliases() + t.Logf("Aliases after Init: %v", aliases) + + // Debug: Check if filesystem is set + fs := infrastructure.GetFileSystem() + t.Logf("Filesystem is nil: %v", fs == nil) + + // Test non-existing alias + if infrastructure.Exists("nonexistent") { + t.Error("Expected 'nonexistent' alias to not exist") + } + + // Test existing alias + if !infrastructure.Exists("config") { + t.Error("Expected 'config' alias to exist") + } + }) + + // Test GetAliases function + t.Run("GetAliases", func(t *testing.T) { + aliases := infrastructure.GetAliases() + if len(aliases) != 3 { + t.Errorf("Expected 3 aliases, got %d. 
Aliases: %v", len(aliases), aliases) + } + + if aliases["config"] != "all:testdata/config.yaml" { + t.Errorf("Expected config alias to be 'all:testdata/config.yaml', got %s", aliases["config"]) + } + + if aliases["readme"] != "all:testdata/README.md" { + t.Errorf("Expected readme alias to be 'all:testdata/README.md', got %s", aliases["readme"]) + } + + if aliases["test"] != "all:testdata/test.txt" { + t.Errorf("Expected test alias to be 'all:testdata/test.txt', got %s", aliases["test"]) + } + }) + + // Test GetFileSystem function + t.Run("GetFileSystem", func(t *testing.T) { + fs := infrastructure.GetFileSystem() + if fs == nil { + t.Error("Expected filesystem to be returned") + } + }) + + // Test Read function + t.Run("Read", func(t *testing.T) { + content, err := infrastructure.Read("test") + if err != nil { + t.Errorf("Expected to read file, got error: %v", err) + } + if !strings.Contains(string(content), "test content") { + t.Errorf("Expected file content to contain 'test content', got: %s", string(content)) + } + }) + + // Test Stream function + t.Run("Stream", func(t *testing.T) { + stream, err := infrastructure.Stream("test") + if err != nil { + t.Errorf("Expected to stream file, got error: %v", err) + } + if stream == nil { + t.Error("Expected stream to be returned") + } + stream.Close() + }) + + // Test development mode (CopyOnWriteFs) + t.Run("DevelopmentMode", func(t *testing.T) { + // Reset and re-initialize in development mode + infrastructure.ResetForTesting() + infrastructure.Init(testFS, aliasMap, true) + + // Should be CopyOnWriteFs in development mode + fs := infrastructure.GetFileSystem() + if fs == nil { + t.Error("Expected filesystem to be initialized") + } + }) + + // Test production mode (ReadOnlyFs) + t.Run("ProductionMode", func(t *testing.T) { + // Reset and re-initialize in production mode + infrastructure.ResetForTesting() + aliasMap := map[string]string{ + "test": "all:testdata/test.txt", + } + infrastructure.Init(testFS, aliasMap, 
false) + + // Should be ReadOnlyFs in production mode + fs := infrastructure.GetFileSystem() + if fs == nil { + t.Error("Expected filesystem to be initialized") + } + }) + + // Test error handling + t.Run("ErrorHandling", func(t *testing.T) { + // Reset for this test + infrastructure.ResetForTesting() + + // Test Read without initialization + _, err := infrastructure.Read("test") + if err == nil { + t.Error("Expected error when reading without initialization") + } + if !strings.Contains(err.Error(), "not initialized") { + t.Errorf("Expected 'not initialized' error, got: %v", err) + } + + // Test Stream without initialization + _, err = infrastructure.Stream("test") + if err == nil { + t.Error("Expected error when streaming without initialization") + } + if !strings.Contains(err.Error(), "not initialized") { + t.Errorf("Expected 'not initialized' error, got: %v", err) + } + + // Test Exists without initialization + if infrastructure.Exists("test") { + t.Error("Expected false when checking existence without initialization") + } + + // Now initialize and test with non-existent alias + infrastructure.Init(testFS, aliasMap, true) + + // Test Read with non-existent alias + _, err = infrastructure.Read("nonexistent") + if err == nil { + t.Error("Expected error when reading non-existent alias") + } + if !strings.Contains(err.Error(), "not found") { + t.Errorf("Expected 'not found' error, got: %v", err) + } + + // Test Stream with non-existent alias + _, err = infrastructure.Stream("nonexistent") + if err == nil { + t.Error("Expected error when streaming non-existent alias") + } + if !strings.Contains(err.Error(), "not found") { + t.Errorf("Expected 'not found' error, got: %v", err) + } + }) +} diff --git a/tests/infrastructure/testdata/README.md b/tests/infrastructure/testdata/README.md new file mode 100644 index 0000000..e8f9bf7 --- /dev/null +++ b/tests/infrastructure/testdata/README.md @@ -0,0 +1 @@ +# Test README diff --git a/tests/infrastructure/testdata/config.yaml 
b/tests/infrastructure/testdata/config.yaml new file mode 100644 index 0000000..c75dc06 --- /dev/null +++ b/tests/infrastructure/testdata/config.yaml @@ -0,0 +1 @@ +# Test Config diff --git a/tests/infrastructure/testdata/test.txt b/tests/infrastructure/testdata/test.txt new file mode 100644 index 0000000..d670460 --- /dev/null +++ b/tests/infrastructure/testdata/test.txt @@ -0,0 +1 @@ +test content diff --git a/tests/services/products_service_test.go b/tests/services/products_service_test.go new file mode 100644 index 0000000..2f0158b --- /dev/null +++ b/tests/services/products_service_test.go @@ -0,0 +1,69 @@ +package services_test + +import ( + "testing" + + "stackyard/internal/services/modules" + testhelpers "stackyard/pkg/testing" +) + +func TestNewProductsService(t *testing.T) { + service := modules.NewProductsService(true) + if service == nil { + t.Fatal("expected service to be created") + } + if !service.Enabled() { + t.Error("expected service to be enabled") + } + if service.Name() != "Products Service" { + t.Errorf("expected name 'Products Service', got %q", service.Name()) + } + if service.WireName() != "products-service" { + t.Errorf("expected wire name 'products-service', got %q", service.WireName()) + } +} + +func TestProductsServiceDisabled(t *testing.T) { + service := modules.NewProductsService(false) + if service.Enabled() { + t.Error("expected service to be disabled") + } +} + +func TestProductsServiceEndpoints(t *testing.T) { + service := modules.NewProductsService(true) + endpoints := service.Endpoints() + if len(endpoints) != 1 { + t.Fatalf("expected 1 endpoint, got %d", len(endpoints)) + } + if endpoints[0] != "/products" { + t.Errorf("expected endpoint '/products', got %q", endpoints[0]) + } +} + +func TestProductsServiceRegisterRoutes(t *testing.T) { + service := modules.NewProductsService(true) + defer func() { + if r := recover(); r != nil { + t.Errorf("RegisterRoutes panicked: %v", r) + } + }() + + e := testhelpers.NewTestEcho() + g := 
e.Group("/api/v1") + service.RegisterRoutes(g) +} + +func BenchmarkProductsServiceName(b *testing.B) { + service := modules.NewProductsService(true) + for i := 0; i < b.N; i++ { + _ = service.Name() + } +} + +func BenchmarkProductsServiceEnabled(b *testing.B) { + service := modules.NewProductsService(true) + for i := 0; i < b.N; i++ { + _ = service.Enabled() + } +} diff --git a/tests/services/users_service_test.go b/tests/services/users_service_test.go new file mode 100644 index 0000000..0185c46 --- /dev/null +++ b/tests/services/users_service_test.go @@ -0,0 +1,281 @@ +package services_test + +import ( + "encoding/json" + "net/http" + "testing" + + "stackyard/internal/services/modules" + "stackyard/pkg/response" + testhelpers "stackyard/pkg/testing" +) + +func TestNewUsersService(t *testing.T) { + service := modules.NewUsersService(true) + if service == nil { + t.Fatal("expected service to be created") + } + if !service.Enabled() { + t.Error("expected service to be enabled") + } + if service.Name() != "Users Service" { + t.Errorf("expected name 'Users Service', got %q", service.Name()) + } +} + +func TestUsersServiceDisabled(t *testing.T) { + service := modules.NewUsersService(false) + if service.Enabled() { + t.Error("expected service to be disabled") + } +} + +func TestGetUsers(t *testing.T) { + service := modules.NewUsersService(true) + c, rec := testhelpers.NewTestContext(http.MethodGet, "/api/v1/users", nil) + + err := service.GetUsers(c) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + testhelpers.AssertStatus(t, rec, http.StatusOK) + + var resp response.Response + testhelpers.ParseResponse(t, rec, &resp) + + if !resp.Success { + t.Error("expected success to be true") + } + if resp.Data == nil { + t.Error("expected data to be present") + } +} + +func TestGetUsersWithPagination(t *testing.T) { + service := modules.NewUsersService(true) + c, rec := testhelpers.NewTestContextWithQuery(http.MethodGet, "/api/v1/users", map[string]string{ + 
"page": "1", + "per_page": "10", + }) + + err := service.GetUsers(c) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + testhelpers.AssertStatus(t, rec, http.StatusOK) + + var resp response.Response + testhelpers.ParseResponse(t, rec, &resp) + + if resp.Meta == nil { + t.Error("expected meta to be present") + } +} + +func TestGetUser(t *testing.T) { + service := modules.NewUsersService(true) + c, rec := testhelpers.NewTestContextWithParams(http.MethodGet, "/api/v1/users/:id", map[string]string{ + "id": "1", + }, nil) + + err := service.GetUser(c) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + testhelpers.AssertStatus(t, rec, http.StatusOK) + + var resp response.Response + testhelpers.ParseResponse(t, rec, &resp) + + if !resp.Success { + t.Error("expected success to be true") + } +} + +func TestGetUserNotFound(t *testing.T) { + service := modules.NewUsersService(true) + c, rec := testhelpers.NewTestContextWithParams(http.MethodGet, "/api/v1/users/:id", map[string]string{ + "id": "999", + }, nil) + + err := service.GetUser(c) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + testhelpers.AssertStatus(t, rec, http.StatusNotFound) + + var resp response.Response + testhelpers.ParseResponse(t, rec, &resp) + + if resp.Success { + t.Error("expected success to be false") + } +} + +func TestCreateUser(t *testing.T) { + service := modules.NewUsersService(true) + body := modules.CreateUserRequest{ + Username: "testuser", + Email: "test@example.com", + FullName: "Test User", + } + c, rec := testhelpers.NewTestContext(http.MethodPost, "/api/v1/users", body) + + err := service.CreateUser(c) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + testhelpers.AssertStatus(t, rec, http.StatusCreated) + + var resp response.Response + testhelpers.ParseResponse(t, rec, &resp) + + if !resp.Success { + t.Error("expected success to be true") + } + if resp.Message != "User created successfully" { + t.Errorf("expected message 'User 
created successfully', got %q", resp.Message) + } +} + +func TestCreateUserValidation(t *testing.T) { + service := modules.NewUsersService(true) + body := modules.CreateUserRequest{ + Username: "", + Email: "invalid-email", + FullName: "T", + } + c, rec := testhelpers.NewTestContext(http.MethodPost, "/api/v1/users", body) + + err := service.CreateUser(c) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + testhelpers.AssertStatus(t, rec, http.StatusUnprocessableEntity) + + var resp response.Response + testhelpers.ParseResponse(t, rec, &resp) + + if resp.Success { + t.Error("expected success to be false") + } +} + +func TestUpdateUser(t *testing.T) { + service := modules.NewUsersService(true) + body := modules.UpdateUserRequest{ + Username: "updateduser", + Email: "updated@example.com", + FullName: "Updated User", + Status: "active", + } + c, rec := testhelpers.NewTestContextWithParams(http.MethodPut, "/api/v1/users/:id", map[string]string{ + "id": "1", + }, body) + + err := service.UpdateUser(c) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + testhelpers.AssertStatus(t, rec, http.StatusOK) + + var resp response.Response + testhelpers.ParseResponse(t, rec, &resp) + + if !resp.Success { + t.Error("expected success to be true") + } +} + +func TestDeleteUser(t *testing.T) { + service := modules.NewUsersService(true) + c, rec := testhelpers.NewTestContextWithParams(http.MethodDelete, "/api/v1/users/:id", map[string]string{ + "id": "1", + }, nil) + + err := service.DeleteUser(c) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + testhelpers.AssertStatus(t, rec, http.StatusNoContent) +} + +func TestDeleteUserNotFound(t *testing.T) { + service := modules.NewUsersService(true) + c, rec := testhelpers.NewTestContextWithParams(http.MethodDelete, "/api/v1/users/:id", map[string]string{ + "id": "999", + }, nil) + + err := service.DeleteUser(c) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + 
testhelpers.AssertStatus(t, rec, http.StatusNotFound) +} + +func TestUserStruct(t *testing.T) { + user := modules.User{ + ID: "1", + Username: "testuser", + Email: "test@example.com", + Status: "active", + CreatedAt: 1234567890, + } + + jsonData, err := json.Marshal(user) + if err != nil { + t.Fatalf("failed to marshal user: %v", err) + } + + var decoded modules.User + if err := json.Unmarshal(jsonData, &decoded); err != nil { + t.Fatalf("failed to unmarshal user: %v", err) + } + + if decoded.ID != user.ID { + t.Errorf("expected ID %q, got %q", user.ID, decoded.ID) + } + if decoded.Username != user.Username { + t.Errorf("expected Username %q, got %q", user.Username, decoded.Username) + } +} + +func BenchmarkGetUsers(b *testing.B) { + service := modules.NewUsersService(true) + for i := 0; i < b.N; i++ { + c, _ := testhelpers.NewTestContext(http.MethodGet, "/api/v1/users", nil) + _ = service.GetUsers(c) + } +} + +func BenchmarkGetUser(b *testing.B) { + service := modules.NewUsersService(true) + for i := 0; i < b.N; i++ { + c, _ := testhelpers.NewTestContextWithParams(http.MethodGet, "/api/v1/users/:id", map[string]string{ + "id": "1", + }, nil) + _ = service.GetUser(c) + } +} + +func BenchmarkCreateUser(b *testing.B) { + service := modules.NewUsersService(true) + body := modules.CreateUserRequest{ + Username: "benchuser", + Email: "bench@example.com", + FullName: "Benchmark User", + } + for i := 0; i < b.N; i++ { + c, _ := testhelpers.NewTestContext(http.MethodPost, "/api/v1/users", body) + _ = service.CreateUser(c) + } +} From 48a1604383fdcd7ca9ecb457575c6513ace5f229 Mon Sep 17 00:00:00 2001 From: "Gab." Date: Sat, 28 Mar 2026 11:45:50 +0700 Subject: [PATCH 14/18] docs: add Swagger API documentation and refactor flag parsing Add comprehensive Swagger/OpenAPI annotations to API endpoints including Grafana and MongoDB services for improved documentation. 
Refactor command-line flag parsing to use parameter utility, adding support for port, verbose, and environment flags. --- cmd/app/main.go | 80 +- docs/docs.go | 2097 +++++++++++++++++ docs/swagger.json | 2073 ++++++++++++++++ docs/swagger.yaml | 1582 +++++++++++-- docs_wiki/API_DOCS.md | 1120 +++++++++ docs_wiki/blueprint/blueprint.txt | 18 + .../services/modules/broadcast_service.go | 10 + internal/services/modules/cache_service.go | 67 +- .../services/modules/encryption_service.go | 39 + internal/services/modules/grafana_service.go | 95 +- internal/services/modules/mongodb_service.go | 99 +- .../services/modules/multi_tenant_service.go | 69 +- internal/services/modules/tasks_service.go | 43 + pkg/utils/parameter.go | 17 +- scripts/swagger/swagger.go | 565 +++++ 15 files changed, 7665 insertions(+), 309 deletions(-) create mode 100644 docs/docs.go create mode 100644 docs/swagger.json create mode 100644 docs_wiki/API_DOCS.md create mode 100644 scripts/swagger/swagger.go diff --git a/cmd/app/main.go b/cmd/app/main.go index ea1824d..c9af7a4 100644 --- a/cmd/app/main.go +++ b/cmd/app/main.go @@ -1,19 +1,38 @@ package main import ( - "flag" "fmt" "net/url" "os" + + "stackyard/pkg/utils" ) +// @title Stackyard API +// @version 1.0 +// @description Stackyard API Documentation - A modular Go API framework +// @termsOfService http://swagger.io/terms/ + +// @contact.name API Support +// @contact.email admin@stackyard.com + +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html + +// @host localhost:8080 +// @BasePath /api/v1 + +// @securityDefinitions.apikey ApiKeyAuth +// @in header +// @name Authorization + // main is the entry point of the application func main() { // Parse command line flags - configURL := parseFlags() + flags := parseFlags() // Create configuration manager - configManager := NewConfigManager(configURL) + configManager := NewConfigManager(flags.ConfigURL) // Create application with dependency injection app := 
NewApplication(configManager) @@ -25,20 +44,47 @@ func main() { } } -// parseFlags parses command line flags using standard Go flag package -func parseFlags() string { - var configURL string - flag.StringVar(&configURL, "c", "", "URL to load configuration from (YAML format)") - flag.Parse() - - // Validate URL if provided - if configURL != "" { - if _, err := url.ParseRequestURI(configURL); err != nil { - fmt.Printf("Invalid config URL format: %v\n", err) - fmt.Println("Usage: stackyard [-c config-url]") - os.Exit(1) - } +// parseFlags parses command line flags using the parameter utility +func parseFlags() *utils.ParsedFlags { + // Define flag definitions + flagDefinitions := []utils.FlagDefinition{ + { + Name: "c", + DefaultValue: "", + Description: "URL to load configuration from (YAML format)", + Validator: func(value interface{}) error { + if urlStr, ok := value.(string); ok && urlStr != "" { + if _, err := url.ParseRequestURI(urlStr); err != nil { + return fmt.Errorf("invalid config URL format: %w", err) + } + } + return nil + }, + }, + { + Name: "port", + DefaultValue: "", + Description: "Server port (overrides config)", + }, + { + Name: "verbose", + DefaultValue: false, + Description: "Enable verbose logging", + }, + { + Name: "env", + DefaultValue: "", + Description: "Environment (development/staging/production)", + }, + } + + // Parse flags using the utility + flags, err := utils.ParseFlags(flagDefinitions) + if err != nil { + fmt.Printf("Error parsing flags: %v\n", err) + utils.PrintUsage(flagDefinitions, AppName) + os.Exit(1) } - return configURL + return flags } diff --git a/docs/docs.go b/docs/docs.go new file mode 100644 index 0000000..d17a57b --- /dev/null +++ b/docs/docs.go @@ -0,0 +1,2097 @@ +// Package docs Code generated by swaggo/swag. 
DO NOT EDIT +package docs + +import "github.com/swaggo/swag" + +const docTemplate = `{ + "schemes": {{ marshal .Schemes }}, + "swagger": "2.0", + "info": { + "description": "{{escape .Description}}", + "title": "{{.Title}}", + "termsOfService": "http://swagger.io/terms/", + "contact": { + "name": "API Support", + "email": "admin@stackyard.com" + }, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + }, + "version": "{{.Version}}" + }, + "host": "{{.Host}}", + "basePath": "{{.BasePath}}", + "paths": { + "/cache/{key}": { + "get": { + "description": "Retrieve a cached value by its key", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "cache" + ], + "summary": "Get cached value by key", + "parameters": [ + { + "type": "string", + "description": "Cache key", + "name": "key", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Success", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Key not found or expired", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "post": { + "description": "Store a value in the cache with optional TTL", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "cache" + ], + "summary": "Set cached value", + "parameters": [ + { + "type": "string", + "description": "Cache key", + "name": "key", + "in": "path", + "required": true + }, + { + "description": "Cache request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.CacheRequest" + } + } + ], + "responses": { + "200": { + "description": "Cached successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid body", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/encryption/decrypt": { + "post": { + 
"description": "Decrypt encrypted data using AES-256-GCM", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "encryption" + ], + "summary": "Decrypt data", + "parameters": [ + { + "description": "Data to decrypt", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.DecryptRequest" + } + } + ], + "responses": { + "200": { + "description": "Data decrypted successfully", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/response.Response" + }, + { + "type": "object", + "properties": { + "data": { + "$ref": "#/definitions/modules.DecryptResponse" + } + } + } + ] + } + }, + "400": { + "description": "Invalid request body or decryption failed", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/encryption/encrypt": { + "post": { + "description": "Encrypt plaintext data using AES-256-GCM", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "encryption" + ], + "summary": "Encrypt data", + "parameters": [ + { + "description": "Data to encrypt", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.EncryptRequest" + } + } + ], + "responses": { + "200": { + "description": "Data encrypted successfully", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/response.Response" + }, + { + "type": "object", + "properties": { + "data": { + "$ref": "#/definitions/modules.EncryptResponse" + } + } + } + ] + } + }, + "400": { + "description": "Invalid request body", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Encryption failed", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/encryption/key-rotate": { + "post": { + "description": "Rotate the encryption key with a new key", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ 
+ "encryption" + ], + "summary": "Rotate encryption key", + "parameters": [ + { + "description": "New encryption key", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.KeyRotateRequest" + } + } + ], + "responses": { + "200": { + "description": "Key rotation successful", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid request body", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/encryption/status": { + "get": { + "description": "Get the current status and configuration of the encryption service", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "encryption" + ], + "summary": "Get encryption service status", + "responses": { + "200": { + "description": "Encryption service status", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/response.Response" + }, + { + "type": "object", + "properties": { + "data": { + "$ref": "#/definitions/modules.StatusResponse" + } + } + } + ] + } + } + } + } + }, + "/events/stream/{stream_id}": { + "get": { + "description": "Subscribe to Server-Sent Events (SSE) for a specific stream", + "consumes": [ + "application/json" + ], + "produces": [ + "text/event-stream" + ], + "tags": [ + "events" + ], + "summary": "Stream events from a specific stream", + "parameters": [ + { + "type": "string", + "description": "Stream ID", + "name": "stream_id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "SSE stream", + "schema": { + "type": "string" + } + }, + "404": { + "description": "Stream not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/grafana/annotations": { + "post": { + "description": "Create a new Grafana annotation", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "Create Grafana 
annotation", + "parameters": [ + { + "description": "Annotation data", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/infrastructure.GrafanaAnnotation" + } + } + ], + "responses": { + "201": { + "description": "Annotation created successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid annotation data", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to create annotation", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/grafana/dashboards": { + "get": { + "description": "List all Grafana dashboards with pagination", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "List Grafana dashboards", + "parameters": [ + { + "type": "integer", + "default": 1, + "description": "Page number", + "name": "page", + "in": "query" + }, + { + "type": "integer", + "default": 50, + "description": "Items per page", + "name": "per_page", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Dashboards retrieved successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to list dashboards", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "post": { + "description": "Create a new Grafana dashboard", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "Create Grafana dashboard", + "parameters": [ + { + "description": "Dashboard data", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/infrastructure.GrafanaDashboard" + } + } + ], + "responses": { + "201": { + "description": "Dashboard created successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + 
"description": "Invalid dashboard data", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to create dashboard", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/grafana/dashboards/{uid}": { + "get": { + "description": "Retrieve a Grafana dashboard by UID", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "Get Grafana dashboard", + "parameters": [ + { + "type": "string", + "description": "Dashboard UID", + "name": "uid", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Dashboard retrieved successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Dashboard UID is required", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Dashboard not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "put": { + "description": "Update an existing Grafana dashboard by UID", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "Update Grafana dashboard", + "parameters": [ + { + "type": "string", + "description": "Dashboard UID", + "name": "uid", + "in": "path", + "required": true + }, + { + "description": "Dashboard data", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/infrastructure.GrafanaDashboard" + } + } + ], + "responses": { + "200": { + "description": "Dashboard updated successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid dashboard data or missing UID", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to update dashboard", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + 
"delete": { + "description": "Delete a Grafana dashboard by UID", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "Delete Grafana dashboard", + "parameters": [ + { + "type": "string", + "description": "Dashboard UID", + "name": "uid", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Dashboard deleted successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Dashboard UID is required", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to delete dashboard", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/grafana/datasources": { + "post": { + "description": "Create a new Grafana data source", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "Create Grafana data source", + "parameters": [ + { + "description": "Data source configuration", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/infrastructure.GrafanaDataSource" + } + } + ], + "responses": { + "201": { + "description": "Data source created successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid data source data", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to create data source", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/grafana/health": { + "get": { + "description": "Check Grafana service health", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "Get Grafana health status", + "responses": { + "200": { + "description": "Grafana health check successful", + "schema": { + "$ref": 
"#/definitions/response.Response" + } + }, + "503": { + "description": "Grafana is not available", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/orders/{tenant}": { + "get": { + "description": "Retrieve all orders from a specific tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "orders" + ], + "summary": "List orders by tenant", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Orders retrieved from tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to query tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "post": { + "description": "Create a new order in a specific tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "orders" + ], + "summary": "Create order in tenant database", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "description": "Order data", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.MultiTenantOrder" + } + } + ], + "responses": { + "201": { + "description": "Order created in tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid order data", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to create order", + 
"schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/orders/{tenant}/{id}": { + "get": { + "description": "Retrieve a specific order from a tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "orders" + ], + "summary": "Get order by tenant", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Order ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Order retrieved from tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid order ID", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database or order not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to query tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "put": { + "description": "Update an order in a specific tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "orders" + ], + "summary": "Update order in tenant database", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Order ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Order update data", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.MultiTenantOrder" + } + } + ], + "responses": { + "200": { + "description": "Order updated in tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid order ID or update 
data", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database or order not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to update order", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "delete": { + "description": "Delete an order from a specific tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "orders" + ], + "summary": "Delete order from tenant database", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Order ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Order deleted from tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid order ID", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database or order not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to delete order", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/products": { + "get": { + "description": "Get a list of products", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "Get products", + "responses": { + "200": { + "description": "Success", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/products/{tenant}": { + "get": { + "description": "Retrieve all products from a specific tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "List products by tenant", + "parameters": [ 
+ { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Products retrieved from tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to query tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "post": { + "description": "Create a new product in a specific tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "Create product in tenant database", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "description": "Product data", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.Product" + } + } + ], + "responses": { + "201": { + "description": "Product created in tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid product data", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to create product", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/products/{tenant}/analytics": { + "get": { + "description": "Get analytics for products in a tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "Get product analytics", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": 
"path", + "required": true + } + ], + "responses": { + "200": { + "description": "Product analytics for tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to aggregate product analytics", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/products/{tenant}/search": { + "get": { + "description": "Search products with various filters in a tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "Search products in tenant database", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Search by product name", + "name": "name", + "in": "query" + }, + { + "type": "string", + "description": "Filter by category", + "name": "category", + "in": "query" + }, + { + "type": "boolean", + "description": "Filter by stock availability", + "name": "in_stock", + "in": "query" + }, + { + "type": "number", + "description": "Minimum price filter", + "name": "min_price", + "in": "query" + }, + { + "type": "number", + "description": "Maximum price filter", + "name": "max_price", + "in": "query" + }, + { + "type": "string", + "description": "Filter by tags (comma-separated)", + "name": "tags", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Products found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to search products", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/products/{tenant}/{id}": { + "get": { + 
"description": "Retrieve a specific product from a tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "Get product by tenant", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Product ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Product retrieved from tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid product ID format", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database or product not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to query tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "put": { + "description": "Update a product in a specific tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "Update product in tenant database", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Product ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Product update data", + "name": "request", + "in": "body", + "required": true, + "schema": { + "type": "object", + "additionalProperties": true + } + } + ], + "responses": { + "200": { + "description": "Product updated in tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid product ID format or update data", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + 
"404": { + "description": "Tenant database or product not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to update product", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "delete": { + "description": "Delete a product from a specific tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "Delete product from tenant database", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Product ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Product deleted from tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid product ID format", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database or product not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to delete product", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/tasks": { + "get": { + "description": "Retrieve all tasks from the database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "tasks" + ], + "summary": "List all tasks", + "responses": { + "200": { + "description": "Tasks retrieved successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to retrieve tasks", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "post": { + "description": "Create a new task in the database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "tasks" + ], + 
"summary": "Create a new task", + "parameters": [ + { + "description": "Task to create", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.Task" + } + } + ], + "responses": { + "201": { + "description": "Task created successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid input", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to create task", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/tasks/{id}": { + "put": { + "description": "Update an existing task by ID", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "tasks" + ], + "summary": "Update a task", + "parameters": [ + { + "type": "integer", + "description": "Task ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Task data to update", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.Task" + } + } + ], + "responses": { + "200": { + "description": "Task updated successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid input", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Task not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to update task", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "delete": { + "description": "Delete a task by ID", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "tasks" + ], + "summary": "Delete a task", + "parameters": [ + { + "type": "integer", + "description": "Task ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Task deleted 
successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to delete task", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + } + }, + "definitions": { + "infrastructure.GrafanaAnnotation": { + "type": "object", + "properties": { + "dashboardId": { + "type": "integer" + }, + "data": { + "type": "object", + "additionalProperties": true + }, + "id": { + "type": "integer" + }, + "panelId": { + "type": "integer" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + } + }, + "text": { + "type": "string" + }, + "time": { + "type": "integer" + }, + "timeEnd": { + "type": "integer" + } + } + }, + "infrastructure.GrafanaAnnotations": { + "type": "object", + "properties": { + "list": { + "type": "array", + "items": {} + } + } + }, + "infrastructure.GrafanaDashboard": { + "type": "object", + "properties": { + "annotations": { + "$ref": "#/definitions/infrastructure.GrafanaAnnotations" + }, + "id": { + "type": "integer" + }, + "links": { + "type": "array", + "items": {} + }, + "panels": { + "type": "array", + "items": { + "$ref": "#/definitions/infrastructure.GrafanaPanel" + } + }, + "refresh": { + "type": "string" + }, + "schemaVersion": { + "type": "integer" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + } + }, + "templating": { + "$ref": "#/definitions/infrastructure.GrafanaTemplating" + }, + "time": { + "$ref": "#/definitions/infrastructure.GrafanaTimeRange" + }, + "timepicker": { + "$ref": "#/definitions/infrastructure.GrafanaTimePicker" + }, + "timezone": { + "type": "string" + }, + "title": { + "type": "string" + }, + "uid": { + "type": "string" + }, + "version": { + "type": "integer" + } + } + }, + "infrastructure.GrafanaDataSource": { + "type": "object", + "properties": { + "access": { + "type": "string" + }, + "basicAuth": { + "type": "boolean" + }, + "basicAuthPassword": { + "type": "string" + }, + "basicAuthUser": { + "type": "string" + }, + 
"database": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "jsonData": { + "type": "object", + "additionalProperties": true + }, + "name": { + "type": "string" + }, + "password": { + "type": "string" + }, + "readOnly": { + "type": "boolean" + }, + "secureJsonData": { + "type": "object", + "additionalProperties": true + }, + "type": { + "type": "string" + }, + "uid": { + "type": "string" + }, + "url": { + "type": "string" + }, + "user": { + "type": "string" + } + } + }, + "infrastructure.GrafanaDatasource": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "uid": { + "type": "string" + } + } + }, + "infrastructure.GrafanaFieldConfig": { + "type": "object", + "properties": { + "defaults": { + "$ref": "#/definitions/infrastructure.GrafanaFieldDefaults" + }, + "overrides": { + "type": "array", + "items": {} + } + } + }, + "infrastructure.GrafanaFieldDefaults": { + "type": "object", + "properties": { + "custom": { + "type": "object", + "additionalProperties": true + }, + "decimals": { + "type": "integer" + }, + "unit": { + "type": "string" + } + } + }, + "infrastructure.GrafanaGridPos": { + "type": "object", + "properties": { + "h": { + "type": "integer" + }, + "w": { + "type": "integer" + }, + "x": { + "type": "integer" + }, + "y": { + "type": "integer" + } + } + }, + "infrastructure.GrafanaPanel": { + "type": "object", + "properties": { + "fieldConfig": { + "$ref": "#/definitions/infrastructure.GrafanaFieldConfig" + }, + "gridPos": { + "$ref": "#/definitions/infrastructure.GrafanaGridPos" + }, + "id": { + "type": "integer" + }, + "options": { + "type": "object", + "additionalProperties": true + }, + "pluginVersion": { + "type": "string" + }, + "targets": { + "type": "array", + "items": { + "$ref": "#/definitions/infrastructure.GrafanaTarget" + } + }, + "title": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "infrastructure.GrafanaTarget": { + "type": "object", + "properties": { + "datasource": { + 
"$ref": "#/definitions/infrastructure.GrafanaDatasource" + }, + "expr": { + "type": "string" + }, + "legendFormat": { + "type": "string" + }, + "refId": { + "type": "string" + } + } + }, + "infrastructure.GrafanaTemplateVar": { + "type": "object", + "properties": { + "datasource": {}, + "label": { + "type": "string" + }, + "name": { + "type": "string" + }, + "query": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "infrastructure.GrafanaTemplating": { + "type": "object", + "properties": { + "list": { + "type": "array", + "items": { + "$ref": "#/definitions/infrastructure.GrafanaTemplateVar" + } + } + } + }, + "infrastructure.GrafanaTimePicker": { + "type": "object", + "properties": { + "refresh_intervals": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "infrastructure.GrafanaTimeRange": { + "type": "object", + "properties": { + "from": { + "type": "string" + }, + "to": { + "type": "string" + } + } + }, + "modules.CacheRequest": { + "type": "object", + "properties": { + "ttl_seconds": { + "description": "Optional", + "type": "integer" + }, + "value": { + "type": "string" + } + } + }, + "modules.DecryptRequest": { + "type": "object", + "required": [ + "encrypted_data" + ], + "properties": { + "content_type": { + "type": "string" + }, + "encrypted_data": { + "type": "string" + } + } + }, + "modules.DecryptResponse": { + "type": "object", + "properties": { + "algorithm": { + "type": "string" + }, + "content_type": { + "type": "string" + }, + "decrypted_data": { + "type": "string" + }, + "timestamp": { + "type": "integer" + } + } + }, + "modules.EncryptRequest": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "content_type": { + "description": "e.g., \"application/json\", \"text/plain\"", + "type": "string" + }, + "data": { + "type": "string" + } + } + }, + "modules.EncryptResponse": { + "type": "object", + "properties": { + "algorithm": { + "type": "string" + }, + "content_type": { + "type": 
"string" + }, + "encrypted_data": { + "type": "string" + }, + "timestamp": { + "type": "integer" + } + } + }, + "modules.KeyRotateRequest": { + "type": "object", + "required": [ + "new_key" + ], + "properties": { + "new_key": { + "type": "string", + "maxLength": 64, + "minLength": 16 + } + } + }, + "modules.MultiTenantOrder": { + "type": "object" + }, + "modules.Product": { + "type": "object", + "properties": { + "category": { + "type": "string" + }, + "description": { + "type": "string" + }, + "in_stock": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "price": { + "type": "number" + }, + "quantity": { + "type": "integer" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "modules.StatusResponse": { + "type": "object", + "properties": { + "algorithm": { + "type": "string" + }, + "current_key": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "key_length": { + "type": "integer" + }, + "last_rotation": { + "type": "integer" + }, + "rotate_keys": { + "type": "boolean" + } + } + }, + "modules.Task": { + "type": "object" + }, + "response.ErrorDetail": { + "type": "object", + "properties": { + "code": { + "type": "string" + }, + "details": { + "type": "object", + "additionalProperties": true + }, + "message": { + "type": "string" + } + } + }, + "response.Meta": { + "type": "object", + "properties": { + "extra": { + "type": "object", + "additionalProperties": true + }, + "page": { + "type": "integer" + }, + "per_page": { + "type": "integer" + }, + "total": { + "type": "integer" + }, + "total_pages": { + "type": "integer" + } + } + }, + "response.Response": { + "type": "object", + "properties": { + "correlation_id": { + "description": "Request ID for tracking", + "type": "string" + }, + "data": {}, + "datetime": { + "description": "ISO8601 Datetime", + "type": "string" + }, + "error": { + "$ref": "#/definitions/response.ErrorDetail" + }, + "message": { + "type": "string" + }, + "meta": { + "$ref": 
"#/definitions/response.Meta" + }, + "status": { + "description": "HTTP Status Code", + "type": "integer" + }, + "success": { + "type": "boolean" + }, + "timestamp": { + "description": "Unix Timestamp", + "type": "integer" + } + } + } + }, + "securityDefinitions": { + "ApiKeyAuth": { + "type": "apiKey", + "name": "Authorization", + "in": "header" + } + } +}` + +// SwaggerInfo holds exported Swagger Info so clients can modify it +var SwaggerInfo = &swag.Spec{ + Version: "1.0", + Host: "localhost:8080", + BasePath: "/api/v1", + Schemes: []string{}, + Title: "Stackyard API", + Description: "Stackyard API Documentation - A modular Go API framework", + InfoInstanceName: "swagger", + SwaggerTemplate: docTemplate, + LeftDelim: "{{", + RightDelim: "}}", +} + +func init() { + swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo) +} diff --git a/docs/swagger.json b/docs/swagger.json new file mode 100644 index 0000000..73c9734 --- /dev/null +++ b/docs/swagger.json @@ -0,0 +1,2073 @@ +{ + "swagger": "2.0", + "info": { + "description": "Stackyard API Documentation - A modular Go API framework", + "title": "Stackyard API", + "termsOfService": "http://swagger.io/terms/", + "contact": { + "name": "API Support", + "email": "admin@stackyard.com" + }, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + }, + "version": "1.0" + }, + "host": "localhost:8080", + "basePath": "/api/v1", + "paths": { + "/cache/{key}": { + "get": { + "description": "Retrieve a cached value by its key", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "cache" + ], + "summary": "Get cached value by key", + "parameters": [ + { + "type": "string", + "description": "Cache key", + "name": "key", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Success", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Key not found or expired", + 
"schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "post": { + "description": "Store a value in the cache with optional TTL", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "cache" + ], + "summary": "Set cached value", + "parameters": [ + { + "type": "string", + "description": "Cache key", + "name": "key", + "in": "path", + "required": true + }, + { + "description": "Cache request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.CacheRequest" + } + } + ], + "responses": { + "200": { + "description": "Cached successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid body", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/encryption/decrypt": { + "post": { + "description": "Decrypt encrypted data using AES-256-GCM", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "encryption" + ], + "summary": "Decrypt data", + "parameters": [ + { + "description": "Data to decrypt", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.DecryptRequest" + } + } + ], + "responses": { + "200": { + "description": "Data decrypted successfully", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/response.Response" + }, + { + "type": "object", + "properties": { + "data": { + "$ref": "#/definitions/modules.DecryptResponse" + } + } + } + ] + } + }, + "400": { + "description": "Invalid request body or decryption failed", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/encryption/encrypt": { + "post": { + "description": "Encrypt plaintext data using AES-256-GCM", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "encryption" + ], + "summary": "Encrypt data", + "parameters": [ + { + 
"description": "Data to encrypt", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.EncryptRequest" + } + } + ], + "responses": { + "200": { + "description": "Data encrypted successfully", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/response.Response" + }, + { + "type": "object", + "properties": { + "data": { + "$ref": "#/definitions/modules.EncryptResponse" + } + } + } + ] + } + }, + "400": { + "description": "Invalid request body", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Encryption failed", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/encryption/key-rotate": { + "post": { + "description": "Rotate the encryption key with a new key", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "encryption" + ], + "summary": "Rotate encryption key", + "parameters": [ + { + "description": "New encryption key", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.KeyRotateRequest" + } + } + ], + "responses": { + "200": { + "description": "Key rotation successful", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid request body", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/encryption/status": { + "get": { + "description": "Get the current status and configuration of the encryption service", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "encryption" + ], + "summary": "Get encryption service status", + "responses": { + "200": { + "description": "Encryption service status", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/response.Response" + }, + { + "type": "object", + "properties": { + "data": { + "$ref": "#/definitions/modules.StatusResponse" + } + } + } + ] + } + } + } + } + 
}, + "/events/stream/{stream_id}": { + "get": { + "description": "Subscribe to Server-Sent Events (SSE) for a specific stream", + "consumes": [ + "application/json" + ], + "produces": [ + "text/event-stream" + ], + "tags": [ + "events" + ], + "summary": "Stream events from a specific stream", + "parameters": [ + { + "type": "string", + "description": "Stream ID", + "name": "stream_id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "SSE stream", + "schema": { + "type": "string" + } + }, + "404": { + "description": "Stream not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/grafana/annotations": { + "post": { + "description": "Create a new Grafana annotation", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "Create Grafana annotation", + "parameters": [ + { + "description": "Annotation data", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/infrastructure.GrafanaAnnotation" + } + } + ], + "responses": { + "201": { + "description": "Annotation created successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid annotation data", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to create annotation", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/grafana/dashboards": { + "get": { + "description": "List all Grafana dashboards with pagination", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "List Grafana dashboards", + "parameters": [ + { + "type": "integer", + "default": 1, + "description": "Page number", + "name": "page", + "in": "query" + }, + { + "type": "integer", + "default": 50, + "description": "Items per page", + "name": "per_page", 
+ "in": "query" + } + ], + "responses": { + "200": { + "description": "Dashboards retrieved successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to list dashboards", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "post": { + "description": "Create a new Grafana dashboard", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "Create Grafana dashboard", + "parameters": [ + { + "description": "Dashboard data", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/infrastructure.GrafanaDashboard" + } + } + ], + "responses": { + "201": { + "description": "Dashboard created successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid dashboard data", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to create dashboard", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/grafana/dashboards/{uid}": { + "get": { + "description": "Retrieve a Grafana dashboard by UID", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "Get Grafana dashboard", + "parameters": [ + { + "type": "string", + "description": "Dashboard UID", + "name": "uid", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Dashboard retrieved successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Dashboard UID is required", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Dashboard not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "put": { + "description": "Update an existing Grafana dashboard by UID", + 
"consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "Update Grafana dashboard", + "parameters": [ + { + "type": "string", + "description": "Dashboard UID", + "name": "uid", + "in": "path", + "required": true + }, + { + "description": "Dashboard data", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/infrastructure.GrafanaDashboard" + } + } + ], + "responses": { + "200": { + "description": "Dashboard updated successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid dashboard data or missing UID", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to update dashboard", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "delete": { + "description": "Delete a Grafana dashboard by UID", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "Delete Grafana dashboard", + "parameters": [ + { + "type": "string", + "description": "Dashboard UID", + "name": "uid", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Dashboard deleted successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Dashboard UID is required", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to delete dashboard", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/grafana/datasources": { + "post": { + "description": "Create a new Grafana data source", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "Create Grafana data source", + "parameters": [ + { + "description": "Data source configuration", + "name": "request", + "in": 
"body", + "required": true, + "schema": { + "$ref": "#/definitions/infrastructure.GrafanaDataSource" + } + } + ], + "responses": { + "201": { + "description": "Data source created successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid data source data", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to create data source", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/grafana/health": { + "get": { + "description": "Check Grafana service health", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "grafana" + ], + "summary": "Get Grafana health status", + "responses": { + "200": { + "description": "Grafana health check successful", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "503": { + "description": "Grafana is not available", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/orders/{tenant}": { + "get": { + "description": "Retrieve all orders from a specific tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "orders" + ], + "summary": "List orders by tenant", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Orders retrieved from tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to query tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "post": { + "description": "Create a new order in a specific tenant's database", + "consumes": [ + "application/json" + ], + 
"produces": [ + "application/json" + ], + "tags": [ + "orders" + ], + "summary": "Create order in tenant database", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "description": "Order data", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.MultiTenantOrder" + } + } + ], + "responses": { + "201": { + "description": "Order created in tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid order data", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to create order", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/orders/{tenant}/{id}": { + "get": { + "description": "Retrieve a specific order from a tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "orders" + ], + "summary": "Get order by tenant", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Order ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Order retrieved from tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid order ID", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database or order not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to query tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + 
"put": { + "description": "Update an order in a specific tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "orders" + ], + "summary": "Update order in tenant database", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Order ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Order update data", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.MultiTenantOrder" + } + } + ], + "responses": { + "200": { + "description": "Order updated in tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid order ID or update data", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database or order not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to update order", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "delete": { + "description": "Delete an order from a specific tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "orders" + ], + "summary": "Delete order from tenant database", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Order ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Order deleted from tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid order ID", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": 
"Tenant database or order not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to delete order", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/products": { + "get": { + "description": "Get a list of products", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "Get products", + "responses": { + "200": { + "description": "Success", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/products/{tenant}": { + "get": { + "description": "Retrieve all products from a specific tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "List products by tenant", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Products retrieved from tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to query tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "post": { + "description": "Create a new product in a specific tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "Create product in tenant database", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "description": "Product data", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.Product" + } + } + ], + "responses": { + "201": { + 
"description": "Product created in tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid product data", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to create product", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/products/{tenant}/analytics": { + "get": { + "description": "Get analytics for products in a tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "Get product analytics", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Product analytics for tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to aggregate product analytics", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/products/{tenant}/search": { + "get": { + "description": "Search products with various filters in a tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "Search products in tenant database", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Search by product name", + "name": "name", + "in": "query" + }, + { + "type": "string", + "description": "Filter by category", + "name": "category", + "in": "query" + }, + { + "type": 
"boolean", + "description": "Filter by stock availability", + "name": "in_stock", + "in": "query" + }, + { + "type": "number", + "description": "Minimum price filter", + "name": "min_price", + "in": "query" + }, + { + "type": "number", + "description": "Maximum price filter", + "name": "max_price", + "in": "query" + }, + { + "type": "string", + "description": "Filter by tags (comma-separated)", + "name": "tags", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Products found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to search products", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/products/{tenant}/{id}": { + "get": { + "description": "Retrieve a specific product from a tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "Get product by tenant", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Product ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Product retrieved from tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid product ID format", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database or product not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to query tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "put": { + "description": "Update a product in a specific tenant's database", + "consumes": [ 
+ "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "Update product in tenant database", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Product ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Product update data", + "name": "request", + "in": "body", + "required": true, + "schema": { + "type": "object", + "additionalProperties": true + } + } + ], + "responses": { + "200": { + "description": "Product updated in tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid product ID format or update data", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database or product not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to update product", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "delete": { + "description": "Delete a product from a specific tenant's database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "products" + ], + "summary": "Delete product from tenant database", + "parameters": [ + { + "type": "string", + "description": "Tenant identifier", + "name": "tenant", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Product ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Product deleted from tenant database", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid product ID format", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Tenant database or product not found", + "schema": { + 
"$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to delete product", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/tasks": { + "get": { + "description": "Retrieve all tasks from the database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "tasks" + ], + "summary": "List all tasks", + "responses": { + "200": { + "description": "Tasks retrieved successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to retrieve tasks", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "post": { + "description": "Create a new task in the database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "tasks" + ], + "summary": "Create a new task", + "parameters": [ + { + "description": "Task to create", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.Task" + } + } + ], + "responses": { + "201": { + "description": "Task created successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid input", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to create task", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + }, + "/tasks/{id}": { + "put": { + "description": "Update an existing task by ID", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "tasks" + ], + "summary": "Update a task", + "parameters": [ + { + "type": "integer", + "description": "Task ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Task data to update", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/modules.Task" + } + } + ], + "responses": { + 
"200": { + "description": "Task updated successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "400": { + "description": "Invalid input", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "404": { + "description": "Task not found", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to update task", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + }, + "delete": { + "description": "Delete a task by ID", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "tasks" + ], + "summary": "Delete a task", + "parameters": [ + { + "type": "integer", + "description": "Task ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Task deleted successfully", + "schema": { + "$ref": "#/definitions/response.Response" + } + }, + "500": { + "description": "Failed to delete task", + "schema": { + "$ref": "#/definitions/response.Response" + } + } + } + } + } + }, + "definitions": { + "infrastructure.GrafanaAnnotation": { + "type": "object", + "properties": { + "dashboardId": { + "type": "integer" + }, + "data": { + "type": "object", + "additionalProperties": true + }, + "id": { + "type": "integer" + }, + "panelId": { + "type": "integer" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + } + }, + "text": { + "type": "string" + }, + "time": { + "type": "integer" + }, + "timeEnd": { + "type": "integer" + } + } + }, + "infrastructure.GrafanaAnnotations": { + "type": "object", + "properties": { + "list": { + "type": "array", + "items": {} + } + } + }, + "infrastructure.GrafanaDashboard": { + "type": "object", + "properties": { + "annotations": { + "$ref": "#/definitions/infrastructure.GrafanaAnnotations" + }, + "id": { + "type": "integer" + }, + "links": { + "type": "array", + "items": {} + }, + "panels": { + "type": "array", + "items": { + "$ref": 
"#/definitions/infrastructure.GrafanaPanel" + } + }, + "refresh": { + "type": "string" + }, + "schemaVersion": { + "type": "integer" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + } + }, + "templating": { + "$ref": "#/definitions/infrastructure.GrafanaTemplating" + }, + "time": { + "$ref": "#/definitions/infrastructure.GrafanaTimeRange" + }, + "timepicker": { + "$ref": "#/definitions/infrastructure.GrafanaTimePicker" + }, + "timezone": { + "type": "string" + }, + "title": { + "type": "string" + }, + "uid": { + "type": "string" + }, + "version": { + "type": "integer" + } + } + }, + "infrastructure.GrafanaDataSource": { + "type": "object", + "properties": { + "access": { + "type": "string" + }, + "basicAuth": { + "type": "boolean" + }, + "basicAuthPassword": { + "type": "string" + }, + "basicAuthUser": { + "type": "string" + }, + "database": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "jsonData": { + "type": "object", + "additionalProperties": true + }, + "name": { + "type": "string" + }, + "password": { + "type": "string" + }, + "readOnly": { + "type": "boolean" + }, + "secureJsonData": { + "type": "object", + "additionalProperties": true + }, + "type": { + "type": "string" + }, + "uid": { + "type": "string" + }, + "url": { + "type": "string" + }, + "user": { + "type": "string" + } + } + }, + "infrastructure.GrafanaDatasource": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "uid": { + "type": "string" + } + } + }, + "infrastructure.GrafanaFieldConfig": { + "type": "object", + "properties": { + "defaults": { + "$ref": "#/definitions/infrastructure.GrafanaFieldDefaults" + }, + "overrides": { + "type": "array", + "items": {} + } + } + }, + "infrastructure.GrafanaFieldDefaults": { + "type": "object", + "properties": { + "custom": { + "type": "object", + "additionalProperties": true + }, + "decimals": { + "type": "integer" + }, + "unit": { + "type": "string" + } + } + }, + 
"infrastructure.GrafanaGridPos": { + "type": "object", + "properties": { + "h": { + "type": "integer" + }, + "w": { + "type": "integer" + }, + "x": { + "type": "integer" + }, + "y": { + "type": "integer" + } + } + }, + "infrastructure.GrafanaPanel": { + "type": "object", + "properties": { + "fieldConfig": { + "$ref": "#/definitions/infrastructure.GrafanaFieldConfig" + }, + "gridPos": { + "$ref": "#/definitions/infrastructure.GrafanaGridPos" + }, + "id": { + "type": "integer" + }, + "options": { + "type": "object", + "additionalProperties": true + }, + "pluginVersion": { + "type": "string" + }, + "targets": { + "type": "array", + "items": { + "$ref": "#/definitions/infrastructure.GrafanaTarget" + } + }, + "title": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "infrastructure.GrafanaTarget": { + "type": "object", + "properties": { + "datasource": { + "$ref": "#/definitions/infrastructure.GrafanaDatasource" + }, + "expr": { + "type": "string" + }, + "legendFormat": { + "type": "string" + }, + "refId": { + "type": "string" + } + } + }, + "infrastructure.GrafanaTemplateVar": { + "type": "object", + "properties": { + "datasource": {}, + "label": { + "type": "string" + }, + "name": { + "type": "string" + }, + "query": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "infrastructure.GrafanaTemplating": { + "type": "object", + "properties": { + "list": { + "type": "array", + "items": { + "$ref": "#/definitions/infrastructure.GrafanaTemplateVar" + } + } + } + }, + "infrastructure.GrafanaTimePicker": { + "type": "object", + "properties": { + "refresh_intervals": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "infrastructure.GrafanaTimeRange": { + "type": "object", + "properties": { + "from": { + "type": "string" + }, + "to": { + "type": "string" + } + } + }, + "modules.CacheRequest": { + "type": "object", + "properties": { + "ttl_seconds": { + "description": "Optional", + "type": "integer" + }, + 
"value": { + "type": "string" + } + } + }, + "modules.DecryptRequest": { + "type": "object", + "required": [ + "encrypted_data" + ], + "properties": { + "content_type": { + "type": "string" + }, + "encrypted_data": { + "type": "string" + } + } + }, + "modules.DecryptResponse": { + "type": "object", + "properties": { + "algorithm": { + "type": "string" + }, + "content_type": { + "type": "string" + }, + "decrypted_data": { + "type": "string" + }, + "timestamp": { + "type": "integer" + } + } + }, + "modules.EncryptRequest": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "content_type": { + "description": "e.g., \"application/json\", \"text/plain\"", + "type": "string" + }, + "data": { + "type": "string" + } + } + }, + "modules.EncryptResponse": { + "type": "object", + "properties": { + "algorithm": { + "type": "string" + }, + "content_type": { + "type": "string" + }, + "encrypted_data": { + "type": "string" + }, + "timestamp": { + "type": "integer" + } + } + }, + "modules.KeyRotateRequest": { + "type": "object", + "required": [ + "new_key" + ], + "properties": { + "new_key": { + "type": "string", + "maxLength": 64, + "minLength": 16 + } + } + }, + "modules.MultiTenantOrder": { + "type": "object" + }, + "modules.Product": { + "type": "object", + "properties": { + "category": { + "type": "string" + }, + "description": { + "type": "string" + }, + "in_stock": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "price": { + "type": "number" + }, + "quantity": { + "type": "integer" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "modules.StatusResponse": { + "type": "object", + "properties": { + "algorithm": { + "type": "string" + }, + "current_key": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "key_length": { + "type": "integer" + }, + "last_rotation": { + "type": "integer" + }, + "rotate_keys": { + "type": "boolean" + } + } + }, + "modules.Task": { + "type": "object" + }, + 
"response.ErrorDetail": { + "type": "object", + "properties": { + "code": { + "type": "string" + }, + "details": { + "type": "object", + "additionalProperties": true + }, + "message": { + "type": "string" + } + } + }, + "response.Meta": { + "type": "object", + "properties": { + "extra": { + "type": "object", + "additionalProperties": true + }, + "page": { + "type": "integer" + }, + "per_page": { + "type": "integer" + }, + "total": { + "type": "integer" + }, + "total_pages": { + "type": "integer" + } + } + }, + "response.Response": { + "type": "object", + "properties": { + "correlation_id": { + "description": "Request ID for tracking", + "type": "string" + }, + "data": {}, + "datetime": { + "description": "ISO8601 Datetime", + "type": "string" + }, + "error": { + "$ref": "#/definitions/response.ErrorDetail" + }, + "message": { + "type": "string" + }, + "meta": { + "$ref": "#/definitions/response.Meta" + }, + "status": { + "description": "HTTP Status Code", + "type": "integer" + }, + "success": { + "type": "boolean" + }, + "timestamp": { + "description": "Unix Timestamp", + "type": "integer" + } + } + } + }, + "securityDefinitions": { + "ApiKeyAuth": { + "type": "apiKey", + "name": "Authorization", + "in": "header" + } + } +} \ No newline at end of file diff --git a/docs/swagger.yaml b/docs/swagger.yaml index cbf4c6c..18e8156 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -1,280 +1,1366 @@ -swagger: "2.0" +basePath: /api/v1 +definitions: + infrastructure.GrafanaAnnotation: + properties: + dashboardId: + type: integer + data: + additionalProperties: true + type: object + id: + type: integer + panelId: + type: integer + tags: + items: + type: string + type: array + text: + type: string + time: + type: integer + timeEnd: + type: integer + type: object + infrastructure.GrafanaAnnotations: + properties: + list: + items: {} + type: array + type: object + infrastructure.GrafanaDashboard: + properties: + annotations: + $ref: 
'#/definitions/infrastructure.GrafanaAnnotations' + id: + type: integer + links: + items: {} + type: array + panels: + items: + $ref: '#/definitions/infrastructure.GrafanaPanel' + type: array + refresh: + type: string + schemaVersion: + type: integer + tags: + items: + type: string + type: array + templating: + $ref: '#/definitions/infrastructure.GrafanaTemplating' + time: + $ref: '#/definitions/infrastructure.GrafanaTimeRange' + timepicker: + $ref: '#/definitions/infrastructure.GrafanaTimePicker' + timezone: + type: string + title: + type: string + uid: + type: string + version: + type: integer + type: object + infrastructure.GrafanaDataSource: + properties: + access: + type: string + basicAuth: + type: boolean + basicAuthPassword: + type: string + basicAuthUser: + type: string + database: + type: string + id: + type: integer + jsonData: + additionalProperties: true + type: object + name: + type: string + password: + type: string + readOnly: + type: boolean + secureJsonData: + additionalProperties: true + type: object + type: + type: string + uid: + type: string + url: + type: string + user: + type: string + type: object + infrastructure.GrafanaDatasource: + properties: + type: + type: string + uid: + type: string + type: object + infrastructure.GrafanaFieldConfig: + properties: + defaults: + $ref: '#/definitions/infrastructure.GrafanaFieldDefaults' + overrides: + items: {} + type: array + type: object + infrastructure.GrafanaFieldDefaults: + properties: + custom: + additionalProperties: true + type: object + decimals: + type: integer + unit: + type: string + type: object + infrastructure.GrafanaGridPos: + properties: + h: + type: integer + w: + type: integer + x: + type: integer + "y": + type: integer + type: object + infrastructure.GrafanaPanel: + properties: + fieldConfig: + $ref: '#/definitions/infrastructure.GrafanaFieldConfig' + gridPos: + $ref: '#/definitions/infrastructure.GrafanaGridPos' + id: + type: integer + options: + additionalProperties: true + 
type: object + pluginVersion: + type: string + targets: + items: + $ref: '#/definitions/infrastructure.GrafanaTarget' + type: array + title: + type: string + type: + type: string + type: object + infrastructure.GrafanaTarget: + properties: + datasource: + $ref: '#/definitions/infrastructure.GrafanaDatasource' + expr: + type: string + legendFormat: + type: string + refId: + type: string + type: object + infrastructure.GrafanaTemplateVar: + properties: + datasource: {} + label: + type: string + name: + type: string + query: + type: string + type: + type: string + type: object + infrastructure.GrafanaTemplating: + properties: + list: + items: + $ref: '#/definitions/infrastructure.GrafanaTemplateVar' + type: array + type: object + infrastructure.GrafanaTimePicker: + properties: + refresh_intervals: + items: + type: string + type: array + type: object + infrastructure.GrafanaTimeRange: + properties: + from: + type: string + to: + type: string + type: object + modules.CacheRequest: + properties: + ttl_seconds: + description: Optional + type: integer + value: + type: string + type: object + modules.DecryptRequest: + properties: + content_type: + type: string + encrypted_data: + type: string + required: + - encrypted_data + type: object + modules.DecryptResponse: + properties: + algorithm: + type: string + content_type: + type: string + decrypted_data: + type: string + timestamp: + type: integer + type: object + modules.EncryptRequest: + properties: + content_type: + description: e.g., "application/json", "text/plain" + type: string + data: + type: string + required: + - data + type: object + modules.EncryptResponse: + properties: + algorithm: + type: string + content_type: + type: string + encrypted_data: + type: string + timestamp: + type: integer + type: object + modules.KeyRotateRequest: + properties: + new_key: + maxLength: 64 + minLength: 16 + type: string + required: + - new_key + type: object + modules.MultiTenantOrder: + type: object + modules.Product: + 
properties: + category: + type: string + description: + type: string + in_stock: + type: boolean + name: + type: string + price: + type: number + quantity: + type: integer + tags: + items: + type: string + type: array + type: object + modules.StatusResponse: + properties: + algorithm: + type: string + current_key: + type: string + enabled: + type: boolean + key_length: + type: integer + last_rotation: + type: integer + rotate_keys: + type: boolean + type: object + modules.Task: + type: object + response.ErrorDetail: + properties: + code: + type: string + details: + additionalProperties: true + type: object + message: + type: string + type: object + response.Meta: + properties: + extra: + additionalProperties: true + type: object + page: + type: integer + per_page: + type: integer + total: + type: integer + total_pages: + type: integer + type: object + response.Response: + properties: + correlation_id: + description: Request ID for tracking + type: string + data: {} + datetime: + description: ISO8601 Datetime + type: string + error: + $ref: '#/definitions/response.ErrorDetail' + message: + type: string + meta: + $ref: '#/definitions/response.Meta' + status: + description: HTTP Status Code + type: integer + success: + type: boolean + timestamp: + description: Unix Timestamp + type: integer + type: object +host: localhost:8080 info: - description: "Stackyard API Documentation" - version: "1.0.0" - title: "Stackyard API" contact: - email: "admin@stackyard.com" + email: admin@stackyard.com + name: API Support + description: Stackyard API Documentation - A modular Go API framework license: - name: "Apache 2.0" - url: "http://www.apache.org/licenses/LICENSE-2.0.html" -host: "localhost:8080" -basePath: "/api/v1" -tags: - - name: "users" - description: "User management" - - name: "products" - description: "Product management" -schemes: - - "https" - - "http" + name: Apache 2.0 + url: http://www.apache.org/licenses/LICENSE-2.0.html + termsOfService: http://swagger.io/terms/ 
+ title: Stackyard API + version: "1.0" paths: - /users: + /cache/{key}: get: + consumes: + - application/json + description: Retrieve a cached value by its key + parameters: + - description: Cache key + in: path + name: key + required: true + type: string + produces: + - application/json + responses: + "200": + description: Success + schema: + $ref: '#/definitions/response.Response' + "404": + description: Key not found or expired + schema: + $ref: '#/definitions/response.Response' + summary: Get cached value by key tags: - - "users" - summary: "List users with pagination" - description: "Get a paginated list of users" + - cache + post: + consumes: + - application/json + description: Store a value in the cache with optional TTL + parameters: + - description: Cache key + in: path + name: key + required: true + type: string + - description: Cache request + in: body + name: request + required: true + schema: + $ref: '#/definitions/modules.CacheRequest' produces: - - "application/json" + - application/json + responses: + "200": + description: Cached successfully + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid body + schema: + $ref: '#/definitions/response.Response' + summary: Set cached value + tags: + - cache + /encryption/decrypt: + post: + consumes: + - application/json + description: Decrypt encrypted data using AES-256-GCM + parameters: + - description: Data to decrypt + in: body + name: request + required: true + schema: + $ref: '#/definitions/modules.DecryptRequest' + produces: + - application/json + responses: + "200": + description: Data decrypted successfully + schema: + allOf: + - $ref: '#/definitions/response.Response' + - properties: + data: + $ref: '#/definitions/modules.DecryptResponse' + type: object + "400": + description: Invalid request body or decryption failed + schema: + $ref: '#/definitions/response.Response' + summary: Decrypt data + tags: + - encryption + /encryption/encrypt: + post: + consumes: + - 
application/json + description: Encrypt plaintext data using AES-256-GCM parameters: - - name: "page" - in: "query" - description: "Page number" - type: "integer" - default: 1 - - name: "per_page" - in: "query" - description: "Items per page" - type: "integer" - default: 10 - responses: - 200: - description: "Success" - schema: - type: "object" - properties: - success: - type: "boolean" - status: - type: "integer" - message: - type: "string" - data: - type: "array" - items: - $ref: "#/definitions/User" - meta: - $ref: "#/definitions/Meta" - 400: - description: "Bad request" - schema: - $ref: "#/definitions/Response" + - description: Data to encrypt + in: body + name: request + required: true + schema: + $ref: '#/definitions/modules.EncryptRequest' + produces: + - application/json + responses: + "200": + description: Data encrypted successfully + schema: + allOf: + - $ref: '#/definitions/response.Response' + - properties: + data: + $ref: '#/definitions/modules.EncryptResponse' + type: object + "400": + description: Invalid request body + schema: + $ref: '#/definitions/response.Response' + "500": + description: Encryption failed + schema: + $ref: '#/definitions/response.Response' + summary: Encrypt data + tags: + - encryption + /encryption/key-rotate: post: + consumes: + - application/json + description: Rotate the encryption key with a new key + parameters: + - description: New encryption key + in: body + name: request + required: true + schema: + $ref: '#/definitions/modules.KeyRotateRequest' + produces: + - application/json + responses: + "200": + description: Key rotation successful + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid request body + schema: + $ref: '#/definitions/response.Response' + summary: Rotate encryption key tags: - - "users" - summary: "Create user" - description: "Create a new user" + - encryption + /encryption/status: + get: consumes: - - "application/json" + - application/json + description: Get the 
current status and configuration of the encryption service produces: - - "application/json" + - application/json + responses: + "200": + description: Encryption service status + schema: + allOf: + - $ref: '#/definitions/response.Response' + - properties: + data: + $ref: '#/definitions/modules.StatusResponse' + type: object + summary: Get encryption service status + tags: + - encryption + /events/stream/{stream_id}: + get: + consumes: + - application/json + description: Subscribe to Server-Sent Events (SSE) for a specific stream + parameters: + - description: Stream ID + in: path + name: stream_id + required: true + type: string + produces: + - text/event-stream + responses: + "200": + description: SSE stream + schema: + type: string + "404": + description: Stream not found + schema: + $ref: '#/definitions/response.Response' + summary: Stream events from a specific stream + tags: + - events + /grafana/annotations: + post: + consumes: + - application/json + description: Create a new Grafana annotation parameters: - - in: "body" - name: "body" - required: true - schema: - $ref: "#/definitions/CreateUserRequest" - responses: - 201: - description: "Created" - schema: - type: "object" - properties: - success: - type: "boolean" - status: - type: "integer" - message: - type: "string" - data: - $ref: "#/definitions/User" - 400: - description: "Bad request" - schema: - $ref: "#/definitions/Response" - 422: - description: "Validation error" - schema: - $ref: "#/definitions/Response" - /users/{id}: + - description: Annotation data + in: body + name: request + required: true + schema: + $ref: '#/definitions/infrastructure.GrafanaAnnotation' + produces: + - application/json + responses: + "201": + description: Annotation created successfully + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid annotation data + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to create annotation + schema: + $ref: 
'#/definitions/response.Response' + summary: Create Grafana annotation + tags: + - grafana + /grafana/dashboards: get: + consumes: + - application/json + description: List all Grafana dashboards with pagination + parameters: + - default: 1 + description: Page number + in: query + name: page + type: integer + - default: 50 + description: Items per page + in: query + name: per_page + type: integer + produces: + - application/json + responses: + "200": + description: Dashboards retrieved successfully + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to list dashboards + schema: + $ref: '#/definitions/response.Response' + summary: List Grafana dashboards tags: - - "users" - summary: "Get single user" - description: "Get a specific user by ID" + - grafana + post: + consumes: + - application/json + description: Create a new Grafana dashboard + parameters: + - description: Dashboard data + in: body + name: request + required: true + schema: + $ref: '#/definitions/infrastructure.GrafanaDashboard' produces: - - "application/json" + - application/json + responses: + "201": + description: Dashboard created successfully + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid dashboard data + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to create dashboard + schema: + $ref: '#/definitions/response.Response' + summary: Create Grafana dashboard + tags: + - grafana + /grafana/dashboards/{uid}: + delete: + consumes: + - application/json + description: Delete a Grafana dashboard by UID parameters: - - name: "id" - in: "path" - description: "User ID" - required: true - type: "string" - responses: - 200: - description: "Success" - schema: - type: "object" - properties: - success: - type: "boolean" - status: - type: "integer" - message: - type: "string" - data: - $ref: "#/definitions/User" - 404: - description: "Not found" - schema: - $ref: "#/definitions/Response" + - description: 
Dashboard UID + in: path + name: uid + required: true + type: string + produces: + - application/json + responses: + "200": + description: Dashboard deleted successfully + schema: + $ref: '#/definitions/response.Response' + "400": + description: Dashboard UID is required + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to delete dashboard + schema: + $ref: '#/definitions/response.Response' + summary: Delete Grafana dashboard + tags: + - grafana + get: + consumes: + - application/json + description: Retrieve a Grafana dashboard by UID + parameters: + - description: Dashboard UID + in: path + name: uid + required: true + type: string + produces: + - application/json + responses: + "200": + description: Dashboard retrieved successfully + schema: + $ref: '#/definitions/response.Response' + "400": + description: Dashboard UID is required + schema: + $ref: '#/definitions/response.Response' + "404": + description: Dashboard not found + schema: + $ref: '#/definitions/response.Response' + summary: Get Grafana dashboard + tags: + - grafana put: + consumes: + - application/json + description: Update an existing Grafana dashboard by UID + parameters: + - description: Dashboard UID + in: path + name: uid + required: true + type: string + - description: Dashboard data + in: body + name: request + required: true + schema: + $ref: '#/definitions/infrastructure.GrafanaDashboard' + produces: + - application/json + responses: + "200": + description: Dashboard updated successfully + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid dashboard data or missing UID + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to update dashboard + schema: + $ref: '#/definitions/response.Response' + summary: Update Grafana dashboard + tags: + - grafana + /grafana/datasources: + post: + consumes: + - application/json + description: Create a new Grafana data source + parameters: + - description: Data 
source configuration + in: body + name: request + required: true + schema: + $ref: '#/definitions/infrastructure.GrafanaDataSource' + produces: + - application/json + responses: + "201": + description: Data source created successfully + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid data source data + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to create data source + schema: + $ref: '#/definitions/response.Response' + summary: Create Grafana data source tags: - - "users" - summary: "Update user" - description: "Update an existing user" + - grafana + /grafana/health: + get: consumes: - - "application/json" + - application/json + description: Check Grafana service health produces: - - "application/json" + - application/json + responses: + "200": + description: Grafana health check successful + schema: + $ref: '#/definitions/response.Response' + "503": + description: Grafana is not available + schema: + $ref: '#/definitions/response.Response' + summary: Get Grafana health status + tags: + - grafana + /orders/{tenant}: + get: + consumes: + - application/json + description: Retrieve all orders from a specific tenant's database parameters: - - name: "id" - in: "path" - description: "User ID" - required: true - type: "string" - - in: "body" - name: "body" - required: true - schema: - $ref: "#/definitions/UpdateUserRequest" - responses: - 200: - description: "Success" - schema: - type: "object" - properties: - success: - type: "boolean" - status: - type: "integer" - message: - type: "string" - data: - $ref: "#/definitions/User" - 400: - description: "Bad request" - schema: - $ref: "#/definitions/Response" - 422: - description: "Validation error" - schema: - $ref: "#/definitions/Response" + - description: Tenant identifier + in: path + name: tenant + required: true + type: string + produces: + - application/json + responses: + "200": + description: Orders retrieved from tenant database + schema: + 
$ref: '#/definitions/response.Response' + "404": + description: Tenant database not found + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to query tenant database + schema: + $ref: '#/definitions/response.Response' + summary: List orders by tenant + tags: + - orders + post: + consumes: + - application/json + description: Create a new order in a specific tenant's database + parameters: + - description: Tenant identifier + in: path + name: tenant + required: true + type: string + - description: Order data + in: body + name: request + required: true + schema: + $ref: '#/definitions/modules.MultiTenantOrder' + produces: + - application/json + responses: + "201": + description: Order created in tenant database + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid order data + schema: + $ref: '#/definitions/response.Response' + "404": + description: Tenant database not found + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to create order + schema: + $ref: '#/definitions/response.Response' + summary: Create order in tenant database + tags: + - orders + /orders/{tenant}/{id}: delete: + consumes: + - application/json + description: Delete an order from a specific tenant's database + parameters: + - description: Tenant identifier + in: path + name: tenant + required: true + type: string + - description: Order ID + in: path + name: id + required: true + type: string + produces: + - application/json + responses: + "200": + description: Order deleted from tenant database + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid order ID + schema: + $ref: '#/definitions/response.Response' + "404": + description: Tenant database or order not found + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to delete order + schema: + $ref: '#/definitions/response.Response' + summary: Delete order from tenant database tags: - - 
"users" - summary: "Delete user" - description: "Delete a user by ID" + - orders + get: + consumes: + - application/json + description: Retrieve a specific order from a tenant's database + parameters: + - description: Tenant identifier + in: path + name: tenant + required: true + type: string + - description: Order ID + in: path + name: id + required: true + type: string produces: - - "application/json" + - application/json + responses: + "200": + description: Order retrieved from tenant database + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid order ID + schema: + $ref: '#/definitions/response.Response' + "404": + description: Tenant database or order not found + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to query tenant database + schema: + $ref: '#/definitions/response.Response' + summary: Get order by tenant + tags: + - orders + put: + consumes: + - application/json + description: Update an order in a specific tenant's database parameters: - - name: "id" - in: "path" - description: "User ID" - required: true - type: "string" - responses: - 204: - description: "No content" - 404: - description: "Not found" - schema: - $ref: "#/definitions/Response" + - description: Tenant identifier + in: path + name: tenant + required: true + type: string + - description: Order ID + in: path + name: id + required: true + type: string + - description: Order update data + in: body + name: request + required: true + schema: + $ref: '#/definitions/modules.MultiTenantOrder' + produces: + - application/json + responses: + "200": + description: Order updated in tenant database + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid order ID or update data + schema: + $ref: '#/definitions/response.Response' + "404": + description: Tenant database or order not found + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to update order + schema: + $ref: 
'#/definitions/response.Response' + summary: Update order in tenant database + tags: + - orders /products: get: + consumes: + - application/json + description: Get a list of products + produces: + - application/json + responses: + "200": + description: Success + schema: + $ref: '#/definitions/response.Response' + summary: Get products tags: - - "products" - summary: "Get products" - description: "Get a list of products" + - products + /products/{tenant}: + get: + consumes: + - application/json + description: Retrieve all products from a specific tenant's database + parameters: + - description: Tenant identifier + in: path + name: tenant + required: true + type: string produces: - - "application/json" + - application/json responses: - 200: - description: "Success" + "200": + description: Products retrieved from tenant database schema: - $ref: "#/definitions/Response" -definitions: - User: - type: "object" - properties: - id: - type: "string" - username: - type: "string" - email: - type: "string" - status: - type: "string" - created_at: - type: "integer" - CreateUserRequest: - type: "object" - required: - - "username" - - "email" - - "full_name" - properties: - username: - type: "string" - email: - type: "string" - full_name: - type: "string" - UpdateUserRequest: - type: "object" - properties: - username: - type: "string" - email: - type: "string" - full_name: - type: "string" - status: - type: "string" - enum: - - "active" - - "inactive" - - "suspended" - Response: - type: "object" - properties: - success: - type: "boolean" - status: - type: "integer" - message: - type: "string" - data: - type: "object" - error: - type: "object" - meta: - $ref: "#/definitions/Meta" - timestamp: - type: "integer" - datetime: - type: "string" - correlation_id: - type: "string" - Meta: - type: "object" - properties: - page: - type: "integer" - per_page: - type: "integer" - total: - type: "integer" - total_pages: - type: "integer" \ No newline at end of file + $ref: 
'#/definitions/response.Response' + "404": + description: Tenant database not found + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to query tenant database + schema: + $ref: '#/definitions/response.Response' + summary: List products by tenant + tags: + - products + post: + consumes: + - application/json + description: Create a new product in a specific tenant's database + parameters: + - description: Tenant identifier + in: path + name: tenant + required: true + type: string + - description: Product data + in: body + name: request + required: true + schema: + $ref: '#/definitions/modules.Product' + produces: + - application/json + responses: + "201": + description: Product created in tenant database + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid product data + schema: + $ref: '#/definitions/response.Response' + "404": + description: Tenant database not found + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to create product + schema: + $ref: '#/definitions/response.Response' + summary: Create product in tenant database + tags: + - products + /products/{tenant}/{id}: + delete: + consumes: + - application/json + description: Delete a product from a specific tenant's database + parameters: + - description: Tenant identifier + in: path + name: tenant + required: true + type: string + - description: Product ID + in: path + name: id + required: true + type: string + produces: + - application/json + responses: + "200": + description: Product deleted from tenant database + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid product ID format + schema: + $ref: '#/definitions/response.Response' + "404": + description: Tenant database or product not found + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to delete product + schema: + $ref: '#/definitions/response.Response' + summary: Delete product from 
tenant database + tags: + - products + get: + consumes: + - application/json + description: Retrieve a specific product from a tenant's database + parameters: + - description: Tenant identifier + in: path + name: tenant + required: true + type: string + - description: Product ID + in: path + name: id + required: true + type: string + produces: + - application/json + responses: + "200": + description: Product retrieved from tenant database + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid product ID format + schema: + $ref: '#/definitions/response.Response' + "404": + description: Tenant database or product not found + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to query tenant database + schema: + $ref: '#/definitions/response.Response' + summary: Get product by tenant + tags: + - products + put: + consumes: + - application/json + description: Update a product in a specific tenant's database + parameters: + - description: Tenant identifier + in: path + name: tenant + required: true + type: string + - description: Product ID + in: path + name: id + required: true + type: string + - description: Product update data + in: body + name: request + required: true + schema: + additionalProperties: true + type: object + produces: + - application/json + responses: + "200": + description: Product updated in tenant database + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid product ID format or update data + schema: + $ref: '#/definitions/response.Response' + "404": + description: Tenant database or product not found + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to update product + schema: + $ref: '#/definitions/response.Response' + summary: Update product in tenant database + tags: + - products + /products/{tenant}/analytics: + get: + consumes: + - application/json + description: Get analytics for products in a tenant's database + 
parameters: + - description: Tenant identifier + in: path + name: tenant + required: true + type: string + produces: + - application/json + responses: + "200": + description: Product analytics for tenant database + schema: + $ref: '#/definitions/response.Response' + "404": + description: Tenant database not found + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to aggregate product analytics + schema: + $ref: '#/definitions/response.Response' + summary: Get product analytics + tags: + - products + /products/{tenant}/search: + get: + consumes: + - application/json + description: Search products with various filters in a tenant's database + parameters: + - description: Tenant identifier + in: path + name: tenant + required: true + type: string + - description: Search by product name + in: query + name: name + type: string + - description: Filter by category + in: query + name: category + type: string + - description: Filter by stock availability + in: query + name: in_stock + type: boolean + - description: Minimum price filter + in: query + name: min_price + type: number + - description: Maximum price filter + in: query + name: max_price + type: number + - description: Filter by tags (comma-separated) + in: query + name: tags + type: string + produces: + - application/json + responses: + "200": + description: Products found + schema: + $ref: '#/definitions/response.Response' + "404": + description: Tenant database not found + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to search products + schema: + $ref: '#/definitions/response.Response' + summary: Search products in tenant database + tags: + - products + /tasks: + get: + consumes: + - application/json + description: Retrieve all tasks from the database + produces: + - application/json + responses: + "200": + description: Tasks retrieved successfully + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to retrieve 
tasks + schema: + $ref: '#/definitions/response.Response' + summary: List all tasks + tags: + - tasks + post: + consumes: + - application/json + description: Create a new task in the database + parameters: + - description: Task to create + in: body + name: request + required: true + schema: + $ref: '#/definitions/modules.Task' + produces: + - application/json + responses: + "201": + description: Task created successfully + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid input + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to create task + schema: + $ref: '#/definitions/response.Response' + summary: Create a new task + tags: + - tasks + /tasks/{id}: + delete: + consumes: + - application/json + description: Delete a task by ID + parameters: + - description: Task ID + in: path + name: id + required: true + type: integer + produces: + - application/json + responses: + "200": + description: Task deleted successfully + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to delete task + schema: + $ref: '#/definitions/response.Response' + summary: Delete a task + tags: + - tasks + put: + consumes: + - application/json + description: Update an existing task by ID + parameters: + - description: Task ID + in: path + name: id + required: true + type: integer + - description: Task data to update + in: body + name: request + required: true + schema: + $ref: '#/definitions/modules.Task' + produces: + - application/json + responses: + "200": + description: Task updated successfully + schema: + $ref: '#/definitions/response.Response' + "400": + description: Invalid input + schema: + $ref: '#/definitions/response.Response' + "404": + description: Task not found + schema: + $ref: '#/definitions/response.Response' + "500": + description: Failed to update task + schema: + $ref: '#/definitions/response.Response' + summary: Update a task + tags: + - tasks +securityDefinitions: + 
ApiKeyAuth: + in: header + name: Authorization + type: apiKey +swagger: "2.0" diff --git a/docs_wiki/API_DOCS.md b/docs_wiki/API_DOCS.md new file mode 100644 index 0000000..0aa38db --- /dev/null +++ b/docs_wiki/API_DOCS.md @@ -0,0 +1,1120 @@ +# API Documentation Guide + +This guide covers how to automatically generate and maintain Swagger/OpenAPI documentation for Stackyard services using swaggo/swag. No manual YAML editing required—documentation is generated directly from your Go code annotations. + +## Overview + +Stackyard uses **swaggo/swag** to automatically generate Swagger documentation from code annotations. This approach ensures your API documentation stays synchronized with your actual code implementation. + +### Benefits + +- **Single Source of Truth**: Documentation lives alongside your code +- **Auto-Generation**: No manual YAML maintenance required +- **Always Up-to-Date**: Docs update when you update your code +- **Interactive UI**: Built-in Swagger UI for testing endpoints +- **Type Safety**: Leverages Go's type system for accurate documentation + +## Installation + +### Step 1: Install Swag CLI Tool + +The swag CLI tool scans your Go code and generates documentation files: + +```bash +# Install globally +go install github.com/swaggo/swag/cmd/swag@latest + +# Verify installation +swag --version +``` + +### Step 2: Add Echo-Swagger Dependency + +Add the echo-swagger middleware to serve the Swagger UI: + +```bash +go get github.com/swaggo/echo-swagger +``` + +### Step 3: Verify Installation + +```bash +# Check swag is in your PATH +which swag + +# Test in your project +cd /path/to/stackyard +swag init --help +``` + +## API-Level Documentation + +### Main API Info + +Create API-level metadata that appears at the top of your Swagger documentation. 
Add this to your `main.go` or create a dedicated `docs/docs.go` file: + +```go +package main + +// @title Stackyard API +// @version 1.0 +// @description Stackyard API Documentation - A modular Go API framework +// @termsOfService http://swagger.io/terms/ + +// @contact.name API Support +// @contact.email admin@stackyard.com + +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html + +// @host localhost:8080 +// @BasePath /api/v1 + +// @securityDefinitions.apikey ApiKeyAuth +// @in header +// @name Authorization + +func main() { + // Your application code +} +``` + +### Annotation Reference + +| Annotation | Description | Example | +|------------|-------------|---------| +| `@title` | API title | `@title Stackyard API` | +| `@version` | API version | `@version 1.0` | +| `@description` | API description | `@description Stackyard API Documentation` | +| `@host` | Server host | `@host localhost:8080` | +| `@BasePath` | Base path for all endpoints | `@BasePath /api/v1` | +| `@contact.name` | Contact name | `@contact.name API Support` | +| `@contact.email` | Contact email | `@contact.email admin@stackyard.com` | +| `@license.name` | License name | `@license.name Apache 2.0` | +| `@license.url` | License URL | `@license.url http://www.apache.org/licenses/LICENSE-2.0.html` | + +## Endpoint Annotations + +### Basic Annotations + +Every endpoint handler should have annotations describing its behavior: + +```go +// GetUsers godoc +// @Summary List users with pagination +// @Description Get a paginated list of users +// @Tags users +// @Accept json +// @Produce json +// @Param page query int false "Page number" default(1) +// @Param per_page query int false "Items per page" default(10) +// @Success 200 {object} response.Response{data=[]User} "Success" +// @Failure 400 {object} response.Response "Bad request" +// @Router /users [get] +func (s *UsersService) GetUsers(c echo.Context) error { + // Handler implementation +} +``` + +### 
Annotation Details + +**@Summary** +- Brief description of what the endpoint does +- Appears as the endpoint title in Swagger UI +- Keep it concise (1 line) + +```go +// @Summary List users with pagination +``` + +**@Description** +- Detailed explanation of the endpoint +- Can span multiple lines +- Explain business logic, constraints, or special behavior + +```go +// @Description Get a paginated list of users with optional filtering. +// @Description Supports search by name and email. +// @Description Results are sorted by creation date descending. +``` + +**@Tags** +- Groups related endpoints together +- Used for organizing documentation by feature/domain +- Multiple tags can be specified + +```go +// @Tags users +// @Tags admin +``` + +**@Accept** and **@Produce** +- Specify content types the endpoint accepts and returns +- Common values: `json`, `xml`, `plain`, `multipart/form-data` + +```go +// @Accept json +// @Produce json +``` + +**@Router** +- Defines the URL path and HTTP method +- Path parameters use `{param}` syntax +- Must match the actual route registration + +```go +// @Router /users [get] +// @Router /users/{id} [get] +// @Router /users [post] +// @Router /users/{id} [put] +// @Router /users/{id} [delete] +``` + +### Request Parameters + +**Query Parameters** + +```go +// @Param page query int false "Page number" default(1) +// @Param per_page query int false "Items per page" default(10) +// @Param search query string false "Search term" +// @Param status query string false "Filter by status" Enums(active,inactive,suspended) +``` + +Format: `@Param name location type required "description" [options]` + +- `name`: Parameter name +- `location`: `query`, `path`, `header`, `formData`, `body` +- `type`: `string`, `int`, `bool`, `float`, or struct name +- `required`: `true` or `false` +- `description`: Human-readable description +- `options`: `default(value)`, `Enums(a,b,c)`, `minimum`, `maximum` + +**Path Parameters** + +```go +// @Param id path string 
true "User ID" +// @Param tenant path string true "Tenant identifier" +``` + +**Body Parameters** + +```go +// @Param request body CreateUserRequest true "Create user request" +``` + +### Response Annotations + +**Success Responses** + +```go +// @Success 200 {object} User "User retrieved successfully" +// @Success 200 {object} response.Response{data=User} "Success" +// @Success 200 {object} response.Response{data=[]User} "List of users" +// @Success 201 {object} response.Response{data=User} "Created" +// @Success 204 "No content" +``` + +**Error Responses** + +```go +// @Failure 400 {object} response.Response "Bad request" +// @Failure 401 {object} response.Response "Unauthorized" +// @Failure 403 {object} response.Response "Forbidden" +// @Failure 404 {object} response.Response "Not found" +// @Failure 422 {object} response.Response "Validation error" +// @Failure 500 {object} response.Response "Internal server error" +``` + +## Model Documentation + +### Documenting Structs + +Add comments to your struct definitions to document them in Swagger: + +```go +// User represents a user in the system +type User struct { + ID string `json:"id" example:"usr_123" description:"Unique user identifier"` + Username string `json:"username" example:"john_doe" description:"User's login name"` + Email string `json:"email" example:"john@example.com" description:"User's email address"` + Status string `json:"status" example:"active" description:"Account status" enums:"active,inactive,suspended"` + CreatedAt int64 `json:"created_at" example:"1640995200" description:"Unix timestamp of account creation"` +} +``` + +### Struct Tags for Swagger + +| Tag | Description | Example | +|-----|-------------|---------| +| `json` | JSON field name | `json:"username"` | +| `example` | Example value | `example:"john_doe"` | +| `description` | Field description | `description:"User's login name"` | +| `enums` | Allowed values | `enums:"active,inactive"` | +| `default` | Default value | 
`default:"active"` | +| `binding` | Validation rules | `binding:"required"` | +| `minimum` | Minimum value | `minimum:"0"` | +| `maximum` | Maximum value | `maximum:"100"` | +| `minlength` | Minimum length | `minlength:"3"` | +| `maxlength` | Maximum length | `maxlength:"50"` | + +### Request/Response Structs + +Document your request and response structs: + +```go +// CreateUserRequest represents the request body for creating a new user +type CreateUserRequest struct { + Username string `json:"username" validate:"required,username" example:"john_doe" description:"Unique username (3-20 alphanumeric characters)"` + Email string `json:"email" validate:"required,email" example:"john@example.com" description:"Valid email address"` + FullName string `json:"full_name" validate:"required,min=3,max=100" example:"John Doe" description:"User's full name"` +} + +// UpdateUserRequest represents the request body for updating an existing user +type UpdateUserRequest struct { + Username string `json:"username" validate:"omitempty,username" example:"john_updated" description:"New username"` + Email string `json:"email" validate:"omitempty,email" example:"john.new@example.com" description:"New email address"` + FullName string `json:"full_name" validate:"omitempty,min=3,max=100" example:"John Updated Doe" description:"New full name"` + Status string `json:"status" validate:"omitempty,oneof=active inactive suspended" example:"active" description:"Account status" enums:"active,inactive,suspended"` +} +``` + +### Response Wrapper Structs + +Document the standard response format: + +```go +// Response represents the standard API response structure +type Response struct { + Success bool `json:"success" example:"true" description:"Indicates if the request was successful"` + Status int `json:"status" example:"200" description:"HTTP status code"` + Message string `json:"message" description:"Human-readable response message"` + Data interface{} `json:"data,omitempty" description:"Response 
data payload"` + Error interface{} `json:"error,omitempty" description:"Error details if request failed"` + Meta *Meta `json:"meta,omitempty" description:"Pagination metadata"` + Timestamp int64 `json:"timestamp" example:"1640995200" description:"Unix timestamp of response"` + Datetime string `json:"datetime" example:"2022-01-01T00:00:00Z" description:"ISO 8601 datetime of response"` + CorrelationID string `json:"correlation_id" example:"550e8400-e29b-41d4-a716-446655440000" description:"Request correlation ID for tracing"` +} + +// Meta represents pagination metadata +type Meta struct { + Page int `json:"page" example:"1" description:"Current page number"` + PerPage int `json:"per_page" example:"10" description:"Items per page"` + Total int64 `json:"total" example:"100" description:"Total number of items"` + TotalPages int `json:"total_pages" example:"10" description:"Total number of pages"` +} +``` + +## Complete Service Examples + +### Users Service (Complete Example) + +```go +package modules + +import ( + "stackyard/config" + "stackyard/pkg/interfaces" + "stackyard/pkg/logger" + "stackyard/pkg/registry" + "stackyard/pkg/request" + "stackyard/pkg/response" + "time" + + "github.com/labstack/echo/v4" +) + +type UsersService struct { + enabled bool +} + +func NewUsersService(enabled bool) *UsersService { + return &UsersService{enabled: enabled} +} + +func (s *UsersService) Name() string { return "Users Service" } +func (s *UsersService) WireName() string { return "users-service" } +func (s *UsersService) Enabled() bool { return s.enabled } +func (s *UsersService) Endpoints() []string { return []string{"/users", "/users/:id"} } +func (s *UsersService) Get() interface{} { return s } + +func (s *UsersService) RegisterRoutes(g *echo.Group) { + sub := g.Group("/users") + + sub.GET("", s.GetUsers) + sub.GET("/:id", s.GetUser) + sub.POST("", s.CreateUser) + sub.PUT("/:id", s.UpdateUser) + sub.DELETE("/:id", s.DeleteUser) +} + +// User represents a user in the system 
+type User struct { + ID string `json:"id" example:"usr_123" description:"Unique user identifier"` + Username string `json:"username" example:"john_doe" description:"User's login name"` + Email string `json:"email" example:"john@example.com" description:"User's email address"` + Status string `json:"status" example:"active" description:"Account status"` + CreatedAt int64 `json:"created_at" example:"1640995200" description:"Unix timestamp of account creation"` +} + +// CreateUserRequest represents the request body for creating a new user +type CreateUserRequest struct { + Username string `json:"username" validate:"required,username" example:"john_doe" description:"Unique username"` + Email string `json:"email" validate:"required,email" example:"john@example.com" description:"Valid email address"` + FullName string `json:"full_name" validate:"required,min=3,max=100" example:"John Doe" description:"User's full name"` +} + +// UpdateUserRequest represents the request body for updating a user +type UpdateUserRequest struct { + Username string `json:"username" validate:"omitempty,username" example:"john_updated" description:"New username"` + Email string `json:"email" validate:"omitempty,email" example:"john.new@example.com" description:"New email"` + FullName string `json:"full_name" validate:"omitempty,min=3,max=100" example:"John Updated Doe" description:"New full name"` + Status string `json:"status" validate:"omitempty,oneof=active inactive suspended" example:"active" description:"Account status"` +} + +// GetUsers godoc +// @Summary List users with pagination +// @Description Get a paginated list of users +// @Tags users +// @Accept json +// @Produce json +// @Param page query int false "Page number" default(1) +// @Param per_page query int false "Items per page" default(10) +// @Success 200 {object} response.Response{data=[]User} "Success" +// @Failure 400 {object} response.Response "Bad request" +// @Router /users [get] +func (s *UsersService) GetUsers(c 
echo.Context) error { + var pagination response.PaginationRequest + if err := c.Bind(&pagination); err != nil { + return response.BadRequest(c, "Invalid pagination parameters") + } + + users := []User{ + {ID: "1", Username: "john_doe", Email: "john@example.com", Status: "active", CreatedAt: time.Now().Unix()}, + {ID: "2", Username: "jane_smith", Email: "jane@example.com", Status: "active", CreatedAt: time.Now().Unix()}, + } + + total := int64(len(users)) + meta := response.CalculateMeta(pagination.GetPage(), pagination.GetPerPage(), total) + return response.SuccessWithMeta(c, users, meta, "Users retrieved successfully") +} + +// GetUser godoc +// @Summary Get single user +// @Description Get a specific user by ID +// @Tags users +// @Accept json +// @Produce json +// @Param id path string true "User ID" +// @Success 200 {object} response.Response{data=User} "Success" +// @Failure 404 {object} response.Response "Not found" +// @Router /users/{id} [get] +func (s *UsersService) GetUser(c echo.Context) error { + id := c.Param("id") + user := User{ + ID: id, + Username: "john_doe", + Email: "john@example.com", + Status: "active", + CreatedAt: time.Now().Unix(), + } + + if id == "999" { + return response.NotFound(c, "User not found") + } + + return response.Success(c, user, "User retrieved successfully") +} + +// CreateUser godoc +// @Summary Create user +// @Description Create a new user +// @Tags users +// @Accept json +// @Produce json +// @Param request body CreateUserRequest true "Create user request" +// @Success 201 {object} response.Response{data=User} "Created" +// @Failure 400 {object} response.Response "Bad request" +// @Failure 422 {object} response.Response "Validation error" +// @Router /users [post] +func (s *UsersService) CreateUser(c echo.Context) error { + var req CreateUserRequest + + if err := request.Bind(c, &req); err != nil { + if validationErr, ok := err.(*request.ValidationError); ok { + return response.ValidationError(c, "Validation failed", 
validationErr.GetFieldErrors()) + } + return response.BadRequest(c, err.Error()) + } + + user := User{ + ID: "123", + Username: req.Username, + Email: req.Email, + Status: "active", + CreatedAt: time.Now().Unix(), + } + + return response.Created(c, user, "User created successfully") +} + +// UpdateUser godoc +// @Summary Update user +// @Description Update an existing user +// @Tags users +// @Accept json +// @Produce json +// @Param id path string true "User ID" +// @Param request body UpdateUserRequest true "Update user request" +// @Success 200 {object} response.Response{data=User} "Success" +// @Failure 400 {object} response.Response "Bad request" +// @Failure 422 {object} response.Response "Validation error" +// @Router /users/{id} [put] +func (s *UsersService) UpdateUser(c echo.Context) error { + id := c.Param("id") + + var req UpdateUserRequest + if err := request.Bind(c, &req); err != nil { + if validationErr, ok := err.(*request.ValidationError); ok { + return response.ValidationError(c, "Validation failed", validationErr.GetFieldErrors()) + } + return response.BadRequest(c, err.Error()) + } + + user := User{ + ID: id, + Username: req.Username, + Email: req.Email, + Status: req.Status, + CreatedAt: time.Now().Unix(), + } + + return response.Success(c, user, "User updated successfully") +} + +// DeleteUser godoc +// @Summary Delete user +// @Description Delete a user by ID +// @Tags users +// @Accept json +// @Produce json +// @Param id path string true "User ID" +// @Success 204 "No content" +// @Failure 404 {object} response.Response "Not found" +// @Router /users/{id} [delete] +func (s *UsersService) DeleteUser(c echo.Context) error { + id := c.Param("id") + + if id == "999" { + return response.NotFound(c, "User not found") + } + + return response.NoContent(c) +} + +func init() { + registry.RegisterService("users_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + return 
NewUsersService(config.Services.IsEnabled("users_service")) + }) +} +``` + +### Products Service (Simplified Example) + +```go +package modules + +import ( + "stackyard/config" + "stackyard/pkg/interfaces" + "stackyard/pkg/logger" + "stackyard/pkg/registry" + "stackyard/pkg/response" + + "github.com/labstack/echo/v4" +) + +const SERVICE_NAME = "products-service" + +type ProductsService struct { + enabled bool +} + +func NewProductsService(enabled bool) *ProductsService { + return &ProductsService{enabled: enabled} +} + +func (s *ProductsService) Name() string { return "Products Service" } +func (s *ProductsService) WireName() string { return SERVICE_NAME } +func (s *ProductsService) Enabled() bool { return s.enabled } +func (s *ProductsService) Endpoints() []string { return []string{"/products"} } +func (s *ProductsService) Get() interface{} { return s } + +// GetProducts godoc +// @Summary Get products +// @Description Get a list of products +// @Tags products +// @Accept json +// @Produce json +// @Success 200 {object} response.Response "Success" +// @Router /products [get] +func (s *ProductsService) RegisterRoutes(g *echo.Group) { + sub := g.Group("/products") + sub.GET("", func(c echo.Context) error { + return response.Success(c, map[string]string{"message": "Hello from Products Service"}) + }) +} + +func init() { + registry.RegisterService(SERVICE_NAME, func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + return NewProductsService(config.Services.IsEnabled(SERVICE_NAME)) + }) +} +``` + +### Tasks Service (Database Example) + +```go +// Tasks godoc +// @Summary List all tasks +// @Description Retrieve all tasks from the database +// @Tags tasks +// @Accept json +// @Produce json +// @Success 200 {object} response.Response{data=[]Task} "Success" +// @Failure 500 {object} response.Response "Internal server error" +// @Router /tasks [get] +func (s *TasksService) listTasks(c echo.Context) error { + var tasks []Task 
+ result := s.db.GORMFindAsync(context.Background(), &tasks)
+ _, err := result.Wait()
+ if err != nil {
+ return response.InternalServerError(c, err.Error())
+ }
+ return response.Success(c, tasks)
+}
+
+// CreateTask godoc
+// @Summary Create a new task
+// @Description Create a new task in the database
+// @Tags tasks
+// @Accept json
+// @Produce json
+// @Param request body Task true "Task to create"
+// @Success 201 {object} response.Response{data=Task} "Created"
+// @Failure 400 {object} response.Response "Bad request"
+// @Failure 500 {object} response.Response "Internal server error"
+// @Router /tasks [post]
+func (s *TasksService) createTask(c echo.Context) error {
+ task := new(Task)
+ if err := c.Bind(task); err != nil {
+ return response.BadRequest(c, "Invalid input")
+ }
+ result := s.db.GORMCreateAsync(context.Background(), task)
+ _, err := result.Wait()
+ if err != nil {
+ return response.InternalServerError(c, err.Error())
+ }
+ return response.Created(c, task)
+}
+```
+
+## Advanced Features
+
+### Authentication Annotations
+
+Document endpoints that require authentication:
+
+```go
+// @Security ApiKeyAuth
+// @Security BearerAuth
+// @Summary Get protected resource
+// @Description This endpoint requires authentication
+// @Tags protected
+// @Accept json
+// @Produce json
+// @Success 200 {object} response.Response "Success"
+// @Failure 401 {object} response.Response "Unauthorized"
+// @Router /protected/resource [get]
+```
+
+Define security schemes in your main API info:
+
+```go
+// @securityDefinitions.apikey ApiKeyAuth
+// @in header
+// @name Authorization
+
+// @securityDefinitions.apikey BearerAuth
+// @in header
+// @name Authorization
+```
+
+### Enum Values
+
+Document allowed values for parameters:
+
+```go
+// @Param status query string false "Filter by status" Enums(active,inactive,suspended)
+// @Param sort query string false "Sort field" Enums(name,created_at,updated_at)
+// @Param order query string false "Sort order" 
Enums(asc,desc) default(desc) +``` + +### Default Values + +Specify default values for optional parameters: + +```go +// @Param page query int false "Page number" default(1) +// @Param per_page query int false "Items per page" default(10) +// @Param order query string false "Sort order" default(desc) +``` + +### File Uploads + +Document file upload endpoints: + +```go +// UploadFile godoc +// @Summary Upload a file +// @Description Upload a file to the server +// @Tags files +// @Accept multipart/form-data +// @Produce json +// @Param file formData file true "File to upload" +// @Param description formData string false "File description" +// @Success 201 {object} response.Response "File uploaded" +// @Failure 400 {object} response.Response "Bad request" +// @Router /files/upload [post] +func (s *FileService) uploadFile(c echo.Context) error { + // Implementation +} +``` + +### Multiple Response Types + +Document different response scenarios: + +```go +// @Success 200 {object} User "User found" +// @Success 200 {object} UserList "Multiple users" +// @Failure 400 {object} response.Response "Bad request" +// @Failure 401 {object} response.Response "Unauthorized" +// @Failure 403 {object} response.Response "Forbidden" +// @Failure 404 {object} response.Response "Not found" +// @Failure 500 {object} response.Response "Internal server error" +``` + +## Generating Documentation + +### Stackyard Swagger Generator (Recommended) + +Stackyard includes a custom swagger generator script that follows the same pattern as the build system. This script provides detailed analysis of exposed endpoints before generation. 
+ +**One-Line Command:** + +```bash +# Generate swagger documentation with detailed analysis +go run scripts/swagger/swagger.go + +# Dry run (analysis only, no generation) +go run scripts/swagger/swagger.go --dry-run + +# Verbose output +go run scripts/swagger/swagger.go --verbose +``` + +**Features:** + +- **API Analysis**: Scans all service files and discovers endpoints, methods, and annotations +- **Detailed Reporting**: Shows service count, endpoint count, and struct count +- **Endpoint Discovery**: Lists all exposed API endpoints with methods and paths +- **Annotation Detection**: Identifies which services have swagger annotations +- **Automatic Installation**: Installs swag CLI if not found +- **User Confirmation**: Asks for confirmation before generation +- **Output Verification**: Verifies generated files exist + +**Example Output:** + +``` + /\ + ( ) Swagger Generator for Stackyard + \/ +---------------------------------------------------------------------- +[1/6] Finding project root +[2/6] Checking swag CLI +[3/6] Analyzing API endpoints + + SWAGGER ANALYSIS RESULTS + +Broadcast Service + File: broadcast_service.go + Annotations: Found + Endpoints: 1 + • get /events/stream/{stream_id} + Stream events from a specific stream + +Cache Service + File: cache_service.go + Annotations: Found + Endpoints: 2 + • get /cache/{key} + Get cached value by key + • post /cache/{key} + Set cached value + +Encryption Service + File: encryption_service.go + Annotations: Found + Endpoints: 4 + • post /encryption/encrypt + Encrypt data + • post /encryption/decrypt + Decrypt data + • get /encryption/status + Get encryption service status + • post /encryption/key-rotate + Rotate encryption key + + Total Services: 9 + Services with Annotations: 9 + Total Endpoints: 37 + Total Structs: 24 + +[4/6] Asking for confirmation +[5/6] Generating swagger docs +[6/6] Verifying output + + SUCCESS! 
Swagger docs at: /path/to/project/docs + + Generated files: + • docs/docs.go + • docs/swagger.json + • docs/swagger.yaml +``` + +**Script Location:** `scripts/swagger/swagger.go` + +**Command Line Options:** +- `--dry-run`: Only analyze, don't generate documentation +- `--verbose`: Enable verbose logging output +- `--help`: Show help information + +### Basic Generation + +Run the swag init command from your project root: + +```bash +# Generate documentation from current directory +swag init + +# Generate from specific directory +swag init -g cmd/app/main.go + +# Specify output directory +swag init -o docs + +# Generate with specific general API info +swag init -g cmd/app/main.go -o docs +``` + +### Output Files + +Swag generates three files: + +``` +docs/ +├── docs.go # Go file containing embedded docs +├── swagger.json # OpenAPI 2.0 specification (JSON) +└── swagger.yaml # OpenAPI 2.0 specification (YAML) +``` + +### Generation Options + +```bash +# Generate with specific output format +swag init --outputTypes go,json,yaml + +# Generate only JSON +swag init --outputTypes json + +# Generate with custom package name +swag init --packageName docs + +# Parse specific directories +swag init --parseDependency --parseInternal + +# Exclude vendor directory +swag init --exclude ./vendor + +# Include markdown files +swag init --markdownFiles docs + +# Enable verbose output +swag init --v +``` + +### Automatic Generation on Build + +Add to your Makefile: + +```makefile +.PHONY: docs +docs: + swag init -g cmd/app/main.go -o docs + +.PHONY: build +build: docs + go build -o app cmd/app/main.go +``` + +Or create a build script: + +```bash +#!/bin/bash +# scripts/build.sh + +echo "Generating Swagger documentation..." +swag init -g cmd/app/main.go -o docs + +echo "Building application..." +go build -ldflags="-w -s" -o app cmd/app/main.go + +echo "Build complete!" +``` + +## Serving Swagger UI + +### Add Swagger Route + +Add the echo-swagger middleware to serve the Swagger UI. 
In your server setup: + +```go +package server + +import ( + "github.com/labstack/echo/v4" + echoSwagger "github.com/swaggo/echo-swagger" + + // Import generated docs + _ "stackyard/docs" +) + +func (s *Server) setupSwagger() { + // Serve Swagger UI at /swagger/* + s.echo.GET("/swagger/*", echoSwagger.EchoWrapHandler()) +} +``` + +### Custom Swagger UI Options + +```go +import ( + "github.com/swaggo/echo-swagger" + swaggerFiles "github.com/swaggo/files" +) + +func (s *Server) setupSwagger() { + // Custom configuration + config := &echoSwagger.Config{ + URL: "/swagger/doc.json", + DocExpansion: "list", + DeepLinking: true, + DefaultModelsExpandDepth: 1, + } + + s.echo.GET("/swagger/*", echoSwagger.EchoWrapHandler(config)) +} +``` + +### Access Documentation + +After starting your server: + +1. Open your browser to `http://localhost:8080/swagger/index.html` +2. Browse available endpoints +3. Test endpoints directly from the UI +4. View request/response schemas + +## Best Practices + +### Annotation Conventions + +1. **Be Descriptive**: Write clear summaries and descriptions +2. **Use Examples**: Include realistic example values +3. **Document All Responses**: Include both success and error cases +4. **Group by Tag**: Use tags to organize related endpoints +5. 
**Version Your API**: Include version in API info + +### Code Organization + +```go +// Keep annotations directly above the handler function +// GetUsers godoc +// @Summary List users with pagination +// @Description Get a paginated list of users +// @Tags users +// @Accept json +// @Produce json +// @Param page query int false "Page number" default(1) +// @Success 200 {object} response.Response{data=[]User} "Success" +// @Router /users [get] +func (s *UsersService) GetUsers(c echo.Context) error { + // Implementation +} +``` + +### Struct Documentation + +```go +// Always document request/response structs +// CreateUserRequest represents the request body for creating a user +type CreateUserRequest struct { + // Document each field with example and description + Username string `json:"username" example:"john_doe" description:"Unique username" validate:"required"` + Email string `json:"email" example:"john@example.com" description:"Valid email address" validate:"required,email"` +} +``` + +### Keeping Docs in Sync + +1. **Update Annotations First**: When changing endpoints, update annotations before code +2. **Regenerate Regularly**: Run `swag init` before commits +3. **Add to CI/CD**: Auto-generate docs in your build pipeline +4. 
**Review Changes**: Check generated docs for accuracy + +### CI/CD Integration + +Add to your GitHub Actions workflow: + +```yaml +name: Generate Swagger Docs + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + generate-docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: '1.21' + + - name: Install swag + run: go install github.com/swaggo/swag/cmd/swag@latest + + - name: Generate docs + run: swag init -g cmd/app/main.go -o docs + + - name: Verify docs + run: | + if [ -f "docs/swagger.json" ]; then + echo "Swagger documentation generated successfully" + else + echo "Failed to generate documentation" + exit 1 + fi +``` + +## Troubleshooting + +### Common Issues + +**Problem**: `swag: command not found` +```bash +# Solution: Add Go bin to PATH +export PATH=$PATH:$(go env GOPATH)/bin +# Or install again +go install github.com/swaggo/swag/cmd/swag@latest +``` + +**Problem**: Annotations not appearing in generated docs +```bash +# Solution: Ensure annotations are directly above the function +# Check for typos in annotation names +# Verify you're running swag from project root +swag init --v # Enable verbose output +``` + +**Problem**: Struct fields not documented +```bash +# Solution: Add example and description tags to struct fields +type User struct { + Name string `json:"name" example:"John" description:"User's name"` +} +``` + +**Problem**: Generated docs are outdated +```bash +# Solution: Run swag init before building +swag init -g cmd/app/main.go -o docs +go build cmd/app/main.go +``` + +**Problem**: Swagger UI not loading +```bash +# Solution: Verify echo-swagger is registered +# Check that docs package is imported: _ "yourproject/docs" +# Ensure route is registered: e.GET("/swagger/*", echoSwagger.EchoWrapHandler()) +``` + +## Example Workflow + +Complete workflow for adding a new documented endpoint: + +```bash +# 1. 
Add handler with annotations +# Edit internal/services/modules/users_service.go + +# 2. Generate documentation +cd /path/to/stackyard +swag init -g cmd/app/main.go -o docs + +# 3. Verify generated files +ls -la docs/ + +# 4. Build and run +go run cmd/app/main.go + +# 5. Test in browser +open http://localhost:8080/swagger/index.html +``` + +## Migration from Manual Swagger + +If migrating from a manual `swagger.yaml`: + +1. **Add annotations** to all endpoint handlers +2. **Document structs** with example tags +3. **Run swag init** to generate new docs +4. **Compare output** with your manual YAML +5. **Update server** to use generated docs +6. **Remove manual YAML** when satisfied + +```bash +# Generate new docs +swag init -o docs_new + +# Compare with existing +diff docs/swagger.yaml docs_new/swagger.yaml + +# Replace when ready +mv docs_new/* docs/ +rm -rf docs_new +``` + +## Next Steps + +Now that you understand API documentation with swag, explore: + +- **[Development Guide](DEVELOPMENT.md)** - Learn to build services +- **[Architecture Overview](ARCHITECTURE.md)** - Understand the system design +- **[Getting Started](GETTING_STARTED.md)** - Set up your development environment \ No newline at end of file diff --git a/docs_wiki/blueprint/blueprint.txt b/docs_wiki/blueprint/blueprint.txt index 8a878eb..3e6f020 100644 --- a/docs_wiki/blueprint/blueprint.txt +++ b/docs_wiki/blueprint/blueprint.txt @@ -1,5 +1,23 @@ # PROJECT BLUEPRINT - COMPREHENSIVE ANALYSIS +## DOCUMENTATION TERMS + +### Writing Guidelines + +**Formatting Rules:** +- No emojis in documentation +- No "new" context references (describe existing state only) +- Use present tense for current features +- Use past tense for historical changes +- Maintain professional tone throughout + +**Terminology Standards:** +- Use "existing" instead of "new" +- Use "current" instead of "modern" +- Use "available" instead of "introduced" +- Use "includes" instead of "adds" +- Use "provides" instead of "offers" + ## 1. 
PROJECT OVERVIEW This project is a sophisticated Go-based boilerplate application with a modular architecture, comprehensive API structure, and advanced monitoring capabilities. The system is designed for extensibility, security, and production readiness. diff --git a/internal/services/modules/broadcast_service.go b/internal/services/modules/broadcast_service.go index 2c323f8..d8a9117 100644 --- a/internal/services/modules/broadcast_service.go +++ b/internal/services/modules/broadcast_service.go @@ -140,6 +140,16 @@ func (s *BroadcastService) RegisterRoutes(g *echo.Group) { // HANDLER METHODS - Using Broadcast Utility // ========================================= +// StreamEvents godoc +// @Summary Stream events from a specific stream +// @Description Subscribe to Server-Sent Events (SSE) for a specific stream +// @Tags events +// @Accept json +// @Produce text/event-stream +// @Param stream_id path string true "Stream ID" +// @Success 200 {string} string "SSE stream" +// @Failure 404 {object} response.Response "Stream not found" +// @Router /events/stream/{stream_id} [get] func (s *BroadcastService) streamEvents(c echo.Context) error { streamID := c.Param("stream_id") client := s.broadcaster.Subscribe(streamID) diff --git a/internal/services/modules/cache_service.go b/internal/services/modules/cache_service.go index e68d7d8..b4cd09c 100644 --- a/internal/services/modules/cache_service.go +++ b/internal/services/modules/cache_service.go @@ -40,31 +40,56 @@ func (s *CacheService) RegisterRoutes(g *echo.Group) { sub := g.Group("/cache") // GET /cache/:key - sub.GET("/:key", func(c echo.Context) error { - key := c.Param("key") - val, found := s.store.Get(key) - if !found { - return response.NotFound(c, "Key not found or expired") - } - return response.Success(c, map[string]string{"key": key, "value": val}) - }) + sub.GET("/:key", s.GetCachedValue) // POST /cache/:key - sub.POST("/:key", func(c echo.Context) error { - key := c.Param("key") - var req CacheRequest - if 
err := c.Bind(&req); err != nil { - return response.BadRequest(c, "Invalid body") - } + sub.POST("/:key", s.SetCachedValue) +} + +// GetCachedValue godoc +// @Summary Get cached value by key +// @Description Retrieve a cached value by its key +// @Tags cache +// @Accept json +// @Produce json +// @Param key path string true "Cache key" +// @Success 200 {object} response.Response "Success" +// @Failure 404 {object} response.Response "Key not found or expired" +// @Router /cache/{key} [get] +func (s *CacheService) GetCachedValue(c echo.Context) error { + key := c.Param("key") + val, found := s.store.Get(key) + if !found { + return response.NotFound(c, "Key not found or expired") + } + return response.Success(c, map[string]string{"key": key, "value": val}) +} + +// SetCachedValue godoc +// @Summary Set cached value +// @Description Store a value in the cache with optional TTL +// @Tags cache +// @Accept json +// @Produce json +// @Param key path string true "Cache key" +// @Param request body CacheRequest true "Cache request" +// @Success 200 {object} response.Response "Cached successfully" +// @Failure 400 {object} response.Response "Invalid body" +// @Router /cache/{key} [post] +func (s *CacheService) SetCachedValue(c echo.Context) error { + key := c.Param("key") + var req CacheRequest + if err := c.Bind(&req); err != nil { + return response.BadRequest(c, "Invalid body") + } - ttl := time.Duration(req.TTL) * time.Second - s.store.Set(key, req.Value, ttl) + ttl := time.Duration(req.TTL) * time.Second + s.store.Set(key, req.Value, ttl) - return response.Success(c, map[string]string{ - "message": "Cached successfully", - "key": key, - "ttl": ttl.String(), - }) + return response.Success(c, map[string]string{ + "message": "Cached successfully", + "key": key, + "ttl": ttl.String(), }) } diff --git a/internal/services/modules/encryption_service.go b/internal/services/modules/encryption_service.go index 105c48b..989cdd4 100644 --- 
a/internal/services/modules/encryption_service.go +++ b/internal/services/modules/encryption_service.go @@ -186,6 +186,17 @@ func (s *EncryptionService) decrypt(encryptedData string) ([]byte, error) { } // Handlers +// EncryptData godoc +// @Summary Encrypt data +// @Description Encrypt plaintext data using AES-256-GCM +// @Tags encryption +// @Accept json +// @Produce json +// @Param request body EncryptRequest true "Data to encrypt" +// @Success 200 {object} response.Response{data=EncryptResponse} "Data encrypted successfully" +// @Failure 400 {object} response.Response "Invalid request body" +// @Failure 500 {object} response.Response "Encryption failed" +// @Router /encryption/encrypt [post] func (s *EncryptionService) EncryptData(c echo.Context) error { var req EncryptRequest if err := c.Bind(&req); err != nil { @@ -214,6 +225,16 @@ func (s *EncryptionService) EncryptData(c echo.Context) error { return response.Success(c, resp, "Data encrypted successfully") } +// DecryptData godoc +// @Summary Decrypt data +// @Description Decrypt encrypted data using AES-256-GCM +// @Tags encryption +// @Accept json +// @Produce json +// @Param request body DecryptRequest true "Data to decrypt" +// @Success 200 {object} response.Response{data=DecryptResponse} "Data decrypted successfully" +// @Failure 400 {object} response.Response "Invalid request body or decryption failed" +// @Router /encryption/decrypt [post] func (s *EncryptionService) DecryptData(c echo.Context) error { var req DecryptRequest if err := c.Bind(&req); err != nil { @@ -242,6 +263,14 @@ func (s *EncryptionService) DecryptData(c echo.Context) error { return response.Success(c, resp, "Data decrypted successfully") } +// GetStatus godoc +// @Summary Get encryption service status +// @Description Get the current status and configuration of the encryption service +// @Tags encryption +// @Accept json +// @Produce json +// @Success 200 {object} response.Response{data=StatusResponse} "Encryption service status" 
+// @Router /encryption/status [get] func (s *EncryptionService) GetStatus(c echo.Context) error { // Get current key info (show only first 8 chars for security) currentKeyPreview := fmt.Sprintf("%s...", hex.EncodeToString(s.encryptionKey[:4])) @@ -258,6 +287,16 @@ func (s *EncryptionService) GetStatus(c echo.Context) error { return response.Success(c, resp, "Encryption service status") } +// RotateKey godoc +// @Summary Rotate encryption key +// @Description Rotate the encryption key with a new key +// @Tags encryption +// @Accept json +// @Produce json +// @Param request body KeyRotateRequest true "New encryption key" +// @Success 200 {object} response.Response "Key rotation successful" +// @Failure 400 {object} response.Response "Invalid request body" +// @Router /encryption/key-rotate [post] func (s *EncryptionService) RotateKey(c echo.Context) error { var req KeyRotateRequest if err := c.Bind(&req); err != nil { diff --git a/internal/services/modules/grafana_service.go b/internal/services/modules/grafana_service.go index 63c4085..4e797a3 100644 --- a/internal/services/modules/grafana_service.go +++ b/internal/services/modules/grafana_service.go @@ -53,7 +53,17 @@ func (s *GrafanaService) RegisterRoutes(g *echo.Group) { sub.GET("/health", s.getHealth) } -// createDashboard creates a new Grafana dashboard +// createDashboard godoc +// @Summary Create Grafana dashboard +// @Description Create a new Grafana dashboard +// @Tags grafana +// @Accept json +// @Produce json +// @Param request body infrastructure.GrafanaDashboard true "Dashboard data" +// @Success 201 {object} response.Response "Dashboard created successfully" +// @Failure 400 {object} response.Response "Invalid dashboard data" +// @Failure 500 {object} response.Response "Failed to create dashboard" +// @Router /grafana/dashboards [post] func (s *GrafanaService) createDashboard(c echo.Context) error { var dashboard infrastructure.GrafanaDashboard if err := c.Bind(&dashboard); err != nil { @@ -69,7 
+79,18 @@ func (s *GrafanaService) createDashboard(c echo.Context) error { return response.Created(c, result, "Dashboard created successfully") } -// updateDashboard updates an existing Grafana dashboard +// updateDashboard godoc +// @Summary Update Grafana dashboard +// @Description Update an existing Grafana dashboard by UID +// @Tags grafana +// @Accept json +// @Produce json +// @Param uid path string true "Dashboard UID" +// @Param request body infrastructure.GrafanaDashboard true "Dashboard data" +// @Success 200 {object} response.Response "Dashboard updated successfully" +// @Failure 400 {object} response.Response "Invalid dashboard data or missing UID" +// @Failure 500 {object} response.Response "Failed to update dashboard" +// @Router /grafana/dashboards/{uid} [put] func (s *GrafanaService) updateDashboard(c echo.Context) error { uid := c.Param("uid") if uid == "" { @@ -93,7 +114,17 @@ func (s *GrafanaService) updateDashboard(c echo.Context) error { return response.Success(c, result, "Dashboard updated successfully") } -// getDashboard retrieves a Grafana dashboard by UID +// getDashboard godoc +// @Summary Get Grafana dashboard +// @Description Retrieve a Grafana dashboard by UID +// @Tags grafana +// @Accept json +// @Produce json +// @Param uid path string true "Dashboard UID" +// @Success 200 {object} response.Response "Dashboard retrieved successfully" +// @Failure 400 {object} response.Response "Dashboard UID is required" +// @Failure 404 {object} response.Response "Dashboard not found" +// @Router /grafana/dashboards/{uid} [get] func (s *GrafanaService) getDashboard(c echo.Context) error { uid := c.Param("uid") if uid == "" { @@ -109,7 +140,17 @@ func (s *GrafanaService) getDashboard(c echo.Context) error { return response.Success(c, dashboard, "Dashboard retrieved successfully") } -// deleteDashboard deletes a Grafana dashboard by UID +// deleteDashboard godoc +// @Summary Delete Grafana dashboard +// @Description Delete a Grafana dashboard by UID 
+// @Tags grafana +// @Accept json +// @Produce json +// @Param uid path string true "Dashboard UID" +// @Success 200 {object} response.Response "Dashboard deleted successfully" +// @Failure 400 {object} response.Response "Dashboard UID is required" +// @Failure 500 {object} response.Response "Failed to delete dashboard" +// @Router /grafana/dashboards/{uid} [delete] func (s *GrafanaService) deleteDashboard(c echo.Context) error { uid := c.Param("uid") if uid == "" { @@ -125,7 +166,17 @@ func (s *GrafanaService) deleteDashboard(c echo.Context) error { return response.Success(c, nil, "Dashboard deleted successfully") } -// listDashboards lists all Grafana dashboards +// listDashboards godoc +// @Summary List Grafana dashboards +// @Description List all Grafana dashboards with pagination +// @Tags grafana +// @Accept json +// @Produce json +// @Param page query int false "Page number" default(1) +// @Param per_page query int false "Items per page" default(50) +// @Success 200 {object} response.Response "Dashboards retrieved successfully" +// @Failure 500 {object} response.Response "Failed to list dashboards" +// @Router /grafana/dashboards [get] func (s *GrafanaService) listDashboards(c echo.Context) error { // Parse pagination parameters page := 1 @@ -165,7 +216,17 @@ func (s *GrafanaService) listDashboards(c echo.Context) error { return response.SuccessWithMeta(c, dashboards, meta, "Dashboards retrieved successfully") } -// createDataSource creates a new Grafana data source +// createDataSource godoc +// @Summary Create Grafana data source +// @Description Create a new Grafana data source +// @Tags grafana +// @Accept json +// @Produce json +// @Param request body infrastructure.GrafanaDataSource true "Data source configuration" +// @Success 201 {object} response.Response "Data source created successfully" +// @Failure 400 {object} response.Response "Invalid data source data" +// @Failure 500 {object} response.Response "Failed to create data source" +// @Router 
/grafana/datasources [post] func (s *GrafanaService) createDataSource(c echo.Context) error { var ds infrastructure.GrafanaDataSource if err := c.Bind(&ds); err != nil { @@ -181,7 +242,17 @@ func (s *GrafanaService) createDataSource(c echo.Context) error { return response.Created(c, result, "Data source created successfully") } -// createAnnotation creates a new Grafana annotation +// createAnnotation godoc +// @Summary Create Grafana annotation +// @Description Create a new Grafana annotation +// @Tags grafana +// @Accept json +// @Produce json +// @Param request body infrastructure.GrafanaAnnotation true "Annotation data" +// @Success 201 {object} response.Response "Annotation created successfully" +// @Failure 400 {object} response.Response "Invalid annotation data" +// @Failure 500 {object} response.Response "Failed to create annotation" +// @Router /grafana/annotations [post] func (s *GrafanaService) createAnnotation(c echo.Context) error { var annotation infrastructure.GrafanaAnnotation if err := c.Bind(&annotation); err != nil { @@ -197,7 +268,15 @@ func (s *GrafanaService) createAnnotation(c echo.Context) error { return response.Created(c, result, "Annotation created successfully") } -// getHealth returns Grafana health status +// getHealth godoc +// @Summary Get Grafana health status +// @Description Check Grafana service health +// @Tags grafana +// @Accept json +// @Produce json +// @Success 200 {object} response.Response "Grafana health check successful" +// @Failure 503 {object} response.Response "Grafana is not available" +// @Router /grafana/health [get] func (s *GrafanaService) getHealth(c echo.Context) error { health, err := s.grafanaManager.GetHealth(c.Request().Context()) if err != nil { diff --git a/internal/services/modules/mongodb_service.go b/internal/services/modules/mongodb_service.go index ce06ce7..0848c24 100644 --- a/internal/services/modules/mongodb_service.go +++ b/internal/services/modules/mongodb_service.go @@ -66,7 +66,17 @@ func (s 
*MongoDBService) RegisterRoutes(g *echo.Group) { sub.GET("/:tenant/analytics", s.getProductAnalytics) } -// listProductsByTenant lists products from a specific tenant database +// listProductsByTenant godoc +// @Summary List products by tenant +// @Description Retrieve all products from a specific tenant's database +// @Tags products +// @Accept json +// @Produce json +// @Param tenant path string true "Tenant identifier" +// @Success 200 {object} response.Response "Products retrieved from tenant database" +// @Failure 404 {object} response.Response "Tenant database not found" +// @Failure 500 {object} response.Response "Failed to query tenant database" +// @Router /products/{tenant} [get] func (s *MongoDBService) listProductsByTenant(c echo.Context) error { tenant := c.Param("tenant") @@ -94,7 +104,19 @@ func (s *MongoDBService) listProductsByTenant(c echo.Context) error { return response.Success(c, products, fmt.Sprintf("Products retrieved from tenant '%s' database", tenant)) } -// createProduct creates a new product in the specified tenant database +// createProduct godoc +// @Summary Create product in tenant database +// @Description Create a new product in a specific tenant's database +// @Tags products +// @Accept json +// @Produce json +// @Param tenant path string true "Tenant identifier" +// @Param request body Product true "Product data" +// @Success 201 {object} response.Response "Product created in tenant database" +// @Failure 400 {object} response.Response "Invalid product data" +// @Failure 404 {object} response.Response "Tenant database not found" +// @Failure 500 {object} response.Response "Failed to create product" +// @Router /products/{tenant} [post] func (s *MongoDBService) createProduct(c echo.Context) error { tenant := c.Param("tenant") @@ -144,7 +166,19 @@ func (s *MongoDBService) createProduct(c echo.Context) error { return response.Created(c, responseData, fmt.Sprintf("Product created in tenant '%s' database", tenant)) } -// 
getProductByTenant retrieves a specific product from a tenant database +// getProductByTenant godoc +// @Summary Get product by tenant +// @Description Retrieve a specific product from a tenant's database +// @Tags products +// @Accept json +// @Produce json +// @Param tenant path string true "Tenant identifier" +// @Param id path string true "Product ID" +// @Success 200 {object} response.Response "Product retrieved from tenant database" +// @Failure 400 {object} response.Response "Invalid product ID format" +// @Failure 404 {object} response.Response "Tenant database or product not found" +// @Failure 500 {object} response.Response "Failed to query tenant database" +// @Router /products/{tenant}/{id} [get] func (s *MongoDBService) getProductByTenant(c echo.Context) error { tenant := c.Param("tenant") id := c.Param("id") @@ -174,7 +208,20 @@ func (s *MongoDBService) getProductByTenant(c echo.Context) error { return response.Success(c, product, fmt.Sprintf("Product retrieved from tenant '%s' database", tenant)) } -// updateProduct updates a product in the specified tenant database +// updateProduct godoc +// @Summary Update product in tenant database +// @Description Update a product in a specific tenant's database +// @Tags products +// @Accept json +// @Produce json +// @Param tenant path string true "Tenant identifier" +// @Param id path string true "Product ID" +// @Param request body map[string]interface{} true "Product update data" +// @Success 200 {object} response.Response "Product updated in tenant database" +// @Failure 400 {object} response.Response "Invalid product ID format or update data" +// @Failure 404 {object} response.Response "Tenant database or product not found" +// @Failure 500 {object} response.Response "Failed to update product" +// @Router /products/{tenant}/{id} [put] func (s *MongoDBService) updateProduct(c echo.Context) error { tenant := c.Param("tenant") id := c.Param("id") @@ -221,7 +268,19 @@ func (s *MongoDBService) updateProduct(c 
echo.Context) error { return response.Success(c, bson.M{"modified_count": result.ModifiedCount}, fmt.Sprintf("Product updated in tenant '%s' database", tenant)) } -// deleteProduct deletes a product from the specified tenant database +// deleteProduct godoc +// @Summary Delete product from tenant database +// @Description Delete a product from a specific tenant's database +// @Tags products +// @Accept json +// @Produce json +// @Param tenant path string true "Tenant identifier" +// @Param id path string true "Product ID" +// @Success 200 {object} response.Response "Product deleted from tenant database" +// @Failure 400 {object} response.Response "Invalid product ID format" +// @Failure 404 {object} response.Response "Tenant database or product not found" +// @Failure 500 {object} response.Response "Failed to delete product" +// @Router /products/{tenant}/{id} [delete] func (s *MongoDBService) deleteProduct(c echo.Context) error { tenant := c.Param("tenant") id := c.Param("id") @@ -252,7 +311,23 @@ func (s *MongoDBService) deleteProduct(c echo.Context) error { return response.Success(c, bson.M{"deleted_count": result.DeletedCount}, fmt.Sprintf("Product deleted from tenant '%s' database", tenant)) } -// searchProducts performs advanced search on products +// searchProducts godoc +// @Summary Search products in tenant database +// @Description Search products with various filters in a tenant's database +// @Tags products +// @Accept json +// @Produce json +// @Param tenant path string true "Tenant identifier" +// @Param name query string false "Search by product name" +// @Param category query string false "Filter by category" +// @Param in_stock query boolean false "Filter by stock availability" +// @Param min_price query number false "Minimum price filter" +// @Param max_price query number false "Maximum price filter" +// @Param tags query string false "Filter by tags (comma-separated)" +// @Success 200 {object} response.Response "Products found" +// @Failure 404 
{object} response.Response "Tenant database not found" +// @Failure 500 {object} response.Response "Failed to search products" +// @Router /products/{tenant}/search [get] func (s *MongoDBService) searchProducts(c echo.Context) error { tenant := c.Param("tenant") @@ -324,7 +399,17 @@ func (s *MongoDBService) searchProducts(c echo.Context) error { return response.Success(c, products, fmt.Sprintf("Found %d products in tenant '%s' database", len(products), tenant)) } -// getProductAnalytics provides analytics for products in a tenant +// getProductAnalytics godoc +// @Summary Get product analytics +// @Description Get analytics for products in a tenant's database +// @Tags products +// @Accept json +// @Produce json +// @Param tenant path string true "Tenant identifier" +// @Success 200 {object} response.Response "Product analytics for tenant database" +// @Failure 404 {object} response.Response "Tenant database not found" +// @Failure 500 {object} response.Response "Failed to aggregate product analytics" +// @Router /products/{tenant}/analytics [get] func (s *MongoDBService) getProductAnalytics(c echo.Context) error { tenant := c.Param("tenant") diff --git a/internal/services/modules/multi_tenant_service.go b/internal/services/modules/multi_tenant_service.go index b6f9781..490fec7 100644 --- a/internal/services/modules/multi_tenant_service.go +++ b/internal/services/modules/multi_tenant_service.go @@ -76,7 +76,17 @@ func (s *MultiTenantService) RegisterRoutes(g *echo.Group) { sub.DELETE("/:tenant/:id", s.deleteOrder) } -// listOrdersByTenant lists orders from a specific tenant database +// listOrdersByTenant godoc +// @Summary List orders by tenant +// @Description Retrieve all orders from a specific tenant's database +// @Tags orders +// @Accept json +// @Produce json +// @Param tenant path string true "Tenant identifier" +// @Success 200 {object} response.Response "Orders retrieved from tenant database" +// @Failure 404 {object} response.Response "Tenant database 
not found" +// @Failure 500 {object} response.Response "Failed to query tenant database" +// @Router /orders/{tenant} [get] func (s *MultiTenantService) listOrdersByTenant(c echo.Context) error { tenant := c.Param("tenant") @@ -96,7 +106,19 @@ func (s *MultiTenantService) listOrdersByTenant(c echo.Context) error { return response.Success(c, orders, fmt.Sprintf("Orders retrieved from tenant '%s' database", tenant)) } -// createOrder creates a new order in the specified tenant database +// createOrder godoc +// @Summary Create order in tenant database +// @Description Create a new order in a specific tenant's database +// @Tags orders +// @Accept json +// @Produce json +// @Param tenant path string true "Tenant identifier" +// @Param request body MultiTenantOrder true "Order data" +// @Success 201 {object} response.Response "Order created in tenant database" +// @Failure 400 {object} response.Response "Invalid order data" +// @Failure 404 {object} response.Response "Tenant database not found" +// @Failure 500 {object} response.Response "Failed to create order" +// @Router /orders/{tenant} [post] func (s *MultiTenantService) createOrder(c echo.Context) error { tenant := c.Param("tenant") @@ -124,7 +146,19 @@ func (s *MultiTenantService) createOrder(c echo.Context) error { return response.Created(c, order, fmt.Sprintf("Order created in tenant '%s' database", tenant)) } -// getOrderByTenant retrieves a specific order from a tenant database +// getOrderByTenant godoc +// @Summary Get order by tenant +// @Description Retrieve a specific order from a tenant's database +// @Tags orders +// @Accept json +// @Produce json +// @Param tenant path string true "Tenant identifier" +// @Param id path string true "Order ID" +// @Success 200 {object} response.Response "Order retrieved from tenant database" +// @Failure 400 {object} response.Response "Invalid order ID" +// @Failure 404 {object} response.Response "Tenant database or order not found" +// @Failure 500 {object} 
response.Response "Failed to query tenant database" +// @Router /orders/{tenant}/{id} [get] func (s *MultiTenantService) getOrderByTenant(c echo.Context) error { tenant := c.Param("tenant") idStr := c.Param("id") @@ -152,7 +186,20 @@ func (s *MultiTenantService) getOrderByTenant(c echo.Context) error { return response.Success(c, order, fmt.Sprintf("Order retrieved from tenant '%s' database", tenant)) } -// updateOrder updates an order in the specified tenant database +// updateOrder godoc +// @Summary Update order in tenant database +// @Description Update an order in a specific tenant's database +// @Tags orders +// @Accept json +// @Produce json +// @Param tenant path string true "Tenant identifier" +// @Param id path string true "Order ID" +// @Param request body MultiTenantOrder true "Order update data" +// @Success 200 {object} response.Response "Order updated in tenant database" +// @Failure 400 {object} response.Response "Invalid order ID or update data" +// @Failure 404 {object} response.Response "Tenant database or order not found" +// @Failure 500 {object} response.Response "Failed to update order" +// @Router /orders/{tenant}/{id} [put] func (s *MultiTenantService) updateOrder(c echo.Context) error { tenant := c.Param("tenant") idStr := c.Param("id") @@ -212,7 +259,19 @@ func (s *MultiTenantService) updateOrder(c echo.Context) error { return response.Success(c, nil, fmt.Sprintf("Order updated in tenant '%s' database", tenant)) } -// deleteOrder deletes an order from the specified tenant database +// deleteOrder godoc +// @Summary Delete order from tenant database +// @Description Delete an order from a specific tenant's database +// @Tags orders +// @Accept json +// @Produce json +// @Param tenant path string true "Tenant identifier" +// @Param id path string true "Order ID" +// @Success 200 {object} response.Response "Order deleted from tenant database" +// @Failure 400 {object} response.Response "Invalid order ID" +// @Failure 404 {object} 
response.Response "Tenant database or order not found" +// @Failure 500 {object} response.Response "Failed to delete order" +// @Router /orders/{tenant}/{id} [delete] func (s *MultiTenantService) deleteOrder(c echo.Context) error { tenant := c.Param("tenant") idStr := c.Param("id") diff --git a/internal/services/modules/tasks_service.go b/internal/services/modules/tasks_service.go index 57cbda1..f944ac1 100644 --- a/internal/services/modules/tasks_service.go +++ b/internal/services/modules/tasks_service.go @@ -62,6 +62,15 @@ func (s *TasksService) RegisterRoutes(g *echo.Group) { sub.DELETE("/:id", s.deleteTask) } +// listTasks godoc +// @Summary List all tasks +// @Description Retrieve all tasks from the database +// @Tags tasks +// @Accept json +// @Produce json +// @Success 200 {object} response.Response "Tasks retrieved successfully" +// @Failure 500 {object} response.Response "Failed to retrieve tasks" +// @Router /tasks [get] func (s *TasksService) listTasks(c echo.Context) error { var tasks []Task @@ -77,6 +86,17 @@ func (s *TasksService) listTasks(c echo.Context) error { return response.Success(c, tasks) } +// createTask godoc +// @Summary Create a new task +// @Description Create a new task in the database +// @Tags tasks +// @Accept json +// @Produce json +// @Param request body Task true "Task to create" +// @Success 201 {object} response.Response "Task created successfully" +// @Failure 400 {object} response.Response "Invalid input" +// @Failure 500 {object} response.Response "Failed to create task" +// @Router /tasks [post] func (s *TasksService) createTask(c echo.Context) error { task := new(Task) if err := c.Bind(task); err != nil { @@ -95,6 +115,19 @@ func (s *TasksService) createTask(c echo.Context) error { return response.Created(c, task) } +// updateTask godoc +// @Summary Update a task +// @Description Update an existing task by ID +// @Tags tasks +// @Accept json +// @Produce json +// @Param id path int true "Task ID" +// @Param request body 
Task true "Task data to update" +// @Success 200 {object} response.Response "Task updated successfully" +// @Failure 400 {object} response.Response "Invalid input" +// @Failure 404 {object} response.Response "Task not found" +// @Failure 500 {object} response.Response "Failed to update task" +// @Router /tasks/{id} [put] func (s *TasksService) updateTask(c echo.Context) error { id, _ := strconv.Atoi(c.Param("id")) var task Task @@ -120,6 +153,16 @@ func (s *TasksService) updateTask(c echo.Context) error { return response.Success(c, task) } +// deleteTask godoc +// @Summary Delete a task +// @Description Delete a task by ID +// @Tags tasks +// @Accept json +// @Produce json +// @Param id path int true "Task ID" +// @Success 200 {object} response.Response "Task deleted successfully" +// @Failure 500 {object} response.Response "Failed to delete task" +// @Router /tasks/{id} [delete] func (s *TasksService) deleteTask(c echo.Context) error { id, _ := strconv.Atoi(c.Param("id")) var task Task diff --git a/pkg/utils/parameter.go b/pkg/utils/parameter.go index e6efe11..b0a3303 100644 --- a/pkg/utils/parameter.go +++ b/pkg/utils/parameter.go @@ -20,6 +20,9 @@ type FlagDefinition struct { // ParsedFlags holds the parsed flag values type ParsedFlags struct { ConfigURL string // -c flag value + Port string // -port flag value + Verbose bool // -verbose flag value + Env string // -env flag value // Add new flags here as needed } @@ -56,6 +59,10 @@ func ParseFlags(flagDefinitions []FlagDefinition) (*ParsedFlags, error) { value = *ptr if def.Name == "c" { parsed.ConfigURL = *ptr + } else if def.Name == "port" { + parsed.Port = *ptr + } else if def.Name == "env" { + parsed.Env = *ptr } // Add new string flag assignments here case *int: @@ -63,6 +70,9 @@ func ParseFlags(flagDefinitions []FlagDefinition) (*ParsedFlags, error) { // Add new int flag assignments here case *bool: value = *ptr + if def.Name == "verbose" { + parsed.Verbose = *ptr + } // Add new bool flag assignments here 
} @@ -142,8 +152,9 @@ func PrintUsage(flagDefinitions []FlagDefinition, appName string) { } fmt.Println() fmt.Println("Examples:") - fmt.Println(" ./" + appName + " # Load config from local config.yaml") - fmt.Println(" ./" + appName + " -c http://example.com/config.yaml # Load config from URL") - fmt.Println(" ./" + appName + " -c https://config.example.com/app-config.yaml # Load config from HTTPS URL") + fmt.Printf(" ./%-40s # Load config from local config.yaml\n", appName) + fmt.Printf(" ./%s -c http://example.com/config.yaml\n", appName) + fmt.Printf(" ./%s -port 9090 -env production\n", appName) + fmt.Printf(" ./%s -c https://config.example.com/app.yaml -verbose\n", appName) fmt.Println() } diff --git a/scripts/swagger/swagger.go b/scripts/swagger/swagger.go new file mode 100644 index 0000000..5764ef0 --- /dev/null +++ b/scripts/swagger/swagger.go @@ -0,0 +1,565 @@ +package main + +import ( + "context" + "flag" + "fmt" + "os" + "os/exec" + "os/signal" + "path/filepath" + "regexp" + "runtime" + "strings" + "syscall" + "time" +) + +// Configuration variables +var ( + MAIN_PATH = "./cmd/app/main.go" + DOCS_DIR = "docs" + SERVICES_DIR = "internal/services/modules" + OUTPUT_TYPES = "go,json,yaml" +) + +// ANSI Colors +const ( + RESET = "\033[0m" + BOLD = "\033[1m" + DIM = "\033[2m" + UNDERLINE = "\033[4m" + + // Pastel Palette + P_PURPLE = "\033[38;5;108m" + B_PURPLE = "\033[1;38;5;108m" + P_CYAN = "\033[38;5;117m" + B_CYAN = "\033[1;38;5;117m" + P_GREEN = "\033[38;5;108m" + B_GREEN = "\033[1;38;5;108m" + P_YELLOW = "\033[93m" + B_YELLOW = "\033[1;93m" + P_RED = "\033[91m" + B_RED = "\033[1;91m" + GRAY = "\033[38;5;242m" + WHITE = "\033[97m" + B_WHITE = "\033[1;97m" +) + +// Swagger configuration +type SwaggerConfig struct { + GeneralInfo bool + ScanServices bool + Verbose bool + DryRun bool +} + +// SwaggerContext holds the generation state +type SwaggerContext struct { + Config SwaggerConfig + ProjectDir string + DocsDir string + ServicesDir string +} + +// 
Logger for structured output +type Logger struct { + verbose bool +} + +func (l *Logger) Info(msg string, args ...interface{}) { + fmt.Printf("%s[INFO]%s %s\n", B_CYAN, RESET, fmt.Sprintf(msg, args...)) +} + +func (l *Logger) Warn(msg string, args ...interface{}) { + fmt.Printf("%s[WARN]%s %s\n", B_YELLOW, RESET, fmt.Sprintf(msg, args...)) +} + +func (l *Logger) Error(msg string, args ...interface{}) { + fmt.Printf("%s[ERROR]%s %s\n", B_RED, RESET, fmt.Sprintf(msg, args...)) +} + +func (l *Logger) Debug(msg string, args ...interface{}) { + if l.verbose { + fmt.Printf("%s[DEBUG]%s %s\n", GRAY, RESET, fmt.Sprintf(msg, args...)) + } +} + +func (l *Logger) Success(msg string, args ...interface{}) { + fmt.Printf("%s[SUCCESS]%s %s\n", B_GREEN, RESET, fmt.Sprintf(msg, args...)) +} + +// NewLogger creates a new logger +func NewLogger(verbose bool) *Logger { + return &Logger{verbose: verbose} +} + +// clear console screen +func ClearScreen() { + var cmd *exec.Cmd + + switch runtime.GOOS { + case "windows": + cmd = exec.Command("cmd", "/c", "cls") + default: + cmd = exec.Command("clear") + } + + cmd.Stdout = os.Stdout + cmd.Run() +} + +// findProjectRoot searches up the directory tree for go.mod +func findProjectRoot(startDir string) (string, error) { + current := startDir + + for { + goModPath := filepath.Join(current, "go.mod") + if _, err := os.Stat(goModPath); err == nil { + return current, nil + } + + parent := filepath.Dir(current) + if parent == current { + break + } + current = parent + } + + return "", fmt.Errorf("go.mod not found in directory tree") +} + +// ensureProjectRoot finds the project root and changes to it +func (ctx *SwaggerContext) ensureProjectRoot(logger *Logger) error { + currentDir, err := os.Getwd() + if err != nil { + return fmt.Errorf("failed to get current directory: %w", err) + } + + logger.Info("Starting from: %s", currentDir) + + projectRoot, err := findProjectRoot(currentDir) + if err != nil { + return fmt.Errorf("failed to find project 
root: %w", err) + } + + if projectRoot != currentDir { + logger.Info("Changing to project root: %s", projectRoot) + if err := os.Chdir(projectRoot); err != nil { + return fmt.Errorf("failed to change directory to %s: %w", projectRoot, err) + } + + ctx.ProjectDir = projectRoot + ctx.DocsDir = filepath.Join(projectRoot, DOCS_DIR) + ctx.ServicesDir = filepath.Join(projectRoot, SERVICES_DIR) + + logger.Success("Now in project root") + } else { + logger.Info("Already in project root") + ctx.ProjectDir = projectRoot + ctx.DocsDir = filepath.Join(projectRoot, DOCS_DIR) + ctx.ServicesDir = filepath.Join(projectRoot, SERVICES_DIR) + } + + return nil +} + +// checkSwagInstalled checks if swag CLI is installed +func (ctx *SwaggerContext) checkSwagInstalled(logger *Logger) error { + logger.Info("Checking if swag CLI is installed...") + + cmd := exec.Command("swag", "version") + output, err := cmd.CombinedOutput() + if err != nil { + logger.Warn("swag CLI not found. Installing...") + if err := ctx.installSwag(logger); err != nil { + return fmt.Errorf("failed to install swag: %w", err) + } + logger.Success("swag CLI installed") + } else { + logger.Success("swag CLI found: %s", strings.TrimSpace(string(output))) + } + + return nil +} + +// installSwag installs swag using go install +func (ctx *SwaggerContext) installSwag(logger *Logger) error { + cmd := exec.Command("go", "install", "github.com/swaggo/swag/cmd/swag@latest") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} + +// APIEndpoint represents a discovered API endpoint +type APIEndpoint struct { + Method string + Path string + Summary string + Description string + Tags []string + Service string +} + +// ServiceInfo represents service information +type ServiceInfo struct { + Name string + FileName string + Endpoints []APIEndpoint + Structs []string + HasSwagTags bool +} + +// SwaggerTagInfo represents swagger annotation information +type SwaggerTagInfo struct { + Name string + Description string +} + 
+// analyzeAPIEndpoints scans for API endpoints and annotations +func (ctx *SwaggerContext) analyzeAPIEndpoints(logger *Logger) ([]ServiceInfo, error) { + logger.Info("Analyzing API endpoints and annotations...") + + services := []ServiceInfo{} + + // Read all service files + files, err := os.ReadDir(ctx.ServicesDir) + if err != nil { + return nil, fmt.Errorf("failed to read services directory: %w", err) + } + + for _, file := range files { + if !strings.HasSuffix(file.Name(), ".go") { + continue + } + + filePath := filepath.Join(ctx.ServicesDir, file.Name()) + content, err := os.ReadFile(filePath) + if err != nil { + logger.Warn("Failed to read %s: %v", file.Name(), err) + continue + } + + serviceInfo := ctx.analyzeServiceFile(file.Name(), string(content), logger) + services = append(services, serviceInfo) + } + + return services, nil +} + +// analyzeServiceFile analyzes a single service file +func (ctx *SwaggerContext) analyzeServiceFile(fileName, content string, logger *Logger) ServiceInfo { + serviceName := strings.TrimSuffix(fileName, ".go") + serviceName = strings.ReplaceAll(serviceName, "_", " ") + serviceName = strings.Title(serviceName) + + info := ServiceInfo{ + Name: serviceName, + FileName: fileName, + } + + // Find swagger annotations + swagPattern := regexp.MustCompile(`//\s*@(Summary|Description|Tags|Router|Param|Success|Failure)\s+(.+)`) + matches := swagPattern.FindAllStringSubmatch(content, -1) + + if len(matches) > 0 { + info.HasSwagTags = true + } + + // Find endpoints + routerPattern := regexp.MustCompile(`//\s*@Router\s+([^\s]+)\s+\[(\w+)\]`) + routerMatches := routerPattern.FindAllStringSubmatch(content, -1) + + for _, match := range routerMatches { + endpoint := APIEndpoint{ + Path: match[1], + Method: match[2], + } + + // Find associated summary + summaryPattern := regexp.MustCompile(`//\s*@Summary\s+(.+)`) + summaryMatch := summaryPattern.FindStringSubmatch(content) + if summaryMatch != nil { + endpoint.Summary = summaryMatch[1] + } + + // 
Find associated description + descPattern := regexp.MustCompile(`//\s*@Description\s+(.+)`) + descMatch := descPattern.FindStringSubmatch(content) + if descMatch != nil { + endpoint.Description = descMatch[1] + } + + // Find tags + tagsPattern := regexp.MustCompile(`//\s*@Tags\s+(.+)`) + tagsMatch := tagsPattern.FindStringSubmatch(content) + if tagsMatch != nil { + endpoint.Tags = strings.Split(tagsMatch[1], ",") + } + + endpoint.Service = serviceName + info.Endpoints = append(info.Endpoints, endpoint) + } + + // Find struct definitions + structPattern := regexp.MustCompile(`type\s+(\w+)\s+struct`) + structMatches := structPattern.FindAllStringSubmatch(content, -1) + + for _, match := range structMatches { + info.Structs = append(info.Structs, match[1]) + } + + return info +} + +// displayAnalysis displays the analysis results +func (ctx *SwaggerContext) displayAnalysis(services []ServiceInfo, logger *Logger) { + fmt.Println("") + fmt.Println(GRAY + "======================================================================" + RESET) + fmt.Println(" " + B_PURPLE + "SWAGGER ANALYSIS RESULTS" + RESET) + fmt.Println(GRAY + "======================================================================" + RESET) + + totalEndpoints := 0 + totalStructs := 0 + servicesWithAnnotations := 0 + + for _, service := range services { + if len(service.Endpoints) > 0 { + servicesWithAnnotations++ + } + + fmt.Println("") + fmt.Printf("%s%s%s\n", B_CYAN, service.Name, RESET) + fmt.Printf(" %sFile:%s %s\n", GRAY, RESET, service.FileName) + + if service.HasSwagTags { + fmt.Printf(" %sAnnotations:%s %s✓ Found%s\n", GRAY, RESET, B_GREEN, RESET) + } else { + fmt.Printf(" %sAnnotations:%s %s✗ Not found%s\n", GRAY, RESET, P_RED, RESET) + } + + if len(service.Endpoints) > 0 { + fmt.Printf(" %sEndpoints:%s %d\n", GRAY, RESET, len(service.Endpoints)) + for _, endpoint := range service.Endpoints { + fmt.Printf(" • %s %s %s %s\n", B_WHITE, endpoint.Method, RESET, endpoint.Path) + if endpoint.Summary != "" 
{ + fmt.Printf(" %s%s%s\n", DIM, endpoint.Summary, RESET) + } + } + totalEndpoints += len(service.Endpoints) + } else { + fmt.Printf(" %sEndpoints:%s None\n", GRAY, RESET) + } + + if len(service.Structs) > 0 { + fmt.Printf(" %sStructs:%s %d\n", GRAY, RESET, len(service.Structs)) + for _, s := range service.Structs { + fmt.Printf(" • %s\n", s) + } + totalStructs += len(service.Structs) + } + } + + fmt.Println("") + fmt.Println(GRAY + "======================================================================" + RESET) + fmt.Printf(" %sTotal Services:%s %d\n", B_PURPLE, RESET, len(services)) + fmt.Printf(" %sServices with Annotations:%s %d\n", B_PURPLE, RESET, servicesWithAnnotations) + fmt.Printf(" %sTotal Endpoints:%s %d\n", B_PURPLE, RESET, totalEndpoints) + fmt.Printf(" %sTotal Structs:%s %d\n", B_PURPLE, RESET, totalStructs) + fmt.Println(GRAY + "======================================================================" + RESET) +} + +// generateSwagger generates swagger documentation +func (ctx *SwaggerContext) generateSwagger(logger *Logger) error { + logger.Info("Generating Swagger documentation...") + + // Build swag command + args := []string{"init"} + args = append(args, "-g", MAIN_PATH) + args = append(args, "-o", DOCS_DIR) + args = append(args, "--outputTypes", OUTPUT_TYPES) + + if ctx.Config.Verbose { + args = append(args, "-v") + } + + logger.Debug("Running: swag %s", strings.Join(args, " ")) + + cmd := exec.Command("swag", args...) 
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("swagger generation failed: %w", err) + } + + logger.Success("Swagger documentation generated") + return nil +} + +// verifyOutput verifies the generated files +func (ctx *SwaggerContext) verifyOutput(logger *Logger) error { + logger.Info("Verifying generated files...") + + expectedFiles := []string{ + "docs.go", + "swagger.json", + "swagger.yaml", + } + + for _, file := range expectedFiles { + filePath := filepath.Join(ctx.DocsDir, file) + if _, err := os.Stat(filePath); err != nil { + return fmt.Errorf("expected file not found: %s", file) + } + logger.Success("Found: %s", file) + } + + return nil +} + +// printBanner prints the application banner +func printBanner() { + fmt.Println("") + fmt.Println(" " + P_PURPLE + " /\\ " + RESET) + fmt.Println(" " + P_PURPLE + "( )" + RESET + " " + B_PURPLE + "Swagger Generator" + RESET + " " + GRAY + "for" + RESET + " " + B_WHITE + "Stackyard" + RESET) + fmt.Println(" " + P_PURPLE + " \\/ " + RESET) + fmt.Println(GRAY + "----------------------------------------------------------------------" + RESET) +} + +// printSuccess prints the success message +func printSuccess(docsDir string) { + fmt.Println("") + fmt.Println(GRAY + "======================================================================" + RESET) + fmt.Println(" " + B_PURPLE + "SUCCESS!" + RESET + " " + P_GREEN + "Swagger docs at:" + RESET + " " + UNDERLINE + B_WHITE + docsDir + RESET) + fmt.Println(GRAY + "======================================================================" + RESET) + fmt.Println("") + fmt.Println(" " + P_CYAN + "Generated files:" + RESET) + fmt.Println(" • docs/docs.go") + fmt.Println(" • docs/swagger.json") + fmt.Println(" • docs/swagger.yaml") + fmt.Println("") + fmt.Println(" " + P_CYAN + "Next steps:" + RESET) + fmt.Println(" 1. Add echo-swagger middleware to your server") + fmt.Println(" 2. 
Access Swagger UI at /swagger/index.html") + fmt.Println("") +} + +// setupSignalHandler sets up graceful shutdown on interrupt +func setupSignalHandler(cancel context.CancelFunc) { + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + + go func() { + <-sigChan + fmt.Println("\nReceived interrupt signal. Exiting...") + cancel() + os.Exit(1) + }() +} + +// askUserForConfirmation asks user to confirm before generation +func (ctx *SwaggerContext) askUserForConfirmation(logger *Logger) error { + if ctx.Config.DryRun { + logger.Info("Dry run mode - skipping generation") + return nil + } + + fmt.Printf("%sProceed with generation? (Y/n, timeout 10s): %s", B_YELLOW, RESET) + + inputChan := make(chan string, 1) + + go func() { + var choice string + fmt.Scanln(&choice) + inputChan <- choice + }() + + select { + case choice := <-inputChan: + if strings.ToLower(choice) == "n" || strings.ToLower(choice) == "no" { + logger.Info("Generation cancelled by user") + os.Exit(0) + } + logger.Success("Proceeding with generation") + case <-time.After(10 * time.Second): + logger.Info("Timeout reached. 
Proceeding with generation") + } + + return nil +} + +// main function +func main() { + ClearScreen() + + // Parse command line flags + var ( + verbose = flag.Bool("verbose", false, "Enable verbose logging") + dryRun = flag.Bool("dry-run", false, "Only analyze, don't generate") + ) + flag.Parse() + + // Initialize logger + logger := NewLogger(*verbose) + + // Print banner + printBanner() + + // Get project directory + projectDir, err := os.Getwd() + if err != nil { + logger.Error("Failed to get current directory: %v", err) + os.Exit(1) + } + + // Create swagger context + ctx := &SwaggerContext{ + Config: SwaggerConfig{ + GeneralInfo: true, + ScanServices: true, + Verbose: *verbose, + DryRun: *dryRun, + }, + ProjectDir: projectDir, + } + + // Create context with cancellation for graceful shutdown + _, cancel := context.WithCancel(context.Background()) + setupSignalHandler(cancel) + + // Execute swagger generation steps + steps := []struct { + name string + fn func(*Logger) error + }{ + {"Finding project root", ctx.ensureProjectRoot}, + {"Checking swag CLI", ctx.checkSwagInstalled}, + {"Analyzing API endpoints", func(l *Logger) error { + services, err := ctx.analyzeAPIEndpoints(l) + if err != nil { + return err + } + ctx.displayAnalysis(services, l) + return nil + }}, + {"Asking for confirmation", ctx.askUserForConfirmation}, + {"Generating swagger docs", ctx.generateSwagger}, + {"Verifying output", ctx.verifyOutput}, + } + + for i, step := range steps { + stepNum := fmt.Sprintf("%d/%d", i+1, len(steps)) + fmt.Printf("%s[%s]%s %s%s%s\n", B_PURPLE, stepNum, RESET, P_CYAN, step.name, RESET) + + if err := step.fn(logger); err != nil { + logger.Error("Step failed: %v", err) + os.Exit(1) + } + } + + // Print success message + if !ctx.Config.DryRun { + printSuccess(ctx.DocsDir) + } +} From 32ad40afe8926158599db34690d72656357d5739 Mon Sep 17 00:00:00 2001 From: "Gab." 
Date: Sat, 28 Mar 2026 21:01:16 +0700 Subject: [PATCH 15/18] refactor: service generator --- scripts/service/service.go | 686 +++++++++++++++++++++++++++++++++++++ scripts/service/structure | 79 +++++ 2 files changed, 765 insertions(+) create mode 100644 scripts/service/service.go create mode 100644 scripts/service/structure diff --git a/scripts/service/service.go b/scripts/service/service.go new file mode 100644 index 0000000..87fb04f --- /dev/null +++ b/scripts/service/service.go @@ -0,0 +1,686 @@ +package main + +import ( + "context" + "flag" + "fmt" + "os" + "os/exec" + "os/signal" + "path/filepath" + "runtime" + "sort" + "strings" + "syscall" + "time" +) + +// Configuration variables +var ( + SERVICES_DIR = "internal/services/modules" + MODULE_NAME = "stackyard" + STRUCTURE_DIR = "scripts/service" +) + +// ANSI Colors +const ( + RESET = "\033[0m" + BOLD = "\033[1m" + DIM = "\033[2m" + UNDERLINE = "\033[4m" + + // Pastel Palette + P_PURPLE = "\033[38;5;108m" + B_PURPLE = "\033[1;38;5;108m" + P_CYAN = "\033[38;5;117m" + B_CYAN = "\033[1;38;5;117m" + P_GREEN = "\033[38;5;108m" + B_GREEN = "\033[1;38;5;108m" + P_YELLOW = "\033[93m" + B_YELLOW = "\033[1;93m" + P_RED = "\033[91m" + B_RED = "\033[1;91m" + GRAY = "\033[38;5;242m" + WHITE = "\033[97m" + B_WHITE = "\033[1;97m" +) + +// Available dependencies +type Dependency struct { + Name string + Package string + Type string + Description string +} + +var AVAILABLE_DEPENDENCIES = []Dependency{ + { + Name: "PostgresManager", + Package: "stackyard/pkg/infrastructure", + Type: "*infrastructure.PostgresManager", + Description: "PostgreSQL database connection manager", + }, + { + Name: "PostgresConnectionManager", + Package: "stackyard/pkg/infrastructure", + Type: "*infrastructure.PostgresConnectionManager", + Description: "Multi-tenant PostgreSQL connection manager", + }, + { + Name: "MongoConnectionManager", + Package: "stackyard/pkg/infrastructure", + Type: "*infrastructure.MongoConnectionManager", + Description: 
"Multi-tenant MongoDB connection manager", + }, + { + Name: "RedisManager", + Package: "stackyard/pkg/infrastructure", + Type: "*infrastructure.RedisManager", + Description: "Redis cache manager", + }, + { + Name: "KafkaManager", + Package: "stackyard/pkg/infrastructure", + Type: "*infrastructure.KafkaManager", + Description: "Kafka message queue manager", + }, + { + Name: "MinIOManager", + Package: "stackyard/pkg/infrastructure", + Type: "*infrastructure.MinIOManager", + Description: "MinIO object storage manager", + }, + { + Name: "GrafanaManager", + Package: "stackyard/pkg/infrastructure", + Type: "*infrastructure.GrafanaManager", + Description: "Grafana monitoring dashboard manager", + }, + { + Name: "CronManager", + Package: "stackyard/pkg/infrastructure", + Type: "*infrastructure.CronManager", + Description: "Cron job scheduler manager", + }, +} + +// Service configuration +type ServiceConfig struct { + ServiceName string + WireName string + FileName string + Dependencies []Dependency + HasDependencies bool + Verbose bool + DryRun bool +} + +// ServiceContext holds the generation state +type ServiceContext struct { + Config ServiceConfig + ProjectDir string + ServicesDir string + StructureDir string +} + +// Logger for structured output +type Logger struct { + verbose bool +} + +func (l *Logger) Info(msg string, args ...interface{}) { + fmt.Printf("%s[INFO]%s %s\n", B_CYAN, RESET, fmt.Sprintf(msg, args...)) +} + +func (l *Logger) Warn(msg string, args ...interface{}) { + fmt.Printf("%s[WARN]%s %s\n", B_YELLOW, RESET, fmt.Sprintf(msg, args...)) +} + +func (l *Logger) Error(msg string, args ...interface{}) { + fmt.Printf("%s[ERROR]%s %s\n", B_RED, RESET, fmt.Sprintf(msg, args...)) +} + +func (l *Logger) Debug(msg string, args ...interface{}) { + if l.verbose { + fmt.Printf("%s[DEBUG]%s %s\n", GRAY, RESET, fmt.Sprintf(msg, args...)) + } +} + +func (l *Logger) Success(msg string, args ...interface{}) { + fmt.Printf("%s[SUCCESS]%s %s\n", B_GREEN, RESET, 
fmt.Sprintf(msg, args...)) +} + +func (l *Logger) Prompt(msg string, args ...interface{}) { + fmt.Printf("%s[PROMPT]%s %s", B_YELLOW, RESET, fmt.Sprintf(msg, args...)) +} + +// NewLogger creates a new logger +func NewLogger(verbose bool) *Logger { + return &Logger{verbose: verbose} +} + +// clear console screen +func ClearScreen() { + var cmd *exec.Cmd + + switch runtime.GOOS { + case "windows": + cmd = exec.Command("cmd", "/c", "cls") + default: + cmd = exec.Command("clear") + } + + cmd.Stdout = os.Stdout + cmd.Run() +} + +// findProjectRoot searches up the directory tree for go.mod +func findProjectRoot(startDir string) (string, error) { + current := startDir + + for { + goModPath := filepath.Join(current, "go.mod") + if _, err := os.Stat(goModPath); err == nil { + return current, nil + } + + parent := filepath.Dir(current) + if parent == current { + break + } + current = parent + } + + return "", fmt.Errorf("go.mod not found in directory tree") +} + +// ensureProjectRoot finds the project root and changes to it +func (ctx *ServiceContext) ensureProjectRoot(logger *Logger) error { + currentDir, err := os.Getwd() + if err != nil { + return fmt.Errorf("failed to get current directory: %w", err) + } + + logger.Info("Starting from: %s", currentDir) + + projectRoot, err := findProjectRoot(currentDir) + if err != nil { + return fmt.Errorf("failed to find project root: %w", err) + } + + if projectRoot != currentDir { + logger.Info("Changing to project root: %s", projectRoot) + if err := os.Chdir(projectRoot); err != nil { + return fmt.Errorf("failed to change directory to %s: %w", projectRoot, err) + } + + ctx.ProjectDir = projectRoot + ctx.ServicesDir = filepath.Join(projectRoot, SERVICES_DIR) + ctx.StructureDir = filepath.Join(projectRoot, STRUCTURE_DIR) + + logger.Success("Now in project root") + } else { + logger.Info("Already in project root") + ctx.ProjectDir = projectRoot + ctx.ServicesDir = filepath.Join(projectRoot, SERVICES_DIR) + ctx.StructureDir = 
filepath.Join(projectRoot, STRUCTURE_DIR) + } + + return nil +} + +// promptServiceName prompts for the service name +func (ctx *ServiceContext) promptServiceName(logger *Logger) error { + logger.Prompt("Enter service name (e.g., Orders, Inventory): ") + + var serviceName string + fmt.Scanln(&serviceName) + + if serviceName == "" { + return fmt.Errorf("service name cannot be empty") + } + + // Capitalize first letter + serviceName = strings.ToUpper(serviceName[:1]) + serviceName[1:] + ctx.Config.ServiceName = serviceName + + logger.Success("Service name: %s", serviceName) + return nil +} + +// promptWireName prompts for the wire name +func (ctx *ServiceContext) promptWireName(logger *Logger) error { + // Generate default wire name from service name + defaultWireName := strings.ToLower(ctx.Config.ServiceName) + "-service" + logger.Prompt("Enter wire name (default: %s): ", defaultWireName) + + var wireName string + fmt.Scanln(&wireName) + + if wireName == "" { + wireName = defaultWireName + } + + ctx.Config.WireName = wireName + + logger.Success("Wire name: %s", wireName) + return nil +} + +// promptFileName prompts for the file name +func (ctx *ServiceContext) promptFileName(logger *Logger) error { + // Generate default file name from service name + defaultFileName := strings.ToLower(ctx.Config.ServiceName) + "_service.go" + logger.Prompt("Enter file name (default: %s): ", defaultFileName) + + var fileName string + fmt.Scanln(&fileName) + + if fileName == "" { + fileName = defaultFileName + } + + // Ensure .go extension + if !strings.HasSuffix(fileName, ".go") { + fileName += ".go" + } + + ctx.Config.FileName = fileName + + logger.Success("File name: %s", fileName) + return nil +} + +// promptDependencies prompts for dependencies with selection +func (ctx *ServiceContext) promptDependencies(logger *Logger) error { + logger.Info("Available dependencies:") + fmt.Println("") + + for i, dep := range AVAILABLE_DEPENDENCIES { + fmt.Printf(" %s[%d]%s %s%s%s - %s\n", 
B_CYAN, i+1, RESET, B_WHITE, dep.Name, RESET, dep.Description) + } + + fmt.Printf("\n %s[0]%s %sNone%s - No dependencies\n", B_CYAN, RESET, B_WHITE, RESET) + fmt.Println("") + + logger.Prompt("Enter dependency numbers (comma-separated, e.g., 1,3,5): ") + + var input string + fmt.Scanln(&input) + + if input == "" || input == "0" { + logger.Success("No dependencies selected") + return nil + } + + // Parse selected dependencies + selectedIndices := strings.Split(input, ",") + for _, idxStr := range selectedIndices { + idxStr = strings.TrimSpace(idxStr) + var idx int + if _, err := fmt.Sscanf(idxStr, "%d", &idx); err != nil { + logger.Warn("Invalid index: %s, skipping", idxStr) + continue + } + + if idx < 1 || idx > len(AVAILABLE_DEPENDENCIES) { + logger.Warn("Index out of range: %d, skipping", idx) + continue + } + + dep := AVAILABLE_DEPENDENCIES[idx-1] + ctx.Config.Dependencies = append(ctx.Config.Dependencies, dep) + logger.Success("Selected: %s", dep.Name) + } + + ctx.Config.HasDependencies = len(ctx.Config.Dependencies) > 0 + + return nil +} + +// displayConfiguration displays the service configuration +func (ctx *ServiceContext) displayConfiguration(logger *Logger) { + fmt.Println("") + fmt.Println(GRAY + "======================================================================" + RESET) + fmt.Println(" " + B_PURPLE + "SERVICE CONFIGURATION" + RESET) + fmt.Println(GRAY + "======================================================================" + RESET) + + fmt.Printf(" %sService Name:%s %s\n", B_CYAN, RESET, ctx.Config.ServiceName) + fmt.Printf(" %sWire Name:%s %s\n", B_CYAN, RESET, ctx.Config.WireName) + fmt.Printf(" %sFile Name:%s %s\n", B_CYAN, RESET, ctx.Config.FileName) + fmt.Printf(" %sFile Path:%s %s\n", B_CYAN, RESET, filepath.Join(ctx.ServicesDir, ctx.Config.FileName)) + + if len(ctx.Config.Dependencies) > 0 { + fmt.Printf("\n %sDependencies:%s\n", B_CYAN, RESET) + for _, dep := range ctx.Config.Dependencies { + fmt.Printf(" • %s - %s\n", dep.Name, 
dep.Type) + } + } else { + fmt.Printf("\n %sDependencies:%s None\n", B_CYAN, RESET) + } + + fmt.Println(GRAY + "======================================================================" + RESET) +} + +// askUserForConfirmation asks user to confirm before generation +func (ctx *ServiceContext) askUserForConfirmation(logger *Logger) error { + if ctx.Config.DryRun { + logger.Info("Dry run mode - skipping generation") + return nil + } + + fmt.Printf("%sProceed with generation? (Y/n, timeout 10s): %s", B_YELLOW, RESET) + + inputChan := make(chan string, 1) + + go func() { + var choice string + fmt.Scanln(&choice) + inputChan <- choice + }() + + select { + case choice := <-inputChan: + if strings.ToLower(choice) == "n" || strings.ToLower(choice) == "no" { + logger.Info("Generation cancelled by user") + os.Exit(0) + } + logger.Success("Proceeding with generation") + case <-time.After(10 * time.Second): + logger.Info("Timeout reached. Proceeding with generation") + } + + return nil +} + +// readStructureTemplate reads the structure template file +func (ctx *ServiceContext) readStructureTemplate(logger *Logger) (string, error) { + structurePath := filepath.Join(ctx.StructureDir, "structure") + content, err := os.ReadFile(structurePath) + if err != nil { + return "", fmt.Errorf("failed to read structure template: %w", err) + } + return string(content), nil +} + +// buildImports builds the import statements +func (ctx *ServiceContext) buildImports() string { + if !ctx.Config.HasDependencies { + return "" + } + + // Use a map to deduplicate imports + importMap := make(map[string]bool) + for _, dep := range ctx.Config.Dependencies { + importMap[dep.Package] = true + } + + // Convert map keys to slice + var imports []string + for pkg := range importMap { + imports = append(imports, fmt.Sprintf(` "%s"`, pkg)) + } + + // Sort for consistent output + sort.Strings(imports) + + return strings.Join(imports, "\n") +} + +// buildFields builds the struct fields +func (ctx *ServiceContext) 
buildFields() string { + if !ctx.Config.HasDependencies { + return "" + } + + var fields []string + for _, dep := range ctx.Config.Dependencies { + fieldName := strings.ToLower(dep.Name[:1]) + dep.Name[1:] + fields = append(fields, fmt.Sprintf("\t%s %s", fieldName, dep.Type)) + } + + return strings.Join(fields, "\n") +} + +// buildParams builds the constructor parameters +func (ctx *ServiceContext) buildParams() string { + if !ctx.Config.HasDependencies { + return "" + } + + var params []string + for _, dep := range ctx.Config.Dependencies { + fieldName := strings.ToLower(dep.Name[:1]) + dep.Name[1:] + params = append(params, fmt.Sprintf("\t%s %s,", fieldName, dep.Type)) + } + + return strings.Join(params, "\n") +} + +// buildAssignments builds the constructor assignments +func (ctx *ServiceContext) buildAssignments() string { + if !ctx.Config.HasDependencies { + return "" + } + + var assignments []string + for _, dep := range ctx.Config.Dependencies { + fieldName := strings.ToLower(dep.Name[:1]) + dep.Name[1:] + assignments = append(assignments, fmt.Sprintf("\t\t%s: %s,", fieldName, fieldName)) + } + + return strings.Join(assignments, "\n") +} + +// buildInitFunction builds the init function for auto-registration +func (ctx *ServiceContext) buildInitFunction() string { + configKey := strings.ToLower(ctx.Config.ServiceName) + "_service" + + var dependencyChecks strings.Builder + var dependencyParams strings.Builder + + if ctx.Config.HasDependencies { + dependencyChecks.WriteString(` if deps == nil { + logger.Warn("Dependencies not available, skipping Service") + return nil + } + +`) + + for _, dep := range ctx.Config.Dependencies { + dependencyChecks.WriteString(fmt.Sprintf(` if deps.%s == nil { + logger.Warn("%s not available, skipping Service") + return nil + } + +`, dep.Name, dep.Name)) + + dependencyParams.WriteString(fmt.Sprintf(", deps.%s", dep.Name)) + } + } + + return fmt.Sprintf(`// Auto-registration function - called when package is imported +func init() 
{ + registry.RegisterService("%s", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + if !config.Services.IsEnabled("%s") { + return nil + } +%s return New%s(true%s, logger) + }) +}`, configKey, configKey, dependencyChecks.String(), ctx.Config.ServiceName, dependencyParams.String()) +} + +// generateService generates the service Go file +func (ctx *ServiceContext) generateService(logger *Logger) error { + logger.Info("Generating service file...") + + // Read the structure template + template, err := ctx.readStructureTemplate(logger) + if err != nil { + return err + } + + // Build replacement values + imports := ctx.buildImports() + fields := ctx.buildFields() + params := ctx.buildParams() + assignments := ctx.buildAssignments() + initFunction := ctx.buildInitFunction() + serviceNameLower := strings.ToLower(ctx.Config.ServiceName) + + // Replace placeholders + content := template + content = strings.ReplaceAll(content, "{{SERVICE_NAME}}", ctx.Config.ServiceName) + content = strings.ReplaceAll(content, "{{SERVICE_NAME_LOWER}}", serviceNameLower) + content = strings.ReplaceAll(content, "{{WIRE_NAME}}", ctx.Config.WireName) + content = strings.ReplaceAll(content, "{{IMPORTS}}", imports) + content = strings.ReplaceAll(content, "{{FIELDS}}", fields) + content = strings.ReplaceAll(content, "{{PARAMS}}", params) + content = strings.ReplaceAll(content, "{{ASSIGNMENTS}}", assignments) + content = strings.ReplaceAll(content, "{{INIT_FUNCTION}}", initFunction) + + // Clean up extra newlines + content = strings.ReplaceAll(content, "\n\n\n", "\n\n") + + // Write the file + filePath := filepath.Join(ctx.ServicesDir, ctx.Config.FileName) + if err := os.WriteFile(filePath, []byte(content), 0644); err != nil { + return fmt.Errorf("failed to write file: %w", err) + } + + logger.Success("Service file generated: %s", filePath) + return nil +} + +// displaySummary displays the generation summary +func (ctx *ServiceContext) 
displaySummary(logger *Logger) { + fmt.Println("") + fmt.Println(GRAY + "======================================================================" + RESET) + fmt.Println(" " + B_PURPLE + "GENERATION SUMMARY" + RESET) + fmt.Println(GRAY + "======================================================================" + RESET) + + fmt.Printf(" %s✓%s Service file created: %s\n", B_GREEN, RESET, ctx.Config.FileName) + fmt.Printf(" %s✓%s Service struct: %s\n", B_GREEN, RESET, ctx.Config.ServiceName) + fmt.Printf(" %s✓%s Wire name: %s\n", B_GREEN, RESET, ctx.Config.WireName) + fmt.Printf(" %s✓%s Auto-registration: Enabled\n", B_GREEN, RESET) + + if len(ctx.Config.Dependencies) > 0 { + fmt.Printf(" %s✓%s Dependencies: %d configured\n", B_GREEN, RESET, len(ctx.Config.Dependencies)) + } + + fmt.Println("") + fmt.Println(" " + P_CYAN + "Next steps:" + RESET) + fmt.Println(" 1. Add service to config.yaml:") + fmt.Printf(" services:\n %s: true\n", strings.ToLower(ctx.Config.ServiceName)+"_service") + fmt.Println("") + fmt.Println(" 2. Implement business logic in handler methods") + fmt.Println(" 3. Add swagger annotations for API documentation") + fmt.Println(" 4. Test the service endpoints") + fmt.Println("") + fmt.Println(GRAY + "======================================================================" + RESET) +} + +// setupSignalHandler sets up graceful shutdown on interrupt +func setupSignalHandler(cancel context.CancelFunc) { + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + + go func() { + <-sigChan + fmt.Println("\nReceived interrupt signal. 
Exiting...") + cancel() + os.Exit(1) + }() +} + +// printBanner prints the application banner +func printBanner() { + fmt.Println("") + fmt.Println(" " + P_PURPLE + " /\\ " + RESET) + fmt.Println(" " + P_PURPLE + "( )" + RESET + " " + B_PURPLE + "Service Generator" + RESET + " " + GRAY + "for" + RESET + " " + B_WHITE + "Stackyard" + RESET) + fmt.Println(" " + P_PURPLE + " \\/ " + RESET) + fmt.Println(GRAY + "----------------------------------------------------------------------" + RESET) +} + +// printSuccess prints the success message +func printSuccess(fileName string) { + fmt.Println("") + fmt.Println(GRAY + "======================================================================" + RESET) + fmt.Println(" " + B_PURPLE + "SUCCESS!" + RESET + " " + P_GREEN + "Service generated:" + RESET + " " + UNDERLINE + B_WHITE + fileName + RESET) + fmt.Println(GRAY + "======================================================================" + RESET) +} + +// main function +func main() { + ClearScreen() + + // Parse command line flags + var ( + verbose = flag.Bool("verbose", false, "Enable verbose logging") + dryRun = flag.Bool("dry-run", false, "Only analyze, don't generate") + ) + flag.Parse() + + // Initialize logger + logger := NewLogger(*verbose) + + // Print banner + printBanner() + + // Get project directory + projectDir, err := os.Getwd() + if err != nil { + logger.Error("Failed to get current directory: %v", err) + os.Exit(1) + } + + // Create service context + ctx := &ServiceContext{ + Config: ServiceConfig{ + Verbose: *verbose, + DryRun: *dryRun, + }, + ProjectDir: projectDir, + } + + // Create context with cancellation for graceful shutdown + _, cancel := context.WithCancel(context.Background()) + setupSignalHandler(cancel) + + // Execute service generation steps + steps := []struct { + name string + fn func(*Logger) error + }{ + {"Finding project root", ctx.ensureProjectRoot}, + {"Prompting for service name", ctx.promptServiceName}, + {"Prompting for wire name", 
ctx.promptWireName}, + {"Prompting for file name", ctx.promptFileName}, + {"Prompting for dependencies", ctx.promptDependencies}, + {"Displaying configuration", func(l *Logger) error { + ctx.displayConfiguration(l) + return nil + }}, + {"Asking for confirmation", ctx.askUserForConfirmation}, + {"Generating service file", ctx.generateService}, + {"Displaying summary", func(l *Logger) error { + ctx.displaySummary(l) + return nil + }}, + } + + for i, step := range steps { + stepNum := fmt.Sprintf("%d/%d", i+1, len(steps)) + fmt.Printf("%s[%s]%s %s%s%s\n", B_PURPLE, stepNum, RESET, P_CYAN, step.name, RESET) + + if err := step.fn(logger); err != nil { + logger.Error("Step failed: %v", err) + os.Exit(1) + } + } + + // Print success message + if !ctx.Config.DryRun { + printSuccess(ctx.Config.FileName) + } +} diff --git a/scripts/service/structure b/scripts/service/structure new file mode 100644 index 0000000..5aee6d5 --- /dev/null +++ b/scripts/service/structure @@ -0,0 +1,79 @@ +package modules + +import ( + "stackyard/config" + "stackyard/pkg/interfaces" + "stackyard/pkg/logger" + "stackyard/pkg/registry" + "stackyard/pkg/response" + + "github.com/labstack/echo/v4" +{{IMPORTS}} +) + +type {{SERVICE_NAME}} struct { + enabled bool +{{FIELDS}} + logger *logger.Logger +} + +func New{{SERVICE_NAME}}( + enabled bool, +{{PARAMS}} + logger *logger.Logger, +) *{{SERVICE_NAME}} { + return &{{SERVICE_NAME}}{ + enabled: enabled, +{{ASSIGNMENTS}} + logger: logger, + } +} + +func (s *{{SERVICE_NAME}}) Name() string { return "{{SERVICE_NAME}} Service" } +func (s *{{SERVICE_NAME}}) WireName() string { return "{{WIRE_NAME}}" } +func (s *{{SERVICE_NAME}}) Enabled() bool { return s.enabled } +func (s *{{SERVICE_NAME}}) Get() interface{} { return s } +func (s *{{SERVICE_NAME}}) Endpoints() []string { + return []string{"/{{SERVICE_NAME_LOWER}}"} +} + +// RegisterRoutes registers the service routes +func (s *{{SERVICE_NAME}}) RegisterRoutes(g *echo.Group) { + sub := 
g.Group("/{{SERVICE_NAME_LOWER}}") + sub.GET("", s.listHandler) + sub.POST("", s.createHandler) + sub.GET("/:id", s.getHandler) + sub.PUT("/:id", s.updateHandler) + sub.DELETE("/:id", s.deleteHandler) +} + +// Handler methods (implement your business logic here) + +func (s *{{SERVICE_NAME}}) listHandler(c echo.Context) error { + // TODO: Implement list logic + return response.Success(c, []interface{}{}, "List endpoint") +} + +func (s *{{SERVICE_NAME}}) createHandler(c echo.Context) error { + // TODO: Implement create logic + return response.Created(c, nil, "Create endpoint") +} + +func (s *{{SERVICE_NAME}}) getHandler(c echo.Context) error { + // TODO: Implement get logic + id := c.Param("id") + return response.Success(c, map[string]string{"id": id}, "Get endpoint") +} + +func (s *{{SERVICE_NAME}}) updateHandler(c echo.Context) error { + // TODO: Implement update logic + id := c.Param("id") + return response.Success(c, map[string]string{"id": id}, "Update endpoint") +} + +func (s *{{SERVICE_NAME}}) deleteHandler(c echo.Context) error { + // TODO: Implement delete logic + return response.NoContent(c) +} + +{{INIT_FUNCTION}} \ No newline at end of file From 8af93b3bf4074317f8f2f85af532ebcef12486e1 Mon Sep 17 00:00:00 2001 From: "Gab." 
Date: Sat, 28 Mar 2026 22:14:50 +0700 Subject: [PATCH 16/18] feat(service-generator): add optional test file generation --- docs_wiki/blueprint/blueprint.txt | 54 +++++++++++++++++++++++ scripts/service/service.go | 72 +++++++++++++++++++++++++++++++ scripts/service/structure_test | 38 ++++++++++++++++ 3 files changed, 164 insertions(+) create mode 100644 scripts/service/structure_test diff --git a/docs_wiki/blueprint/blueprint.txt b/docs_wiki/blueprint/blueprint.txt index 3e6f020..f8832cb 100644 --- a/docs_wiki/blueprint/blueprint.txt +++ b/docs_wiki/blueprint/blueprint.txt @@ -819,6 +819,60 @@ Components are shut down in reverse order to ensure dependencies are handled cor The project includes comprehensive build scripts for automated compilation, backup management, and deployment across multiple platforms. +### 9.1.1 Service Generator (`scripts/service/service.go`) + +**Purpose**: Interactive service scaffolding tool that generates boilerplate service code and optional test files. + +**Features:** +- **Interactive Prompts**: Guides through service name, wire name, file name, and dependency selection +- **Dependency Selection**: Choose from 8 available infrastructure dependencies +- **Test Generation**: Optional test file generation with configurable prompt (default: no) +- **Template-Based**: Uses separate template files for service and test code +- **Import Deduplication**: Automatically deduplicates imports when multiple dependencies share the same package +- **Auto-Registration**: Generates init() function for automatic service registration + +**Available Dependencies:** +1. PostgresManager - PostgreSQL database connection manager +2. PostgresConnectionManager - Multi-tenant PostgreSQL connection manager +3. MongoConnectionManager - Multi-tenant MongoDB connection manager +4. RedisManager - Redis cache manager +5. KafkaManager - Kafka message queue manager +6. MinIOManager - MinIO object storage manager +7. 
GrafanaManager - Grafana monitoring dashboard manager +8. CronManager - Cron job scheduler manager + +**Template Files:** +- `scripts/service/structure` - Service code template +- `scripts/service/structure_test` - Test code template + +**Usage:** +```bash +# Interactive mode +go run scripts/service/service.go + +# Dry run (analyze only) +go run scripts/service/service.go --dry-run + +# Verbose output +go run scripts/service/service.go --verbose +``` + +**Generated Output:** +- Service file: `internal/services/modules/{service_name}_service.go` +- Test file: `tests/services/{service_name}_service_test.go` (optional) + +**Service Template Includes:** +- Service struct with enabled flag and dependency fields +- Constructor with dependency injection +- Interface implementation (Name, WireName, Enabled, Endpoints, Get, RegisterRoutes) +- Handler methods (list, create, get, update, delete) +- Auto-registration init function with dependency checks + +**Test Template Includes:** +- Service creation tests +- Disabled service behavior tests +- Endpoint listing tests + #### 9.1.1 Go Build Script (`scripts/build.go`) **Version**: Go-based build system (replaces shell scripts) diff --git a/scripts/service/service.go b/scripts/service/service.go index 87fb04f..c30f5bc 100644 --- a/scripts/service/service.go +++ b/scripts/service/service.go @@ -20,6 +20,7 @@ var ( SERVICES_DIR = "internal/services/modules" MODULE_NAME = "stackyard" STRUCTURE_DIR = "scripts/service" + TESTS_DIR = "tests/services" ) // ANSI Colors @@ -111,6 +112,7 @@ type ServiceConfig struct { FileName string Dependencies []Dependency HasDependencies bool + GenerateTests bool Verbose bool DryRun bool } @@ -121,6 +123,7 @@ type ServiceContext struct { ProjectDir string ServicesDir string StructureDir string + TestsDir string } // Logger for structured output @@ -338,6 +341,73 @@ func (ctx *ServiceContext) promptDependencies(logger *Logger) error { return nil } +// promptGenerateTests prompts for test file 
generation +func (ctx *ServiceContext) promptGenerateTests(logger *Logger) error { + logger.Prompt("Generate test file? (y/N, default: N): ") + + var input string + fmt.Scanln(&input) + + if strings.ToLower(input) == "y" || strings.ToLower(input) == "yes" { + ctx.Config.GenerateTests = true + logger.Success("Test file will be generated") + } else { + ctx.Config.GenerateTests = false + logger.Info("Skipping test file generation") + } + + return nil +} + +// buildConstructorArgs builds the constructor arguments for tests +func (ctx *ServiceContext) buildConstructorArgs() string { + if !ctx.Config.HasDependencies { + return ", nil" + } + + var args []string + for range ctx.Config.Dependencies { + args = append(args, "nil") + } + + return ", " + strings.Join(args, ", ") + ", nil" +} + +// generateTestFile generates the test file +func (ctx *ServiceContext) generateTestFile(logger *Logger) error { + if !ctx.Config.GenerateTests { + return nil + } + + logger.Info("Generating test file...") + + // Read the test structure template + structurePath := filepath.Join(ctx.StructureDir, "structure_test") + template, err := os.ReadFile(structurePath) + if err != nil { + return fmt.Errorf("failed to read test structure template: %w", err) + } + + content := string(template) + content = strings.ReplaceAll(content, "{{SERVICE_NAME}}", ctx.Config.ServiceName) + content = strings.ReplaceAll(content, "{{SERVICE_NAME_LOWER}}", strings.ToLower(ctx.Config.ServiceName)) + content = strings.ReplaceAll(content, "{{WIRE_NAME}}", ctx.Config.WireName) + content = strings.ReplaceAll(content, "{{CONSTRUCTOR_ARGS}}", ctx.buildConstructorArgs()) + + // Clean up extra newlines + content = strings.ReplaceAll(content, "\n\n\n", "\n\n") + + // Write the file + testFileName := strings.ToLower(ctx.Config.ServiceName) + "_service_test.go" + filePath := filepath.Join(ctx.ProjectDir, TESTS_DIR, testFileName) + if err := os.WriteFile(filePath, []byte(content), 0644); err != nil { + return fmt.Errorf("failed 
to write test file: %w", err) + } + + logger.Success("Test file generated: %s", testFileName) + return nil +} + // displayConfiguration displays the service configuration func (ctx *ServiceContext) displayConfiguration(logger *Logger) { fmt.Println("") @@ -657,12 +727,14 @@ func main() { {"Prompting for wire name", ctx.promptWireName}, {"Prompting for file name", ctx.promptFileName}, {"Prompting for dependencies", ctx.promptDependencies}, + {"Prompting for test generation", ctx.promptGenerateTests}, {"Displaying configuration", func(l *Logger) error { ctx.displayConfiguration(l) return nil }}, {"Asking for confirmation", ctx.askUserForConfirmation}, {"Generating service file", ctx.generateService}, + {"Generating test file", ctx.generateTestFile}, {"Displaying summary", func(l *Logger) error { ctx.displaySummary(l) return nil diff --git a/scripts/service/structure_test b/scripts/service/structure_test new file mode 100644 index 0000000..ccf2c0d --- /dev/null +++ b/scripts/service/structure_test @@ -0,0 +1,38 @@ +package services_test + +import ( + "testing" + + "stackyard/internal/services/modules" +) + +func TestNew{{SERVICE_NAME}}(t *testing.T) { + service := modules.New{{SERVICE_NAME}}(true{{CONSTRUCTOR_ARGS}}) + if service == nil { + t.Fatal("expected service to be created") + } + if !service.Enabled() { + t.Error("expected service to be enabled") + } + if service.Name() != "{{SERVICE_NAME}} Service" { + t.Errorf("expected name '{{SERVICE_NAME}} Service', got %q", service.Name()) + } + if service.WireName() != "{{WIRE_NAME}}" { + t.Errorf("expected wire name '{{WIRE_NAME}}', got %q", service.WireName()) + } +} + +func Test{{SERVICE_NAME}}Disabled(t *testing.T) { + service := modules.New{{SERVICE_NAME}}(false{{CONSTRUCTOR_ARGS}}) + if service.Enabled() { + t.Error("expected service to be disabled") + } +} + +func Test{{SERVICE_NAME}}Endpoints(t *testing.T) { + service := modules.New{{SERVICE_NAME}}(true{{CONSTRUCTOR_ARGS}}) + endpoints := service.Endpoints() + 
if len(endpoints) == 0 { + t.Error("expected at least one endpoint") + } +} From bc481a9bcc897346095ac8d506e531b69eb558f6 Mon Sep 17 00:00:00 2001 From: "Gab." Date: Sun, 29 Mar 2026 05:39:20 +0700 Subject: [PATCH 17/18] feat(minio): Integrate MinIO as top-level service and update configuration --- config.yaml | 26 +++++++++++++------------- config/config.go | 1 + internal/server/server.go | 13 +++++++++++-- pkg/registry/dependencies.go | 3 +++ resource_windows_386.syso | Bin 708 -> 716 bytes resource_windows_amd64.syso | Bin 708 -> 716 bytes resource_windows_arm.syso | Bin 708 -> 716 bytes resource_windows_arm64.syso | Bin 708 -> 716 bytes 8 files changed, 28 insertions(+), 15 deletions(-) diff --git a/config.yaml b/config.yaml index db2cf09..dd159a9 100644 --- a/config.yaml +++ b/config.yaml @@ -90,20 +90,20 @@ monitoring: max_photo_size_mb: 2 upload_dir: "web/monitoring/uploads" - minio: - enabled: true - endpoint: "localhost:9003" - access_key_id: "minioadmin" - secret_access_key: "minioadmin" - use_ssl: false - bucket_name: "main" +minio: + enabled: true + endpoint: "localhost:9003" + access_key_id: "minioadmin" + secret_access_key: "minioadmin" + use_ssl: false + bucket_name: "main" - external: - services: - - name: "Google" - url: "https://google.com" - - name: "Local API" - url: "http://localhost:8080/health" +external: + services: + - name: "Google" + url: "https://google.com" + - name: "Local API" + url: "http://localhost:8080/health" cron: enabled: true diff --git a/config/config.go b/config/config.go index 6dc211b..8244f64 100644 --- a/config/config.go +++ b/config/config.go @@ -44,6 +44,7 @@ type Config struct { Grafana GrafanaConfig `mapstructure:"grafana"` Monitoring MonitoringConfig `mapstructure:"monitoring"` Cron CronConfig `mapstructure:"cron"` + MinIO MinIOConfig `mapstructure:"minio"` Encryption EncryptionConfig `mapstructure:"encryption"` } diff --git a/internal/server/server.go b/internal/server/server.go index 67017e0..ff91da1 100644 --- 
a/internal/server/server.go +++ b/internal/server/server.go @@ -77,11 +77,19 @@ func (s *Server) Start() error { s.infraInitManager = infrastructure.NewInfraInitManager(s.logger) s.logger.Info("Starting async infrastructure initialization...") - redisManager, kafkaManager, _, postgresConnectionManager, mongoConnectionManager, grafanaManager, cronManager := + redisManager, kafkaManager, minIOManager, postgresConnectionManager, mongoConnectionManager, grafanaManager, cronManager := s.infraInitManager.StartAsyncInitialization(s.config, s.logger) s.dependencies = registry.NewDependencies( - redisManager, kafkaManager, nil, postgresConnectionManager, nil, mongoConnectionManager, grafanaManager, cronManager, + redisManager, + kafkaManager, + nil, + postgresConnectionManager, + nil, + mongoConnectionManager, + grafanaManager, + cronManager, + minIOManager, ) s.setConnectionDefaults(postgresConnectionManager, mongoConnectionManager) @@ -197,6 +205,7 @@ func (s *Server) GetStatus() map[string]interface{} { "mongo": checkEnabled(s.config.Mongo.Enabled || s.config.MongoMultiConfig.Enabled, s.dependencies.MongoManager), "grafana": checkEnabled(s.config.Grafana.Enabled, s.dependencies.GrafanaManager), "cron": checkEnabled(s.config.Cron.Enabled, s.dependencies.CronManager), + "minio": checkEnabled(s.config.MinIO.Enabled, s.dependencies.MinIOManager), } return map[string]interface{}{ diff --git a/pkg/registry/dependencies.go b/pkg/registry/dependencies.go index 19df0dc..e7eeb7f 100644 --- a/pkg/registry/dependencies.go +++ b/pkg/registry/dependencies.go @@ -14,6 +14,7 @@ type Dependencies struct { MongoConnectionManager *infrastructure.MongoConnectionManager GrafanaManager *infrastructure.GrafanaManager CronManager *infrastructure.CronManager + MinIOManager *infrastructure.MinIOManager } // NewDependencies creates a new dependencies container @@ -26,6 +27,7 @@ func NewDependencies( mongoConnectionManager *infrastructure.MongoConnectionManager, grafanaManager 
*infrastructure.GrafanaManager, cronManager *infrastructure.CronManager, + minIOManager *infrastructure.MinIOManager, ) *Dependencies { return &Dependencies{ RedisManager: redisManager, @@ -36,5 +38,6 @@ func NewDependencies( MongoConnectionManager: mongoConnectionManager, GrafanaManager: grafanaManager, CronManager: cronManager, + MinIOManager: minIOManager, } } diff --git a/resource_windows_386.syso b/resource_windows_386.syso index e2465ab7f33c557bc7d63a507774f2f090ab5b1a..32d4f33992fa93a1aa2f712099e56a8f960b5708 100644 GIT binary patch delta 75 zcmX@YdWMzLhmny11h!4&l;$j8VqmZVa@I`LjpmgAa$rDW;?!uyiisbU8AB$EGDb7| UFbGX9W>lSgg;8kpB1R=f01@F2kN^Mx delta 81 zcmX@ZdW4nJhmny11lCRDl;+G}VqmZVa+XZgjph{qa$rDU;?!uyf{7oM8GR;;GDb7I aFz`(-W>n>LVklrxU~p&1-@KAhnGpb0d=F~? diff --git a/resource_windows_amd64.syso b/resource_windows_amd64.syso index 5b7261b22f858d7bb3bff0a5f8b2133f5256832b..baf17b6ab8604223c0e84a23aa446a48a0dd0e94 100644 GIT binary patch delta 75 zcmX@YdWMxVrHzpR1h!4&l;$j8VqmZVa@I`LjpmgAa$rDW;?!uyiisbU8AB$EGDb7| UFbGX9W>lSgg;8kpB1R=f06a<%EdT%j delta 81 zcmX@ZdW4lTrHzpR1lCRDl;+G}VqmZVa+XZgjph{qa$rDU;?!uyf{7oM8GR;;GDb7I aFz`(-W>n>LVklrxU~p&1-@KAhnGpbklMo01 diff --git a/resource_windows_arm.syso b/resource_windows_arm.syso index 6e58f6ea9b98ea395ed848a0762577ead25cbcfe..8bfb9c46fc43248176dd17f97d169e325e1a7c66 100644 GIT binary patch delta 75 zcmX@YdWMzr2qPl{2yC0kDa~2H#K2$!LJ%&0o~3Zu~GMT|;}05Y)<2mk;8 delta 81 zcmX@ZdW4np2qPl{2&|jPDb1O|#K2$!K2yC0kDa~2H#K2$!LJ%&0o~3Zu~GMT|;}07d=~Q2+n{ delta 81 zcmX@ZdW4lTWfda>2&|jPDb1O|#K2$!K Date: Mon, 30 Mar 2026 19:52:28 +0700 Subject: [PATCH 18/18] refactor(infrastructure): introduce component registry pattern Implement unified infrastructure component management with automatic registration. Each manager (Kafka, MinIO, HTTP) now implements InfrastructureComponent interface with Name(), Close(), and GetStatus() methods. Move external services configuration under monitoring section for better organization. 
Update HTTP manager's GetStatus to return structured map instead of slice. Add comprehensive documentation for registry pattern and service helper utilities. --- config.yaml | 13 +- docs_wiki/blueprint/blueprint.txt | 101 +++++++ internal/monitoring/handlers.go | 8 +- internal/server/server.go | 67 ++++- internal/services/modules/grafana_service.go | 12 +- internal/services/modules/mongodb_service.go | 12 +- .../services/modules/multi_tenant_service.go | 12 +- internal/services/modules/tasks_service.go | 13 +- pkg/infrastructure/async_init.go | 247 ++---------------- pkg/infrastructure/component.go | 21 ++ pkg/infrastructure/cron_manager.go | 35 +++ pkg/infrastructure/grafana.go | 14 + pkg/infrastructure/http_monitor.go | 24 +- pkg/infrastructure/kafka.go | 14 + pkg/infrastructure/minio.go | 15 ++ pkg/infrastructure/mongo.go | 31 ++- pkg/infrastructure/postgres.go | 32 ++- pkg/infrastructure/redis.go | 15 ++ pkg/infrastructure/registry.go | 96 +++++++ pkg/infrastructure/system_monitor.go | 25 ++ pkg/registry/service_helper.go | 109 ++++++++ scripts/service/service.go | 65 +++-- 22 files changed, 698 insertions(+), 283 deletions(-) create mode 100644 pkg/infrastructure/component.go create mode 100644 pkg/infrastructure/registry.go create mode 100644 pkg/registry/service_helper.go diff --git a/config.yaml b/config.yaml index dd159a9..d09c100 100644 --- a/config.yaml +++ b/config.yaml @@ -89,6 +89,12 @@ monitoring: subtitle: "Monitoring Dashboard" max_photo_size_mb: 2 upload_dir: "web/monitoring/uploads" + external: + services: + - name: "Google" + url: "https://google.com" + - name: "Local API" + url: "http://localhost:8080/health" minio: enabled: true @@ -98,13 +104,6 @@ minio: use_ssl: false bucket_name: "main" -external: - services: - - name: "Google" - url: "https://google.com" - - name: "Local API" - url: "http://localhost:8080/health" - cron: enabled: true jobs: diff --git a/docs_wiki/blueprint/blueprint.txt b/docs_wiki/blueprint/blueprint.txt index 
f8832cb..a703166 100644 --- a/docs_wiki/blueprint/blueprint.txt +++ b/docs_wiki/blueprint/blueprint.txt @@ -584,6 +584,107 @@ type GrafanaService struct { The application implements a comprehensive async infrastructure system that ensures all database operations, caching, message queuing, and file operations run asynchronously to avoid blocking the main application thread. This implementation uses Go's goroutines, channels, and worker pools to provide non-blocking operations while maintaining thread safety. +### 8.1.1 Infrastructure Component Registry + +The application uses a dynamic infrastructure component registry that provides a unified interface for managing all infrastructure components. + +**Core Interface:** +```go +type InfrastructureComponent interface { + Name() string + Close() error + GetStatus() map[string]interface{} +} +``` + +**Component Registry:** +```go +type ComponentRegistry struct { + components map[string]InfrastructureComponent + factories map[string]ComponentFactory + mu sync.RWMutex +} +``` + +**Auto-Registration Pattern:** +Each infrastructure manager registers itself via `init()` function: +```go +func init() { + RegisterComponent("redis", func(cfg *config.Config, log *logger.Logger) (InfrastructureComponent, error) { + if !cfg.Redis.Enabled { + return nil, nil + } + return NewRedisClient(cfg.Redis) + }) +} +``` + +**Supported Components:** +- `redis` - Redis cache manager +- `kafka` - Kafka message queue manager +- `postgres` - PostgreSQL connection manager (single or multi-tenant) +- `mongo` - MongoDB connection manager (single or multi-tenant) +- `minio` - MinIO object storage manager +- `grafana` - Grafana monitoring dashboard manager +- `cron` - Cron job scheduler manager +- `system` - System monitoring manager +- `http` - HTTP external service monitor + +### 8.1.2 Service Helper Pattern + +The application provides a `ServiceHelper` utility that simplifies dependency resolution for services. 
+ +**Service Helper:** +```go +type ServiceHelper struct { + config *config.Config + logger *logger.Logger + deps *Dependencies +} + +func NewServiceHelper(config *config.Config, logger *logger.Logger, deps *Dependencies) *ServiceHelper +``` + +**Type-Safe Dependency Getters:** +```go +func (h *ServiceHelper) GetRedis() (*infrastructure.RedisManager, bool) +func (h *ServiceHelper) GetKafka() (*infrastructure.KafkaManager, bool) +func (h *ServiceHelper) GetPostgres() (*infrastructure.PostgresManager, bool) +func (h *ServiceHelper) GetPostgresConnection() (*infrastructure.PostgresConnectionManager, bool) +func (h *ServiceHelper) GetMongo() (*infrastructure.MongoManager, bool) +func (h *ServiceHelper) GetMongoConnection() (*infrastructure.MongoConnectionManager, bool) +func (h *ServiceHelper) GetGrafana() (*infrastructure.GrafanaManager, bool) +func (h *ServiceHelper) GetCron() (*infrastructure.CronManager, bool) +func (h *ServiceHelper) GetMinIO() (*infrastructure.MinIOManager, bool) +``` + +**Service Registration Pattern:** +```go +func init() { + registry.RegisterService("service_name", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + helper := registry.NewServiceHelper(config, logger, deps) + + if !helper.IsServiceEnabled("service_name") { + return nil + } + + dependencyManager, ok := helper.GetDependencyName() + if !helper.RequireDependency("DependencyName", ok) { + return nil + } + + return NewServiceName(dependencyManager, true, logger) + }) +} +``` + +**Benefits:** +- **Type Safety**: Getters return typed managers with explicit errors +- **DRY Principle**: No duplicated nil-checking logic +- **Consistency**: All services follow the same dependency resolution pattern +- **Better Error Messages**: Clear error messages when dependencies unavailable +- **Maintainability**: Single place to change dependency logic + ### 8.2 Simplified Service Registration The service registration system has been completely 
simplified to make adding new services straightforward: diff --git a/internal/monitoring/handlers.go b/internal/monitoring/handlers.go index cd11783..4184a37 100644 --- a/internal/monitoring/handlers.go +++ b/internal/monitoring/handlers.go @@ -169,11 +169,11 @@ func (h *Handler) getStatus(c echo.Context) error { // Handle both single and multiple PostgreSQL connections if h.postgresConnectionManager != nil || (h.config.PostgresMultiConfig.Enabled && len(h.config.PostgresMultiConfig.Connections) > 0) { // For multiple connections, format the status for frontend compatibility - var pgStatus map[string]map[string]interface{} + var pgStatus map[string]interface{} if h.postgresConnectionManager != nil { pgStatus = h.postgresConnectionManager.GetStatus() } else { - pgStatus = make(map[string]map[string]interface{}) + pgStatus = make(map[string]interface{}) } var connectionStatuses = make(map[string]interface{}) @@ -217,11 +217,11 @@ func (h *Handler) getStatus(c echo.Context) error { // Handle both single and multiple MongoDB connections if h.mongoConnectionManager != nil || (h.config.MongoMultiConfig.Enabled && len(h.config.MongoMultiConfig.Connections) > 0) { // For multiple connections, format the status for frontend compatibility - var mongoStatus map[string]map[string]interface{} + var mongoStatus map[string]interface{} if h.mongoConnectionManager != nil { mongoStatus = h.mongoConnectionManager.GetStatus() } else { - mongoStatus = make(map[string]map[string]interface{}) + mongoStatus = make(map[string]interface{}) } var connectionStatuses = make(map[string]interface{}) diff --git a/internal/server/server.go b/internal/server/server.go index ff91da1..461c054 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -77,22 +77,67 @@ func (s *Server) Start() error { s.infraInitManager = infrastructure.NewInfraInitManager(s.logger) s.logger.Info("Starting async infrastructure initialization...") - redisManager, kafkaManager, minIOManager, 
postgresConnectionManager, mongoConnectionManager, grafanaManager, cronManager := - s.infraInitManager.StartAsyncInitialization(s.config, s.logger) + componentRegistry := s.infraInitManager.StartAsyncInitialization(s.config, s.logger) + + // Get components from registry + redisManager, _ := componentRegistry.Get("redis") + kafkaManager, _ := componentRegistry.Get("kafka") + minioManager, _ := componentRegistry.Get("minio") + postgresManager, _ := componentRegistry.Get("postgres") + mongoManager, _ := componentRegistry.Get("mongo") + grafanaManager, _ := componentRegistry.Get("grafana") + cronManager, _ := componentRegistry.Get("cron") + + // Type assert to get the concrete types + var redisMgr *infrastructure.RedisManager + var kafkaMgr *infrastructure.KafkaManager + var minioMgr *infrastructure.MinIOManager + var postgresConnMgr *infrastructure.PostgresConnectionManager + var mongoConnMgr *infrastructure.MongoConnectionManager + var grafanaMgr *infrastructure.GrafanaManager + var cronMgr *infrastructure.CronManager + + if rm, ok := redisManager.(*infrastructure.RedisManager); ok { + redisMgr = rm + } + if km, ok := kafkaManager.(*infrastructure.KafkaManager); ok { + kafkaMgr = km + } + if mm, ok := minioManager.(*infrastructure.MinIOManager); ok { + minioMgr = mm + } + if pm, ok := postgresManager.(*infrastructure.PostgresConnectionManager); ok { + postgresConnMgr = pm + } else if _, ok := postgresManager.(*infrastructure.PostgresManager); ok { + // Handle single connection case + s.logger.Info("PostgreSQL single connection manager detected") + } + if mm, ok := mongoManager.(*infrastructure.MongoConnectionManager); ok { + mongoConnMgr = mm + } else if _, ok := mongoManager.(*infrastructure.MongoManager); ok { + // Handle single connection case + s.logger.Info("MongoDB single connection manager detected") + } + if gm, ok := grafanaManager.(*infrastructure.GrafanaManager); ok { + grafanaMgr = gm + } + if cm, ok := cronManager.(*infrastructure.CronManager); ok { + 
cronMgr = cm + } s.dependencies = registry.NewDependencies( - redisManager, - kafkaManager, + redisMgr, + kafkaMgr, nil, - postgresConnectionManager, + postgresConnMgr, nil, - mongoConnectionManager, - grafanaManager, - cronManager, - minIOManager, + mongoConnMgr, + grafanaMgr, + cronMgr, + minioMgr, ) - s.setConnectionDefaults(postgresConnectionManager, mongoConnectionManager) + s.setConnectionDefaults(postgresConnMgr, mongoConnMgr) s.logger.Info("Initializing Middleware...") middleware.InitMiddlewares(s.echo, middleware.Config{ @@ -124,7 +169,7 @@ func (s *Server) Start() error { if s.config.Monitoring.Enabled { servicesList := s.buildServicesList(serviceRegistry) - go monitoring.Start(s.config.Monitoring, s.config, s, s.broadcaster, redisManager, s.dependencies.PostgresManager, postgresConnectionManager, s.dependencies.MongoManager, mongoConnectionManager, kafkaManager, cronManager, servicesList, s.logger) + go monitoring.Start(s.config.Monitoring, s.config, s, s.broadcaster, redisMgr, s.dependencies.PostgresManager, postgresConnMgr, s.dependencies.MongoManager, mongoConnMgr, kafkaMgr, cronMgr, servicesList, s.logger) s.logger.Info("Monitoring interface started", "port", s.config.Monitoring.Port, "services_count", len(servicesList)) } diff --git a/internal/services/modules/grafana_service.go b/internal/services/modules/grafana_service.go index 4e797a3..914c551 100644 --- a/internal/services/modules/grafana_service.go +++ b/internal/services/modules/grafana_service.go @@ -290,13 +290,17 @@ func (s *GrafanaService) getHealth(c echo.Context) error { // Auto-registration function - called when package is imported func init() { registry.RegisterService("grafana_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { - if !config.Services.IsEnabled("grafana_service") { + helper := registry.NewServiceHelper(config, logger, deps) + + if !helper.IsServiceEnabled("grafana_service") { return nil } - if deps == nil || 
deps.GrafanaManager == nil { - logger.Warn("Grafana manager not available, skipping Grafana Service") + + grafanaManager, ok := helper.GetGrafana() + if !helper.RequireDependency("GrafanaManager", ok) { return nil } - return NewGrafanaService(deps.GrafanaManager, true, logger) + + return NewGrafanaService(grafanaManager, true, logger) }) } diff --git a/internal/services/modules/mongodb_service.go b/internal/services/modules/mongodb_service.go index 0848c24..e1b933c 100644 --- a/internal/services/modules/mongodb_service.go +++ b/internal/services/modules/mongodb_service.go @@ -469,13 +469,17 @@ func (s *MongoDBService) getProductAnalytics(c echo.Context) error { // Auto-registration function - called when package is imported func init() { registry.RegisterService("mongodb_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { - if !config.Services.IsEnabled("mongodb_service") { + helper := registry.NewServiceHelper(config, logger, deps) + + if !helper.IsServiceEnabled("mongodb_service") { return nil } - if deps == nil || deps.MongoConnectionManager == nil { - logger.Warn("MongoDB connections not available, skipping MongoDB Service") + + mongoConnectionManager, ok := helper.GetMongoConnection() + if !helper.RequireDependency("MongoConnectionManager", ok) { return nil } - return NewMongoDBService(deps.MongoConnectionManager, true, logger) + + return NewMongoDBService(mongoConnectionManager, true, logger) }) } diff --git a/internal/services/modules/multi_tenant_service.go b/internal/services/modules/multi_tenant_service.go index 490fec7..54ea913 100644 --- a/internal/services/modules/multi_tenant_service.go +++ b/internal/services/modules/multi_tenant_service.go @@ -302,13 +302,17 @@ func (s *MultiTenantService) deleteOrder(c echo.Context) error { // Auto-registration function - called when package is imported func init() { registry.RegisterService("multi_tenant_service", func(config *config.Config, logger 
*logger.Logger, deps *registry.Dependencies) interfaces.Service { - if !config.Services.IsEnabled("multi_tenant_service") { + helper := registry.NewServiceHelper(config, logger, deps) + + if !helper.IsServiceEnabled("multi_tenant_service") { return nil } - if deps == nil || deps.PostgresConnectionManager == nil { - logger.Warn("PostgreSQL connections not available, skipping Multi-Tenant Service") + + postgresConnectionManager, ok := helper.GetPostgresConnection() + if !helper.RequireDependency("PostgresConnectionManager", ok) { return nil } - return NewMultiTenantService(deps.PostgresConnectionManager, true, logger) + + return NewMultiTenantService(postgresConnectionManager, true, logger) }) } diff --git a/internal/services/modules/tasks_service.go b/internal/services/modules/tasks_service.go index f944ac1..be8e136 100644 --- a/internal/services/modules/tasks_service.go +++ b/internal/services/modules/tasks_service.go @@ -182,6 +182,17 @@ func (s *TasksService) deleteTask(c echo.Context) error { // Auto-registration function - called when package is imported func init() { registry.RegisterService("tasks_service", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { - return NewTasksService(deps.PostgresManager, config.Services.IsEnabled("tasks_service"), logger) + helper := registry.NewServiceHelper(config, logger, deps) + + if !helper.IsServiceEnabled("tasks_service") { + return nil + } + + postgresManager, ok := helper.GetPostgres() + if !helper.RequireDependency("PostgresManager", ok) { + return nil + } + + return NewTasksService(postgresManager, true, logger) }) } diff --git a/pkg/infrastructure/async_init.go b/pkg/infrastructure/async_init.go index fdbfd83..28c8ab9 100644 --- a/pkg/infrastructure/async_init.go +++ b/pkg/infrastructure/async_init.go @@ -36,248 +36,43 @@ func NewInfraInitManager(logger *logger.Logger) *InfraInitManager { } // StartAsyncInitialization begins asynchronous initialization of all 
infrastructure components -func (im *InfraInitManager) StartAsyncInitialization(cfg *config.Config, logger *logger.Logger) ( - *RedisManager, - *KafkaManager, - *MinIOManager, - *PostgresConnectionManager, - *MongoConnectionManager, - *GrafanaManager, - *CronManager, -) { - var ( - redisManager *RedisManager - kafkaManager *KafkaManager - minioManager *MinIOManager - postgresConnectionManager *PostgresConnectionManager - mongoConnectionManager *MongoConnectionManager - grafanaManager *GrafanaManager - cronManager *CronManager - ) +func (im *InfraInitManager) StartAsyncInitialization(cfg *config.Config, logger *logger.Logger) *ComponentRegistry { + registry := GetGlobalRegistry() - // Initialize components synchronously to avoid race conditions - // Only the connection testing/health checks are done asynchronously - - // Redis - if cfg.Redis.Enabled { - rdb, err := NewRedisClient(cfg.Redis) - if err != nil { - logger.Error("Failed to initialize Redis", err) - } else { - redisManager = rdb - logger.Info("Redis initialized") - } - } - - // Kafka - if cfg.Kafka.Enabled { - km, err := NewKafkaManager(cfg.Kafka, logger) - if err != nil { - logger.Error("Failed to initialize Kafka", err) - } else { - kafkaManager = km - logger.Info("Kafka initialized") - } - } - - // MinIO - if cfg.Monitoring.MinIO.Endpoint != "" { - minio, err := NewMinIOManager(cfg.Monitoring.MinIO) - if err != nil { - logger.Error("Failed to initialize MinIO", err) - } else { - minioManager = minio - logger.Info("MinIO initialized") - } - } - - // PostgreSQL - if cfg.Postgres.Enabled || cfg.PostgresMultiConfig.Enabled { - if cfg.PostgresMultiConfig.Enabled && len(cfg.PostgresMultiConfig.Connections) > 0 { - connManager, err := NewPostgresConnectionManager(cfg.PostgresMultiConfig) - if err != nil { - logger.Error("Failed to initialize PostgreSQL connections", err) - } else { - postgresConnectionManager = connManager - logger.Info("PostgreSQL connections initialized") - } - } else if cfg.Postgres.Enabled 
{ - connManager, err := NewPostgresConnectionManager(config.PostgresMultiConfig{ - Enabled: true, - Connections: []config.PostgresConnectionConfig{ - { - Name: "default", - Enabled: true, - Host: cfg.Postgres.Host, - Port: cfg.Postgres.Port, - User: cfg.Postgres.User, - Password: cfg.Postgres.Password, - DBName: cfg.Postgres.DBName, - SSLMode: cfg.Postgres.SSLMode, - }, - }, - }) - if err != nil { - logger.Error("Failed to initialize PostgreSQL", err) - } else { - postgresConnectionManager = connManager - logger.Info("PostgreSQL initialized (single connection)") - } - } - } - - // MongoDB - if cfg.Mongo.Enabled || cfg.MongoMultiConfig.Enabled { - if cfg.MongoMultiConfig.Enabled && len(cfg.MongoMultiConfig.Connections) > 0 { - connManager, err := NewMongoConnectionManager(cfg.MongoMultiConfig, logger) - if err != nil { - logger.Error("Failed to initialize MongoDB connections", err) - } else { - mongoConnectionManager = connManager - logger.Info("MongoDB connections initialized") - } - } else if cfg.Mongo.Enabled { - connManager, err := NewMongoConnectionManager(config.MongoMultiConfig{ - Enabled: true, - Connections: []config.MongoConnectionConfig{ - { - Name: "default", - Enabled: true, - URI: cfg.Mongo.URI, - Database: cfg.Mongo.Database, - }, - }, - }, logger) - if err != nil { - logger.Error("Failed to initialize MongoDB", err) - } else { - mongoConnectionManager = connManager - logger.Info("MongoDB initialized (single connection)") - } - } - } - - // Grafana - if cfg.Grafana.Enabled { - gm, err := NewGrafanaManager(cfg.Grafana, logger) - if err != nil { - logger.Error("Failed to initialize Grafana", err) - } else { - grafanaManager = gm - logger.Info("Grafana initialized") - } - } - - // Cron (initialize synchronously with jobs) - if cfg.Cron.Enabled { - cronManager = NewCronManager() - - // Add cron jobs synchronously - for name, schedule := range cfg.Cron.Jobs { - jobName := name - jobSchedule := schedule - _, err := cronManager.AddAsyncJob(jobName, 
jobSchedule, func() { - logger.Info("Executing Cron Job (Async)", "job", jobName) - }) - if err != nil { - logger.Error("Failed to schedule cron job", err, "job", jobName) - } else { - logger.Info("Cron job scheduled", "job", jobName, "schedule", jobSchedule) - } - } - - cronManager.Start() - logger.Info("Cron jobs initialized with async execution") + // Initialize all registered components + if err := registry.Initialize(cfg, logger); err != nil { + logger.Error("Failed to initialize infrastructure components", err) } // Start async health checks and monitoring (non-blocking) - components := []struct { - name string - check func() - }{ - { - name: "redis", - check: func() { - if redisManager != nil { - // Redis manager already performs health checks in GetStatus() - status := redisManager.GetStatus() - if connected, ok := status["connected"].(bool); ok && connected { - logger.Debug("Redis health check passed") - } else { - logger.Warn("Redis health check failed") - } - } - }, - }, - { - name: "kafka", - check: func() { - if kafkaManager != nil { - // Kafka manager handles its own async health checks - logger.Debug("Kafka health monitoring active") - } - }, - }, - { - name: "minio", - check: func() { - if minioManager != nil { - // MinIO async health checks if needed - logger.Debug("MinIO health monitoring active") - } - }, - }, - { - name: "postgres", - check: func() { - if postgresConnectionManager != nil { - // Connection manager handles health checks internally - logger.Debug("PostgreSQL health monitoring active") - } - }, - }, - { - name: "mongodb", - check: func() { - if mongoConnectionManager != nil { - // Connection manager handles health checks internally - logger.Debug("MongoDB health monitoring active") - } - }, - }, - { - name: "cron", - check: func() { - if cronManager != nil { - // Cron manager is already initialized and running - logger.Debug("Cron jobs active", "count", len(cronManager.GetJobs())) - } - }, - }, - } - - // Start health monitoring 
asynchronously - for _, comp := range components { - comp := comp // Capture loop variable - go func(name string, checkFn func()) { + components := registry.GetAll() + for name, component := range components { + name := name + component := component + go func(compName string, comp InfrastructureComponent) { // Update status to initialized - im.updateStatus(name, &InfraInitStatus{ - Name: name, + im.updateStatus(compName, &InfraInitStatus{ + Name: compName, Initialized: true, StartTime: time.Now(), Duration: time.Since(time.Now()), // Minimal duration Progress: 1.0, }) - // Perform ongoing health checks - checkFn() - }(comp.name, comp.check) + // Perform health check + status := comp.GetStatus() + if connected, ok := status["connected"].(bool); ok && connected { + logger.Debug(compName + " health check passed") + } else { + logger.Warn(compName + " health check failed or not applicable") + } + }(name, component) } // Signal that all synchronous initialization is complete close(im.doneChan) - return redisManager, kafkaManager, minioManager, postgresConnectionManager, mongoConnectionManager, grafanaManager, cronManager + return registry } // updateStatus updates the initialization status of a component diff --git a/pkg/infrastructure/component.go b/pkg/infrastructure/component.go new file mode 100644 index 0000000..5c11b67 --- /dev/null +++ b/pkg/infrastructure/component.go @@ -0,0 +1,21 @@ +package infrastructure + +import ( +"stackyard/config" +"stackyard/pkg/logger" +) + +// InfrastructureComponent defines the interface that all infrastructure managers must implement +type InfrastructureComponent interface { + // Name returns the display name of the component + Name() string + + // Close gracefully shuts down the component + Close() error + + // GetStatus returns the current status of the component + GetStatus() map[string]interface{} +} + +// ComponentFactory is a function that creates an infrastructure component +type ComponentFactory func(cfg *config.Config, 
logger *logger.Logger) (InfrastructureComponent, error) diff --git a/pkg/infrastructure/cron_manager.go b/pkg/infrastructure/cron_manager.go index ad6459e..5be88ca 100644 --- a/pkg/infrastructure/cron_manager.go +++ b/pkg/infrastructure/cron_manager.go @@ -2,6 +2,8 @@ package infrastructure import ( "fmt" + "stackyard/config" + "stackyard/pkg/logger" "sync" "time" @@ -24,6 +26,11 @@ type CronManager struct { pool *WorkerPool // Worker pool for async job execution } +// Name returns the display name of the component +func (c *CronManager) Name() string { + return "Cron Scheduler" +} + func NewCronManager() *CronManager { // Initialize worker pool for async job execution pool := NewWorkerPool(5) // Small pool for cron jobs @@ -246,3 +253,31 @@ func (c *CronManager) Close() error { } return nil } + +func init() { + RegisterComponent("cron", func(cfg *config.Config, l *logger.Logger) (InfrastructureComponent, error) { + if !cfg.Cron.Enabled { + return nil, nil + } + cronManager := NewCronManager() + + // Add configured cron jobs + for name, schedule := range cfg.Cron.Jobs { + jobName := name + jobSchedule := schedule + _, err := cronManager.AddAsyncJob(jobName, jobSchedule, func() { + l.Info("Executing Cron Job", "job", jobName) + }) + if err != nil { + l.Error("Failed to schedule cron job", err, "job", jobName) + } else { + l.Info("Cron job scheduled", "job", jobName, "schedule", jobSchedule) + } + } + + cronManager.Start() + l.Info("Cron jobs initialized with async execution") + + return cronManager, nil + }) +} diff --git a/pkg/infrastructure/grafana.go b/pkg/infrastructure/grafana.go index 822e2e5..23398cf 100644 --- a/pkg/infrastructure/grafana.go +++ b/pkg/infrastructure/grafana.go @@ -173,6 +173,11 @@ type GrafanaAnnotation struct { Data map[string]interface{} `json:"data,omitempty"` } +// Name returns the display name of the component +func (gm *GrafanaManager) Name() string { + return "Grafana" +} + // NewGrafanaManager creates a new Grafana manager func 
NewGrafanaManager(cfg config.GrafanaConfig, logger *logger.Logger) (*GrafanaManager, error) { if !cfg.Enabled { @@ -662,3 +667,12 @@ func (gm *GrafanaManager) Close() error { } return nil } + +func init() { + RegisterComponent("grafana", func(cfg *config.Config, l *logger.Logger) (InfrastructureComponent, error) { + if !cfg.Grafana.Enabled { + return nil, nil + } + return NewGrafanaManager(cfg.Grafana, l) + }) +} diff --git a/pkg/infrastructure/http_monitor.go b/pkg/infrastructure/http_monitor.go index 6efe130..1943d6f 100644 --- a/pkg/infrastructure/http_monitor.go +++ b/pkg/infrastructure/http_monitor.go @@ -3,6 +3,7 @@ package infrastructure import ( "net/http" "stackyard/config" + "stackyard/pkg/logger" "time" ) @@ -11,6 +12,11 @@ type HttpManager struct { Client *http.Client } +// Name returns the display name of the component +func (h *HttpManager) Name() string { + return "HTTP Monitor" +} + func NewHttpManager(cfg config.ExternalConfig) *HttpManager { return &HttpManager{ Services: cfg.Services, @@ -20,7 +26,7 @@ func NewHttpManager(cfg config.ExternalConfig) *HttpManager { } } -func (h *HttpManager) GetStatus() []map[string]interface{} { +func (h *HttpManager) GetStatus() map[string]interface{} { results := []map[string]interface{}{} for _, svc := range h.Services { @@ -49,5 +55,19 @@ func (h *HttpManager) GetStatus() []map[string]interface{} { }) } - return results + return map[string]interface{}{ + "services": results, + } +} + +// Close closes the HTTP monitor client +func (h *HttpManager) Close() error { + h.Client.CloseIdleConnections() + return nil +} + +func init() { + RegisterComponent("http", func(cfg *config.Config, log *logger.Logger) (InfrastructureComponent, error) { + return NewHttpManager(cfg.Monitoring.External), nil + }) } diff --git a/pkg/infrastructure/kafka.go b/pkg/infrastructure/kafka.go index 8a78a98..fba69d4 100644 --- a/pkg/infrastructure/kafka.go +++ b/pkg/infrastructure/kafka.go @@ -17,6 +17,11 @@ type KafkaManager struct { Pool 
*WorkerPool // Async worker pool } +// Name returns the display name of the component +func (k *KafkaManager) Name() string { + return "Kafka" +} + func NewKafkaManager(cfg config.KafkaConfig, logger *logger.Logger) (*KafkaManager, error) { if !cfg.Enabled { return nil, nil @@ -211,3 +216,12 @@ func (k *KafkaManager) Close() error { } return nil } + +func init() { + RegisterComponent("kafka", func(cfg *config.Config, log *logger.Logger) (InfrastructureComponent, error) { + if !cfg.Kafka.Enabled { + return nil, nil + } + return NewKafkaManager(cfg.Kafka, log) + }) +} diff --git a/pkg/infrastructure/minio.go b/pkg/infrastructure/minio.go index 36862a0..466cbdb 100644 --- a/pkg/infrastructure/minio.go +++ b/pkg/infrastructure/minio.go @@ -4,6 +4,7 @@ import ( "context" "io" "stackyard/config" + "stackyard/pkg/logger" "time" "github.com/minio/minio-go/v7" @@ -17,6 +18,11 @@ type MinIOManager struct { Pool *WorkerPool // Async worker pool } +// Name returns the display name of the component +func (m *MinIOManager) Name() string { + return "MinIO" +} + func NewMinIOManager(cfg config.MinIOConfig) (*MinIOManager, error) { if !cfg.Enabled || cfg.Endpoint == "" { return &MinIOManager{Connected: false}, nil @@ -225,3 +231,12 @@ func (m *MinIOManager) Close() error { } return nil } + +func init() { + RegisterComponent("minio", func(cfg *config.Config, l *logger.Logger) (InfrastructureComponent, error) { + if !cfg.Monitoring.MinIO.Enabled { + return nil, nil + } + return NewMinIOManager(cfg.Monitoring.MinIO) + }) +} diff --git a/pkg/infrastructure/mongo.go b/pkg/infrastructure/mongo.go index 92dd0b2..8e8af6d 100644 --- a/pkg/infrastructure/mongo.go +++ b/pkg/infrastructure/mongo.go @@ -22,11 +22,21 @@ type MongoManager struct { Pool *WorkerPool // Async worker pool } +// Name returns the display name of the component +func (m *MongoManager) Name() string { + return "MongoDB" +} + type MongoConnectionManager struct { connections map[string]*MongoManager mu sync.RWMutex } +// 
Name returns the display name of the component +func (m *MongoConnectionManager) Name() string { + return "MongoDB Connection Manager" +} + func NewMongoDB(cfg config.MongoConfig, l *logger.Logger) (*MongoManager, error) { if !cfg.Enabled { return nil, nil @@ -153,10 +163,10 @@ func (m *MongoConnectionManager) GetAllConnections() map[string]*MongoManager { } // GetStatus returns status for all connections -func (m *MongoConnectionManager) GetStatus() map[string]map[string]interface{} { +func (m *MongoConnectionManager) GetStatus() map[string]interface{} { m.mu.RLock() defer m.mu.RUnlock() - status := make(map[string]map[string]interface{}) + status := make(map[string]interface{}) for name, conn := range m.connections { status[name] = conn.GetStatus() @@ -165,6 +175,11 @@ func (m *MongoConnectionManager) GetStatus() map[string]map[string]interface{} { return status } +// Close closes all connections (implements InfrastructureComponent) +func (m *MongoConnectionManager) Close() error { + return m.CloseAll() +} + // CloseAll closes all connections func (m *MongoConnectionManager) CloseAll() error { m.mu.Lock() @@ -553,3 +568,15 @@ func (m *MongoManager) Close() error { } return nil } + +func init() { + RegisterComponent("mongo", func(cfg *config.Config, log *logger.Logger) (InfrastructureComponent, error) { + if !cfg.Mongo.Enabled && !cfg.MongoMultiConfig.Enabled { + return nil, nil + } + if cfg.MongoMultiConfig.Enabled { + return NewMongoConnectionManager(cfg.MongoMultiConfig, log) + } + return NewMongoDB(cfg.Mongo, log) + }) +} diff --git a/pkg/infrastructure/postgres.go b/pkg/infrastructure/postgres.go index c723efe..1745441 100644 --- a/pkg/infrastructure/postgres.go +++ b/pkg/infrastructure/postgres.go @@ -5,6 +5,7 @@ import ( "database/sql" "fmt" "stackyard/config" + "stackyard/pkg/logger" "sync" _ "github.com/jackc/pgx/v5/stdlib" @@ -23,6 +24,16 @@ type PostgresConnectionManager struct { mu sync.RWMutex } +// Name returns the display name of the component +func 
(p *PostgresManager) Name() string { + return "PostgreSQL" +} + +// Name returns the display name of the component +func (m *PostgresConnectionManager) Name() string { + return "PostgreSQL Connection Manager" +} + func NewPostgresDB(cfg config.PostgresConfig) (*PostgresManager, error) { if !cfg.Enabled { return nil, nil @@ -131,10 +142,10 @@ func (m *PostgresConnectionManager) GetAllConnections() map[string]*PostgresMana } // GetStatus returns status for all connections -func (m *PostgresConnectionManager) GetStatus() map[string]map[string]interface{} { +func (m *PostgresConnectionManager) GetStatus() map[string]interface{} { m.mu.RLock() defer m.mu.RUnlock() - status := make(map[string]map[string]interface{}) + status := make(map[string]interface{}) for name, conn := range m.connections { status[name] = conn.GetStatus() @@ -143,6 +154,11 @@ func (m *PostgresConnectionManager) GetStatus() map[string]map[string]interface{ return status } +// Close closes all connections (implements InfrastructureComponent) +func (m *PostgresConnectionManager) Close() error { + return m.CloseAll() +} + // CloseAll closes all connections func (m *PostgresConnectionManager) CloseAll() error { m.mu.Lock() @@ -528,3 +544,15 @@ func (p *PostgresManager) Close() error { } return nil } + +func init() { + RegisterComponent("postgres", func(cfg *config.Config, log *logger.Logger) (InfrastructureComponent, error) { + if !cfg.Postgres.Enabled && !cfg.PostgresMultiConfig.Enabled { + return nil, nil + } + if cfg.PostgresMultiConfig.Enabled { + return NewPostgresConnectionManager(cfg.PostgresMultiConfig) + } + return NewPostgresDB(cfg.Postgres) + }) +} diff --git a/pkg/infrastructure/redis.go b/pkg/infrastructure/redis.go index 809a882..c63efc7 100644 --- a/pkg/infrastructure/redis.go +++ b/pkg/infrastructure/redis.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "stackyard/config" + "stackyard/pkg/logger" "time" "github.com/redis/go-redis/v9" @@ -14,6 +15,11 @@ type RedisManager struct { Pool 
*WorkerPool // Async worker pool } +// Name returns the display name of the component +func (r *RedisManager) Name() string { + return "Redis" +} + func NewRedisClient(cfg config.RedisConfig) (*RedisManager, error) { if !cfg.Enabled { return nil, nil @@ -232,3 +238,12 @@ func (r *RedisManager) Close() error { } return nil } + +func init() { + RegisterComponent("redis", func(cfg *config.Config, log *logger.Logger) (InfrastructureComponent, error) { + if !cfg.Redis.Enabled { + return nil, nil + } + return NewRedisClient(cfg.Redis) + }) +} diff --git a/pkg/infrastructure/registry.go b/pkg/infrastructure/registry.go new file mode 100644 index 0000000..b4fc7ac --- /dev/null +++ b/pkg/infrastructure/registry.go @@ -0,0 +1,96 @@ +package infrastructure + +import ( + "fmt" + "stackyard/config" + "stackyard/pkg/logger" + "sync" +) + +// ComponentRegistry manages all infrastructure components +type ComponentRegistry struct { + components map[string]InfrastructureComponent + factories map[string]ComponentFactory + mu sync.RWMutex +} + +// Global registry instance +var ( + globalRegistry *ComponentRegistry + registryOnce sync.Once +) + +// GetGlobalRegistry returns the singleton registry instance +func GetGlobalRegistry() *ComponentRegistry { + registryOnce.Do(func() { + globalRegistry = &ComponentRegistry{ + components: make(map[string]InfrastructureComponent), + factories: make(map[string]ComponentFactory), + } + }) + return globalRegistry +} + +// RegisterComponent registers a component factory with the global registry +func RegisterComponent(name string, factory ComponentFactory) { + GetGlobalRegistry().Register(name, factory) +} + +// Register registers a component factory +func (r *ComponentRegistry) Register(name string, factory ComponentFactory) { + r.mu.Lock() + defer r.mu.Unlock() + r.factories[name] = factory +} + +// Initialize initializes all registered components +func (r *ComponentRegistry) Initialize(cfg *config.Config, logger *logger.Logger) error { + 
r.mu.Lock() + defer r.mu.Unlock() + + for name, factory := range r.factories { + component, err := factory(cfg, logger) + if err != nil { + logger.Error("Failed to initialize "+name, err) + continue + } + if component != nil { + r.components[name] = component + logger.Info(name + " initialized") + } + } + return nil +} + +// Get retrieves a component by name +func (r *ComponentRegistry) Get(name string) (InfrastructureComponent, bool) { + r.mu.RLock() + defer r.mu.RUnlock() + component, exists := r.components[name] + return component, exists +} + +// GetAll returns all components +func (r *ComponentRegistry) GetAll() map[string]InfrastructureComponent { + r.mu.RLock() + defer r.mu.RUnlock() + result := make(map[string]InfrastructureComponent) + for k, v := range r.components { + result[k] = v + } + return result +} + +// CloseAll closes all components +func (r *ComponentRegistry) CloseAll() []error { + r.mu.Lock() + defer r.mu.Unlock() + + var errors []error + for name, component := range r.components { + if err := component.Close(); err != nil { + errors = append(errors, fmt.Errorf("%s: %w", name, err)) + } + } + return errors +} diff --git a/pkg/infrastructure/system_monitor.go b/pkg/infrastructure/system_monitor.go index ac3072c..74c6aa2 100644 --- a/pkg/infrastructure/system_monitor.go +++ b/pkg/infrastructure/system_monitor.go @@ -5,6 +5,8 @@ import ( "net" "os" "runtime" + "stackyard/config" + "stackyard/pkg/logger" "time" "github.com/shirou/gopsutil/v3/cpu" @@ -14,6 +16,11 @@ import ( type SystemManager struct{} +// Name returns the display name of the component +func (s *SystemManager) Name() string { + return "System Monitor" +} + func NewSystemManager() *SystemManager { return &SystemManager{} } @@ -82,3 +89,21 @@ func (s *SystemManager) GetHostInfo() map[string]string { "arch": runtime.GOARCH, } } + +func init() { + RegisterComponent("system", func(cfg *config.Config, l *logger.Logger) (InfrastructureComponent, error) { + return NewSystemManager(), nil + 
}) +} + +// Close closes the system monitor (no-op for system monitor) +func (s *SystemManager) Close() error { + return nil +} + +// GetStatus returns the current status of the system monitor +func (s *SystemManager) GetStatus() map[string]interface{} { + return map[string]interface{}{ + "active": true, + } +} diff --git a/pkg/registry/service_helper.go b/pkg/registry/service_helper.go new file mode 100644 index 0000000..f5d07b5 --- /dev/null +++ b/pkg/registry/service_helper.go @@ -0,0 +1,109 @@ +package registry + +import ( + "stackyard/config" + "stackyard/pkg/infrastructure" + "stackyard/pkg/logger" +) + +// ServiceHelper helps services with dependency validation +type ServiceHelper struct { + config *config.Config + logger *logger.Logger + deps *Dependencies +} + +// NewServiceHelper creates a new service helper +func NewServiceHelper(config *config.Config, logger *logger.Logger, deps *Dependencies) *ServiceHelper { + return &ServiceHelper{ + config: config, + logger: logger, + deps: deps, + } +} + +// RequireDependency validates dependency is available +func (h *ServiceHelper) RequireDependency(name string, available bool) bool { + if !available { + h.logger.Warn(name + " not available, skipping service") + return false + } + return true +} + +// IsServiceEnabled checks if service is enabled in config +func (h *ServiceHelper) IsServiceEnabled(serviceName string) bool { + return h.config.Services.IsEnabled(serviceName) +} + +// GetRedis returns Redis manager or error if not available +func (h *ServiceHelper) GetRedis() (*infrastructure.RedisManager, bool) { + if h.deps.RedisManager == nil { + return nil, false + } + return h.deps.RedisManager, true +} + +// GetKafka returns Kafka manager or error if not available +func (h *ServiceHelper) GetKafka() (*infrastructure.KafkaManager, bool) { + if h.deps.KafkaManager == nil { + return nil, false + } + return h.deps.KafkaManager, true +} + +// GetPostgres returns PostgreSQL manager (single connection) or error +func 
(h *ServiceHelper) GetPostgres() (*infrastructure.PostgresManager, bool) { + if h.deps.PostgresManager == nil { + return nil, false + } + return h.deps.PostgresManager, true +} + +// GetPostgresConnection returns PostgreSQL connection manager (multi-tenant) or error +func (h *ServiceHelper) GetPostgresConnection() (*infrastructure.PostgresConnectionManager, bool) { + if h.deps.PostgresConnectionManager == nil { + return nil, false + } + return h.deps.PostgresConnectionManager, true +} + +// GetMongo returns MongoDB manager (single connection) or error +func (h *ServiceHelper) GetMongo() (*infrastructure.MongoManager, bool) { + if h.deps.MongoManager == nil { + return nil, false + } + return h.deps.MongoManager, true +} + +// GetMongoConnection returns MongoDB connection manager (multi-tenant) or error +func (h *ServiceHelper) GetMongoConnection() (*infrastructure.MongoConnectionManager, bool) { + if h.deps.MongoConnectionManager == nil { + return nil, false + } + return h.deps.MongoConnectionManager, true +} + +// GetGrafana returns Grafana manager or error if not available +func (h *ServiceHelper) GetGrafana() (*infrastructure.GrafanaManager, bool) { + if h.deps.GrafanaManager == nil { + return nil, false + } + return h.deps.GrafanaManager, true +} + +// GetCron returns Cron manager or error if not available +func (h *ServiceHelper) GetCron() (*infrastructure.CronManager, bool) { + if h.deps.CronManager == nil { + return nil, false + } + return h.deps.CronManager, true +} + +// GetMinIO returns MinIO manager or error if not available +func (h *ServiceHelper) GetMinIO() (*infrastructure.MinIOManager, bool) { + if h.deps.MinIOManager == nil { + return nil, false + } + return h.deps.MinIOManager, true +} diff --git a/scripts/service/service.go b/scripts/service/service.go index c30f5bc..4d56d40 100644 --- a/scripts/service/service.go +++ b/scripts/service/service.go @@ -550,34 +550,67 @@ func (ctx *ServiceContext) buildInitFunction() string { var dependencyParams 
strings.Builder if ctx.Config.HasDependencies { - dependencyChecks.WriteString(` if deps == nil { - logger.Warn("Dependencies not available, skipping Service") - return nil - } - + dependencyChecks.WriteString(` helper := registry.NewServiceHelper(config, logger, deps) + + if !helper.IsServiceEnabled("` + configKey + `") { + return nil + } + `) for _, dep := range ctx.Config.Dependencies { - dependencyChecks.WriteString(fmt.Sprintf(` if deps.%s == nil { - logger.Warn("%s not available, skipping Service") - return nil - } - -`, dep.Name, dep.Name)) + varName := strings.ToLower(dep.Name[:1]) + dep.Name[1:] + + // Map dependency names to helper method names + var helperMethod string + switch dep.Name { + case "RedisManager": + helperMethod = "GetRedis" + case "KafkaManager": + helperMethod = "GetKafka" + case "PostgresManager": + helperMethod = "GetPostgres" + case "PostgresConnectionManager": + helperMethod = "GetPostgresConnection" + case "MongoManager": + helperMethod = "GetMongo" + case "MongoConnectionManager": + helperMethod = "GetMongoConnection" + case "GrafanaManager": + helperMethod = "GetGrafana" + case "CronManager": + helperMethod = "GetCron" + case "MinIOManager": + helperMethod = "GetMinIO" + default: + helperMethod = "Get" + dep.Name + } + + dependencyChecks.WriteString(fmt.Sprintf(` %s, ok := helper.%s() + if !helper.RequireDependency("%s", ok) { + return nil + } + +`, varName, helperMethod, dep.Name)) - dependencyParams.WriteString(fmt.Sprintf(", deps.%s", dep.Name)) + dependencyParams.WriteString(fmt.Sprintf(", %s", varName)) + } + } else { + dependencyChecks.WriteString(` helper := registry.NewServiceHelper(config, logger, deps) + + if !helper.IsServiceEnabled("` + configKey + `") { + return nil } + +`) } return fmt.Sprintf(`// Auto-registration function - called when package is imported func init() { registry.RegisterService("%s", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { - if 
!config.Services.IsEnabled("%s") { - return nil - } %s return New%s(true%s, logger) }) -}`, configKey, configKey, dependencyChecks.String(), ctx.Config.ServiceName, dependencyParams.String()) +}`, configKey, dependencyChecks.String(), ctx.Config.ServiceName, dependencyParams.String()) } // generateService generates the service Go file