From 2835fd5fb08775a34bbb5fc25bcfd3a72199ba25 Mon Sep 17 00:00:00 2001 From: yusing Date: Sun, 4 Jan 2026 20:30:58 +0800 Subject: [PATCH 01/51] fix(autocert): ensure extra certificate registration and renewal scheduling Extra providers were not being properly initialized during NewProvider(), causing certificate registration and renewal scheduling to be skipped. - Add ConfigExtra type with idx field for provider indexing - Add MergeExtraConfig() for inheriting main provider settings - Add setupExtraProviders() for recursive extra provider initialization - Refactor NewProvider to return error and call setupExtraProviders() - Add provider-scoped logger with "main" or "extra[N]" name - Add batch operations: ObtainCertIfNotExistsAll(), ObtainCertAll() - Add ForceExpiryAll() with completion tracking via WaitRenewalDone() - Add RenewMode (force/ifNeeded) for controlling renewal behavior - Add PrintCertExpiriesAll() for logging all provider certificate expiries Summary of staged changes: - config.go: Added ConfigExtra type, MergeExtraConfig(), recursive validation with path uniqueness checking - provider.go: Added provider indexing, scoped logger, batch cert operations, force renewal with completion tracking, RenewMode control - setup.go: New file with setupExtraProviders() for proper extra provider initialization - setup_test.go: New tests for extra provider setup - multi_cert_test.go: New tests for multi-certificate functionality - renew.go: Updated to use new provider API with error handling - state.go: Updated to handle NewProvider error return --- internal/api/v1/cert/renew.go | 47 +- internal/autocert/config.go | 229 +++--- internal/autocert/config_test.go | 41 +- internal/autocert/paths.go | 1 - internal/autocert/provider.go | 357 ++++++--- .../autocert/provider_test/custom_test.go | 710 ++++++++++-------- .../provider_test/extra_validation_test.go | 32 - .../autocert/provider_test/multi_cert_test.go | 90 +++ internal/autocert/provider_test/sni_test.go | 93 ++- 
internal/autocert/setup.go | 97 +-- internal/autocert/setup_test.go | 82 ++ internal/autocert/types/provider.go | 4 +- internal/config/state.go | 18 +- internal/dnsproviders/dummy.go | 2 +- 14 files changed, 1103 insertions(+), 700 deletions(-) delete mode 100644 internal/autocert/provider_test/extra_validation_test.go create mode 100644 internal/autocert/provider_test/multi_cert_test.go create mode 100644 internal/autocert/setup_test.go diff --git a/internal/api/v1/cert/renew.go b/internal/api/v1/cert/renew.go index 816e458a..d81e2562 100644 --- a/internal/api/v1/cert/renew.go +++ b/internal/api/v1/cert/renew.go @@ -9,7 +9,6 @@ import ( "github.com/yusing/godoxy/internal/autocert" "github.com/yusing/godoxy/internal/logging/memlogger" apitypes "github.com/yusing/goutils/apitypes" - gperr "github.com/yusing/goutils/errs" "github.com/yusing/goutils/http/websocket" ) @@ -40,33 +39,33 @@ func Renew(c *gin.Context) { logs, cancel := memlogger.Events() defer cancel() - done := make(chan struct{}) - go func() { - defer close(done) + // Stream logs until WebSocket connection closes (renewal runs in background) + for { + select { + case <-manager.Context().Done(): + return + case l := <-logs: + if err != nil { + return + } - err = autocert.ObtainCert() - if err != nil { - gperr.LogError("failed to obtain cert", err) - _ = manager.WriteData(websocket.TextMessage, []byte(err.Error()), 10*time.Second) - } else { - log.Info().Msg("cert obtained successfully") + err = manager.WriteData(websocket.TextMessage, l, 10*time.Second) + if err != nil { + return + } + } } }() - for { - select { - case l := <-logs: - if err != nil { - return - } - - err = manager.WriteData(websocket.TextMessage, l, 10*time.Second) - if err != nil { - return - } - case <-done: - return - } + // renewal happens in background + ok := autocert.ForceExpiryAll() + if !ok { + log.Error().Msg("cert renewal already in progress") + time.Sleep(1 * time.Second) // wait for the log above to be sent + return } + 
log.Info().Msg("cert force renewal requested") + + autocert.WaitRenewalDone(manager.Context()) } diff --git a/internal/autocert/config.go b/internal/autocert/config.go index 43e2aa70..b1f035e3 100644 --- a/internal/autocert/config.go +++ b/internal/autocert/config.go @@ -5,6 +5,7 @@ import ( "crypto/elliptic" "crypto/rand" "crypto/x509" + "fmt" "net/http" "os" "regexp" @@ -19,13 +20,14 @@ import ( strutils "github.com/yusing/goutils/strings" ) +type ConfigExtra Config type Config struct { Email string `json:"email,omitempty"` Domains []string `json:"domains,omitempty"` CertPath string `json:"cert_path,omitempty"` KeyPath string `json:"key_path,omitempty"` - Extra []Config `json:"extra,omitempty"` - ACMEKeyPath string `json:"acme_key_path,omitempty"` + Extra []ConfigExtra `json:"extra,omitempty"` + ACMEKeyPath string `json:"acme_key_path,omitempty"` // shared by all extra providers Provider string `json:"provider,omitempty"` Options map[string]strutils.Redacted `json:"options,omitempty"` @@ -42,15 +44,12 @@ type Config struct { HTTPClient *http.Client `json:"-"` // for tests only challengeProvider challenge.Provider + + idx int // 0: main, 1+: extra[i] } var ( - ErrMissingDomain = gperr.New("missing field 'domains'") - ErrMissingEmail = gperr.New("missing field 'email'") - ErrMissingProvider = gperr.New("missing field 'provider'") - ErrMissingCADirURL = gperr.New("missing field 'ca_dir_url'") - ErrMissingCertPath = gperr.New("missing field 'cert_path'") - ErrMissingKeyPath = gperr.New("missing field 'key_path'") + ErrMissingField = gperr.New("missing field") ErrDuplicatedPath = gperr.New("duplicated path") ErrInvalidDomain = gperr.New("invalid domain") ErrUnknownProvider = gperr.New("unknown provider") @@ -66,95 +65,22 @@ var domainOrWildcardRE = regexp.MustCompile(`^\*?([^.]+\.)+[^.]+$`) // Validate implements the utils.CustomValidator interface. 
func (cfg *Config) Validate() gperr.Error { - if cfg == nil { - return nil - } + seenPaths := make(map[string]int) // path -> provider idx (0 for main, 1+ for extras) + return cfg.validate(seenPaths) +} +func (cfg *ConfigExtra) Validate() gperr.Error { + return nil // done by main config's validate +} + +func (cfg *ConfigExtra) AsConfig() *Config { + return (*Config)(cfg) +} + +func (cfg *Config) validate(seenPaths map[string]int) gperr.Error { if cfg.Provider == "" { cfg.Provider = ProviderLocal } - - b := gperr.NewBuilder("autocert errors") - if len(cfg.Extra) > 0 { - seenCertPaths := make(map[string]int, len(cfg.Extra)) - seenKeyPaths := make(map[string]int, len(cfg.Extra)) - for i := range cfg.Extra { - if cfg.Extra[i].CertPath == "" { - b.Add(ErrMissingCertPath.Subjectf("extra[%d].cert_path", i)) - } - if cfg.Extra[i].KeyPath == "" { - b.Add(ErrMissingKeyPath.Subjectf("extra[%d].key_path", i)) - } - if cfg.Extra[i].CertPath != "" { - if first, ok := seenCertPaths[cfg.Extra[i].CertPath]; ok { - b.Add(ErrDuplicatedPath.Subjectf("extra[%d].cert_path", i).Withf("first: %d", first)) - } else { - seenCertPaths[cfg.Extra[i].CertPath] = i - } - } - if cfg.Extra[i].KeyPath != "" { - if first, ok := seenKeyPaths[cfg.Extra[i].KeyPath]; ok { - b.Add(ErrDuplicatedPath.Subjectf("extra[%d].key_path", i).Withf("first: %d", first)) - } else { - seenKeyPaths[cfg.Extra[i].KeyPath] = i - } - } - } - } - - if cfg.Provider == ProviderCustom && cfg.CADirURL == "" { - b.Add(ErrMissingCADirURL) - } - - if cfg.Provider != ProviderLocal && cfg.Provider != ProviderPseudo { - if len(cfg.Domains) == 0 { - b.Add(ErrMissingDomain) - } - if cfg.Email == "" { - b.Add(ErrMissingEmail) - } - if cfg.Provider != ProviderCustom { - for i, d := range cfg.Domains { - if !domainOrWildcardRE.MatchString(d) { - b.Add(ErrInvalidDomain.Subjectf("domains[%d]", i)) - } - } - } - // check if provider is implemented - providerConstructor, ok := Providers[cfg.Provider] - if !ok { - if cfg.Provider != 
ProviderCustom { - b.Add(ErrUnknownProvider. - Subject(cfg.Provider). - With(gperr.DoYouMeanField(cfg.Provider, Providers))) - } - } else { - provider, err := providerConstructor(cfg.Options) - if err != nil { - b.Add(err) - } else { - cfg.challengeProvider = provider - } - } - } - - if cfg.challengeProvider == nil { - cfg.challengeProvider, _ = Providers[ProviderLocal](nil) - } - return b.Error() -} - -func (cfg *Config) dns01Options() []dns01.ChallengeOption { - return []dns01.ChallengeOption{ - dns01.CondOption(len(cfg.Resolvers) > 0, dns01.AddRecursiveNameservers(cfg.Resolvers)), - } -} - -func (cfg *Config) GetLegoConfig() (*User, *lego.Config, gperr.Error) { - if err := cfg.Validate(); err != nil { - return nil, nil, err - } - if cfg.CertPath == "" { cfg.CertPath = CertFileDefault } @@ -165,6 +91,83 @@ func (cfg *Config) GetLegoConfig() (*User, *lego.Config, gperr.Error) { cfg.ACMEKeyPath = ACMEKeyFileDefault } + b := gperr.NewBuilder("certificate error") + + // check if cert_path is unique + if first, ok := seenPaths[cfg.CertPath]; ok { + b.Add(ErrDuplicatedPath.Subjectf("cert_path %s", cfg.CertPath).Withf("first seen in %s", fmt.Sprintf("extra[%d]", first))) + } else { + seenPaths[cfg.CertPath] = cfg.idx + } + + // check if key_path is unique + if first, ok := seenPaths[cfg.KeyPath]; ok { + b.Add(ErrDuplicatedPath.Subjectf("key_path %s", cfg.KeyPath).Withf("first seen in %s", fmt.Sprintf("extra[%d]", first))) + } else { + seenPaths[cfg.KeyPath] = cfg.idx + } + + if cfg.Provider == ProviderCustom && cfg.CADirURL == "" { + b.Add(ErrMissingField.Subject("ca_dir_url")) + } + + if cfg.Provider != ProviderLocal && cfg.Provider != ProviderPseudo { + if len(cfg.Domains) == 0 { + b.Add(ErrMissingField.Subject("domains")) + } + if cfg.Email == "" { + b.Add(ErrMissingField.Subject("email")) + } + if cfg.Provider != ProviderCustom { + for i, d := range cfg.Domains { + if !domainOrWildcardRE.MatchString(d) { + b.Add(ErrInvalidDomain.Subjectf("domains[%d]", i)) + } + } + 
} + } + + // check if provider is implemented + providerConstructor, ok := Providers[cfg.Provider] + if !ok { + if cfg.Provider != ProviderCustom { + b.Add(ErrUnknownProvider. + Subject(cfg.Provider). + With(gperr.DoYouMeanField(cfg.Provider, Providers))) + } + } else { + provider, err := providerConstructor(cfg.Options) + if err != nil { + b.Add(err) + } else { + cfg.challengeProvider = provider + } + } + + if cfg.challengeProvider == nil { + cfg.challengeProvider, _ = Providers[ProviderLocal](nil) + } + + if len(cfg.Extra) > 0 { + for i := range cfg.Extra { + cfg.Extra[i] = MergeExtraConfig(cfg, &cfg.Extra[i]) + cfg.Extra[i].AsConfig().idx = i + 1 + err := cfg.Extra[i].AsConfig().validate(seenPaths) + if err != nil { + b.Add(err.Subjectf("extra[%d]", i)) + } + } + } + return b.Error() +} + +func (cfg *Config) dns01Options() []dns01.ChallengeOption { + return []dns01.ChallengeOption{ + dns01.CondOption(len(cfg.Resolvers) > 0, dns01.AddRecursiveNameservers(cfg.Resolvers)), + } +} + +func (cfg *Config) GetLegoConfig() (*User, *lego.Config, error) { var privKey *ecdsa.PrivateKey var err error @@ -208,6 +211,46 @@ func (cfg *Config) GetLegoConfig() (*User, *lego.Config, gperr.Error) { return user, legoCfg, nil } +func MergeExtraConfig(mainCfg *Config, extraCfg *ConfigExtra) ConfigExtra { + merged := ConfigExtra(*mainCfg) + merged.Extra = nil + merged.CertPath = extraCfg.CertPath + merged.KeyPath = extraCfg.KeyPath + // NOTE: Using same ACME key as main provider + + if extraCfg.Provider != "" { + merged.Provider = extraCfg.Provider + } + if extraCfg.Email != "" { + merged.Email = extraCfg.Email + } + if len(extraCfg.Domains) > 0 { + merged.Domains = extraCfg.Domains + } + if len(extraCfg.Options) > 0 { + merged.Options = extraCfg.Options + } + if len(extraCfg.Resolvers) > 0 { + merged.Resolvers = extraCfg.Resolvers + } + if extraCfg.CADirURL != "" { + merged.CADirURL = extraCfg.CADirURL + } + if len(extraCfg.CACerts) > 0 { + merged.CACerts = extraCfg.CACerts + } + if 
extraCfg.EABKid != "" { + merged.EABKid = extraCfg.EABKid + } + if extraCfg.EABHmac != "" { + merged.EABHmac = extraCfg.EABHmac + } + if extraCfg.HTTPClient != nil { + merged.HTTPClient = extraCfg.HTTPClient + } + return merged +} + func (cfg *Config) LoadACMEKey() (*ecdsa.PrivateKey, error) { if common.IsTest { return nil, os.ErrNotExist diff --git a/internal/autocert/config_test.go b/internal/autocert/config_test.go index 58782366..6bb53de1 100644 --- a/internal/autocert/config_test.go +++ b/internal/autocert/config_test.go @@ -1,27 +1,32 @@ -package autocert +package autocert_test import ( "fmt" "testing" + "github.com/stretchr/testify/require" + "github.com/yusing/godoxy/internal/autocert" + "github.com/yusing/godoxy/internal/dnsproviders" "github.com/yusing/godoxy/internal/serialization" ) func TestEABConfigRequired(t *testing.T) { + dnsproviders.InitProviders() + tests := []struct { name string - cfg *Config + cfg *autocert.Config wantErr bool }{ - {name: "Missing EABKid", cfg: &Config{EABHmac: "1234567890"}, wantErr: true}, - {name: "Missing EABHmac", cfg: &Config{EABKid: "1234567890"}, wantErr: true}, - {name: "Valid EAB", cfg: &Config{EABKid: "1234567890", EABHmac: "1234567890"}, wantErr: false}, + {name: "Missing EABKid", cfg: &autocert.Config{EABHmac: "1234567890"}, wantErr: true}, + {name: "Missing EABHmac", cfg: &autocert.Config{EABKid: "1234567890"}, wantErr: true}, + {name: "Valid EAB", cfg: &autocert.Config{EABKid: "1234567890", EABHmac: "1234567890"}, wantErr: false}, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { yaml := fmt.Appendf(nil, "eab_kid: %s\neab_hmac: %s", test.cfg.EABKid, test.cfg.EABHmac) - cfg := Config{} + cfg := autocert.Config{} err := serialization.UnmarshalValidateYAML(yaml, &cfg) if (err != nil) != test.wantErr { t.Errorf("Validate() error = %v, wantErr %v", err, test.wantErr) @@ -29,3 +34,27 @@ func TestEABConfigRequired(t *testing.T) { }) } } + +func TestExtraCertKeyPathsUnique(t *testing.T) { + 
t.Run("duplicate cert_path rejected", func(t *testing.T) { + cfg := &autocert.Config{ + Provider: autocert.ProviderLocal, + Extra: []autocert.ConfigExtra{ + {CertPath: "a.crt", KeyPath: "a.key"}, + {CertPath: "a.crt", KeyPath: "b.key"}, + }, + } + require.Error(t, cfg.Validate()) + }) + + t.Run("duplicate key_path rejected", func(t *testing.T) { + cfg := &autocert.Config{ + Provider: autocert.ProviderLocal, + Extra: []autocert.ConfigExtra{ + {CertPath: "a.crt", KeyPath: "a.key"}, + {CertPath: "b.crt", KeyPath: "a.key"}, + }, + } + require.Error(t, cfg.Validate()) + }) +} diff --git a/internal/autocert/paths.go b/internal/autocert/paths.go index c486f061..573d51e1 100644 --- a/internal/autocert/paths.go +++ b/internal/autocert/paths.go @@ -5,5 +5,4 @@ const ( CertFileDefault = certBasePath + "cert.crt" KeyFileDefault = certBasePath + "priv.key" ACMEKeyFileDefault = certBasePath + "acme.key" - LastFailureFile = certBasePath + ".last_failure" ) diff --git a/internal/autocert/provider.go b/internal/autocert/provider.go index 163d6bbc..bff4e55a 100644 --- a/internal/autocert/provider.go +++ b/internal/autocert/provider.go @@ -1,16 +1,19 @@ package autocert import ( + "context" "crypto/sha256" "crypto/tls" "crypto/x509" "errors" "fmt" + "io/fs" "maps" "os" "path/filepath" "slices" "strings" + "sync" "sync/atomic" "time" @@ -28,6 +31,8 @@ import ( type ( Provider struct { + logger zerolog.Logger + cfg *Config user *User legoCfg *lego.Config @@ -42,12 +47,18 @@ type ( extraProviders []*Provider sniMatcher sniMatcher + + forceRenewalCh chan struct{} + forceRenewalDoneCh atomic.Value // chan struct{} + + scheduleRenewalOnce sync.Once } CertExpiries map[string]time.Time + RenewMode uint8 ) -var ErrGetCertFailure = errors.New("get certificate failed") +var ErrNoCertificate = errors.New("no certificate found") const ( // renew failed for whatever reason, 1 hour cooldown @@ -56,21 +67,36 @@ const ( requestCooldownDuration = 15 * time.Second ) +const ( + renewModeForce = iota + 
renewModeIfNeeded +) + // could be nil var ActiveProvider atomic.Pointer[Provider] -func NewProvider(cfg *Config, user *User, legoCfg *lego.Config) *Provider { - return &Provider{ +func NewProvider(cfg *Config, user *User, legoCfg *lego.Config) (*Provider, error) { + p := &Provider{ cfg: cfg, user: user, legoCfg: legoCfg, lastFailureFile: lastFailureFileFor(cfg.CertPath, cfg.KeyPath), + forceRenewalCh: make(chan struct{}, 1), } + if cfg.idx == 0 { + p.logger = log.With().Str("provider", "main").Logger() + } else { + p.logger = log.With().Str("provider", fmt.Sprintf("extra[%d]", cfg.idx)).Logger() + } + if err := p.setupExtraProviders(); err != nil { + return nil, err + } + return p, nil } func (p *Provider) GetCert(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { if p.tlsCert == nil { - return nil, ErrGetCertFailure + return nil, ErrNoCertificate } if hello == nil || hello.ServerName == "" { return p.tlsCert, nil @@ -82,7 +108,14 @@ func (p *Provider) GetCert(hello *tls.ClientHelloInfo) (*tls.Certificate, error) } func (p *Provider) GetName() string { - return p.cfg.Provider + if p.cfg.idx == 0 { + return "main" + } + return fmt.Sprintf("extra[%d]", p.cfg.idx) +} + +func (p *Provider) fmtError(err error) error { + return gperr.PrependSubject(fmt.Sprintf("provider: %s", p.GetName()), err) } func (p *Provider) GetCertPath() string { @@ -129,45 +162,88 @@ func (p *Provider) ClearLastFailure() error { return nil } p.lastFailure = time.Time{} - return os.Remove(p.lastFailureFile) + err := os.Remove(p.lastFailureFile) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return err + } + return nil } -func (p *Provider) ObtainCert() error { - if len(p.extraProviders) > 0 { - errs := gperr.NewGroup("autocert errors") - errs.Go(p.obtainCertSelf) - for _, ep := range p.extraProviders { - errs.Go(ep.obtainCertSelf) - } - if err := errs.Wait().Error(); err != nil { - return err - } - p.rebuildSNIMatcher() +// allProviders returns all providers including this provider 
and all extra providers. +func (p *Provider) allProviders() []*Provider { + return append([]*Provider{p}, p.extraProviders...) +} + +// ObtainCertIfNotExistsAll obtains a new certificate for this provider and all extra providers if they do not exist. +func (p *Provider) ObtainCertIfNotExistsAll() error { + errs := gperr.NewGroup("obtain cert error") + + for _, provider := range p.allProviders() { + errs.Go(func() error { + if err := provider.obtainCertIfNotExists(); err != nil { + return fmt.Errorf("failed to obtain cert for %s: %w", provider.GetName(), err) + } + return nil + }) + } + + p.rebuildSNIMatcher() + return errs.Wait().Error() +} + +// obtainCertIfNotExists obtains a new certificate for this provider if it does not exist. +func (p *Provider) obtainCertIfNotExists() error { + err := p.LoadCert() + if err == nil { return nil } - return p.obtainCertSelf() + + if !errors.Is(err, fs.ErrNotExist) { + return err + } + + // check last failure + lastFailure, err := p.GetLastFailure() + if err != nil { + return fmt.Errorf("failed to get last failure: %w", err) + } + if !lastFailure.IsZero() && time.Since(lastFailure) < requestCooldownDuration { + return fmt.Errorf("still in cooldown until %s", strutils.FormatTime(lastFailure.Add(requestCooldownDuration).Local())) + } + + p.logger.Info().Msg("cert not found, obtaining new cert") + return p.ObtainCert() } -func (p *Provider) obtainCertSelf() error { +// ObtainCertAll renews existing certificates or obtains new certificates for this provider and all extra providers. +func (p *Provider) ObtainCertAll() error { + errs := gperr.NewGroup("obtain cert error") + for _, provider := range p.allProviders() { + errs.Go(func() error { + if err := provider.obtainCertIfNotExists(); err != nil { + return fmt.Errorf("failed to obtain cert for %s: %w", provider.GetName(), err) + } + return nil + }) + } + return errs.Wait().Error() +} + +// ObtainCert renews existing certificate or obtains a new certificate for this provider. 
+func (p *Provider) ObtainCert() error { if p.cfg.Provider == ProviderLocal { return nil } if p.cfg.Provider == ProviderPseudo { - log.Info().Msg("init client for pseudo provider") + p.logger.Info().Msg("init client for pseudo provider") <-time.After(time.Second) - log.Info().Msg("registering acme for pseudo provider") + p.logger.Info().Msg("registering acme for pseudo provider") <-time.After(time.Second) - log.Info().Msg("obtained cert for pseudo provider") + p.logger.Info().Msg("obtained cert for pseudo provider") return nil } - if lastFailure, err := p.GetLastFailure(); err != nil { - return err - } else if time.Since(lastFailure) < requestCooldownDuration { - return fmt.Errorf("%w: still in cooldown until %s", ErrGetCertFailure, strutils.FormatTime(lastFailure.Add(requestCooldownDuration).Local())) - } - if p.client == nil { if err := p.initClient(); err != nil { return err @@ -227,6 +303,7 @@ func (p *Provider) obtainCertSelf() error { } p.tlsCert = &tlsCert p.certExpiries = expiries + p.rebuildSNIMatcher() if err := p.ClearLastFailure(); err != nil { return fmt.Errorf("failed to clear last failure: %w", err) @@ -235,19 +312,37 @@ func (p *Provider) obtainCertSelf() error { } func (p *Provider) LoadCert() error { + var errs gperr.Builder cert, err := tls.LoadX509KeyPair(p.cfg.CertPath, p.cfg.KeyPath) if err != nil { - return fmt.Errorf("load SSL certificate: %w", err) + errs.Addf("load SSL certificate: %w", p.fmtError(err)) } + expiries, err := getCertExpiries(&cert) if err != nil { - return fmt.Errorf("parse SSL certificate: %w", err) + errs.Addf("parse SSL certificate: %w", p.fmtError(err)) } + p.tlsCert = &cert p.certExpiries = expiries - log.Info().Msgf("next cert renewal in %s", strutils.FormatDuration(time.Until(p.ShouldRenewOn()))) - return p.renewIfNeeded() + for _, ep := range p.extraProviders { + if err := ep.LoadCert(); err != nil { + errs.Add(err) + } + } + + p.rebuildSNIMatcher() + return errs.Error() +} + +// PrintCertExpiriesAll prints the 
certificate expiries for this provider and all extra providers. +func (p *Provider) PrintCertExpiriesAll() { + for _, provider := range p.allProviders() { + for domain, expiry := range provider.certExpiries { + p.logger.Info().Str("domain", domain).Msgf("certificate expire on %s", strutils.FormatTime(expiry)) + } + } } // ShouldRenewOn returns the time at which the certificate should be renewed. @@ -255,65 +350,129 @@ func (p *Provider) ShouldRenewOn() time.Time { for _, expiry := range p.certExpiries { return expiry.AddDate(0, -1, 0) // 1 month before } - // this line should never be reached - panic("no certificate available") + // this line should never be reached in production, but will be useful for testing + return time.Now().AddDate(0, 1, 0) // 1 month after } -func (p *Provider) ScheduleRenewal(parent task.Parent) { +// ForceExpiryAll triggers immediate certificate renewal for this provider and all extra providers. +// Returns true if the renewal was triggered, false if the renewal was dropped. +// +// If at least one renewal is triggered, returns true. +func (p *Provider) ForceExpiryAll() (ok bool) { + doneCh := make(chan struct{}) + if swapped := p.forceRenewalDoneCh.CompareAndSwap(nil, doneCh); !swapped { // already in progress + close(doneCh) + return false + } + + select { + case p.forceRenewalCh <- struct{}{}: + ok = true + default: + } + + for _, ep := range p.extraProviders { + if ep.ForceExpiryAll() { + ok = true + } + } + + return ok +} + +// WaitRenewalDone waits for the renewal to complete. +// Returns false if the renewal was dropped. 
+func (p *Provider) WaitRenewalDone(ctx context.Context) bool { + done, ok := p.forceRenewalDoneCh.Load().(chan struct{}) + if !ok || done == nil { + return false + } + select { + case <-done: + case <-ctx.Done(): + return false + } + + for _, ep := range p.extraProviders { + if !ep.WaitRenewalDone(ctx) { + return false + } + } + return true +} + +// ScheduleRenewalAll schedules the renewal of the certificate for this provider and all extra providers. +func (p *Provider) ScheduleRenewalAll(parent task.Parent) { + p.scheduleRenewalOnce.Do(func() { + p.scheduleRenewal(parent) + }) + for _, ep := range p.extraProviders { + ep.scheduleRenewalOnce.Do(func() { + ep.scheduleRenewal(parent) + }) + } +} + +var emptyForceRenewalDoneCh any = chan struct{}(nil) + +// scheduleRenewal schedules the renewal of the certificate for this provider. +func (p *Provider) scheduleRenewal(parent task.Parent) { if p.GetName() == ProviderLocal || p.GetName() == ProviderPseudo { return } - go func() { - renewalTime := p.ShouldRenewOn() - timer := time.NewTimer(time.Until(renewalTime)) - defer timer.Stop() - task := parent.Subtask("cert-renew-scheduler:"+filepath.Base(p.cfg.CertPath), true) + timer := time.NewTimer(time.Until(p.ShouldRenewOn())) + task := parent.Subtask("cert-renew-scheduler:"+filepath.Base(p.cfg.CertPath), true) + + renew := func(renewMode RenewMode) { + defer func() { + if done, ok := p.forceRenewalDoneCh.Swap(emptyForceRenewalDoneCh).(chan struct{}); ok && done != nil { + close(done) + } + }() + + renewed, err := p.renew(renewMode) + if err != nil { + gperr.LogWarn("autocert: cert renew failed", p.fmtError(err)) + notif.Notify(¬if.LogMessage{ + Level: zerolog.ErrorLevel, + Title: fmt.Sprintf("SSL certificate renewal failed for %s", p.GetName()), + Body: notif.MessageBody(err.Error()), + }) + return + } + if renewed { + p.rebuildSNIMatcher() + + notif.Notify(¬if.LogMessage{ + Level: zerolog.InfoLevel, + Title: fmt.Sprintf("SSL certificate renewed for %s", p.GetName()), + 
Body: notif.ListBody(p.cfg.Domains), + }) + + // Reset on success + if err := p.ClearLastFailure(); err != nil { + gperr.LogWarn("autocert: failed to clear last failure", p.fmtError(err)) + } + timer.Reset(time.Until(p.ShouldRenewOn())) + } + } + + go func() { + defer timer.Stop() defer task.Finish(nil) for { select { case <-task.Context().Done(): return + case <-p.forceRenewalCh: + renew(renewModeForce) case <-timer.C: - // Retry after 1 hour on failure - lastFailure, err := p.GetLastFailure() - if err != nil { - gperr.LogWarn("autocert: failed to get last failure", err) - continue - } - if !lastFailure.IsZero() && time.Since(lastFailure) < renewalCooldownDuration { - continue - } - if err := p.renewIfNeeded(); err != nil { - gperr.LogWarn("autocert: cert renew failed", err) - if err := p.UpdateLastFailure(); err != nil { - gperr.LogWarn("autocert: failed to update last failure", err) - } - notif.Notify(¬if.LogMessage{ - Level: zerolog.ErrorLevel, - Title: "SSL certificate renewal failed", - Body: notif.MessageBody(err.Error()), - }) - continue - } - notif.Notify(¬if.LogMessage{ - Level: zerolog.InfoLevel, - Title: "SSL certificate renewed", - Body: notif.ListBody(p.cfg.Domains), - }) - // Reset on success - if err := p.ClearLastFailure(); err != nil { - gperr.LogWarn("autocert: failed to clear last failure", err) - } - renewalTime = p.ShouldRenewOn() - timer.Reset(time.Until(renewalTime)) + renew(renewModeIfNeeded) } } }() - for _, ep := range p.extraProviders { - ep.ScheduleRenewal(parent) - } } func (p *Provider) initClient() error { @@ -409,21 +568,42 @@ func (p *Provider) certState() CertState { return CertStateValid } -func (p *Provider) renewIfNeeded() error { +func (p *Provider) renew(mode RenewMode) (renewed bool, err error) { if p.cfg.Provider == ProviderLocal { - return nil + return false, nil } - switch p.certState() { - case CertStateExpired: - log.Info().Msg("certs expired, renewing") - case CertStateMismatch: - log.Info().Msg("cert domains mismatch 
with config, renewing") - default: - return nil + if mode != renewModeForce { + // Retry after 1 hour on failure + lastFailure, err := p.GetLastFailure() + if err != nil { + return false, fmt.Errorf("failed to get last failure: %w", err) + } + if !lastFailure.IsZero() && time.Since(lastFailure) < renewalCooldownDuration { + until := lastFailure.Add(renewalCooldownDuration).Local() + return false, fmt.Errorf("still in cooldown until %s", strutils.FormatTime(until)) + } } - return p.obtainCertSelf() + if mode == renewModeIfNeeded { + switch p.certState() { + case CertStateExpired: + log.Info().Msg("certs expired, renewing") + case CertStateMismatch: + log.Info().Msg("cert domains mismatch with config, renewing") + default: + return false, nil + } + } + + if mode == renewModeForce { + log.Info().Msg("force renewing cert by user request") + } + + if err := p.ObtainCert(); err != nil { + return false, err + } + return true, nil } func getCertExpiries(cert *tls.Certificate) (CertExpiries, error) { @@ -445,15 +625,16 @@ func getCertExpiries(cert *tls.Certificate) (CertExpiries, error) { } func lastFailureFileFor(certPath, keyPath string) string { - if certPath == "" && keyPath == "" { - return LastFailureFile - } dir := filepath.Dir(certPath) sum := sha256.Sum256([]byte(certPath + "|" + keyPath)) return filepath.Join(dir, fmt.Sprintf(".last_failure-%x", sum[:6])) } func (p *Provider) rebuildSNIMatcher() { + if p.cfg.idx != 0 { // only main provider has extra providers + return + } + p.sniMatcher = sniMatcher{} p.sniMatcher.addProvider(p) for _, ep := range p.extraProviders { diff --git a/internal/autocert/provider_test/custom_test.go b/internal/autocert/provider_test/custom_test.go index d62ad77e..30ae2544 100644 --- a/internal/autocert/provider_test/custom_test.go +++ b/internal/autocert/provider_test/custom_test.go @@ -10,12 +10,15 @@ import ( "encoding/base64" "encoding/json" "encoding/pem" + "fmt" "io" "math/big" "net" "net/http" "net/http/httptest" + "sort" "strings" 
+ "sync" "testing" "time" @@ -24,6 +27,368 @@ import ( "github.com/yusing/godoxy/internal/dnsproviders" ) +// TestACMEServer implements a minimal ACME server for testing with request tracking. +type TestACMEServer struct { + server *httptest.Server + caCert *x509.Certificate + caKey *rsa.PrivateKey + clientCSRs map[string]*x509.CertificateRequest + orderDomains map[string][]string + authzDomains map[string]string + orderSeq int + certRequestCount map[string]int + renewalRequestCount map[string]int + mu sync.Mutex +} + +func newTestACMEServer(t *testing.T) *TestACMEServer { + t.Helper() + + // Generate CA certificate and key + caKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err) + + caTemplate := &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + Organization: []string{"Test CA"}, + Country: []string{"US"}, + Province: []string{""}, + Locality: []string{"Test"}, + StreetAddress: []string{""}, + PostalCode: []string{""}, + }, + IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * 24 * time.Hour), + IsCA: true, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + } + + caCertDER, err := x509.CreateCertificate(rand.Reader, caTemplate, caTemplate, &caKey.PublicKey, caKey) + require.NoError(t, err) + + caCert, err := x509.ParseCertificate(caCertDER) + require.NoError(t, err) + + acme := &TestACMEServer{ + caCert: caCert, + caKey: caKey, + clientCSRs: make(map[string]*x509.CertificateRequest), + orderDomains: make(map[string][]string), + authzDomains: make(map[string]string), + orderSeq: 0, + certRequestCount: make(map[string]int), + renewalRequestCount: make(map[string]int), + } + + mux := http.NewServeMux() + acme.setupRoutes(mux) + + acme.server = httptest.NewUnstartedServer(mux) + acme.server.TLS = &tls.Config{ + Certificates: 
[]tls.Certificate{ + { + Certificate: [][]byte{caCert.Raw}, + PrivateKey: caKey, + }, + }, + MinVersion: tls.VersionTLS12, + } + acme.server.StartTLS() + return acme +} + +func (s *TestACMEServer) Close() { + s.server.Close() +} + +func (s *TestACMEServer) URL() string { + return s.server.URL +} + +func (s *TestACMEServer) httpClient() *http.Client { + certPool := x509.NewCertPool() + certPool.AddCert(s.caCert) + + return &http.Client{ + Transport: &http.Transport{ + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + TLSHandshakeTimeout: 30 * time.Second, + ResponseHeaderTimeout: 30 * time.Second, + TLSClientConfig: &tls.Config{ + RootCAs: certPool, + MinVersion: tls.VersionTLS12, + }, + }, + } +} + +func (s *TestACMEServer) setupRoutes(mux *http.ServeMux) { + mux.HandleFunc("/acme/acme/directory", s.handleDirectory) + mux.HandleFunc("/acme/new-nonce", s.handleNewNonce) + mux.HandleFunc("/acme/new-account", s.handleNewAccount) + mux.HandleFunc("/acme/new-order", s.handleNewOrder) + mux.HandleFunc("/acme/authz/", s.handleAuthorization) + mux.HandleFunc("/acme/chall/", s.handleChallenge) + mux.HandleFunc("/acme/order/", s.handleOrder) + mux.HandleFunc("/acme/cert/", s.handleCertificate) +} + +func (s *TestACMEServer) handleDirectory(w http.ResponseWriter, r *http.Request) { + directory := map[string]any{ + "newNonce": s.server.URL + "/acme/new-nonce", + "newAccount": s.server.URL + "/acme/new-account", + "newOrder": s.server.URL + "/acme/new-order", + "keyChange": s.server.URL + "/acme/key-change", + "meta": map[string]any{ + "termsOfService": s.server.URL + "/terms", + }, + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(directory) +} + +func (s *TestACMEServer) handleNewNonce(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Replay-Nonce", "test-nonce-12345") + w.WriteHeader(http.StatusOK) +} + +func (s *TestACMEServer) handleNewAccount(w http.ResponseWriter, r 
*http.Request) { + account := map[string]any{ + "status": "valid", + "contact": []string{"mailto:test@example.com"}, + "orders": s.server.URL + "/acme/orders", + } + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Location", s.server.URL+"/acme/account/1") + w.Header().Set("Replay-Nonce", "test-nonce-67890") + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(account) +} + +func (s *TestACMEServer) handleNewOrder(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + var jws struct { + Payload string `json:"payload"` + } + json.Unmarshal(body, &jws) + payloadBytes, _ := base64.RawURLEncoding.DecodeString(jws.Payload) + var orderReq struct { + Identifiers []map[string]string `json:"identifiers"` + } + json.Unmarshal(payloadBytes, &orderReq) + + domains := []string{} + for _, id := range orderReq.Identifiers { + domains = append(domains, id["value"]) + } + sort.Strings(domains) + domainKey := strings.Join(domains, ",") + + s.mu.Lock() + s.orderSeq++ + orderID := fmt.Sprintf("test-order-%d", s.orderSeq) + authzID := fmt.Sprintf("test-authz-%d", s.orderSeq) + s.orderDomains[orderID] = domains + if len(domains) > 0 { + s.authzDomains[authzID] = domains[0] + } + s.certRequestCount[domainKey]++ + s.mu.Unlock() + + order := map[string]any{ + "status": "ready", + "expires": time.Now().Add(24 * time.Hour).Format(time.RFC3339), + "identifiers": orderReq.Identifiers, + "authorizations": []string{s.server.URL + "/acme/authz/" + authzID}, + "finalize": s.server.URL + "/acme/order/" + orderID + "/finalize", + } + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Location", s.server.URL+"/acme/order/"+orderID) + w.Header().Set("Replay-Nonce", "test-nonce-order") + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(order) +} + +func (s *TestACMEServer) handleAuthorization(w http.ResponseWriter, r *http.Request) { + authzID := strings.TrimPrefix(r.URL.Path, "/acme/authz/") + domain := 
s.authzDomains[authzID] + if domain == "" { + domain = "test.example.com" + } + authz := map[string]any{ + "status": "valid", + "expires": time.Now().Add(24 * time.Hour).Format(time.RFC3339), + "identifier": map[string]string{"type": "dns", "value": domain}, + "challenges": []map[string]any{ + { + "type": "dns-01", + "status": "valid", + "url": s.server.URL + "/acme/chall/test-chall-789", + "token": "test-token-abc123", + }, + }, + } + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Replay-Nonce", "test-nonce-authz") + json.NewEncoder(w).Encode(authz) +} + +func (s *TestACMEServer) handleChallenge(w http.ResponseWriter, r *http.Request) { + challenge := map[string]any{ + "type": "dns-01", + "status": "valid", + "url": r.URL.String(), + "token": "test-token-abc123", + } + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Replay-Nonce", "test-nonce-chall") + json.NewEncoder(w).Encode(challenge) +} + +func (s *TestACMEServer) handleOrder(w http.ResponseWriter, r *http.Request) { + if strings.HasSuffix(r.URL.Path, "/finalize") { + s.handleFinalize(w, r) + return + } + + orderID := strings.TrimPrefix(r.URL.Path, "/acme/order/") + domains := s.orderDomains[orderID] + if len(domains) == 0 { + domains = []string{"test.example.com"} + } + certURL := s.server.URL + "/acme/cert/" + orderID + order := map[string]any{ + "status": "valid", + "expires": time.Now().Add(24 * time.Hour).Format(time.RFC3339), + "identifiers": func() []map[string]string { + out := make([]map[string]string, 0, len(domains)) + for _, d := range domains { + out = append(out, map[string]string{"type": "dns", "value": d}) + } + return out + }(), + "certificate": certURL, + } + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Replay-Nonce", "test-nonce-order-get") + json.NewEncoder(w).Encode(order) +} + +func (s *TestACMEServer) handleFinalize(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(r.Body) + if err != nil { + 
http.Error(w, "Failed to read request", http.StatusBadRequest) + return + } + + csr, err := s.extractCSRFromJWS(body) + if err != nil { + http.Error(w, "Invalid CSR: "+err.Error(), http.StatusBadRequest) + return + } + + orderID := strings.TrimSuffix(strings.TrimPrefix(r.URL.Path, "/acme/order/"), "/finalize") + s.mu.Lock() + s.clientCSRs[orderID] = csr + + // Detect renewal: if we already have a certificate for these domains, it's a renewal + domains := csr.DNSNames + sort.Strings(domains) + domainKey := strings.Join(domains, ",") + + if s.certRequestCount[domainKey] > 1 { + s.renewalRequestCount[domainKey]++ + } + s.mu.Unlock() + + certURL := s.server.URL + "/acme/cert/" + orderID + order := map[string]any{ + "status": "valid", + "expires": time.Now().Add(24 * time.Hour).Format(time.RFC3339), + "identifiers": func() []map[string]string { + out := make([]map[string]string, 0, len(domains)) + for _, d := range domains { + out = append(out, map[string]string{"type": "dns", "value": d}) + } + return out + }(), + "certificate": certURL, + } + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Location", strings.TrimSuffix(r.URL.String(), "/finalize")) + w.Header().Set("Replay-Nonce", "test-nonce-finalize") + json.NewEncoder(w).Encode(order) +} + +func (s *TestACMEServer) extractCSRFromJWS(jwsData []byte) (*x509.CertificateRequest, error) { + var jws struct { + Payload string `json:"payload"` + } + if err := json.Unmarshal(jwsData, &jws); err != nil { + return nil, err + } + payloadBytes, err := base64.RawURLEncoding.DecodeString(jws.Payload) + if err != nil { + return nil, err + } + var finalizeReq struct { + CSR string `json:"csr"` + } + if err := json.Unmarshal(payloadBytes, &finalizeReq); err != nil { + return nil, err + } + csrBytes, err := base64.RawURLEncoding.DecodeString(finalizeReq.CSR) + if err != nil { + return nil, err + } + return x509.ParseCertificateRequest(csrBytes) +} + +func (s *TestACMEServer) handleCertificate(w 
http.ResponseWriter, r *http.Request) { + orderID := strings.TrimPrefix(r.URL.Path, "/acme/cert/") + csr, exists := s.clientCSRs[orderID] + if !exists { + http.Error(w, "No CSR found for order", http.StatusBadRequest) + return + } + + template := &x509.Certificate{ + SerialNumber: big.NewInt(2), + Subject: pkix.Name{ + Organization: []string{"Test Cert"}, + Country: []string{"US"}, + }, + DNSNames: csr.DNSNames, + NotBefore: time.Now(), + NotAfter: time.Now().Add(90 * 24 * time.Hour), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + certDER, err := x509.CreateCertificate(rand.Reader, template, s.caCert, csr.PublicKey, s.caKey) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) + caPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: s.caCert.Raw}) + + w.Header().Set("Content-Type", "application/pem-certificate-chain") + w.Header().Set("Replay-Nonce", "test-nonce-cert") + w.Write(append(certPEM, caPEM...)) +} + func TestMain(m *testing.M) { dnsproviders.InitProviders() m.Run() @@ -41,7 +406,7 @@ func TestCustomProvider(t *testing.T) { ACMEKeyPath: "certs/custom-acme.key", } - err := cfg.Validate() + err := error(cfg.Validate()) require.NoError(t, err) user, legoCfg, err := cfg.GetLegoConfig() @@ -62,7 +427,8 @@ func TestCustomProvider(t *testing.T) { err := cfg.Validate() require.Error(t, err) - require.Contains(t, err.Error(), "missing field 'ca_dir_url'") + require.Contains(t, err.Error(), "missing field") + require.Contains(t, err.Error(), "ca_dir_url") }) t.Run("custom provider with step-ca internal CA", func(t *testing.T) { @@ -76,7 +442,7 @@ func TestCustomProvider(t *testing.T) { ACMEKeyPath: "certs/internal-acme.key", } - err := cfg.Validate() + err := error(cfg.Validate()) require.NoError(t, err) 
user, legoCfg, err := cfg.GetLegoConfig() @@ -86,9 +452,10 @@ func TestCustomProvider(t *testing.T) { require.Equal(t, "https://step-ca.internal:443/acme/acme/directory", legoCfg.CADirURL) require.Equal(t, "admin@internal.com", user.Email) - provider := autocert.NewProvider(cfg, user, legoCfg) + provider, err := autocert.NewProvider(cfg, user, legoCfg) + require.NoError(t, err) require.NotNil(t, provider) - require.Equal(t, autocert.ProviderCustom, provider.GetName()) + require.Equal(t, "main", provider.GetName()) require.Equal(t, "certs/internal.crt", provider.GetCertPath()) require.Equal(t, "certs/internal.key", provider.GetKeyPath()) }) @@ -119,7 +486,8 @@ func TestObtainCertFromCustomProvider(t *testing.T) { require.NotNil(t, user) require.NotNil(t, legoCfg) - provider := autocert.NewProvider(cfg, user, legoCfg) + provider, err := autocert.NewProvider(cfg, user, legoCfg) + require.NoError(t, err) require.NotNil(t, provider) // Test obtaining certificate @@ -161,7 +529,8 @@ func TestObtainCertFromCustomProvider(t *testing.T) { require.NotNil(t, user) require.NotNil(t, legoCfg) - provider := autocert.NewProvider(cfg, user, legoCfg) + provider, err := autocert.NewProvider(cfg, user, legoCfg) + require.NoError(t, err) require.NotNil(t, provider) err = provider.ObtainCert() @@ -178,330 +547,3 @@ func TestObtainCertFromCustomProvider(t *testing.T) { require.True(t, time.Now().After(x509Cert.NotBefore)) }) } - -// testACMEServer implements a minimal ACME server for testing. 
-type testACMEServer struct { - server *httptest.Server - caCert *x509.Certificate - caKey *rsa.PrivateKey - clientCSRs map[string]*x509.CertificateRequest - orderID string -} - -func newTestACMEServer(t *testing.T) *testACMEServer { - t.Helper() - - // Generate CA certificate and key - caKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err) - - caTemplate := &x509.Certificate{ - SerialNumber: big.NewInt(1), - Subject: pkix.Name{ - Organization: []string{"Test CA"}, - Country: []string{"US"}, - Province: []string{""}, - Locality: []string{"Test"}, - StreetAddress: []string{""}, - PostalCode: []string{""}, - }, - IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, - NotBefore: time.Now(), - NotAfter: time.Now().Add(365 * 24 * time.Hour), - IsCA: true, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - BasicConstraintsValid: true, - } - - caCertDER, err := x509.CreateCertificate(rand.Reader, caTemplate, caTemplate, &caKey.PublicKey, caKey) - require.NoError(t, err) - - caCert, err := x509.ParseCertificate(caCertDER) - require.NoError(t, err) - - acme := &testACMEServer{ - caCert: caCert, - caKey: caKey, - clientCSRs: make(map[string]*x509.CertificateRequest), - orderID: "test-order-123", - } - - mux := http.NewServeMux() - acme.setupRoutes(mux) - - acme.server = httptest.NewUnstartedServer(mux) - acme.server.TLS = &tls.Config{ - Certificates: []tls.Certificate{ - { - Certificate: [][]byte{caCert.Raw}, - PrivateKey: caKey, - }, - }, - MinVersion: tls.VersionTLS12, - } - acme.server.StartTLS() - return acme -} - -func (s *testACMEServer) Close() { - s.server.Close() -} - -func (s *testACMEServer) URL() string { - return s.server.URL -} - -func (s *testACMEServer) httpClient() *http.Client { - certPool := x509.NewCertPool() - certPool.AddCert(s.caCert) - - return &http.Client{ - Transport: &http.Transport{ - DialContext: (&net.Dialer{ - Timeout: 
30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - TLSHandshakeTimeout: 30 * time.Second, - ResponseHeaderTimeout: 30 * time.Second, - TLSClientConfig: &tls.Config{ - RootCAs: certPool, - MinVersion: tls.VersionTLS12, - }, - }, - } -} - -func (s *testACMEServer) setupRoutes(mux *http.ServeMux) { - // ACME directory endpoint - mux.HandleFunc("/acme/acme/directory", s.handleDirectory) - - // ACME endpoints - mux.HandleFunc("/acme/new-nonce", s.handleNewNonce) - mux.HandleFunc("/acme/new-account", s.handleNewAccount) - mux.HandleFunc("/acme/new-order", s.handleNewOrder) - mux.HandleFunc("/acme/authz/", s.handleAuthorization) - mux.HandleFunc("/acme/chall/", s.handleChallenge) - mux.HandleFunc("/acme/order/", s.handleOrder) - mux.HandleFunc("/acme/cert/", s.handleCertificate) -} - -func (s *testACMEServer) handleDirectory(w http.ResponseWriter, r *http.Request) { - directory := map[string]interface{}{ - "newNonce": s.server.URL + "/acme/new-nonce", - "newAccount": s.server.URL + "/acme/new-account", - "newOrder": s.server.URL + "/acme/new-order", - "keyChange": s.server.URL + "/acme/key-change", - "meta": map[string]interface{}{ - "termsOfService": s.server.URL + "/terms", - }, - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(directory) -} - -func (s *testACMEServer) handleNewNonce(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Replay-Nonce", "test-nonce-12345") - w.WriteHeader(http.StatusOK) -} - -func (s *testACMEServer) handleNewAccount(w http.ResponseWriter, r *http.Request) { - account := map[string]interface{}{ - "status": "valid", - "contact": []string{"mailto:test@example.com"}, - "orders": s.server.URL + "/acme/orders", - } - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Location", s.server.URL+"/acme/account/1") - w.Header().Set("Replay-Nonce", "test-nonce-67890") - w.WriteHeader(http.StatusCreated) - json.NewEncoder(w).Encode(account) -} - -func (s *testACMEServer) 
handleNewOrder(w http.ResponseWriter, r *http.Request) { - authzID := "test-authz-456" - - order := map[string]interface{}{ - "status": "ready", // Skip pending state for simplicity - "expires": time.Now().Add(24 * time.Hour).Format(time.RFC3339), - "identifiers": []map[string]string{{"type": "dns", "value": "test.example.com"}}, - "authorizations": []string{s.server.URL + "/acme/authz/" + authzID}, - "finalize": s.server.URL + "/acme/order/" + s.orderID + "/finalize", - } - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Location", s.server.URL+"/acme/order/"+s.orderID) - w.Header().Set("Replay-Nonce", "test-nonce-order") - w.WriteHeader(http.StatusCreated) - json.NewEncoder(w).Encode(order) -} - -func (s *testACMEServer) handleAuthorization(w http.ResponseWriter, r *http.Request) { - authz := map[string]interface{}{ - "status": "valid", // Skip challenge validation for simplicity - "expires": time.Now().Add(24 * time.Hour).Format(time.RFC3339), - "identifier": map[string]string{"type": "dns", "value": "test.example.com"}, - "challenges": []map[string]interface{}{ - { - "type": "dns-01", - "status": "valid", - "url": s.server.URL + "/acme/chall/test-chall-789", - "token": "test-token-abc123", - }, - }, - } - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Replay-Nonce", "test-nonce-authz") - json.NewEncoder(w).Encode(authz) -} - -func (s *testACMEServer) handleChallenge(w http.ResponseWriter, r *http.Request) { - challenge := map[string]interface{}{ - "type": "dns-01", - "status": "valid", - "url": r.URL.String(), - "token": "test-token-abc123", - } - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Replay-Nonce", "test-nonce-chall") - json.NewEncoder(w).Encode(challenge) -} - -func (s *testACMEServer) handleOrder(w http.ResponseWriter, r *http.Request) { - if strings.HasSuffix(r.URL.Path, "/finalize") { - s.handleFinalize(w, r) - return - } - - certURL := s.server.URL + "/acme/cert/" + s.orderID 
- order := map[string]interface{}{ - "status": "valid", - "expires": time.Now().Add(24 * time.Hour).Format(time.RFC3339), - "identifiers": []map[string]string{{"type": "dns", "value": "test.example.com"}}, - "certificate": certURL, - } - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Replay-Nonce", "test-nonce-order-get") - json.NewEncoder(w).Encode(order) -} - -func (s *testACMEServer) handleFinalize(w http.ResponseWriter, r *http.Request) { - // Read the JWS payload - body, err := io.ReadAll(r.Body) - if err != nil { - http.Error(w, "Failed to read request", http.StatusBadRequest) - return - } - - // Extract CSR from JWS payload - csr, err := s.extractCSRFromJWS(body) - if err != nil { - http.Error(w, "Invalid CSR: "+err.Error(), http.StatusBadRequest) - return - } - - // Store the CSR for certificate generation - s.clientCSRs[s.orderID] = csr - - certURL := s.server.URL + "/acme/cert/" + s.orderID - order := map[string]interface{}{ - "status": "valid", - "expires": time.Now().Add(24 * time.Hour).Format(time.RFC3339), - "identifiers": []map[string]string{{"type": "dns", "value": "test.example.com"}}, - "certificate": certURL, - } - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Location", strings.TrimSuffix(r.URL.String(), "/finalize")) - w.Header().Set("Replay-Nonce", "test-nonce-finalize") - json.NewEncoder(w).Encode(order) -} - -func (s *testACMEServer) extractCSRFromJWS(jwsData []byte) (*x509.CertificateRequest, error) { - // Parse the JWS structure - var jws struct { - Protected string `json:"protected"` - Payload string `json:"payload"` - Signature string `json:"signature"` - } - - if err := json.Unmarshal(jwsData, &jws); err != nil { - return nil, err - } - - // Decode the payload - payloadBytes, err := base64.RawURLEncoding.DecodeString(jws.Payload) - if err != nil { - return nil, err - } - - // Parse the finalize request - var finalizeReq struct { - CSR string `json:"csr"` - } - - if err := 
json.Unmarshal(payloadBytes, &finalizeReq); err != nil { - return nil, err - } - - // Decode the CSR - csrBytes, err := base64.RawURLEncoding.DecodeString(finalizeReq.CSR) - if err != nil { - return nil, err - } - - // Parse the CSR - csr, err := x509.ParseCertificateRequest(csrBytes) - if err != nil { - return nil, err - } - - return csr, nil -} - -func (s *testACMEServer) handleCertificate(w http.ResponseWriter, r *http.Request) { - // Extract order ID from URL - orderID := strings.TrimPrefix(r.URL.Path, "/acme/cert/") - - // Get the CSR for this order - csr, exists := s.clientCSRs[orderID] - if !exists { - http.Error(w, "No CSR found for order", http.StatusBadRequest) - return - } - - // Create certificate using the public key from the client's CSR - template := &x509.Certificate{ - SerialNumber: big.NewInt(2), - Subject: pkix.Name{ - Organization: []string{"Test Cert"}, - Country: []string{"US"}, - }, - DNSNames: csr.DNSNames, - NotBefore: time.Now(), - NotAfter: time.Now().Add(90 * 24 * time.Hour), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - } - - // Use the public key from the CSR and sign with CA key - certDER, err := x509.CreateCertificate(rand.Reader, template, s.caCert, csr.PublicKey, s.caKey) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - // Return certificate chain - certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) - caPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: s.caCert.Raw}) - - w.Header().Set("Content-Type", "application/pem-certificate-chain") - w.Header().Set("Replay-Nonce", "test-nonce-cert") - w.Write(append(certPEM, caPEM...)) -} diff --git a/internal/autocert/provider_test/extra_validation_test.go b/internal/autocert/provider_test/extra_validation_test.go deleted file mode 100644 index 3fbb5174..00000000 --- 
a/internal/autocert/provider_test/extra_validation_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package provider_test - -import ( - "testing" - - "github.com/stretchr/testify/require" - "github.com/yusing/godoxy/internal/autocert" -) - -func TestExtraCertKeyPathsUnique(t *testing.T) { - t.Run("duplicate cert_path rejected", func(t *testing.T) { - cfg := &autocert.Config{ - Provider: autocert.ProviderLocal, - Extra: []autocert.Config{ - {CertPath: "a.crt", KeyPath: "a.key"}, - {CertPath: "a.crt", KeyPath: "b.key"}, - }, - } - require.Error(t, cfg.Validate()) - }) - - t.Run("duplicate key_path rejected", func(t *testing.T) { - cfg := &autocert.Config{ - Provider: autocert.ProviderLocal, - Extra: []autocert.Config{ - {CertPath: "a.crt", KeyPath: "a.key"}, - {CertPath: "b.crt", KeyPath: "a.key"}, - }, - } - require.Error(t, cfg.Validate()) - }) -} diff --git a/internal/autocert/provider_test/multi_cert_test.go b/internal/autocert/provider_test/multi_cert_test.go new file mode 100644 index 00000000..d77afe1f --- /dev/null +++ b/internal/autocert/provider_test/multi_cert_test.go @@ -0,0 +1,90 @@ +//nolint:errchkjson,errcheck +package provider_test + +import ( + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + "github.com/yusing/godoxy/internal/autocert" + "github.com/yusing/godoxy/internal/serialization" + "github.com/yusing/goutils/task" +) + +func buildMultiCertYAML(serverURL string) []byte { + return fmt.Appendf(nil, ` +email: main@example.com +domains: [main.example.com] +provider: custom +ca_dir_url: %s/acme/acme/directory +cert_path: certs/main.crt +key_path: certs/main.key +extra: + - email: extra1@example.com + domains: [extra1.example.com] + cert_path: certs/extra1.crt + key_path: certs/extra1.key + - email: extra2@example.com + domains: [extra2.example.com] + cert_path: certs/extra2.crt + key_path: certs/extra2.key +`, serverURL) +} + +func TestMultipleCertificatesLifecycle(t *testing.T) { + acmeServer := newTestACMEServer(t) + defer 
acmeServer.Close() + + yamlConfig := buildMultiCertYAML(acmeServer.URL()) + var cfg autocert.Config + cfg.HTTPClient = acmeServer.httpClient() + + /* unmarshal yaml config with multiple certs */ + err := error(serialization.UnmarshalValidateYAML(yamlConfig, &cfg)) + require.NoError(t, err) + require.Equal(t, []string{"main.example.com"}, cfg.Domains) + require.Len(t, cfg.Extra, 2) + require.Equal(t, []string{"extra1.example.com"}, cfg.Extra[0].Domains) + require.Equal(t, []string{"extra2.example.com"}, cfg.Extra[1].Domains) + + var provider *autocert.Provider + + /* initialize autocert with multi-cert config */ + user, legoCfg, gerr := cfg.GetLegoConfig() + require.NoError(t, gerr) + provider, err = autocert.NewProvider(&cfg, user, legoCfg) + require.NoError(t, err) + require.NotNil(t, provider) + + // Start renewal scheduler + root := task.RootTask("test", false) + defer root.Finish(nil) + provider.ScheduleRenewalAll(root) + + require.Equal(t, "custom", cfg.Provider) + require.Equal(t, "custom", cfg.Extra[0].Provider) + require.Equal(t, "custom", cfg.Extra[1].Provider) + + /* track cert requests for all configs */ + os.MkdirAll("certs", 0755) + defer os.RemoveAll("certs") + + err = provider.ObtainCertIfNotExistsAll() + require.NoError(t, err) + + require.Equal(t, 1, acmeServer.certRequestCount["main.example.com"]) + require.Equal(t, 1, acmeServer.certRequestCount["extra1.example.com"]) + require.Equal(t, 1, acmeServer.certRequestCount["extra2.example.com"]) + + /* track renewal scheduling and requests */ + + // force renewal for all providers and wait for completion + ok := provider.ForceExpiryAll() + require.True(t, ok) + provider.WaitRenewalDone(t.Context()) + + require.Equal(t, 1, acmeServer.renewalRequestCount["main.example.com"]) + require.Equal(t, 1, acmeServer.renewalRequestCount["extra1.example.com"]) + require.Equal(t, 1, acmeServer.renewalRequestCount["extra2.example.com"]) +} diff --git a/internal/autocert/provider_test/sni_test.go 
b/internal/autocert/provider_test/sni_test.go index 766593cd..01a07b8f 100644 --- a/internal/autocert/provider_test/sni_test.go +++ b/internal/autocert/provider_test/sni_test.go @@ -71,15 +71,18 @@ func TestGetCertBySNI(t *testing.T) { Provider: autocert.ProviderLocal, CertPath: mainCert, KeyPath: mainKey, - Extra: []autocert.Config{ + Extra: []autocert.ConfigExtra{ {CertPath: extraCert, KeyPath: extraKey}, }, } require.NoError(t, cfg.Validate()) - p := autocert.NewProvider(cfg, nil, nil) - require.NoError(t, p.Setup()) + p, err := autocert.NewProvider(cfg, nil, nil) + require.NoError(t, err) + + err = p.LoadCert() + require.NoError(t, err) cert, err := p.GetCert(&tls.ClientHelloInfo{ServerName: "a.internal.example.com"}) require.NoError(t, err) @@ -100,15 +103,18 @@ func TestGetCertBySNI(t *testing.T) { Provider: autocert.ProviderLocal, CertPath: mainCert, KeyPath: mainKey, - Extra: []autocert.Config{ + Extra: []autocert.ConfigExtra{ {CertPath: extraCert, KeyPath: extraKey}, }, } require.NoError(t, cfg.Validate()) - p := autocert.NewProvider(cfg, nil, nil) - require.NoError(t, p.Setup()) + p, err := autocert.NewProvider(cfg, nil, nil) + require.NoError(t, err) + + err = p.LoadCert() + require.NoError(t, err) cert, err := p.GetCert(&tls.ClientHelloInfo{ServerName: "foo.example.com"}) require.NoError(t, err) @@ -129,15 +135,18 @@ func TestGetCertBySNI(t *testing.T) { Provider: autocert.ProviderLocal, CertPath: mainCert, KeyPath: mainKey, - Extra: []autocert.Config{ + Extra: []autocert.ConfigExtra{ {CertPath: extraCert, KeyPath: extraKey}, }, } require.NoError(t, cfg.Validate()) - p := autocert.NewProvider(cfg, nil, nil) - require.NoError(t, p.Setup()) + p, err := autocert.NewProvider(cfg, nil, nil) + require.NoError(t, err) + + err = p.LoadCert() + require.NoError(t, err) cert, err := p.GetCert(&tls.ClientHelloInfo{ServerName: "unknown.domain.com"}) require.NoError(t, err) @@ -159,8 +168,11 @@ func TestGetCertBySNI(t *testing.T) { require.NoError(t, cfg.Validate()) 
- p := autocert.NewProvider(cfg, nil, nil) - require.NoError(t, p.Setup()) + p, err := autocert.NewProvider(cfg, nil, nil) + require.NoError(t, err) + + err = p.LoadCert() + require.NoError(t, err) cert, err := p.GetCert(nil) require.NoError(t, err) @@ -182,8 +194,11 @@ func TestGetCertBySNI(t *testing.T) { require.NoError(t, cfg.Validate()) - p := autocert.NewProvider(cfg, nil, nil) - require.NoError(t, p.Setup()) + p, err := autocert.NewProvider(cfg, nil, nil) + require.NoError(t, err) + + err = p.LoadCert() + require.NoError(t, err) cert, err := p.GetCert(&tls.ClientHelloInfo{ServerName: ""}) require.NoError(t, err) @@ -204,15 +219,18 @@ func TestGetCertBySNI(t *testing.T) { Provider: autocert.ProviderLocal, CertPath: mainCert, KeyPath: mainKey, - Extra: []autocert.Config{ + Extra: []autocert.ConfigExtra{ {CertPath: extraCert, KeyPath: extraKey}, }, } require.NoError(t, cfg.Validate()) - p := autocert.NewProvider(cfg, nil, nil) - require.NoError(t, p.Setup()) + p, err := autocert.NewProvider(cfg, nil, nil) + require.NoError(t, err) + + err = p.LoadCert() + require.NoError(t, err) cert, err := p.GetCert(&tls.ClientHelloInfo{ServerName: "FOO.EXAMPLE.COM"}) require.NoError(t, err) @@ -233,15 +251,18 @@ func TestGetCertBySNI(t *testing.T) { Provider: autocert.ProviderLocal, CertPath: mainCert, KeyPath: mainKey, - Extra: []autocert.Config{ + Extra: []autocert.ConfigExtra{ {CertPath: extraCert, KeyPath: extraKey}, }, } require.NoError(t, cfg.Validate()) - p := autocert.NewProvider(cfg, nil, nil) - require.NoError(t, p.Setup()) + p, err := autocert.NewProvider(cfg, nil, nil) + require.NoError(t, err) + + err = p.LoadCert() + require.NoError(t, err) cert, err := p.GetCert(&tls.ClientHelloInfo{ServerName: " foo.example.com. 
"}) require.NoError(t, err) @@ -262,15 +283,18 @@ func TestGetCertBySNI(t *testing.T) { Provider: autocert.ProviderLocal, CertPath: mainCert, KeyPath: mainKey, - Extra: []autocert.Config{ + Extra: []autocert.ConfigExtra{ {CertPath: extraCert1, KeyPath: extraKey1}, }, } require.NoError(t, cfg.Validate()) - p := autocert.NewProvider(cfg, nil, nil) - require.NoError(t, p.Setup()) + p, err := autocert.NewProvider(cfg, nil, nil) + require.NoError(t, err) + + err = p.LoadCert() + require.NoError(t, err) cert, err := p.GetCert(&tls.ClientHelloInfo{ServerName: "foo.a.example.com"}) require.NoError(t, err) @@ -292,8 +316,11 @@ func TestGetCertBySNI(t *testing.T) { require.NoError(t, cfg.Validate()) - p := autocert.NewProvider(cfg, nil, nil) - require.NoError(t, p.Setup()) + p, err := autocert.NewProvider(cfg, nil, nil) + require.NoError(t, err) + + err = p.LoadCert() + require.NoError(t, err) cert, err := p.GetCert(&tls.ClientHelloInfo{ServerName: "bar.example.com"}) require.NoError(t, err) @@ -317,7 +344,7 @@ func TestGetCertBySNI(t *testing.T) { Provider: autocert.ProviderLocal, CertPath: mainCert, KeyPath: mainKey, - Extra: []autocert.Config{ + Extra: []autocert.ConfigExtra{ {CertPath: extraCert1, KeyPath: extraKey1}, {CertPath: extraCert2, KeyPath: extraKey2}, }, @@ -325,8 +352,11 @@ func TestGetCertBySNI(t *testing.T) { require.NoError(t, cfg.Validate()) - p := autocert.NewProvider(cfg, nil, nil) - require.NoError(t, p.Setup()) + p, err := autocert.NewProvider(cfg, nil, nil) + require.NoError(t, err) + + err = p.LoadCert() + require.NoError(t, err) cert1, err := p.GetCert(&tls.ClientHelloInfo{ServerName: "foo.test.com"}) require.NoError(t, err) @@ -352,15 +382,18 @@ func TestGetCertBySNI(t *testing.T) { Provider: autocert.ProviderLocal, CertPath: mainCert, KeyPath: mainKey, - Extra: []autocert.Config{ + Extra: []autocert.ConfigExtra{ {CertPath: extraCert, KeyPath: extraKey}, }, } require.NoError(t, cfg.Validate()) - p := autocert.NewProvider(cfg, nil, nil) - 
require.NoError(t, p.Setup()) + p, err := autocert.NewProvider(cfg, nil, nil) + require.NoError(t, err) + + err = p.LoadCert() + require.NoError(t, err) cert1, err := p.GetCert(&tls.ClientHelloInfo{ServerName: "foo.example.com"}) require.NoError(t, err) diff --git a/internal/autocert/setup.go b/internal/autocert/setup.go index e114be50..119a8759 100644 --- a/internal/autocert/setup.go +++ b/internal/autocert/setup.go @@ -1,101 +1,30 @@ package autocert import ( - "errors" - "fmt" - "os" - - "github.com/rs/zerolog/log" gperr "github.com/yusing/goutils/errs" - strutils "github.com/yusing/goutils/strings" ) -func (p *Provider) Setup() (err error) { - if err = p.LoadCert(); err != nil { - if !errors.Is(err, os.ErrNotExist) { // ignore if cert doesn't exist - return err - } - log.Debug().Msg("obtaining cert due to error loading cert") - if err = p.ObtainCert(); err != nil { - return err - } - } - - if err = p.setupExtraProviders(); err != nil { - return err - } - - for _, expiry := range p.GetExpiries() { - log.Info().Msg("certificate expire on " + strutils.FormatTime(expiry)) - break - } - - return nil -} - -func (p *Provider) setupExtraProviders() error { - p.extraProviders = nil +func (p *Provider) setupExtraProviders() gperr.Error { p.sniMatcher = sniMatcher{} if len(p.cfg.Extra) == 0 { - p.rebuildSNIMatcher() return nil } - for i := range p.cfg.Extra { - merged := mergeExtraConfig(p.cfg, &p.cfg.Extra[i]) - user, legoCfg, err := merged.GetLegoConfig() + p.extraProviders = make([]*Provider, 0, len(p.cfg.Extra)) + + errs := gperr.NewBuilder("setup extra providers error") + for _, extra := range p.cfg.Extra { + user, legoCfg, err := extra.AsConfig().GetLegoConfig() if err != nil { - return err.Subjectf("extra[%d]", i) + errs.Add(p.fmtError(err)) + continue } - ep := NewProvider(&merged, user, legoCfg) - if err := ep.Setup(); err != nil { - return gperr.PrependSubject(fmt.Sprintf("extra[%d]", i), err) + ep, err := NewProvider(extra.AsConfig(), user, legoCfg) + if err != 
nil { + errs.Add(p.fmtError(err)) + continue } p.extraProviders = append(p.extraProviders, ep) } - p.rebuildSNIMatcher() - return nil -} - -func mergeExtraConfig(mainCfg *Config, extraCfg *Config) Config { - merged := *mainCfg - merged.Extra = nil - merged.CertPath = extraCfg.CertPath - merged.KeyPath = extraCfg.KeyPath - - if merged.Email == "" { - merged.Email = mainCfg.Email - } - - if len(extraCfg.Domains) > 0 { - merged.Domains = extraCfg.Domains - } - if extraCfg.ACMEKeyPath != "" { - merged.ACMEKeyPath = extraCfg.ACMEKeyPath - } - if extraCfg.Provider != "" { - merged.Provider = extraCfg.Provider - } - if len(extraCfg.Options) > 0 { - merged.Options = extraCfg.Options - } - if len(extraCfg.Resolvers) > 0 { - merged.Resolvers = extraCfg.Resolvers - } - if extraCfg.CADirURL != "" { - merged.CADirURL = extraCfg.CADirURL - } - if len(extraCfg.CACerts) > 0 { - merged.CACerts = extraCfg.CACerts - } - if extraCfg.EABKid != "" { - merged.EABKid = extraCfg.EABKid - } - if extraCfg.EABHmac != "" { - merged.EABHmac = extraCfg.EABHmac - } - if extraCfg.HTTPClient != nil { - merged.HTTPClient = extraCfg.HTTPClient - } - return merged + return errs.Error() } diff --git a/internal/autocert/setup_test.go b/internal/autocert/setup_test.go new file mode 100644 index 00000000..335c2e57 --- /dev/null +++ b/internal/autocert/setup_test.go @@ -0,0 +1,82 @@ +package autocert_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/yusing/godoxy/internal/autocert" + "github.com/yusing/godoxy/internal/dnsproviders" + "github.com/yusing/godoxy/internal/serialization" + strutils "github.com/yusing/goutils/strings" +) + +func TestSetupExtraProviders(t *testing.T) { + dnsproviders.InitProviders() + + cfgYAML := ` +email: test@example.com +domains: [example.com] +provider: custom +ca_dir_url: https://ca.example.com:9000/acme/acme/directory +cert_path: certs/test.crt +key_path: certs/test.key +options: {key: value} +resolvers: [8.8.8.8] +ca_certs: [ca.crt] 
+eab_kid: eabKid +eab_hmac: eabHmac +extra: + - cert_path: certs/extra.crt + key_path: certs/extra.key + - cert_path: certs/extra2.crt + key_path: certs/extra2.key + email: override@example.com + provider: pseudo + domains: [override.com] + ca_dir_url: https://ca2.example.com/directory + options: {opt2: val2} + resolvers: [1.1.1.1] + ca_certs: [ca2.crt] + eab_kid: eabKid2 + eab_hmac: eabHmac2 +` + + var cfg autocert.Config + err := error(serialization.UnmarshalValidateYAML([]byte(cfgYAML), &cfg)) + require.NoError(t, err) + + // Test: extra[0] inherits all fields from main except CertPath and KeyPath. + merged0 := cfg.Extra[0] + require.Equal(t, "certs/extra.crt", merged0.CertPath) + require.Equal(t, "certs/extra.key", merged0.KeyPath) + // Inherited fields from main config: + require.Equal(t, "test@example.com", merged0.Email) // inherited + require.Equal(t, "custom", merged0.Provider) // inherited + require.Equal(t, []string{"example.com"}, merged0.Domains) // inherited + require.Equal(t, "https://ca.example.com:9000/acme/acme/directory", merged0.CADirURL) // inherited + require.Equal(t, map[string]strutils.Redacted{"key": "value"}, merged0.Options) // inherited + require.Equal(t, []string{"8.8.8.8"}, merged0.Resolvers) // inherited + require.Equal(t, []string{"ca.crt"}, merged0.CACerts) // inherited + require.Equal(t, "eabKid", merged0.EABKid) // inherited + require.Equal(t, "eabHmac", merged0.EABHmac) // inherited + require.Equal(t, cfg.HTTPClient, merged0.HTTPClient) // inherited + require.Nil(t, merged0.Extra) + + // Test: extra[1] overrides some fields, and inherits others. 
+ merged1 := cfg.Extra[1] + require.Equal(t, "certs/extra2.crt", merged1.CertPath) + require.Equal(t, "certs/extra2.key", merged1.KeyPath) + // Overridden fields: + require.Equal(t, "override@example.com", merged1.Email) // overridden + require.Equal(t, "pseudo", merged1.Provider) // overridden + require.Equal(t, []string{"override.com"}, merged1.Domains) // overridden + require.Equal(t, "https://ca2.example.com/directory", merged1.CADirURL) // overridden + require.Equal(t, map[string]strutils.Redacted{"opt2": "val2"}, merged1.Options) // overridden + require.Equal(t, []string{"1.1.1.1"}, merged1.Resolvers) // overridden + require.Equal(t, []string{"ca2.crt"}, merged1.CACerts) // overridden + require.Equal(t, "eabKid2", merged1.EABKid) // overridden + require.Equal(t, "eabHmac2", merged1.EABHmac) // overridden + // Inherited field: + require.Equal(t, cfg.HTTPClient, merged1.HTTPClient) // inherited + require.Nil(t, merged1.Extra) +} diff --git a/internal/autocert/types/provider.go b/internal/autocert/types/provider.go index 69fdf918..64b95224 100644 --- a/internal/autocert/types/provider.go +++ b/internal/autocert/types/provider.go @@ -9,6 +9,6 @@ import ( type Provider interface { Setup() error GetCert(*tls.ClientHelloInfo) (*tls.Certificate, error) - ScheduleRenewal(task.Parent) - ObtainCert() error + ScheduleRenewalAll(task.Parent) + ObtainCertAll() error } diff --git a/internal/config/state.go b/internal/config/state.go index bf3e027b..0586dca0 100644 --- a/internal/config/state.go +++ b/internal/config/state.go @@ -272,6 +272,7 @@ func (state *state) initAutoCert() error { autocertCfg := state.AutoCert if autocertCfg == nil { autocertCfg = new(autocert.Config) + _ = autocertCfg.Validate() } user, legoCfg, err := autocertCfg.GetLegoConfig() @@ -279,12 +280,19 @@ func (state *state) initAutoCert() error { return err } - state.autocertProvider = autocert.NewProvider(autocertCfg, user, legoCfg) - if err := state.autocertProvider.Setup(); err != nil { - return 
fmt.Errorf("autocert error: %w", err) - } else { - state.autocertProvider.ScheduleRenewal(state.task) + p, err := autocert.NewProvider(autocertCfg, user, legoCfg) + if err != nil { + return err } + + if err := p.ObtainCertIfNotExistsAll(); err != nil { + return err + } + + p.ScheduleRenewalAll(state.task) + p.PrintCertExpiriesAll() + + state.autocertProvider = p return nil } diff --git a/internal/dnsproviders/dummy.go b/internal/dnsproviders/dummy.go index 42ddb47a..999bf3c7 100644 --- a/internal/dnsproviders/dummy.go +++ b/internal/dnsproviders/dummy.go @@ -1,7 +1,7 @@ package dnsproviders type ( - DummyConfig struct{} + DummyConfig map[string]any DummyProvider struct{} ) From f1d906ac1174b69eade25e7bdf67f35f93030da3 Mon Sep 17 00:00:00 2001 From: yusing Date: Sun, 4 Jan 2026 20:31:11 +0800 Subject: [PATCH 02/51] fix(test): update test expectations --- internal/auth/oidc_test.go | 2 ++ internal/net/gphttp/middleware/bypass_test.go | 2 +- internal/route/route.go | 9 +++++++++ internal/route/rules/do_log_test.go | 4 ++-- internal/types/docker_provider_config_test.go | 3 +-- 5 files changed, 15 insertions(+), 5 deletions(-) diff --git a/internal/auth/oidc_test.go b/internal/auth/oidc_test.go index 131e616b..4c9dc6bb 100644 --- a/internal/auth/oidc_test.go +++ b/internal/auth/oidc_test.go @@ -15,6 +15,7 @@ import ( "github.com/golang-jwt/jwt/v5" "github.com/yusing/godoxy/internal/common" "golang.org/x/oauth2" + "golang.org/x/time/rate" expect "github.com/yusing/goutils/testing" ) @@ -42,6 +43,7 @@ func setupMockOIDC(t *testing.T) { }), allowedUsers: []string{"test-user"}, allowedGroups: []string{"test-group1", "test-group2"}, + rateLimit: rate.NewLimiter(rate.Every(common.OIDCRateLimitPeriod), common.OIDCRateLimit), } } diff --git a/internal/net/gphttp/middleware/bypass_test.go b/internal/net/gphttp/middleware/bypass_test.go index 4a13d6d2..b4a72f45 100644 --- a/internal/net/gphttp/middleware/bypass_test.go +++ b/internal/net/gphttp/middleware/bypass_test.go @@ -106,7 
+106,7 @@ func TestReverseProxyBypass(t *testing.T) { rp := reverseproxy.NewReverseProxy("test", url, fakeRoundTripper{}) err = PatchReverseProxy(rp, map[string]OptionsRaw{ "response": { - "bypass": "path glob(/test/*) | path /api", + "bypass": []string{"path glob(/test/*)", "path /api"}, "set_headers": map[string]string{ "Test-Header": "test-value", }, diff --git a/internal/route/route.go b/internal/route/route.go index 50e6d92a..ef5cec60 100644 --- a/internal/route/route.go +++ b/internal/route/route.go @@ -788,6 +788,15 @@ func (r *Route) Finalize() { } r.Port.Listening, r.Port.Proxy = lp, pp + + workingState := config.WorkingState.Load() + if workingState == nil { + if common.IsTest { // in tests, working state might be nil + return + } + panic("bug: working state is nil") + } + r.HealthCheck.ApplyDefaults(config.WorkingState.Load().Value().Defaults.HealthCheck) } diff --git a/internal/route/rules/do_log_test.go b/internal/route/rules/do_log_test.go index 94839441..b21b1956 100644 --- a/internal/route/rules/do_log_test.go +++ b/internal/route/rules/do_log_test.go @@ -270,7 +270,7 @@ func TestLogCommand_ConditionalLogging(t *testing.T) { errorContent, err := os.ReadFile(errorFile.Name()) require.NoError(t, err) errorLines := strings.Split(strings.TrimSpace(string(errorContent)), "\n") - assert.Len(t, errorLines, 2) + require.Len(t, errorLines, 2) assert.Equal(t, "ERROR: GET /notfound 404", errorLines[0]) assert.Equal(t, "ERROR: POST /error 500", errorLines[1]) } @@ -368,7 +368,7 @@ func TestLogCommand_FilePermissions(t *testing.T) { logContent := strings.TrimSpace(string(content)) lines := strings.Split(logContent, "\n") - assert.Len(t, lines, 2) + require.Len(t, lines, 2) assert.Equal(t, "GET 200", lines[0]) assert.Equal(t, "POST 200", lines[1]) } diff --git a/internal/types/docker_provider_config_test.go b/internal/types/docker_provider_config_test.go index 3339556a..093c7b32 100644 --- a/internal/types/docker_provider_config_test.go +++ 
b/internal/types/docker_provider_config_test.go @@ -1,7 +1,6 @@ package types import ( - "os" "testing" "github.com/stretchr/testify/assert" @@ -27,7 +26,7 @@ test: ca_file: /etc/ssl/ca.crt cert_file: /etc/ssl/cert.crt key_file: /etc/ssl/key.crt`), &cfg) - assert.Error(t, err, os.ErrNotExist) + assert.NoError(t, err) assert.Equal(t, &DockerProviderConfig{URL: "http://localhost:2375", TLS: &DockerTLSConfig{CAFile: "/etc/ssl/ca.crt", CertFile: "/etc/ssl/cert.crt", KeyFile: "/etc/ssl/key.crt"}}, cfg["test"]) }) } From 1b9cfa65404e0636b683fe7dd0496440ccce1cda Mon Sep 17 00:00:00 2001 From: yusing Date: Sun, 4 Jan 2026 20:40:38 +0800 Subject: [PATCH 03/51] fix(autocert): forceRenewalDoneCh was never closed --- internal/autocert/provider.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/autocert/provider.go b/internal/autocert/provider.go index bff4e55a..4be580c2 100644 --- a/internal/autocert/provider.go +++ b/internal/autocert/provider.go @@ -83,6 +83,8 @@ func NewProvider(cfg *Config, user *User, legoCfg *lego.Config) (*Provider, erro lastFailureFile: lastFailureFileFor(cfg.CertPath, cfg.KeyPath), forceRenewalCh: make(chan struct{}, 1), } + p.forceRenewalDoneCh.Store(emptyForceRenewalDoneCh) + if cfg.idx == 0 { p.logger = log.With().Str("provider", "main").Logger() } else { @@ -360,7 +362,7 @@ func (p *Provider) ShouldRenewOn() time.Time { // If at least one renewal is triggered, returns true. 
func (p *Provider) ForceExpiryAll() (ok bool) { doneCh := make(chan struct{}) - if swapped := p.forceRenewalDoneCh.CompareAndSwap(nil, doneCh); !swapped { // already in progress + if swapped := p.forceRenewalDoneCh.CompareAndSwap(emptyForceRenewalDoneCh, doneCh); !swapped { // already in progress close(doneCh) return false } From 45720db754d0e2304c13b6e7c9da938407adbde8 Mon Sep 17 00:00:00 2001 From: yusing Date: Sun, 4 Jan 2026 21:57:25 +0800 Subject: [PATCH 04/51] fix(Makefile): correct test command --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 2a5a805a..b5ca0f3e 100755 --- a/Makefile +++ b/Makefile @@ -75,7 +75,7 @@ endif .PHONY: debug test: - go test -v -race ./internal/... + CGO_ENABLED=1 go test -v -race ${BUILD_FLAGS} ./internal/... docker-build-test: docker build -t godoxy . From 1c2515cb298d0cba59fc9ba93cc6773ae1b8591d Mon Sep 17 00:00:00 2001 From: yusing Date: Sun, 4 Jan 2026 22:01:48 +0800 Subject: [PATCH 05/51] chore(docs): add README.md across multiple packages --- internal/autocert/README.md | 560 +++++++++++++++++++++++++++++++ internal/idlewatcher/README.md | 355 ++++++++++++++++++++ internal/logging/README.md | 263 +++++++++++++++ internal/metrics/README.md | 285 ++++++++++++++++ internal/serialization/README.md | 303 +++++++++++++++++ 5 files changed, 1766 insertions(+) create mode 100644 internal/autocert/README.md create mode 100644 internal/idlewatcher/README.md create mode 100644 internal/logging/README.md create mode 100644 internal/metrics/README.md create mode 100644 internal/serialization/README.md diff --git a/internal/autocert/README.md b/internal/autocert/README.md new file mode 100644 index 00000000..1b3f4a22 --- /dev/null +++ b/internal/autocert/README.md @@ -0,0 +1,560 @@ +# Autocert Package + +Automated SSL certificate management using the ACME protocol (Let's Encrypt and compatible CAs). 
+ +## Architecture Overview + +``` +┌────────────────────────────────────────────────────────────────────────────┐ +│ GoDoxy Proxy │ +├────────────────────────────────────────────────────────────────────────────┤ +│ ┌──────────────────────┐ ┌─────────────────────────────────────────┐ │ +│ │ Config.State │────▶│ autocert.Provider │ │ +│ │ (config loading) │ │ ┌───────────────────────────────────┐ │ │ +│ └──────────────────────┘ │ │ main Provider │ │ │ +│ │ │ - Primary certificate │ │ │ +│ │ │ - SNI matcher │ │ │ +│ │ │ - Renewal scheduler │ │ │ +│ │ └───────────────────────────────────┘ │ │ +│ │ ┌───────────────────────────────────┐ │ │ +│ │ │ extraProviders[] │ │ │ +│ │ │ - Additional certifictes │ │ │ +│ │ │ - Different domains/A │ │ │ +│ │ └───────────────────────────────────┘ │ │ +│ └─────────────────────────────────────────┘ │ +│ │ │ +│ ▼ │ +│ ┌────────────────────────────────┐ │ +│ │ TLS Handshake │ │ +│ │ GetCert(ClientHelloInf) │ │ +│ └────────────────────────────────┘ │ +└────────────────────────────────────────────────────────────────────────────┘ +``` + +## Certificate Lifecycle + +```mermaid +--- +config: + theme: redux-dark-color +--- +flowchart TD + A[Start] --> B[Load Existing Cert] + B --> C{Cert Exists?} + C -->|Yes| D[Load Cert from Disk] + C -->|No| E[Obtain New Cert] + + D --> F{Valid & Not Expired?} + F -->|Yes| G[Schedule Renewal] + F -->|No| H{Renewal Time?} + H -->|Yes| I[Renew Certificate] + H -->|No| G + + E --> J[Init ACME Client] + J --> K[Register Account] + K --> L[DNS-01 Challenge] + L --> M[Complete Challenge] + M --> N[Download Certificate] + N --> O[Save to Disk] + O --> G + + G --> P[Wait Until Renewal Time] + P --> Q[Trigger Renewal] + Q --> I + + I --> R[Renew via ACME] + R --> S{Same Domains?} + S -->|Yes| T[Bundle & Save] + S -->|No| U[Re-obtain Certificate] + U --> T + + T --> V[Update SNI Matcher] + V --> G + + style E fill:#90EE90 + style I fill:#FFD700 + style N fill:#90EE90 + style U fill:#FFA07A +``` + +## SNI Matching 
Flow + +When a TLS client connects with Server Name Indication (SNI), the proxy needs to select the correct certificate. + +```mermaid +flowchart LR + Client["TLS Client"] -->|ClientHello SNI| Proxy["GoDoxy Proxy"] + Proxy -->|Certificate| Client + + subgraph "SNI Matching Process" + direction TB + A[Extract SNI from ClientHello] --> B{Normalize SNI} + B --> C{Exact Match?} + C -->|Yes| D[Return cert] + C -->|No| E[Wildcard Suffix Tree] + E --> F{Match Found?} + F -->|Yes| D + F -->|No| G[Return default cert] + end + + style C fill:#90EE90 + style E fill:#87CEEB + style F fill:#FFD700 +``` + +### Suffix Tree Structure + +The `sniMatcher` uses an optimized suffix tree for efficient wildcard matching: + +``` +Certificate: *.example.com, example.com, *.api.example.com + +exact: + "example.com" → Provider_A + +root: + └── "com" + └── "example" + ├── "*" → Provider_A [wildcard at *.example.com] + └── "api" + └── "*" → Provider_B [wildcard at *.api.example.com] +``` + +## Key Components + +### Config + +Configuration for certificate management, loaded from `config/autocert.yml`. 
Uses `MergeExtraConfig()` to inherit settings from the main provider: + +```go +func MergeExtraConfig(mainCfg *Config, extraCfg *ConfigExtra) ConfigExtra +``` + +Fields that can be overridden per extra provider: + +- `Provider` - DNS provider name +- `Email` - ACME account email +- `Domains` - Certificate domains +- `Options` - Provider-specific options +- `Resolvers` - DNS resolvers +- `CADirURL` - Custom ACME CA directory +- `CACerts` - Custom CA certificates +- `EABKid` / `EABHmac` - External Account Binding credentials +- `HTTPClient` - Custom HTTP client + +Fields inherited from main config (shared): + +- `ACMEKeyPath` - ACME account private key (same for all) + +**Provider Types:** + +- `local` - No ACME, use existing certificate (default) +- `pseudo` - Mock provider for testing +- `custom` - Custom ACME CA with `CADirURL` + +### Provider + +Main certificate management struct that handles: + +- Certificate issuance and renewal +- Loading certificates from disk +- SNI-based certificate selection +- Renewal scheduling + +```go +type Provider struct { + logger zerolog.Logger // Provider-scoped logger + + cfg *Config // Configuration + user *User // ACME account + legoCfg *lego.Config // LEGO client config + client *lego.Client // ACME client + lastFailure time.Time // Last renewal failure + legoCert *certificate.Resource // Cached cert resource + tlsCert *tls.Certificate // Parsed TLS certificate + certExpiries CertExpiries // Domain → expiry map + extraProviders []*Provider // Additional certificates + sniMatcher sniMatcher // SNI → Provider mapping + forceRenewalCh chan struct{} // Force renewal trigger channel + scheduleRenewalOnce sync.Once // Prevents duplicate renewal scheduling +} +``` + +**Logging:** Each provider has a scoped logger with provider name ("main" or "extra[N]") for consistent log context. 
+ +**Key Methods:** + +- `NewProvider(cfg *Config, user *User, legoCfg *lego.Config) (*Provider, error)` - Creates provider and initializes extra providers atomically +- `GetCert(hello *tls.ClientHelloInfo)` - Returns certificate for TLS handshake +- `GetName()` - Returns provider name ("main" or "extra[N]") +- `ObtainCert()` - Obtains new certificate via ACME +- `ObtainCertAll()` - Renews/obtains certificates for main and all extra providers +- `ObtainCertIfNotExistsAll()` - Obtains certificates only if they don't exist on disk +- `ForceExpiryAll()` - Triggers forced certificate renewal for main and all extra providers +- `ScheduleRenewalAll(parent task.Parent)` - Schedules automatic renewal for all providers +- `PrintCertExpiriesAll()` - Logs certificate expiry dates for all providers + +### User + +ACME account representation implementing lego's `acme.User` interface. + +```go +type User struct { + Email string // Account email + Registration *registration.Resource // ACME registration + Key crypto.PrivateKey // Account key +} +``` + +### sniMatcher + +Efficient SNI-to-Provider lookup with exact and wildcard matching. + +```go +type sniMatcher struct { + exact map[string]*Provider // Exact domain matches + root sniTreeNode // Wildcard suffix tree +} + +type sniTreeNode struct { + children map[string]*sniTreeNode // DNS label → child node + wildcard *Provider // Wildcard match at this level +} +``` + +## DNS Providers + +Supported DNS providers for DNS-01 challenge validation: + +| Provider | Name | Description | +| ------------ | -------------- | ---------------------------------------- | +| Cloudflare | `cloudflare` | Cloudflare DNS | +| Route 53 | `route53` | AWS Route 53 | +| DigitalOcean | `digitalocean` | DigitalOcean DNS | +| GoDaddy | `godaddy` | GoDaddy DNS | +| OVH | `ovh` | OVHcloud DNS | +| CloudDNS | `clouddns` | Google Cloud DNS | +| AzureDNS | `azuredns` | Azure DNS | +| DuckDNS | `duckdns` | DuckDNS | +| and more... 
| | See `internal/dnsproviders/providers.go` | + +### Provider Configuration + +Each provider accepts configuration via the `options` map: + +```yaml +autocert: + provider: cloudflare + email: admin@example.com + domains: + - example.com + - "*.example.com" + options: + CF_API_TOKEN: your-api-token + CF_ZONE_API_TOKEN: your-zone-token + resolvers: + - 1.1.1.1:53 +``` + +## ACME Integration + +### Account Registration + +```mermaid +flowchart TD + A[Load or Generate ACME Key] --> B[Init LEGO Client] + B --> C[Resolve Account by Key] + C --> D{Account Exists?} + D -->|Yes| E[Continue with existing] + D -->|No| F{Has EAB?} + F -->|Yes| G[Register with EAB] + F -->|No| H[Register with TOS Agreement] + G --> I[Save Registration] + H --> I +``` + +### DNS-01 Challenge + +```mermaid +sequenceDiagram + participant C as ACME CA + participant P as GoDoxy + participant D as DNS Provider + + P->>C: Request certificate for domain + C->>P: Present DNS-01 challenge + P->>D: Create TXT record _acme-challenge.domain + D-->>P: Record created + P->>C: Challenge ready + C->>D: Verify DNS TXT record + D-->>C: Verification success + C->>P: Issue certificate + P->>D: Clean up TXT record +``` + +## Multi-Certificate Support + +The package supports multiple certificates through the `extra` configuration: + +```yaml +autocert: + provider: cloudflare + email: admin@example.com + domains: + - example.com + - "*.example.com" + cert_path: certs/example.com.crt + key_path: certs/example.com.key + extra: + - domains: + - api.example.com + - "*.api.example.com" + cert_path: certs/api.example.com.crt + key_path: certs/api.example.com.key + provider: cloudflare + email: admin@api.example.com +``` + +### Extra Provider Setup + +Extra providers are initialized atomically within `NewProvider()`: + +```mermaid +flowchart TD + A[NewProvider] --> B{Merge Config with Extra} + B --> C[Create Provider per Extra] + C --> D[Build SNI Matcher] + D --> E[Register in SNI Tree] + + style B fill:#87CEEB + style C 
fill:#FFD700 +``` + +## Renewal Scheduling + +### Renewal Timing + +- **Initial Check**: Certificate expiry is checked at startup +- **Renewal Window**: Renewal scheduled for 1 month before expiry +- **Cooldown on Failure**: 1-hour cooldown after failed renewal +- **Request Cooldown**: 15-second cooldown after startup (prevents rate limiting) +- **Force Renewal**: `forceRenewalCh` channel allows triggering immediate renewal + +### Force Renewal + +The `forceRenewalCh` channel (buffered size 1) enables immediate certificate renewal on demand: + +```go +// Trigger forced renewal for main and all extra providers +provider.ForceExpiryAll() +``` + +```mermaid +flowchart TD + A[Start] --> B[Calculate Renewal Time] + B --> C[expiry - 30 days] + C --> D[Start Timer] + + D --> E{Event?} + E -->|forceRenewalCh| F[Force Renewal] + E -->|Timer| G[Check Failure Cooldown] + E -->|Context Done| H[Exit] + + G --> H1{Recently Failed?} + H1 -->|Yes| I[Skip, Wait Next Event] + H1 -->|No| J[Attempt Renewal] + + J --> K{Renewal Success?} + K -->|Yes| L[Reset Failure, Notify Success] + K -->|No| M[Update Failure Time, Notify Failure] + + L --> N[Reset Timer] + I --> N + M --> D + + N --> D + + style F fill:#FFD700 + style J fill:#FFD700 + style K fill:#90EE90 + style M fill:#FFA07A +``` + +**Notifications:** Renewal success/failure triggers system notifications with provider name. 
+ +### CertState + +Certificate state tracking: + +```go +const ( + CertStateValid // Certificate is valid and up-to-date + CertStateExpired // Certificate has expired or needs renewal + CertStateMismatch // Certificate domains don't match config +) +``` + +### RenewMode + +Controls renewal behavior: + +```go +const ( + renewModeForce // Force renewal, bypass cooldown and state check + renewModeIfNeeded // Renew only if expired or domain mismatch +) +``` + +## File Structure + +``` +internal/autocert/ +├── README.md # This file +├── config.go # Config struct and validation +├── provider.go # Provider implementation +├── setup.go # Extra provider setup +├── sni_matcher.go # SNI matching logic +├── providers.go # DNS provider registration +├── state.go # Certificate state enum +├── user.go # ACME user/account +├── paths.go # Default paths +└── types/ + └── provider.go # Provider interface +``` + +## Default Paths + +| Constant | Default Value | Description | +| -------------------- | ---------------- | ------------------------ | +| `CertFileDefault` | `certs/cert.crt` | Default certificate path | +| `KeyFileDefault` | `certs/priv.key` | Default private key path | +| `ACMEKeyFileDefault` | `certs/acme.key` | Default ACME account key | + +Failure tracking file is generated per-certificate: `/.last_failure-` + +## Error Handling + +The package uses structured error handling with `gperr`: + +- **ErrMissingField** - Required configuration field missing +- **ErrDuplicatedPath** - Duplicate certificate/key paths in extras +- **ErrInvalidDomain** - Invalid domain format +- **ErrUnknownProvider** - Unknown DNS provider +- **ErrGetCertFailure** - Certificate retrieval failed + +**Error Context:** All errors are prefixed with provider name ("main" or "extra[N]") via `fmtError()` for clear attribution. 
+ +### Failure Tracking + +Last failure is persisted per-certificate to prevent rate limiting: + +```go +// File: /.last_failure- where hash is SHA256(certPath|keyPath)[:6] +``` + +**Cooldown Checks:** Last failure is checked in `obtainCertIfNotExists()` (15-second startup cooldown) and `renew()` (1-hour failure cooldown). The `renewModeForce` bypasses cooldown checks entirely. + +## Integration with GoDoxy + +The autocert package integrates with GoDoxy's configuration system: + +```mermaid +flowchart LR + subgraph Config + direction TB + A[config.yml] --> B[Parse Config] + B --> C[AutoCert Config] + end + + subgraph State + C --> D[NewProvider] + D --> E[Schedule Renewal] + E --> F[Set Active Provider] + end + + subgraph Server + F --> G[TLS Handshake] + G --> H[GetCert via SNI] + H --> I[Return Certificate] + end +``` + +### REST API + +Force certificate renewal via WebSocket endpoint: + +| Endpoint | Method | Description | +| -------------------- | ------ | ----------------------------------------- | +| `/api/v1/cert/renew` | GET | Triggers `ForceExpiryAll()` via WebSocket | + +The endpoint streams live logs during the renewal process. 
+ +## Usage Example + +```yaml +# config/config.yml +autocert: + provider: cloudflare + email: admin@example.com + domains: + - example.com + - "*.example.com" + options: + CF_API_TOKEN: ${CF_API_TOKEN} + resolvers: + - 1.1.1.1:53 + - 8.8.8.8:53 +``` + +```go +// In config initialization +autocertCfg := state.AutoCert +user, legoCfg, err := autocertCfg.GetLegoConfig() +if err != nil { + return err +} + +provider, err := autocert.NewProvider(autocertCfg, user, legoCfg) +if err != nil { + return fmt.Errorf("autocert error: %w", err) +} + +if err := provider.ObtainCertIfNotExistsAll(); err != nil { + return fmt.Errorf("failed to obtain certificates: %w", err) +} + +provider.ScheduleRenewalAll(state.Task()) +provider.PrintCertExpiriesAll() +``` diff --git a/internal/idlewatcher/README.md b/internal/idlewatcher/README.md new file mode 100644 index 00000000..3af170be --- /dev/null +++ b/internal/idlewatcher/README.md @@ -0,0 +1,355 @@ +# Idlewatcher + +Idlewatcher manages container lifecycle based on idle timeout. When a container is idle for a configured duration, it can be automatically stopped, paused, or killed. When a request comes in, the container is woken up automatically. 
+ +## Architecture Overview + +```mermaid +graph TB + subgraph Request Flow + HTTP[HTTP Request] -->|Intercept| W[Watcher] + Stream[Stream Request] -->|Intercept| W + end + + subgraph Wake Process + W -->|Wake| Wake[Wake Container] + Wake -->|Check Status| State[Container State] + Wake -->|Wait Ready| Health[Health Check] + Wake -->|Events| SSE[SSE Events] + end + + subgraph Idle Management + Timer[Idle Timer] -->|Timeout| Stop[Stop Container] + State -->|Running| Timer + State -->|Stopped| Timer + end + + subgraph Providers + Docker[DockerProvider] --> DockerAPI[Docker API] + Proxmox[ProxmoxProvider] --> ProxmoxAPI[Proxmox API] + end + + W -->|Uses| Providers +``` + +## Directory Structure + +``` +idlewatcher/ +├── cmd # Command execution utilities +├── debug.go # Debug utilities for watcher inspection +├── errors.go # Error types and conversion +├── events.go # Wake event types and broadcasting +├── handle_http.go # HTTP request handling and loading page +├── handle_http_debug.go # Debug HTTP handler (dev only) +├── handle_stream.go # Stream connection handling +├── health.go # Health monitoring interface +├── loading_page.go # Loading page HTML/CSS/JS templates +├── state.go # Container state management +├── watcher.go # Core Watcher implementation +├── provider/ # Container provider implementations +│ ├── docker.go # Docker container management +│ └── proxmox.go # Proxmox LXC management +├── types/ +│ └── provider.go # Provider interface definition +└── html/ + ├── loading_page.html # Loading page template + ├── style.css # Loading page styles + └── loading.js # Loading page JavaScript +``` + +## Core Components + +### Watcher + +The main component that manages a single container's lifecycle: + +```mermaid +classDiagram + class Watcher { + +string Key() string + +Wake(ctx context.Context) error + +Start(parent task.Parent) gperr.Error + +ServeHTTP(rw ResponseWriter, r *Request) + +ListenAndServe(ctx context.Context, predial, onRead HookFunc) + -idleTicker: 
*time.Ticker
+        -healthTicker: *time.Ticker
+        -state: synk.Value~*containerState~
+        -provider: synk.Value~Provider~
+        -dependsOn: []*dependency
+    }
+
+    class containerState {
+        +status: ContainerStatus
+        +ready: bool
+        +err: error
+        +startedAt: time.Time
+        +healthTries: int
+    }
+
+    class dependency {
+        +*Watcher
+        +waitHealthy: bool
+    }
+
+    Watcher --> containerState : manages
+    Watcher --> dependency : depends on
+```
+
+### Provider Interface
+
+Abstraction for different container backends:
+
+```mermaid
+classDiagram
+    class Provider {
+        <<interface>>
+        +ContainerPause(ctx) error
+        +ContainerUnpause(ctx) error
+        +ContainerStart(ctx) error
+        +ContainerStop(ctx, signal, timeout) error
+        +ContainerKill(ctx, signal) error
+        +ContainerStatus(ctx) (ContainerStatus, error)
+        +Watch(ctx) (eventCh, errCh)
+        +Close()
+    }
+
+    class DockerProvider {
+        +client: *docker.SharedClient
+        +watcher: watcher.DockerWatcher
+        +containerID: string
+    }
+
+    class ProxmoxProvider {
+        +*proxmox.Node
+        +vmid: int
+        +lxcName: string
+        +running: bool
+    }
+
+    Provider <|-- DockerProvider
+    Provider <|-- ProxmoxProvider
+```
+
+### Container Status
+
+```mermaid
+stateDiagram-v2
+    [*] --> Napping: Container stopped/paused
+    Napping --> Waking: Wake request
+    Waking --> Running: Container started
+    Running --> Starting: Container is running but not healthy
+    Starting --> Ready: Health check passes
+    Ready --> Napping: Idle timeout
+    Ready --> Error: Health check fails
+    Error --> Waking: Retry wake
+```
+
+## Lifecycle Flow
+
+### Wake Flow (HTTP)
+
+```mermaid
+sequenceDiagram
+    participant C as Client
+    participant W as Watcher
+    participant P as Provider
+    participant H as HealthChecker
+    participant SSE as SSE Events
+
+    C->>W: HTTP Request
+    W->>W: resetIdleTimer()
+    alt Container already ready
+        W->>W: return true (proceed)
+    else
+        alt No loading page configured
+            W->>P: ContainerStart()
+            W->>H: Wait for healthy
+            H-->>W: Healthy
+            W->>C: Continue request
+        else Loading page enabled
+            W->>P: 
ContainerStart() + W->>SSE: Send WakeEventStarting + W->>C: Serve loading page + loop Health checks + H->>H: Check health + H-->>W: Not healthy yet + W->>SSE: Send progress + end + H-->>W: Healthy + W->>SSE: Send WakeEventReady + C->>W: SSE connection + W->>SSE: Events streamed + C->>W: Poll/retry request + W->>W: return true (proceed) + end + end +``` + +### Stream Wake Flow + +```mermaid +sequenceDiagram + participant C as Client + participant W as Watcher + participant P as Provider + participant H as HealthChecker + + C->>W: Connect to stream + W->>W: preDial hook + W->>W: wakeFromStream() + alt Container ready + W->>W: Pass through + else + W->>P: ContainerStart() + W->>W: waitStarted() + W->>H: Wait for healthy + H-->>W: Healthy + W->>C: Stream connected + end +``` + +### Idle Timeout Flow + +```mermaid +sequenceDiagram + participant Client as Client + participant T as Idle Timer + participant W as Watcher + participant P as Provider + participant D as Dependencies + + loop Every request + Client->>W: HTTP/Stream + W->>W: resetIdleTimer() + end + + T->>W: Timeout + W->>W: stopByMethod() + alt stop method = pause + W->>P: ContainerPause() + else stop method = stop + W->>P: ContainerStop(signal, timeout) + else kill method = kill + W->>P: ContainerKill(signal) + end + P-->>W: Result + W->>D: Stop dependencies + D-->>W: Done +``` + +## Dependency Management + +Watchers can depend on other containers being started first: + +```mermaid +graph LR + A[App] -->|depends on| B[Database] + A -->|depends on| C[Redis] + B -->|depends on| D[Cache] +``` + +```mermaid +sequenceDiagram + participant A as App Watcher + participant B as DB Watcher + participant P as Provider + + A->>B: Wake() + Note over B: SingleFlight prevents
duplicate wake + B->>P: ContainerStart() + P-->>B: Started + B->>B: Wait healthy + B-->>A: Ready + A->>P: ContainerStart() + P-->>A: Started +``` + +## Event System + +Wake events are broadcast via Server-Sent Events (SSE): + +```mermaid +classDiagram + class WakeEvent { + +Type: WakeEventType + +Message: string + +Timestamp: time.Time + +Error: string + +WriteSSE(w io.Writer) error + } + + class WakeEventType { + <<enumeration>> + WakeEventStarting + WakeEventWakingDep + WakeEventDepReady + WakeEventContainerWoke + WakeEventWaitingReady + WakeEventReady + WakeEventError + } + + WakeEvent --> WakeEventType +``` + +## State Machine + +```mermaid +stateDiagram-v2 + note right of Napping + Container is stopped or paused + Idle timer stopped + end note + + note right of Waking + Container is starting + Health checking active + Events broadcasted + end note + + note right of Ready + Container healthy + Idle timer running + end note + + Napping --> Waking: Wake() + Waking --> Ready: Health check passes + Waking --> Error: Health check fails + Error --> Waking: Retry + Ready --> Napping: Idle timeout + Ready --> Napping: Manual stop +``` + +## Key Files + +| File | Purpose | +| --------------------- | ----------------------------------------------------- | +| `watcher.go` | Core Watcher implementation with lifecycle management | +| `handle_http.go` | HTTP interception and loading page serving | +| `handle_stream.go` | Stream connection wake handling | +| `provider/docker.go` | Docker container operations | +| `provider/proxmox.go` | Proxmox LXC container operations | +| `state.go` | Container state transitions | +| `events.go` | Event broadcasting via SSE | +| `health.go` | Health monitor interface implementation | + +## Configuration + +See `types.IdlewatcherConfig` for configuration options: + +- `IdleTimeout`: Duration before container is put to sleep +- `StopMethod`: pause, stop, or kill +- `StopSignal`: Signal to send when stopping +- `StopTimeout`: Timeout for stop operation +- 
`WakeTimeout`: Timeout for wake operation +- `DependsOn`: List of dependent containers +- `StartEndpoint`: Optional endpoint restriction for wake requests +- `NoLoadingPage`: Skip loading page, wait directly + +## Thread Safety + +- Uses `synk.Value` for atomic state updates +- Uses `xsync.Map` for SSE subscriber management +- Uses `sync.RWMutex` for watcher map access +- Uses `singleflight.Group` to prevent duplicate wake calls diff --git a/internal/logging/README.md b/internal/logging/README.md new file mode 100644 index 00000000..457a6386 --- /dev/null +++ b/internal/logging/README.md @@ -0,0 +1,263 @@ +# Logging Package + +This package provides structured logging capabilities for GoDoxy, including application logging, HTTP access logging, and in-memory log streaming. + +## Structure + +``` +internal/logging/ +├── logging.go # Main logger initialization using zerolog +├── accesslog/ # HTTP access logging with rotation and filtering +│ ├── access_logger.go # Core logging logic and buffering +│ ├── multi_access_logger.go # Fan-out to multiple writers +│ ├── config.go # Configuration types and defaults +│ ├── formatter.go # Log format implementations +│ ├── file_logger.go # File I/O with reference counting +│ ├── rotate.go # Log rotation based on retention policy +│ ├── writer.go # Buffered/unbuffered writer abstractions +│ ├── back_scanner.go # Backward line scanning for rotation +│ ├── filter.go # Request filtering by status/method/header +│ ├── retention.go # Retention policy definitions +│ ├── response_recorder.go # HTTP response recording middleware +│ └── ... 
# Tests and utilities +└── memlogger/ # In-memory circular buffer with WebSocket streaming + └── mem_logger.go # Ring buffer with WebSocket event notifications +``` + +## Architecture Overview + +```mermaid +graph TB + subgraph "Application Logger" + L[logging.go] --> Z[zerolog.Logger] + Z --> CW[ConsoleWriter] + end + + subgraph "Access Log Pipeline" + R[HTTP Request] --> M[Middleware] + M --> RR[ResponseRecorder] + RR --> F[Formatter] + F --> B[BufferedWriter] + B --> W[Writer] + W --> F1[File] + W --> S[Stdout] + end + + subgraph "Log Rotation" + B --> RT[Rotate Timer] + RT --> BS[BackScanner] + BS --> T[Truncate/Move] + T --> F1 + end + + subgraph "In-Memory Logger" + WB[Write Buffer] + WB --> RB[Circular Buffer
16KB max] + RB --> WS[WebSocket] + WS --> C[Client] + end +``` + +## Components + +### 1. Application Logger (`logging.go`) + +Initializes a zerolog-based console logger with level-aware formatting: + +- **Levels**: Trace → Debug → Info (determined by `common.IsTrace`/`common.IsDebug`) +- **Time Format**: 04:05 (trace) or 01-02 15:04 (debug/info) +- **Multi-line Handling**: Automatically indents continuation lines + +```go +// Auto-initialized on import +func InitLogger(out ...io.Writer) + +// Create logger with fixed level +NewLoggerWithFixedLevel(level zerolog.Level, out ...io.Writer) +``` + +### 2. Access Logging (`accesslog/`) + +Logs HTTP requests/responses with configurable formats, filters, and destinations. + +#### Core Interface + +```go +type AccessLogger interface { + Log(req *http.Request, res *http.Response) + LogError(req *http.Request, err error) + LogACL(info *maxmind.IPInfo, blocked bool) + Config() *Config + Flush() + Close() error +} +``` + +#### Log Formats + +| Format | Description | +| ---------- | --------------------------------- | +| `common` | Basic Apache Common format | +| `combined` | Common + Referer + User-Agent | +| `json` | Structured JSON with full details | + +#### Example Output + +``` +common: localhost 127.0.0.1 - - [01-04 10:30:45] "GET /api HTTP/1.1" 200 1234 +combined: localhost 127.0.0.1 - - [01-04 10:30:45] "GET /api HTTP/1.1" 200 1234 "https://example.com" "Mozilla/5.0" +json: {"time":"04/Jan/2025:10:30:45 +0000","ip":"127.0.0.1","method":"GET",...} +``` + +#### Filters + +Filter incoming requests before logging: + +- **StatusCodes**: Keep/drop by HTTP status code range +- **Method**: Keep/drop by HTTP method +- **Headers**: Match header existence or value +- **CIDR**: Match client IP against CIDR ranges + +#### Multi-Destination Support + +```mermaid +graph LR + A[Request] --> B[MultiAccessLogger] + B --> C[AccessLogger 1] --> F[File] + B --> D[AccessLogger 2] --> S[Stdout] +``` + +### 3. 
File Management (`file_logger.go`) + +- **Reference Counting**: Multiple loggers can share the same file +- **Auto-Close**: File closes when ref count reaches zero +- **Thread-Safe**: Shared mutex per file path + +### 4. Log Rotation (`rotate.go`) + +Rotates logs based on retention policy: + +| Policy | Description | +| ---------- | ----------------------------------- | +| `Days` | Keep logs within last N days | +| `Last` | Keep last N log lines | +| `KeepSize` | Keep last N bytes (simple truncate) | + +**Algorithm** (for Days/Last): + +1. Scan file backward line-by-line using `BackScanner` +2. Parse timestamps to find cutoff point +3. Move retained lines to file front +4. Truncate excess + +```mermaid +flowchart LR + A[File End] --> B[BackScanner] + B --> C{Valid timestamp?} + C -->|No| D[Skip line] + C -->|Yes| E{Within retention?} + E -->|Yes| F[Keep line] + E -->|No| G[Stop scanning] + F --> H[Move to front] + G --> I[Truncate rest] +``` + +### 5. Buffering (`access_logger.go`) + +- **Dynamic Sizing**: Adjusts buffer size based on write throughput +- **Initial**: 4KB → **Max**: 8MB +- **Adjustment**: Every 5 seconds based on writes-per-second + +### 6. 
In-Memory Logger (`memlogger/`) + +Circular buffer for real-time log streaming via WebSocket: + +- **Size**: 16KB maximum, auto-truncates old entries +- **Streaming**: WebSocket connection receives live updates +- **Events API**: Subscribe to log events + +```go +// HTTP handler for WebSocket streaming +HandlerFunc() gin.HandlerFunc + +// Subscribe to log events +Events() (<-chan []byte, func()) + +// Write to buffer (implements io.Writer) +Write(p []byte) (n int, err error) +``` + +## Configuration + +```yaml +access_log: + path: /var/log/godoxy/access.log # File path (optional) + stdout: true # Also log to stdout (optional) + format: combined # common | combined | json + rotate_interval: 1h # How often to check rotation + retention: + days: 30 # Keep last 30 days + # OR + last: 10000 # Keep last 10000 lines + # OR + keep_size: 100MB # Keep last 100MB + filters: + status_codes: [400-599] # Only log errors + method: [GET, POST] + headers: + - name: X-Internal + value: "true" + cidr: + - 10.0.0.0/8 + fields: + headers: drop # keep | drop | redacted + query: keep # keep | drop | redacted + cookies: drop # keep | drop | redacted +``` + +## Data Flow + +```mermaid +sequenceDiagram + participant C as Client + participant M as Middleware + participant R as ResponseRecorder + participant F as Formatter + participant B as BufferedWriter + participant W as Writer + + C->>M: HTTP Request + M->>R: Capture request + R-->>M: Continue + + M->>M: Process request + + C->>M: HTTP Response + M->>R: Capture response + R->>F: Format log line + F->>B: Write formatted line + B->>W: Flush when needed + + par File Writer + W->>File: Append line + and Stdout Writer + W->>Stdout: Print line + end + + Note over B,W: Periodic rotation check + W->>File: Rotate if needed +``` + +## Key Design Patterns + +1. **Interface Segregation**: Small, focused interfaces (`AccessLogger`, `Writer`, `BufferedWriter`) + +2. **Dependency Injection**: Writers injected at creation for flexibility + +3. 
**Reference Counting**: Shared file handles prevent too-many-open-files + +4. **Dynamic Buffering**: Adapts to write throughput automatically + +5. **Backward Scanning**: Efficient rotation without loading entire file + +6. **Zero-Allocation Formatting**: Build log lines in pre-allocated buffers diff --git a/internal/metrics/README.md b/internal/metrics/README.md new file mode 100644 index 00000000..a6ab5aa6 --- /dev/null +++ b/internal/metrics/README.md @@ -0,0 +1,285 @@ +# Metrics Package + +System monitoring and metrics collection for GoDoxy. + +## Overview + +This package provides a unified metrics collection system that polls system and route data at regular intervals, stores historical data across multiple time periods, and exposes both REST and WebSocket APIs for consumption. + +## Architecture + +```mermaid +graph TB + subgraph "Core Framework" + P[Period Generic] + E[Entries Ring Buffer] + PL[Poller Orchestrator] + end + + subgraph "Data Sources" + SI[SystemInfo Poller] + UP[Uptime Poller] + end + + subgraph "Utilities" + UT[Utils] + end + + P --> E + PL --> P + PL --> SI + PL --> UP + UT -.-> PL + UT -.-> SI + UT -.-> UP +``` + +## Directory Structure + +``` +internal/metrics/ +├── period/ # Core polling and storage framework +│ ├── period.go # Period[T] - multi-timeframe container +│ ├── entries.go # Entries[T] - ring buffer implementation +│ ├── poller.go # Poller[T, A] - orchestration and lifecycle +│ └── handler.go # HTTP handler for data access +├── systeminfo/ # System metrics (CPU, memory, disk, network, sensors) +├── uptime/ # Route health and uptime monitoring +└── utils/ # Shared utilities (query parsing, pagination) +``` + +## Core Components + +### 1. Period[T] (`period/period.go`) + +A generic container that manages multiple time periods for the same data type. 
+ +```go +type Period[T any] struct { + Entries map[Filter]*Entries[T] // 5m, 15m, 1h, 1d, 1mo + mu sync.RWMutex +} +``` + +**Time Periods:** + +| Filter | Duration | Entries | Interval | +| ------ | -------- | ------- | -------- | +| `5m` | 5 min | 100 | 3s | +| `15m` | 15 min | 100 | 9s | +| `1h` | 1 hour | 100 | 36s | +| `1d` | 1 day | 100 | 14.4m | +| `1mo` | 30 days | 100 | 7.2h | + +### 2. Entries[T] (`period/entries.go`) + +A fixed-size ring buffer (100 entries) with time-aware sampling. + +```go +type Entries[T any] struct { + entries [100]T // Fixed-size array + index int // Current position + count int // Number of entries + interval time.Duration // Sampling interval + lastAdd time.Time // Last write timestamp +} +``` + +**Features:** + +- Circular buffer for efficient memory usage +- Rate-limited adds (respects configured interval) +- JSON serialization/deserialization with temporal spacing + +### 3. Poller[T, A] (`period/poller.go`) + +The orchestrator that ties together polling, storage, and HTTP serving. + +```go +type Poller[T any, A any] struct { + name string + poll PollFunc[T] // Data collection + aggregate AggregateFunc[T, A] // Data aggregation + resultFilter FilterFunc[T] // Query filtering + period *Period[T] // Data storage + lastResult synk.Value[T] // Latest snapshot +} +``` + +**Poll Cycle (1 second interval):** + +```mermaid +sequenceDiagram + participant T as Task + participant P as Poller + participant D as Data Source + participant S as Storage (Period) + participant F as File + + T->>P: Start() + P->>F: Load historical data + F-->>P: Period[T] state + + loop Every 1 second + P->>D: Poll(ctx, lastResult) + D-->>P: New data point + P->>S: Add to all periods + P->>P: Update lastResult + + alt Every 30 seconds + P->>P: Gather & log errors + end + + alt Every 5 minutes + P->>F: Persist to JSON + end + end +``` + +### 4. HTTP Handler (`period/handler.go`) + +Provides REST and WebSocket endpoints for data access. 
+ +**Endpoints:** + +- `GET /metrics?period=5m&aggregate=cpu_average` - Historical data +- `WS /metrics?period=5m&interval=5s` - Streaming updates + +**Query Parameters:** +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `period` | Filter | (none) | Time range (5m, 15m, 1h, 1d, 1mo) | +| `aggregate` | string | (varies) | Aggregation mode | +| `interval` | duration | 1s | WebSocket update interval | +| `limit` | int | 0 | Max results (0 = all) | +| `offset` | int | 0 | Pagination offset | +| `keyword` | string | "" | Fuzzy search filter | + +## Implementations + +### SystemInfo Poller + +Collects system metrics using `gopsutil`: + +```go +type SystemInfo struct { + Timestamp int64 + CPUAverage *float64 + Memory mem.VirtualMemoryStat + Disks map[string]disk.UsageStat + DisksIO map[string]*disk.IOCountersStat + Network net.IOCountersStat + Sensors Sensors +} +``` + +**Aggregation Modes:** + +- `cpu_average` - CPU usage percentage +- `memory_usage` - Memory used in bytes +- `memory_usage_percent` - Memory usage percentage +- `disks_read_speed` - Disk read speed (bytes/s) +- `disks_write_speed` - Disk write speed (bytes/s) +- `disks_iops` - Disk I/O operations per second +- `disk_usage` - Disk usage in bytes +- `network_speed` - Upload/download speed (bytes/s) +- `network_transfer` - Total bytes transferred +- `sensor_temperature` - Temperature sensor readings + +### Uptime Poller + +Monitors route health and calculates uptime statistics: + +```go +type RouteAggregate struct { + Alias string + DisplayName string + Uptime float32 // Percentage healthy + Downtime float32 // Percentage unhealthy + Idle float32 // Percentage napping/starting + AvgLatency float32 // Average latency in ms + CurrentStatus HealthStatus + Statuses []Status // Historical statuses +} +``` + +## Data Flow + +```mermaid +flowchart TD + A[Data Source] -->|PollFunc| B[Poller] + B -->|Add| C[Period.Entries] + C -->|Ring Buffer| D[(Memory)] + D -->|Every 
5min| E[(data/metrics/*.json)] + + B -->|HTTP Request| F[ServeHTTP] + F -->|Filter| G[Get] + G -->|Aggregate| H[Response] + + F -->|WebSocket| I[PeriodicWrite] + I -->|interval| J[Push Updates] +``` + +## Persistence + +Data is persisted to `data/metrics/` as JSON files: + +```json +{ + "entries": { + "5m": { + "entries": [...], + "interval": "3s" + }, + "15m": {...}, + "1h": {...}, + "1d": {...}, + "1mo": {...} + } +} +``` + +**On Load:** + +- Validates and fixes interval mismatches +- Reconstructs temporal spacing for historical entries + +## Thread Safety + +- `Period[T]` uses `sync.RWMutex` for concurrent access +- `Entries[T]` is append-only (safe for single writer) +- `Poller` uses `synk.Value[T]` for atomic last result storage + +## Creating a New Poller + +```go +type MyData struct { + Value int +} + +type MyAggregate struct { + Values []int +} + +var MyPoller = period.NewPoller( + "my_poll_name", + func(ctx context.Context, last *MyData) (*MyData, error) { + // Fetch data + return &MyData{Value: 42}, nil + }, + func(entries []*MyData, query url.Values) (int, MyAggregate) { + // Aggregate for API response + return len(entries), MyAggregate{Values: [...]} + }, +) + +func init() { + MyPoller.Start() +} +``` + +## Error Handling + +- Poll errors are aggregated over 30-second windows +- Errors are logged with frequency counts +- Individual sensor warnings (e.g., ENODATA) are suppressed gracefully diff --git a/internal/serialization/README.md b/internal/serialization/README.md new file mode 100644 index 00000000..79997993 --- /dev/null +++ b/internal/serialization/README.md @@ -0,0 +1,303 @@ +# Serialization Package + +A Go package for flexible, type-safe serialization/deserialization with validation support. It provides robust handling of YAML/JSON input, environment variable substitution, and field-level validation with case-insensitive matching. 
+ +## Architecture Overview + +```mermaid +--- +config: + theme: redux-dark-color +--- +flowchart TB + subgraph Input Processing + YAML[YAML Bytes] --> EnvSub[Env Substitution] + EnvSub --> YAMLParse[YAML Parse] + YAMLParse --> Map[map] + end + + subgraph Type Inspection + Map --> TypeInfo[Type Info Cache] + TypeInfo -.-> FieldLookup[Field Lookup] + end + + subgraph Conversion + FieldLookup --> Convert[Convert Function] + Convert --> StringConvert[String Conversion] + Convert --> NumericConvert[Numeric Conversion] + Convert --> MapConvert[Map/Struct Conversion] + Convert --> SliceConvert[Slice Conversion] + end + + subgraph Validation + Convert --> Validate[ValidateWithFieldTags] + Convert --> CustomValidate[Custom Validator] + CustomValidate --> CustomValidator[CustomValidator Interface] + end + + subgraph Output + Validate --> Result[Typed Struct/Map] + end +``` + +## File Structure + +| File | Purpose | +| ----------------------- | ------------------------------------------------- | +| `serialization.go` | Core serialization/deserialization logic | +| `validation.go` | Field tag validation and custom validator support | +| `time.go` | Duration unit extensions (d, w, M) | +| `serialization_test.go` | Core functionality tests | +| `validation_*_test.go` | Validation-specific tests | + +## Core Types + +```go +type SerializedObject = map[string]any +``` + +The `SerializedObject` is the intermediate representation used throughout deserialization. + +### Interfaces + +```go +// For custom map unmarshaling logic +type MapUnmarshaller interface { + UnmarshalMap(m map[string]any) gperr.Error +} + +// For custom validation logic +type CustomValidator interface { + Validate() gperr.Error +} +``` + +## Key Features + +### 1. 
Case-Insensitive Field Matching + +Fields are matched using FNV-1a hash with case-insensitive comparison: + +```go +type Config struct { + AuthToken string `json:"auth_token"` +} + +// Matches: "auth_token", "AUTH_TOKEN", "AuthToken", "Auth_Token" +``` + +### 2. Field Tags + +```go +type Config struct { + Name string `json:"name"` // JSON/deserialize field name + Port int `validate:"required"` // Validation tag + Secret string `json:"-"` // Exclude from deserialization + Token string `aliases:"key,api_key"` // Aliases for matching +} +``` + +| Tag | Purpose | +| ------------- | -------------------------------------------- | +| `json` | Field name for serialization; `-` to exclude | +| `deserialize` | Explicit deserialize name; `-` to exclude | +| `validate` | go-playground/validator tags | +| `aliases` | Comma-separated alternative field names | + +### 3. Environment Variable Substitution + +Supports `${VAR}` syntax with prefix-aware lookup: + +```yaml +autocert: + auth_token: ${CLOUDFLARE_AUTH_TOKEN} +``` + +Prefix resolution order: `GODOXY_VAR`, `GOPROXY_VAR`, `VAR` + +### 4. String Conversions + +Converts strings to various types: + +```go +// Duration: "1h30m", "2d" (d=day, w=week, M=month) +ConvertString("2d", reflect.ValueOf(&duration)) + +// Numeric: "123", "0xFF" +ConvertString("123", reflect.ValueOf(&intVal)) + +// Slice: "a,b,c" or YAML list format +ConvertString("a,b,c", reflect.ValueOf(&slice)) + +// Map/Struct: YAML format +ConvertString("key: value", reflect.ValueOf(&mapVal)) +``` + +### 5. 
Custom Convertor Pattern + +Types can implement a `Parse` method for custom string conversion: + +```go +type Duration struct { + Value int + Unit string +} + +func (d *Duration) Parse(v string) error { + // custom parsing logic +} +``` + +## Main Functions + +### Deserialization + +```go +// YAML with validation +func UnmarshalValidateYAML[T any](data []byte, target *T) gperr.Error + +// YAML with interceptor +func UnmarshalValidateYAMLIntercept[T any]( + data []byte, + target *T, + intercept func(m map[string]any) gperr.Error, +) gperr.Error + +// Direct map deserialization +func MapUnmarshalValidate(src SerializedObject, dst any) gperr.Error + +// To xsync.Map +func UnmarshalValidateYAMLXSync[V any](data []byte) (*xsync.Map[string, V], gperr.Error) +``` + +### Conversion + +```go +// Convert any value to target reflect.Value +func Convert(src reflect.Value, dst reflect.Value, checkValidateTag bool) gperr.Error + +// String to target type +func ConvertString(src string, dst reflect.Value) (convertible bool, convErr gperr.Error) +``` + +### Validation + +```go +// Validate using struct tags +func ValidateWithFieldTags(s any) gperr.Error + +// Register custom validator +func MustRegisterValidation(tag string, fn validator.Func) + +// Validate using CustomValidator interface +func ValidateWithCustomValidator(v reflect.Value) gperr.Error +``` + +### Default Values + +```go +// Register factory for default values +func RegisterDefaultValueFactory[T any](factory func() *T) +``` + +## Usage Example + +```go +package main + +import ( + "os" + "github.com/yusing/godoxy/internal/serialization" +) + +type ServerConfig struct { + Host string `json:"host" validate:"required,hostname_port"` + Port int `json:"port" validate:"required,min=1,max=65535"` + MaxConns int `json:"max_conns"` + TLSEnabled bool `json:"tls_enabled"` +} + +func main() { + yamlData := []byte(` +host: localhost +port: 8080 +max_conns: 100 +tls_enabled: true +`) + + var config ServerConfig + if err := 
serialization.UnmarshalValidateYAML(yamlData, &config); err != nil { + panic(err) + } + // config is now populated and validated +} +``` + +## Deserialization Flow + +```mermaid +sequenceDiagram + participant C as Caller + participant U as UnmarshalValidateYAML + participant E as Env Substitution + participant Y as YAML Parser + participant M as MapUnmarshalValidate + participant T as Type Info Cache + participant CV as Convert + participant V as Validator + + C->>U: YAML bytes + target struct + U->>E: Substitute ${ENV} vars + E-->>U: Substituted bytes + U->>Y: Parse YAML + Y-->>U: map[string]any + U->>M: Map + target + M->>T: Get type info + loop For each field in map + M->>T: Lookup field by name (case-insensitive) + T-->>M: Field reflect.Value + M->>CV: Convert value to field type + CV-->>M: Converted value or error + end + M->>V: Validate struct tags + V-->>M: Validation errors + M-->>U: Combined errors + U-->>C: Result +``` + +## Error Handling + +Errors use `gperr` (goutils error package) with structured error subjects: + +```go +// Unknown field +ErrUnknownField.Subject("field_name").With(gperr.DoYouMeanField("field_name", ["fieldName"])) + +// Validation error +ErrValidationError.Subject("Namespace").Withf("required") + +// Unsupported conversion +ErrUnsupportedConversion.Subjectf("string to int") +``` + +## Performance Optimizations + +1. **Type Info Caching**: Uses `xsync.Map` to cache field metadata per type +2. **Hash-based Lookup**: FNV-1a hash for O(1) field matching +3. **Lazy Pointer Init**: Pointers initialized only when first set +4. **Presized Collections**: Initial capacity hints for maps/slices + +## Testing + +```bash +go test ./internal/serialization/... 
-v +``` + +Test categories: + +- Basic deserialization +- Anonymous struct handling +- Pointer primitives +- String conversions +- Environment substitution +- Custom validators From cc27942c4d32899e248d32346289f8f7703490cc Mon Sep 17 00:00:00 2001 From: yusing Date: Mon, 5 Jan 2026 20:47:56 +0800 Subject: [PATCH 06/51] chore(deps): update dependencies --- agent/go.mod | 12 ++--- agent/go.sum | 93 ++++++++++++++++++++++++++++++++++-- go.mod | 16 +++---- go.sum | 8 ++-- internal/dnsproviders/go.mod | 4 +- internal/dnsproviders/go.sum | 4 +- internal/gopsutil | 2 +- 7 files changed, 112 insertions(+), 27 deletions(-) diff --git a/agent/go.mod b/agent/go.mod index ca7d9c9a..613b0f2e 100644 --- a/agent/go.mod +++ b/agent/go.mod @@ -25,8 +25,8 @@ require ( github.com/yusing/godoxy v0.0.0-00010101000000-000000000000 github.com/yusing/godoxy/socketproxy v0.0.0-00010101000000-000000000000 github.com/yusing/goutils v0.7.0 - github.com/yusing/goutils/http/reverseproxy v0.0.0-20251217162119-cb0f79b51ce2 - github.com/yusing/goutils/server v0.0.0-20251217162119-cb0f79b51ce2 + github.com/yusing/goutils/http/reverseproxy v0.0.0-20260103043911-785deb23bd64 + github.com/yusing/goutils/server v0.0.0-20260103043911-785deb23bd64 ) require ( @@ -65,7 +65,7 @@ require ( github.com/goccy/go-json v0.10.5 // indirect github.com/goccy/go-yaml v1.19.1 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/gotify/server/v2 v2.7.3 // indirect + github.com/gotify/server/v2 v2.8.0 // indirect github.com/jinzhu/copier v0.4.0 // indirect github.com/json-iterator/go v1.1.13-0.20220915233716-71ac16282d12 // indirect github.com/klauspost/compress v1.18.2 // indirect @@ -73,7 +73,7 @@ require ( github.com/leodido/go-urn v1.4.0 // indirect github.com/lithammer/fuzzysearch v1.1.8 // indirect github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect - github.com/luthermonson/go-proxmox v0.2.4 // indirect + github.com/luthermonson/go-proxmox v0.3.1 // indirect 
github.com/magefile/mage v1.15.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -95,7 +95,7 @@ require ( github.com/samber/lo v1.52.0 // indirect github.com/samber/slog-common v0.19.0 // indirect github.com/samber/slog-zerolog/v2 v2.9.0 // indirect - github.com/shirou/gopsutil/v4 v4.25.11 // indirect + github.com/shirou/gopsutil/v4 v4.25.12 // indirect github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect github.com/spf13/afero v1.15.0 // indirect github.com/tklauser/go-sysconf v0.3.16 // indirect @@ -106,7 +106,7 @@ require ( github.com/vincent-petithory/dataurl v1.0.0 // indirect github.com/yusing/ds v0.3.1 // indirect github.com/yusing/gointernals v0.1.16 // indirect - github.com/yusing/goutils/http/websocket v0.0.0-20251217162119-cb0f79b51ce2 // indirect + github.com/yusing/goutils/http/websocket v0.0.0-20260103043911-785deb23bd64 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect diff --git a/agent/go.sum b/agent/go.sum index f5363269..573a3a70 100644 --- a/agent/go.sum +++ b/agent/go.sum @@ -1,13 +1,38 @@ +cloud.google.com/go/auth v0.18.0 h1:wnqy5hrv7p3k7cShwAU/Br3nzod7fxoqG+k0VZ+/Pk0= +cloud.google.com/go/auth v0.18.0/go.mod h1:wwkPM1AgE1f2u6dG443MiWoD8C3BtOywNsUMcUTVDRo= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 
h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns v1.2.0 h1:lpOxwrQ919lCZoNCd69rVt8u1eLZuMORrGXqy8sNf3c= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns v1.2.0/go.mod h1:fSvRkb8d26z9dbL40Uf/OO6Vo9iExtZK3D0ulRV+8M0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.3.0 h1:yzrctSl9GMIQ5lHu7jc8olOsGjWDCsBpJhWqfGa/YIM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.3.0/go.mod h1:GE4m0rnnfwLGX0Y9A9A25Zx5N/90jneT5ABevqzhuFQ= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.9.0 h1:zLzoX5+W2l95UJoVwiyNS4dX8vHyQ6x2xRLoBBL9wMk= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.9.0/go.mod h1:wVEOJfGTj0oPAUGA1JuRAvz/lxXQsWW16axmHPP47Bk= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/PuerkitoBio/goquery v1.11.0 h1:jZ7pwMQXIITcUXNH83LLk+txlaEy6NVOfTuP43xxfqw= github.com/PuerkitoBio/goquery v1.11.0/go.mod 
h1:wQHgxUOU3JGuj3oD/QFfxUdlzW6xPHfqyHre6VMY4DQ= +github.com/akamai/AkamaiOPEN-edgegrid-golang/v11 v11.1.0 h1:h/33OxYLqBk0BYmEbSUy7MlvgQR/m1w1/7OJFKoPL1I= +github.com/akamai/AkamaiOPEN-edgegrid-golang/v11 v11.1.0/go.mod h1:rvh3imDA6EaQi+oM/GQHkQAOHbXPKJ7EWJvfjuw141Q= github.com/anchore/go-lzo v0.1.0 h1:NgAacnzqPeGH49Ky19QKLBZEuFRqtTG9cdaucc3Vncs= github.com/anchore/go-lzo v0.1.0/go.mod h1:3kLx0bve2oN1iDwgM1U5zGku1Tfbdb0No5qp1eL1fIk= github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM= github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/buger/goterm v1.0.4 h1:Z9YvGmOih81P0FbVtEYTFF6YsSgxSUKEhf/f9bTMXbY= github.com/buger/goterm v1.0.4/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE= github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M= @@ -71,6 +96,8 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-ozzo/ozzo-validation/v4 v4.3.0 h1:byhDUpfEwjsVQb1vBunvIjh2BHQ9ead57VkAEY4V+Es= +github.com/go-ozzo/ozzo-validation/v4 v4.3.0/go.mod h1:2NKgrcHl3z6cJs+3Oo940FPRiTzuqKbvfrL2RxCj6Ew= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 
h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= @@ -79,6 +106,8 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.30.1 h1:f3zDSN/zOma+w6+1Wswgd9fLkdwy06ntQJp0BBvFG0w= github.com/go-playground/validator/v10 v10.30.1/go.mod h1:oSuBIQzuJxL//3MelwSLD5hc2Tu889bF0Idm9Dg26cM= +github.com/go-resty/resty/v2 v2.17.1 h1:x3aMpHK1YM9e4va/TMDRlusDDoZiQ+ViDu/WpA6xTM4= +github.com/go-resty/resty/v2 v2.17.1/go.mod h1:kCKZ3wWmwJaNc7S29BRtUhJwy7iqmn+2mLtQrOyQlVA= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= @@ -88,24 +117,38 @@ github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PU github.com/goccy/go-yaml v1.19.1 h1:3rG3+v8pkhRqoQ/88NYNMHYVGYztCOCIZ7UQhu7H+NE= github.com/goccy/go-yaml v1.19.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw= +github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0= +github.com/google/go-querystring v1.2.0/go.mod 
h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5E4Zd0Y= +github.com/googleapis/gax-go/v2 v2.16.0/go.mod h1:o1vfQjjNZn4+dPnRdl/4ZD7S9414Y4xA+a/6Icj6l14= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gotify/server/v2 v2.7.3 h1:nro/ZnxdlZFvxFcw9LREGA8zdk6CK744azwhuhX/A4g= -github.com/gotify/server/v2 v2.7.3/go.mod h1:VAtE1RIc/2j886PYs9WPQbMjqbFsoyQ0G8IdFtnAxU0= +github.com/gotify/server/v2 v2.8.0 h1:E3UDDn/3rFZi1sjZfbuhXNnxJP3ACZhdcw/iySegPRA= +github.com/gotify/server/v2 v2.8.0/go.mod h1:6ci5adxcE2hf1v+2oowKiQmixOxXV8vU+CRLKP6sqZA= github.com/h2non/gock v1.2.0 h1:K6ol8rfrRkUOefooBC8elXoaNGYkpp7y2qcxGG6BzUE= github.com/h2non/gock v1.2.0/go.mod h1:tNhoxHYW2W42cYkYb1WqzdbYIieALC99kpYr7rH/BQk= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= +github.com/hashicorp/go-cleanhttp v0.5.2 
h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= +github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/json-iterator/go v1.1.13-0.20220915233716-71ac16282d12 h1:9Nu54bhS/H/Kgo2/7xNSUuC5G28VR8ljfrLKU2G4IjU= @@ -118,14 +161,18 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/linode/linodego v1.63.0 h1:MdjizfXNJDVJU6ggoJmMO5O9h4KGPGivNX0fzrAnstk= +github.com/linode/linodego v1.63.0/go.mod h1:GoiwLVuLdBQcAebxAVKVL3mMYUgJZR/puOUSla04xBE= github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4= github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4= github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 h1:PwQumkgq4/acIiZhtifTV5OUqqiP82UAl0h87xj/l9k= github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg= -github.com/luthermonson/go-proxmox v0.2.4 h1:XQ6YNUTVvHS7N4EJxWpuqWLW2s1VPtsIblxLV/rGHLw= 
-github.com/luthermonson/go-proxmox v0.2.4/go.mod h1:oyFgg2WwTEIF0rP6ppjiixOHa5ebK1p8OaRiFhvICBQ= +github.com/luthermonson/go-proxmox v0.3.1 h1:h64s4/zIEQ06TBo0phFKcckV441YpvUPgLfRAptYsjY= +github.com/luthermonson/go-proxmox v0.3.1/go.mod h1:oyFgg2WwTEIF0rP6ppjiixOHa5ebK1p8OaRiFhvICBQ= github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= @@ -137,6 +184,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/miekg/dns v1.1.69 h1:Kb7Y/1Jo+SG+a2GtfoFUfDkG//csdRPwRLkCsxDG9Sc= github.com/miekg/dns v1.1.69/go.mod h1:7OyjD9nEba5OkqQ/hB4fy3PIoxafSZJtducccIelz3g= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/moby/api v1.52.0 h1:00BtlJY4MXkkt84WhUZPRqt5TvPbgig2FZvTbe3igYg= @@ -148,18 +197,30 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/nrdcg/goacmedns v0.2.0 h1:ADMbThobzEMnr6kg2ohs4KGa3LFqmgiBA22/6jUWJR0= +github.com/nrdcg/goacmedns v0.2.0/go.mod h1:T5o6+xvSLrQpugmwHvrSNkzWht0UGAwj2ACBMhh73Cg= +github.com/nrdcg/oci-go-sdk/common/v1065 v1065.105.2 
h1:l0tH15ACQADZAzC+LZ+mo2tIX4H6uZu0ulrVmG5Tqz0= +github.com/nrdcg/oci-go-sdk/common/v1065 v1065.105.2/go.mod h1:Gcs8GCaZXL3FdiDWgdnMxlOLEdRprJJnPYB22TX1jw8= +github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.105.2 h1:gzB4c6ztb38C/jYiqEaFC+mCGcWFHDji9e6jwymY9d4= +github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.105.2/go.mod h1:l1qIPIq2uRV5WTSvkbhbl/ndbeOu7OCb3UZ+0+2ZSb8= +github.com/nrdcg/porkbun v0.4.0 h1:rWweKlwo1PToQ3H+tEO9gPRW0wzzgmI/Ob3n2Guticw= +github.com/nrdcg/porkbun v0.4.0/go.mod h1:/QMskrHEIM0IhC/wY7iTCUgINsxdT2WcOphktJ9+Q54= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/oschwald/maxminddb-golang v1.13.1 h1:G3wwjdN9JmIK2o/ermkHM+98oX5fS+k5MbwsmL4MRQE= github.com/oschwald/maxminddb-golang v1.13.1/go.mod h1:K4pgV9N/GcK694KSTmVSDTODk4IsCNThNdTmnaBZ/F8= +github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE= +github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0= github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod 
h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE= github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= @@ -185,14 +246,20 @@ github.com/samber/slog-common v0.19.0 h1:fNcZb8B2uOLooeYwFpAlKjkQTUafdjfqKcwcC89 github.com/samber/slog-common v0.19.0/go.mod h1:dTz+YOU76aH007YUU0DffsXNsGFQRQllPQh9XyNoA3M= github.com/samber/slog-zerolog/v2 v2.9.0 h1:6LkOabJmZdNLaUWkTC3IVVA+dq7b/V0FM6lz6/7+THI= github.com/samber/slog-zerolog/v2 v2.9.0/go.mod h1:gnQW9VnCfM34v2pRMUIGMsZOVbYLqY/v0Wxu6atSVGc= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36 h1:ObX9hZmK+VmijreZO/8x9pQ8/P/ToHD/bdSb4Eg4tUo= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36/go.mod h1:LEsDu4BubxK7/cWhtlQWfuxwL4rf/2UEpxXz1o1EMtM= github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sony/gobreaker v1.0.0 h1:feX5fGGXSl3dYd4aHZItw+FpHLvvoaqkawKjVNiFMNQ= +github.com/sony/gobreaker v1.0.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4= +github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0= github.com/stretchr/testify 
v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -216,11 +283,17 @@ github.com/valyala/fasthttp v1.68.0 h1:v12Nx16iepr8r9ySOwqI+5RBJ/DqTxhOy1HrHoDFn github.com/valyala/fasthttp v1.68.0/go.mod h1:5EXiRfYQAoiO/khu4oU9VISC/eVY6JqmSpPJoHCKsz4= github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI= github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= +github.com/vultr/govultr/v3 v3.26.1 h1:G/M0rMQKwVSmL+gb0UgETbW5mcQi0Vf/o/ZSGdBCxJw= +github.com/vultr/govultr/v3 v3.26.1/go.mod h1:9WwnWGCKnwDlNjHjtt+j+nP+0QWq6hQXzaHgddqrLWY= github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusing/ds v0.3.1 h1:mCqTgTQD8RhiBpcysvii5kZ7ZBmqcknVsFubNALGLbY= github.com/yusing/ds v0.3.1/go.mod h1:XhKV4l7cZwBbbl7lRzNC9zX27zvCM0frIwiuD40ULRk= +github.com/yusing/godoxy/internal/dnsproviders v0.0.0-20260104140148-1c2515cb298d h1:O6umnEZyKot6IqyOCuLMUuCT8/K8n+lKiQJ+UjmSfVc= +github.com/yusing/godoxy/internal/dnsproviders v0.0.0-20260104140148-1c2515cb298d/go.mod h1:84uz4o4GfD4FhXv3v7620Vj7LtXL0gnxDgL9LA+KmEI= github.com/yusing/gointernals v0.1.16 h1:GrhZZdxzA+jojLEqankctJrOuAYDb7kY1C93S1pVR34= github.com/yusing/gointernals v0.1.16/go.mod h1:B/0FVXt4WPmgzVy3ynzkqKi+BSGaJVmwCJBRXYapo34= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= @@ -243,6 
+316,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= +go.uber.org/ratelimit v0.3.1 h1:K4qVE+byfv/B3tC+4nYWP7v/6SimcO7HzHekoMNBma0= +go.uber.org/ratelimit v0.3.1/go.mod h1:6euWsTB6U/Nb3X++xEUXA8ciPJvr19Q/0h1+oDcJhRk= golang.org/x/arch v0.23.0 h1:lKF64A2jF6Zd8L0knGltUnegD62JMFBiCPBmQpToHhg= golang.org/x/arch v0.23.0/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -334,11 +409,21 @@ golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxb golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.258.0 h1:IKo1j5FBlN74fe5isA2PVozN3Y5pwNKriEgAXPOkDAc= +google.golang.org/api v0.258.0/go.mod h1:qhOMTQEZ6lUps63ZNq9jhODswwjkjYYguA7fA3TBFww= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/go.mod b/go.mod index b3a99c71..5642ca51 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/go-playground/validator/v10 v10.30.1 // validator github.com/gobwas/glob v0.2.3 // glob matcher for route rules github.com/gorilla/websocket v1.5.3 // websocket for API and agent - github.com/gotify/server/v2 v2.7.3 // reference the Message struct for json response + github.com/gotify/server/v2 v2.8.0 // reference the Message struct for json response github.com/lithammer/fuzzysearch v1.1.8 // fuzzy search for searching icons and filtering metrics github.com/pires/go-proxyproto v0.8.1 // proxy protocol support github.com/puzpuzpuz/xsync/v4 v4.2.0 // lock free map for concurrent operations @@ -41,23 +41,23 @@ require ( github.com/docker/cli v29.1.3+incompatible // needs docker/cli/cli/connhelper connection helper for docker client github.com/goccy/go-yaml v1.19.1 // yaml parsing for different config files github.com/golang-jwt/jwt/v5 v5.3.0 // jwt authentication - github.com/luthermonson/go-proxmox v0.2.4 // proxmox API client + github.com/luthermonson/go-proxmox v0.3.1 // proxmox API client github.com/moby/moby/api v1.52.0 // docker API github.com/moby/moby/client v0.2.1 // docker 
client github.com/oschwald/maxminddb-golang v1.13.1 // maxminddb for geoip database github.com/quic-go/quic-go v0.58.0 // http3 support - github.com/shirou/gopsutil/v4 v4.25.11 // system information + github.com/shirou/gopsutil/v4 v4.25.12 // system information github.com/spf13/afero v1.15.0 // afero for file system operations github.com/stretchr/testify v1.11.1 // testing framework github.com/valyala/fasthttp v1.68.0 // fast http for health check github.com/yusing/ds v0.3.1 // data structures and algorithms - github.com/yusing/godoxy/agent v0.0.0-20251230135310-5087800fd763 - github.com/yusing/godoxy/internal/dnsproviders v0.0.0-20251230043958-dba8441e8a5d + github.com/yusing/godoxy/agent v0.0.0-20260104140148-1c2515cb298d + github.com/yusing/godoxy/internal/dnsproviders v0.0.0-20260104140148-1c2515cb298d github.com/yusing/gointernals v0.1.16 github.com/yusing/goutils v0.7.0 - github.com/yusing/goutils/http/reverseproxy v0.0.0-20251217162119-cb0f79b51ce2 - github.com/yusing/goutils/http/websocket v0.0.0-20251217162119-cb0f79b51ce2 - github.com/yusing/goutils/server v0.0.0-20251217162119-cb0f79b51ce2 + github.com/yusing/goutils/http/reverseproxy v0.0.0-20260103043911-785deb23bd64 + github.com/yusing/goutils/http/websocket v0.0.0-20260103043911-785deb23bd64 + github.com/yusing/goutils/server v0.0.0-20260103043911-785deb23bd64 ) require ( diff --git a/go.sum b/go.sum index 49748d7c..bc6fbb48 100644 --- a/go.sum +++ b/go.sum @@ -150,8 +150,8 @@ github.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5 github.com/googleapis/gax-go/v2 v2.16.0/go.mod h1:o1vfQjjNZn4+dPnRdl/4ZD7S9414Y4xA+a/6Icj6l14= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gotify/server/v2 v2.7.3 h1:nro/ZnxdlZFvxFcw9LREGA8zdk6CK744azwhuhX/A4g= -github.com/gotify/server/v2 v2.7.3/go.mod h1:VAtE1RIc/2j886PYs9WPQbMjqbFsoyQ0G8IdFtnAxU0= 
+github.com/gotify/server/v2 v2.8.0 h1:E3UDDn/3rFZi1sjZfbuhXNnxJP3ACZhdcw/iySegPRA= +github.com/gotify/server/v2 v2.8.0/go.mod h1:6ci5adxcE2hf1v+2oowKiQmixOxXV8vU+CRLKP6sqZA= github.com/h2non/gock v1.2.0 h1:K6ol8rfrRkUOefooBC8elXoaNGYkpp7y2qcxGG6BzUE= github.com/h2non/gock v1.2.0/go.mod h1:tNhoxHYW2W42cYkYb1WqzdbYIieALC99kpYr7rH/BQk= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= @@ -188,8 +188,8 @@ github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8 github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4= github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 h1:PwQumkgq4/acIiZhtifTV5OUqqiP82UAl0h87xj/l9k= github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg= -github.com/luthermonson/go-proxmox v0.2.4 h1:XQ6YNUTVvHS7N4EJxWpuqWLW2s1VPtsIblxLV/rGHLw= -github.com/luthermonson/go-proxmox v0.2.4/go.mod h1:oyFgg2WwTEIF0rP6ppjiixOHa5ebK1p8OaRiFhvICBQ= +github.com/luthermonson/go-proxmox v0.3.1 h1:h64s4/zIEQ06TBo0phFKcckV441YpvUPgLfRAptYsjY= +github.com/luthermonson/go-proxmox v0.3.1/go.mod h1:oyFgg2WwTEIF0rP6ppjiixOHa5ebK1p8OaRiFhvICBQ= github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= diff --git a/internal/dnsproviders/go.mod b/internal/dnsproviders/go.mod index d8db282c..129e5834 100644 --- a/internal/dnsproviders/go.mod +++ b/internal/dnsproviders/go.mod @@ -6,7 +6,7 @@ replace github.com/yusing/godoxy => ../.. 
require ( github.com/go-acme/lego/v4 v4.30.1 - github.com/yusing/godoxy v0.21.3 + github.com/yusing/godoxy v0.23.0 ) require ( @@ -47,7 +47,7 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect github.com/googleapis/gax-go/v2 v2.16.0 // indirect - github.com/gotify/server/v2 v2.7.3 // indirect + github.com/gotify/server/v2 v2.8.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.8 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect diff --git a/internal/dnsproviders/go.sum b/internal/dnsproviders/go.sum index 6c6314ec..7fd58a56 100644 --- a/internal/dnsproviders/go.sum +++ b/internal/dnsproviders/go.sum @@ -100,8 +100,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAV github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= github.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5E4Zd0Y= github.com/googleapis/gax-go/v2 v2.16.0/go.mod h1:o1vfQjjNZn4+dPnRdl/4ZD7S9414Y4xA+a/6Icj6l14= -github.com/gotify/server/v2 v2.7.3 h1:nro/ZnxdlZFvxFcw9LREGA8zdk6CK744azwhuhX/A4g= -github.com/gotify/server/v2 v2.7.3/go.mod h1:VAtE1RIc/2j886PYs9WPQbMjqbFsoyQ0G8IdFtnAxU0= +github.com/gotify/server/v2 v2.8.0 h1:E3UDDn/3rFZi1sjZfbuhXNnxJP3ACZhdcw/iySegPRA= +github.com/gotify/server/v2 v2.8.0/go.mod h1:6ci5adxcE2hf1v+2oowKiQmixOxXV8vU+CRLKP6sqZA= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= diff --git a/internal/gopsutil b/internal/gopsutil index 2dec3012..9532b08a 160000 --- a/internal/gopsutil +++ b/internal/gopsutil @@ -1 +1 @@ -Subproject commit 2dec30129b0ebfd2101e147aa621fed6598f080e +Subproject commit 
9532b08adde452459baa610ce7768a6bc811f199 From 61c8ac04e8999adbf98681cfbdc3726e04b387d4 Mon Sep 17 00:00:00 2001 From: yusing Date: Mon, 5 Jan 2026 20:55:04 +0800 Subject: [PATCH 07/51] feat(autocert): add back `inwx` provider --- go.mod | 6 ++++++ go.sum | 13 +++++++++++++ internal/dnsproviders/gen.py | 2 ++ internal/dnsproviders/go.mod | 6 ++++++ internal/dnsproviders/go.sum | 15 +++++++++++++++ internal/dnsproviders/providers.go | 2 ++ 6 files changed, 44 insertions(+) diff --git a/go.mod b/go.mod index 5642ca51..a2858539 100644 --- a/go.mod +++ b/go.mod @@ -147,6 +147,7 @@ require ( require ( github.com/akamai/AkamaiOPEN-edgegrid-golang/v11 v11.1.0 // indirect github.com/andybalholm/brotli v1.2.0 // indirect + github.com/boombuler/barcode v1.1.0 // indirect github.com/bytedance/sonic/loader v0.4.0 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -154,20 +155,25 @@ require ( github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/fatih/color v1.18.0 // indirect + github.com/fatih/structs v1.1.0 // indirect github.com/gin-contrib/sse v1.1.0 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-ozzo/ozzo-validation/v4 v4.3.0 // indirect github.com/go-resty/resty/v2 v2.17.1 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/goccy/go-json v0.10.5 // indirect github.com/google/go-querystring v1.2.0 // indirect github.com/klauspost/compress v1.18.2 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect github.com/linode/linodego v1.63.0 // indirect github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect + github.com/nrdcg/goinwx v0.12.0 // indirect github.com/nrdcg/oci-go-sdk/common/v1065 v1065.105.2 // indirect github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.105.2 // indirect github.com/pierrec/lz4/v4 v4.1.21 // 
indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/pquerna/otp v1.5.0 // indirect github.com/stretchr/objx v0.5.3 // indirect github.com/tklauser/go-sysconf v0.3.16 // indirect github.com/tklauser/numcpus v0.11.0 // indirect diff --git a/go.sum b/go.sum index bc6fbb48..5b290d61 100644 --- a/go.sum +++ b/go.sum @@ -44,6 +44,9 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.1.0 h1:ChaYjBR63fr4LFyGn8E8nt7dBSt3MiU3zMOZqFvVkHo= +github.com/boombuler/barcode v1.1.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/buger/goterm v1.0.4 h1:Z9YvGmOih81P0FbVtEYTFF6YsSgxSUKEhf/f9bTMXbY= github.com/buger/goterm v1.0.4/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE= github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M= @@ -85,6 +88,8 @@ github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab h1:h1Ugj github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab/go.mod h1:GLo/8fDswSAniFG+BFIaiSPcK610jyzgEhWYPQwuQdw= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod 
h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= @@ -121,6 +126,8 @@ github.com/go-resty/resty/v2 v2.17.1 h1:x3aMpHK1YM9e4va/TMDRlusDDoZiQ+ViDu/WpA6x github.com/go-resty/resty/v2 v2.17.1/go.mod h1:kCKZ3wWmwJaNc7S29BRtUhJwy7iqmn+2mLtQrOyQlVA= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= @@ -174,6 +181,8 @@ github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uq github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -218,6 +227,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/nrdcg/goacmedns v0.2.0 
h1:ADMbThobzEMnr6kg2ohs4KGa3LFqmgiBA22/6jUWJR0= github.com/nrdcg/goacmedns v0.2.0/go.mod h1:T5o6+xvSLrQpugmwHvrSNkzWht0UGAwj2ACBMhh73Cg= +github.com/nrdcg/goinwx v0.12.0 h1:ujdUqDBnaRSFwzVnImvPHYw3w3m9XgmGImNUw1GyMb4= +github.com/nrdcg/goinwx v0.12.0/go.mod h1:IrVKd3ZDbFiMjdPgML4CSxZAY9wOoqLvH44zv3NodJ0= github.com/nrdcg/oci-go-sdk/common/v1065 v1065.105.2 h1:l0tH15ACQADZAzC+LZ+mo2tIX4H6uZu0ulrVmG5Tqz0= github.com/nrdcg/oci-go-sdk/common/v1065 v1065.105.2/go.mod h1:Gcs8GCaZXL3FdiDWgdnMxlOLEdRprJJnPYB22TX1jw8= github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.105.2 h1:gzB4c6ztb38C/jYiqEaFC+mCGcWFHDji9e6jwymY9d4= @@ -248,6 +259,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs= +github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/puzpuzpuz/xsync/v4 v4.2.0 h1:dlxm77dZj2c3rxq0/XNvvUKISAmovoXF4a4qM6Wvkr0= github.com/puzpuzpuz/xsync/v4 v4.2.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo= github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= diff --git a/internal/dnsproviders/gen.py b/internal/dnsproviders/gen.py index 9b1e9184..c34c3f29 100644 --- a/internal/dnsproviders/gen.py +++ b/internal/dnsproviders/gen.py @@ -41,6 +41,7 @@ allowlist = [ "hostinger", "httpreq", "ionos", + "inwx", "linode", "namecheap", "netcup", @@ -49,6 +50,7 @@ allowlist = [ "ovh", "porkbun", "rfc2136", + # "route53", "scaleway", "spaceship", "vercel", diff --git a/internal/dnsproviders/go.mod b/internal/dnsproviders/go.mod index 129e5834..5f05f7ff 100644 --- 
a/internal/dnsproviders/go.mod +++ b/internal/dnsproviders/go.mod @@ -22,6 +22,7 @@ require ( github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect github.com/akamai/AkamaiOPEN-edgegrid-golang/v11 v11.1.0 // indirect github.com/benbjohnson/clock v1.3.5 // indirect + github.com/boombuler/barcode v1.1.0 // indirect github.com/bytedance/gopkg v0.1.3 // indirect github.com/bytedance/sonic v1.14.2 // indirect github.com/bytedance/sonic/loader v0.4.0 // indirect @@ -29,6 +30,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudwego/base64x v0.1.6 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/fatih/structs v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/gabriel-vasile/mimetype v1.4.12 // indirect github.com/go-jose/go-jose/v4 v4.1.3 // indirect @@ -39,6 +41,7 @@ require ( github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.30.1 // indirect github.com/go-resty/resty/v2 v2.17.1 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/goccy/go-yaml v1.19.1 // indirect github.com/gofrs/flock v0.13.0 // indirect github.com/golang-jwt/jwt/v5 v5.3.0 // indirect @@ -51,6 +54,7 @@ require ( github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.8 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/linode/linodego v1.63.0 // indirect @@ -60,12 +64,14 @@ require ( github.com/miekg/dns v1.1.69 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/nrdcg/goacmedns v0.2.0 // indirect + github.com/nrdcg/goinwx v0.12.0 // indirect github.com/nrdcg/oci-go-sdk/common/v1065 v1065.105.2 // indirect github.com/nrdcg/oci-go-sdk/dns/v1065 
v1065.105.2 // indirect github.com/nrdcg/porkbun v0.4.0 // indirect github.com/ovh/go-ovh v1.9.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/pquerna/otp v1.5.0 // indirect github.com/puzpuzpuz/xsync/v4 v4.2.0 // indirect github.com/rs/zerolog v1.34.0 // indirect github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36 // indirect diff --git a/internal/dnsproviders/go.sum b/internal/dnsproviders/go.sum index 7fd58a56..2aa01305 100644 --- a/internal/dnsproviders/go.sum +++ b/internal/dnsproviders/go.sum @@ -34,6 +34,9 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.1.0 h1:ChaYjBR63fr4LFyGn8E8nt7dBSt3MiU3zMOZqFvVkHo= +github.com/boombuler/barcode v1.1.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M= github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM= github.com/bytedance/sonic v1.14.2 h1:k1twIoe97C1DtYUo+fZQy865IuHia4PR5RPiuGPPIIE= @@ -53,6 +56,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/fatih/structs v1.1.0 
h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/gabriel-vasile/mimetype v1.4.12 h1:e9hWvmLYvtp846tLHam2o++qitpguFiYCKbn0w9jyqw= @@ -78,6 +83,8 @@ github.com/go-playground/validator/v10 v10.30.1 h1:f3zDSN/zOma+w6+1Wswgd9fLkdwy0 github.com/go-playground/validator/v10 v10.30.1/go.mod h1:oSuBIQzuJxL//3MelwSLD5hc2Tu889bF0Idm9Dg26cM= github.com/go-resty/resty/v2 v2.17.1 h1:x3aMpHK1YM9e4va/TMDRlusDDoZiQ+ViDu/WpA6xTM4= github.com/go-resty/resty/v2 v2.17.1/go.mod h1:kCKZ3wWmwJaNc7S29BRtUhJwy7iqmn+2mLtQrOyQlVA= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/goccy/go-yaml v1.19.1 h1:3rG3+v8pkhRqoQ/88NYNMHYVGYztCOCIZ7UQhu7H+NE= github.com/goccy/go-yaml v1.19.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -114,6 +121,8 @@ github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRt github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text 
v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -139,6 +148,8 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/nrdcg/goacmedns v0.2.0 h1:ADMbThobzEMnr6kg2ohs4KGa3LFqmgiBA22/6jUWJR0= github.com/nrdcg/goacmedns v0.2.0/go.mod h1:T5o6+xvSLrQpugmwHvrSNkzWht0UGAwj2ACBMhh73Cg= +github.com/nrdcg/goinwx v0.12.0 h1:ujdUqDBnaRSFwzVnImvPHYw3w3m9XgmGImNUw1GyMb4= +github.com/nrdcg/goinwx v0.12.0/go.mod h1:IrVKd3ZDbFiMjdPgML4CSxZAY9wOoqLvH44zv3NodJ0= github.com/nrdcg/oci-go-sdk/common/v1065 v1065.105.2 h1:l0tH15ACQADZAzC+LZ+mo2tIX4H6uZu0ulrVmG5Tqz0= github.com/nrdcg/oci-go-sdk/common/v1065 v1065.105.2/go.mod h1:Gcs8GCaZXL3FdiDWgdnMxlOLEdRprJJnPYB22TX1jw8= github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.105.2 h1:gzB4c6ztb38C/jYiqEaFC+mCGcWFHDji9e6jwymY9d4= @@ -153,6 +164,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs= +github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/puzpuzpuz/xsync/v4 v4.2.0 h1:dlxm77dZj2c3rxq0/XNvvUKISAmovoXF4a4qM6Wvkr0= github.com/puzpuzpuz/xsync/v4 v4.2.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= @@ -226,10 +239,12 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.39.0 
h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= diff --git a/internal/dnsproviders/providers.go b/internal/dnsproviders/providers.go index ab6b847a..d21acde7 100644 --- a/internal/dnsproviders/providers.go +++ b/internal/dnsproviders/providers.go @@ -17,6 +17,7 @@ import ( "github.com/go-acme/lego/v4/providers/dns/hetzner" "github.com/go-acme/lego/v4/providers/dns/hostinger" "github.com/go-acme/lego/v4/providers/dns/httpreq" + "github.com/go-acme/lego/v4/providers/dns/inwx" "github.com/go-acme/lego/v4/providers/dns/ionos" "github.com/go-acme/lego/v4/providers/dns/linode" "github.com/go-acme/lego/v4/providers/dns/namecheap" @@ -57,6 +58,7 @@ func InitProviders() { autocert.Providers["hostinger"] = autocert.DNSProvider(hostinger.NewDefaultConfig, hostinger.NewDNSProviderConfig) autocert.Providers["httpreq"] = autocert.DNSProvider(httpreq.NewDefaultConfig, httpreq.NewDNSProviderConfig) autocert.Providers["ionos"] = autocert.DNSProvider(ionos.NewDefaultConfig, ionos.NewDNSProviderConfig) + autocert.Providers["inwx"] = autocert.DNSProvider(inwx.NewDefaultConfig, inwx.NewDNSProviderConfig) autocert.Providers["linode"] = autocert.DNSProvider(linode.NewDefaultConfig, 
linode.NewDNSProviderConfig) autocert.Providers["namecheap"] = autocert.DNSProvider(namecheap.NewDefaultConfig, namecheap.NewDNSProviderConfig) autocert.Providers["netcup"] = autocert.DNSProvider(netcup.NewDefaultConfig, netcup.NewDNSProviderConfig) From 724617a2b382dcb096dd1e511db4df0b4370ad60 Mon Sep 17 00:00:00 2001 From: yusing Date: Mon, 5 Jan 2026 20:58:56 +0800 Subject: [PATCH 08/51] chore(go.mod): update goquery comment and add description for x/sync package --- go.mod | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index a2858539..ec2fc7ad 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ replace ( ) require ( - github.com/PuerkitoBio/goquery v1.11.0 // parsing HTML for extract fav icon + github.com/PuerkitoBio/goquery v1.11.0 // parsing HTML for extract fav icon; modify_html middleware github.com/coreos/go-oidc/v3 v3.17.0 // oidc authentication github.com/fsnotify/fsnotify v1.9.0 // file watcher github.com/gin-gonic/gin v1.11.0 // api server @@ -31,7 +31,7 @@ require ( golang.org/x/crypto v0.46.0 // encrypting password with bcrypt golang.org/x/net v0.48.0 // HTTP header utilities golang.org/x/oauth2 v0.34.0 // oauth2 authentication - golang.org/x/sync v0.19.0 + golang.org/x/sync v0.19.0 // errgroup and singleflight for concurrent operations golang.org/x/time v0.14.0 // time utilities ) From 424398442b3dfb461b9154458f43788ae07b55ae Mon Sep 17 00:00:00 2001 From: yusing Date: Tue, 6 Jan 2026 16:29:35 +0800 Subject: [PATCH 09/51] refactor: replace gperr.Builder with gperr.Group for concurrent error handling - Updated various files to utilize gperr.Group for cleaner concurrency error handling. - Removed sync.WaitGroup usage, simplifying the code structure. - Ensured consistent error reporting across different components. 
--- internal/api/v1/metrics/all_system_info.go | 29 ++++++++--------- internal/config/state.go | 23 +++++++------- internal/idlewatcher/watcher.go | 2 +- internal/metrics/systeminfo/system_info.go | 31 +++++++++---------- .../net/gphttp/loadbalancer/loadbalancer.go | 5 ++- .../gphttp/middleware/middleware_builder.go | 14 ++++----- internal/route/provider/provider.go | 17 +++++----- internal/serialization/serialization.go | 8 ++--- internal/types/idlewatcher.go | 3 +- 9 files changed, 62 insertions(+), 70 deletions(-) diff --git a/internal/api/v1/metrics/all_system_info.go b/internal/api/v1/metrics/all_system_info.go index 29963d19..3e8ae474 100644 --- a/internal/api/v1/metrics/all_system_info.go +++ b/internal/api/v1/metrics/all_system_info.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "net/http" - "sync" "sync/atomic" "time" @@ -103,54 +102,52 @@ func AllSystemInfo(c *gin.Context) { // processing function for one round. doRound := func() (bool, error) { - var roundWg sync.WaitGroup var numErrs atomic.Int32 totalAgents := int32(1) // myself - errs := gperr.NewBuilderWithConcurrency() + var errs gperr.Group // get system info for me and all agents in parallel. 
- roundWg.Go(func() { + errs.Go(func() error { data, err := systeminfo.Poller.GetRespData(req.Period, query) if err != nil { - errs.Add(gperr.Wrap(err, "Main server")) numErrs.Add(1) - return + return gperr.PrependSubject("Main server", err) } select { case <-manager.Done(): - return + return nil case dataCh <- SystemInfoData{ AgentName: "GoDoxy", SystemInfo: data, }: } + return nil }) for _, a := range agent.IterAgents() { totalAgents++ - agentShallowCopy := *a - roundWg.Go(func() { - data, err := getAgentSystemInfoWithRetry(manager.Context(), &agentShallowCopy, queryEncoded) + errs.Go(func() error { + data, err := getAgentSystemInfoWithRetry(manager.Context(), a, queryEncoded) if err != nil { - errs.Add(gperr.Wrap(err, "Agent "+agentShallowCopy.Name)) numErrs.Add(1) - return + return gperr.PrependSubject("Agent "+a.Name, err) } select { case <-manager.Done(): - return + return nil case dataCh <- SystemInfoData{ - AgentName: agentShallowCopy.Name, + AgentName: a.Name, SystemInfo: data, }: } + return nil }) } - roundWg.Wait() - return numErrs.Load() == totalAgents, errs.Error() + err := errs.Wait().Error() + return numErrs.Load() == totalAgents, err } // write system info immediately once. 
diff --git a/internal/config/state.go b/internal/config/state.go index 0586dca0..9605356c 100644 --- a/internal/config/state.go +++ b/internal/config/state.go @@ -302,13 +302,16 @@ func (state *state) initProxmox() error { return nil } - errs := gperr.NewBuilder() + var errs gperr.Group for _, cfg := range proxmoxCfg { - if err := cfg.Init(state.task.Context()); err != nil { - errs.Add(err.Subject(cfg.URL)) - } + errs.Go(func() error { + if err := cfg.Init(state.task.Context()); err != nil { + return err.Subject(cfg.URL) + } + return nil + }) } - return errs.Error() + return errs.Wait().Error() } func (state *state) storeProvider(p types.RouteProvider) { @@ -326,8 +329,8 @@ func (state *state) loadRouteProviders() error { }() providers := &state.Providers - errs := gperr.NewBuilderWithConcurrency("route provider errors") - results := gperr.NewBuilder("loaded route providers") + errs := gperr.NewGroup("route provider errors") + results := gperr.NewGroup("loaded route providers") agent.RemoveAllAgents() @@ -388,8 +391,6 @@ func (state *state) loadRouteProviders() error { } } - results.EnableConcurrency() - // load routes concurrently var providersLoader sync.WaitGroup for _, p := range state.providers.Range { @@ -402,10 +403,10 @@ func (state *state) loadRouteProviders() error { } providersLoader.Wait() - state.tmpLog.Info().Msg(results.String()) + state.tmpLog.Info().Msg(results.Wait().String()) state.printRoutesByProvider(lenLongestName) state.printState() - return errs.Error() + return errs.Wait().Error() } func (state *state) printRoutesByProvider(lenLongestName int) { diff --git a/internal/idlewatcher/watcher.go b/internal/idlewatcher/watcher.go index 92587f76..c2c28845 100644 --- a/internal/idlewatcher/watcher.go +++ b/internal/idlewatcher/watcher.go @@ -143,7 +143,7 @@ func NewWatcher(parent task.Parent, r types.Route, cfg *types.IdlewatcherConfig) } } - depErrors := gperr.NewBuilder() + var depErrors gperr.Builder for i, dep := range cfg.DependsOn { 
depSegments := strings.Split(dep, ":") dep = depSegments[0] diff --git a/internal/metrics/systeminfo/system_info.go b/internal/metrics/systeminfo/system_info.go index 677e66f5..3a2ec761 100644 --- a/internal/metrics/systeminfo/system_info.go +++ b/internal/metrics/systeminfo/system_info.go @@ -4,7 +4,6 @@ import ( "context" "errors" "net/url" - "sync" "syscall" "time" @@ -72,43 +71,41 @@ func isNoDataAvailable(err error) bool { } func getSystemInfo(ctx context.Context, lastResult *SystemInfo) (*SystemInfo, error) { - errs := gperr.NewBuilderWithConcurrency("failed to get system info") + errs := gperr.NewGroup("failed to get system info") var s SystemInfo s.Timestamp = time.Now().Unix() - var wg sync.WaitGroup - if !common.MetricsDisableCPU { - wg.Go(func() { - errs.Add(s.collectCPUInfo(ctx)) + errs.Go(func() error { + return s.collectCPUInfo(ctx) }) } if !common.MetricsDisableMemory { - wg.Go(func() { - errs.Add(s.collectMemoryInfo(ctx)) + errs.Go(func() error { + return s.collectMemoryInfo(ctx) }) } if !common.MetricsDisableDisk { - wg.Go(func() { - errs.Add(s.collectDisksInfo(ctx, lastResult)) + errs.Go(func() error { + return s.collectDisksInfo(ctx, lastResult) }) } if !common.MetricsDisableNetwork { - wg.Go(func() { - errs.Add(s.collectNetworkInfo(ctx, lastResult)) + errs.Go(func() error { + return s.collectNetworkInfo(ctx, lastResult) }) } if !common.MetricsDisableSensors { - wg.Go(func() { - errs.Add(s.collectSensorsInfo(ctx)) + errs.Go(func() error { + return s.collectSensorsInfo(ctx) }) } - wg.Wait() - if errs.HasError() { + result := errs.Wait() + if result.HasError() { allWarnings := gperr.NewBuilder("") allErrors := gperr.NewBuilder("failed to get system info") - errs.ForEach(func(err error) { + result.ForEach(func(err error) { warnings := new(warning.Warning) if errors.As(err, &warnings) { for _, warning := range warnings.List { diff --git a/internal/net/gphttp/loadbalancer/loadbalancer.go b/internal/net/gphttp/loadbalancer/loadbalancer.go index 
688d906a..c4316b2d 100644 --- a/internal/net/gphttp/loadbalancer/loadbalancer.go +++ b/internal/net/gphttp/loadbalancer/loadbalancer.go @@ -13,7 +13,6 @@ import ( gperr "github.com/yusing/goutils/errs" "github.com/yusing/goutils/pool" "github.com/yusing/goutils/task" - "golang.org/x/sync/errgroup" ) // TODO: stats of each server. @@ -223,7 +222,7 @@ func (lb *LoadBalancer) ServeHTTP(rw http.ResponseWriter, r *http.Request) { return } if r.URL.Path == idlewatcher.WakeEventsPath { - var errs errgroup.Group + var errs gperr.Group // wake all servers for _, srv := range srvs { errs.Go(func() error { @@ -234,7 +233,7 @@ func (lb *LoadBalancer) ServeHTTP(rw http.ResponseWriter, r *http.Request) { return nil }) } - if err := errs.Wait(); err != nil { + if err := errs.Wait().Error(); err != nil { gperr.LogWarn("failed to wake some servers", err, &lb.l) } } diff --git a/internal/net/gphttp/middleware/middleware_builder.go b/internal/net/gphttp/middleware/middleware_builder.go index f5a14c08..738e439c 100644 --- a/internal/net/gphttp/middleware/middleware_builder.go +++ b/internal/net/gphttp/middleware/middleware_builder.go @@ -43,8 +43,8 @@ func BuildMiddlewaresFromYAML(source string, data []byte, eb *gperr.Builder) map func compileMiddlewares(middlewaresMap map[string]OptionsRaw) ([]*Middleware, gperr.Error) { middlewares := make([]*Middleware, 0, len(middlewaresMap)) - errs := gperr.NewBuilder() - invalidOpts := gperr.NewBuilder() + var errs gperr.Builder + var invalidOpts gperr.Builder for name, opts := range middlewaresMap { m, err := Get(name) @@ -55,7 +55,7 @@ func compileMiddlewares(middlewaresMap map[string]OptionsRaw) ([]*Middleware, gp m, err = m.New(opts) if err != nil { - invalidOpts.Add(err.Subject("middlewares." + name)) + invalidOpts.AddSubjectf(err, "middlewares.%s", name) continue } middlewares = append(middlewares, m) @@ -78,23 +78,23 @@ func BuildMiddlewareFromMap(name string, middlewaresMap map[string]OptionsRaw) ( // TODO: check conflict or duplicates. 
func BuildMiddlewareFromChainRaw(name string, defs []map[string]any) (*Middleware, gperr.Error) { - chainErr := gperr.NewBuilder("") + var chainErr gperr.Builder chain := make([]*Middleware, 0, len(defs)) for i, def := range defs { if def["use"] == nil || def["use"] == "" { - chainErr.Add(ErrMissingMiddlewareUse.Subjectf("%s[%d]", name, i)) + chainErr.AddSubjectf(ErrMissingMiddlewareUse, "%s[%d]", name, i) continue } baseName := def["use"].(string) base, err := Get(baseName) if err != nil { - chainErr.Add(err.Subjectf("%s[%d]", name, i)) + chainErr.AddSubjectf(err, "%s[%d]", name, i) continue } delete(def, "use") m, err := base.New(def) if err != nil { - chainErr.Add(err.Subjectf("%s[%d]", name, i)) + chainErr.AddSubjectf(err, "%s[%d]", name, i) continue } m.name = fmt.Sprintf("%s[%d]", name, i) diff --git a/internal/route/provider/provider.go b/internal/route/provider/provider.go index aee257a7..de32fe8a 100644 --- a/internal/route/provider/provider.go +++ b/internal/route/provider/provider.go @@ -97,8 +97,7 @@ func (p *Provider) MarshalText() ([]byte, error) { // Start implements task.TaskStarter. 
func (p *Provider) Start(parent task.Parent) gperr.Error { - errs := gperr.NewBuilder("routes error") - errs.EnableConcurrency() + errs := gperr.NewGroup("routes error") t := parent.Subtask("provider."+p.String(), false) @@ -108,15 +107,13 @@ func (p *Provider) Start(parent task.Parent) gperr.Error { routeSlice = append(routeSlice, r) } - var wg sync.WaitGroup for _, r := range routeSlice { - wg.Add(1) - go func(r *route.Route) { - defer wg.Done() - errs.Add(p.startRoute(t, r)) - }(r) + errs.Go(func() error { + return p.startRoute(t, r) + }) } - wg.Wait() + + err := errs.Wait().Error() eventQueue := events.NewEventQueue( t.Subtask("event_queue", false), @@ -133,7 +130,7 @@ func (p *Provider) Start(parent task.Parent) gperr.Error { ) eventQueue.Start(p.watcher.Events(t.Context())) - if err := errs.Error(); err != nil { + if err != nil { return err.Subject(p.String()) } return nil diff --git a/internal/serialization/serialization.go b/internal/serialization/serialization.go index 0cd14da1..5dd777a8 100644 --- a/internal/serialization/serialization.go +++ b/internal/serialization/serialization.go @@ -86,7 +86,7 @@ func initPtr(dst reflect.Value) { } func ValidateWithFieldTags(s any) gperr.Error { - errs := gperr.NewBuilder() + var errs gperr.Builder err := validate.Struct(s) var valErrs validator.ValidationErrors if errors.As(err, &valErrs) { @@ -302,7 +302,7 @@ func mapUnmarshalValidate(src SerializedObject, dstV reflect.Value, checkValidat // convert target fields to lower no-snake // then check if the field of data is in the target - errs := gperr.NewBuilder() + var errs gperr.Builder switch dstV.Kind() { case reflect.Struct, reflect.Interface: @@ -457,7 +457,7 @@ func Convert(src reflect.Value, dst reflect.Value, checkValidateTag bool) gperr. 
if dstT.Kind() != reflect.Slice { return ErrUnsupportedConversion.Subject(dstT.String() + " to " + srcT.String()) } - sliceErrs := gperr.NewBuilder() + var sliceErrs gperr.Builder i := 0 gi.ReflectInitSlice(dst, srcLen, srcLen) for j, v := range src.Seq2() { @@ -541,7 +541,7 @@ func ConvertString(src string, dst reflect.Value) (convertible bool, convErr gpe if !isMultiline && src[0] != '-' { values := strutils.CommaSeperatedList(src) gi.ReflectInitSlice(dst, len(values), len(values)) - errs := gperr.NewBuilder() + var errs gperr.Builder for i, v := range values { _, err := ConvertString(v, dst.Index(i)) if err != nil { diff --git a/internal/types/idlewatcher.go b/internal/types/idlewatcher.go index 38d22554..624341d0 100644 --- a/internal/types/idlewatcher.go +++ b/internal/types/idlewatcher.go @@ -76,7 +76,8 @@ func (c *IdlewatcherConfig) Validate() gperr.Error { c.valErr = nil return nil } - errs := gperr.NewBuilder() + + var errs gperr.Builder errs.AddRange( c.validateProvider(), c.validateTimeouts(), From a44b9e352c5e83d4cbfb18ce136264a699e51c09 Mon Sep 17 00:00:00 2001 From: yusing Date: Tue, 6 Jan 2026 16:38:49 +0800 Subject: [PATCH 10/51] refactor(docker): simplify flow of isLocal check --- internal/docker/container.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/internal/docker/container.go b/internal/docker/container.go index a36f4c87..0eb3fbbf 100644 --- a/internal/docker/container.go +++ b/internal/docker/container.go @@ -175,11 +175,14 @@ func isLocal(c *types.Container) bool { return false } hostname := url.Hostname() + if hostname == "localhost" { + return true + } ip := net.ParseIP(hostname) if ip != nil { return ip.IsLoopback() || ip.IsUnspecified() } - return hostname == "localhost" + return false } func setPublicHostname(c *types.Container) { From 08f4d9e95ff1d40df5efc29b8cf3fdd9cbf6a4c0 Mon Sep 17 00:00:00 2001 From: yusing Date: Wed, 7 Jan 2026 10:24:08 +0800 Subject: [PATCH 11/51] chore: update goutils --- goutils | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/goutils b/goutils index 785deb23..06f20f6b 160000 --- a/goutils +++ b/goutils @@ -1 +1 @@ -Subproject commit 785deb23bd64fb9db28875ae39cf3ea6675fb146 +Subproject commit 06f20f6b8710dabab8287b128d109d065da3b281 From 9205af3a4f1fbfc70aa5286c282f768fd676af90 Mon Sep 17 00:00:00 2001 From: yusing Date: Wed, 7 Jan 2026 10:54:33 +0800 Subject: [PATCH 12/51] feat(api/cert): enhance certificate info retrieval - Introduced a new method `GetCertInfos` to fetch details of all available certificates. - Updated the `Info` handler to return an array of `CertInfo` instead of a single certificate. - Improved error handling for cases with no available certificates. - Refactored related error messages for clarity. --- internal/api/v1/cert/info.go | 40 +++++++++++-------------------- internal/api/v1/docs/swagger.json | 11 +++++---- internal/api/v1/docs/swagger.yaml | 10 ++++---- internal/autocert/provider.go | 39 +++++++++++++++++++++++++++--- 4 files changed, 63 insertions(+), 37 deletions(-) diff --git a/internal/api/v1/cert/info.go b/internal/api/v1/cert/info.go index 882f3763..412642e7 100644 --- a/internal/api/v1/cert/info.go +++ b/internal/api/v1/cert/info.go @@ -1,6 +1,7 @@ package certapi import ( + "errors" "net/http" "github.com/gin-gonic/gin" @@ -8,46 +9,33 @@ import ( apitypes "github.com/yusing/goutils/apitypes" ) -type CertInfo struct { - Subject string `json:"subject"` - Issuer string `json:"issuer"` - NotBefore int64 `json:"not_before"` - NotAfter int64 `json:"not_after"` - DNSNames []string `json:"dns_names"` - EmailAddresses []string `json:"email_addresses"` -} // @name CertInfo - // @x-id "info" // @BasePath /api/v1 // @Summary Get cert info // @Description Get cert info // @Tags cert // @Produce json -// @Success 200 {object} CertInfo -// @Failure 403 {object} apitypes.ErrorResponse -// @Failure 404 {object} apitypes.ErrorResponse -// @Failure 500 {object} apitypes.ErrorResponse -// @Router /cert/info 
[get] +// @Success 200 {array} autocert.CertInfo +// @Failure 403 {object} apitypes.ErrorResponse "Unauthorized" +// @Failure 404 {object} apitypes.ErrorResponse "No certificates found or autocert is not enabled" +// @Failure 500 {object} apitypes.ErrorResponse "Internal server error" +// @Router /cert/info [get] func Info(c *gin.Context) { - autocert := autocert.ActiveProvider.Load() - if autocert == nil { + provider := autocert.ActiveProvider.Load() + if provider == nil { c.JSON(http.StatusNotFound, apitypes.Error("autocert is not enabled")) return } - cert, err := autocert.GetCert(nil) + certInfos, err := provider.GetCertInfos() if err != nil { + if errors.Is(err, autocert.ErrNoCertificates) { + c.JSON(http.StatusNotFound, apitypes.Error("no certificate found")) + return + } c.Error(apitypes.InternalServerError(err, "failed to get cert info")) return } - certInfo := CertInfo{ - Subject: cert.Leaf.Subject.CommonName, - Issuer: cert.Leaf.Issuer.CommonName, - NotBefore: cert.Leaf.NotBefore.Unix(), - NotAfter: cert.Leaf.NotAfter.Unix(), - DNSNames: cert.Leaf.DNSNames, - EmailAddresses: cert.Leaf.EmailAddresses, - } - c.JSON(http.StatusOK, certInfo) + c.JSON(http.StatusOK, certInfos) } diff --git a/internal/api/v1/docs/swagger.json b/internal/api/v1/docs/swagger.json index da35ecc3..aad503bf 100644 --- a/internal/api/v1/docs/swagger.json +++ b/internal/api/v1/docs/swagger.json @@ -328,23 +328,26 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/CertInfo" + "type": "array", + "items": { + "$ref": "#/definitions/CertInfo" + } } }, "403": { - "description": "Forbidden", + "description": "Unauthorized", "schema": { "$ref": "#/definitions/ErrorResponse" } }, "404": { - "description": "Not Found", + "description": "No certificates found or autocert is not enabled", "schema": { "$ref": "#/definitions/ErrorResponse" } }, "500": { - "description": "Internal Server Error", + "description": "Internal server error", "schema": { "$ref": 
"#/definitions/ErrorResponse" } diff --git a/internal/api/v1/docs/swagger.yaml b/internal/api/v1/docs/swagger.yaml index 1865cedd..e4dd69b5 100644 --- a/internal/api/v1/docs/swagger.yaml +++ b/internal/api/v1/docs/swagger.yaml @@ -1886,17 +1886,19 @@ paths: "200": description: OK schema: - $ref: '#/definitions/CertInfo' + items: + $ref: '#/definitions/CertInfo' + type: array "403": - description: Forbidden + description: Unauthorized schema: $ref: '#/definitions/ErrorResponse' "404": - description: Not Found + description: No certificates found or autocert is not enabled schema: $ref: '#/definitions/ErrorResponse' "500": - description: Internal Server Error + description: Internal server error schema: $ref: '#/definitions/ErrorResponse' summary: Get cert info diff --git a/internal/autocert/provider.go b/internal/autocert/provider.go index 4be580c2..df3e56a8 100644 --- a/internal/autocert/provider.go +++ b/internal/autocert/provider.go @@ -55,10 +55,20 @@ type ( } CertExpiries map[string]time.Time - RenewMode uint8 + + CertInfo struct { + Subject string `json:"subject"` + Issuer string `json:"issuer"` + NotBefore int64 `json:"not_before"` + NotAfter int64 `json:"not_after"` + DNSNames []string `json:"dns_names"` + EmailAddresses []string `json:"email_addresses"` + } // @name CertInfo + + RenewMode uint8 ) -var ErrNoCertificate = errors.New("no certificate found") +var ErrNoCertificates = errors.New("no certificates found") const ( // renew failed for whatever reason, 1 hour cooldown @@ -98,7 +108,7 @@ func NewProvider(cfg *Config, user *User, legoCfg *lego.Config) (*Provider, erro func (p *Provider) GetCert(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { if p.tlsCert == nil { - return nil, ErrNoCertificate + return nil, ErrNoCertificates } if hello == nil || hello.ServerName == "" { return p.tlsCert, nil @@ -109,6 +119,29 @@ func (p *Provider) GetCert(hello *tls.ClientHelloInfo) (*tls.Certificate, error) return p.tlsCert, nil } +func (p *Provider) 
GetCertInfos() ([]CertInfo, error) { + allProviders := p.allProviders() + certInfos := make([]CertInfo, 0, len(allProviders)) + for _, provider := range allProviders { + if provider.tlsCert == nil { + continue + } + certInfos = append(certInfos, CertInfo{ + Subject: provider.tlsCert.Leaf.Subject.CommonName, + Issuer: provider.tlsCert.Leaf.Issuer.CommonName, + NotBefore: provider.tlsCert.Leaf.NotBefore.Unix(), + NotAfter: provider.tlsCert.Leaf.NotAfter.Unix(), + DNSNames: provider.tlsCert.Leaf.DNSNames, + EmailAddresses: provider.tlsCert.Leaf.EmailAddresses, + }) + } + + if len(certInfos) == 0 { + return nil, ErrNoCertificates + } + return certInfos, nil +} + func (p *Provider) GetName() string { if p.cfg.idx == 0 { return "main" From 25ceb512b4e9d7cf3671055bddd033bd6e9b3abc Mon Sep 17 00:00:00 2001 From: yusing Date: Wed, 7 Jan 2026 15:05:55 +0800 Subject: [PATCH 13/51] feat(route): add bind address support for TCP/UDP routes - Introduced a new `Bind` field in the route configuration to specify the address to listen on for TCP and UDP routes. - Defaulted the bind address to "0.0.0.0" if not provided. - Enhanced validation to ensure the bind address is a valid IP. - Updated stream initialization to use the correct network type (tcp4/tcp6 or udp4/udp6) based on the bind address. - Refactored stream creation functions to accept the network type as a parameter. 
--- internal/api/v1/docs/swagger.json | 10 ++++++++++ internal/api/v1/docs/swagger.yaml | 8 ++++++++ internal/route/route.go | 27 ++++++++++++++++++++++++++- internal/route/stream.go | 11 +++++++---- internal/route/stream/tcp_tcp.go | 11 ++++++----- internal/route/stream/udp_udp.go | 20 +++++++++----------- 6 files changed, 66 insertions(+), 21 deletions(-) diff --git a/internal/api/v1/docs/swagger.json b/internal/api/v1/docs/swagger.json index aad503bf..03ea87f4 100644 --- a/internal/api/v1/docs/swagger.json +++ b/internal/api/v1/docs/swagger.json @@ -4189,6 +4189,11 @@ "x-nullable": false, "x-omitempty": false }, + "bind": { + "description": "for TCP and UDP routes, bind address to listen on", + "type": "string", + "x-nullable": true + }, "container": { "description": "Docker only", "allOf": [ @@ -5327,6 +5332,11 @@ "x-nullable": false, "x-omitempty": false }, + "bind": { + "description": "for TCP and UDP routes, bind address to listen on", + "type": "string", + "x-nullable": true + }, "container": { "description": "Docker only", "allOf": [ diff --git a/internal/api/v1/docs/swagger.yaml b/internal/api/v1/docs/swagger.yaml index e4dd69b5..38cb6d7c 100644 --- a/internal/api/v1/docs/swagger.yaml +++ b/internal/api/v1/docs/swagger.yaml @@ -879,6 +879,10 @@ definitions: type: string alias: type: string + bind: + description: for TCP and UDP routes, bind address to listen on + type: string + x-nullable: true container: allOf: - $ref: '#/definitions/Container' @@ -1495,6 +1499,10 @@ definitions: type: string alias: type: string + bind: + description: for TCP and UDP routes, bind address to listen on + type: string + x-nullable: true container: allOf: - $ref: '#/definitions/Container' diff --git a/internal/route/route.go b/internal/route/route.go index ef5cec60..f1f162db 100644 --- a/internal/route/route.go +++ b/internal/route/route.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "net" "net/url" "os" "reflect" @@ -47,6 +48,9 @@ type ( Host string 
`json:"host,omitempty"` Port route.Port `json:"port"` + // for TCP and UDP routes, bind address to listen on + Bind string `json:"bind,omitempty" validate:"omitempty,ip_addr" extensions:"x-nullable"` + Root string `json:"root,omitempty"` SPA bool `json:"spa,omitempty"` // Single-page app mode: serves index for non-existent paths Index string `json:"index,omitempty"` // Index file to serve for single-page app mode @@ -278,7 +282,28 @@ func (r *Route) validate() gperr.Error { r.ProxyURL = gperr.Collect(&errs, nettypes.ParseURL, fmt.Sprintf("%s://%s:%d", r.Scheme, r.Host, r.Port.Proxy)) case route.SchemeTCP, route.SchemeUDP: if !r.ShouldExclude() { - r.LisURL = gperr.Collect(&errs, nettypes.ParseURL, fmt.Sprintf("%s://:%d", r.Scheme, r.Port.Listening)) + if r.Bind == "" { + r.Bind = "0.0.0.0" + } + bindIP := net.ParseIP(r.Bind) + if bindIP == nil { + return gperr.Errorf("invalid bind address %s", r.Bind) + } + var scheme string + if bindIP.To4() == nil { // IPv6 + if r.Scheme == route.SchemeTCP { + scheme = "tcp6" + } else { + scheme = "udp6" + } + } else { + if r.Scheme == route.SchemeTCP { + scheme = "tcp4" + } else { + scheme = "udp4" + } + } + r.LisURL = gperr.Collect(&errs, nettypes.ParseURL, fmt.Sprintf("%s://%s:%d", scheme, r.Bind, r.Port.Listening)) } r.ProxyURL = gperr.Collect(&errs, nettypes.ParseURL, fmt.Sprintf("%s://%s:%d", r.Scheme, r.Host, r.Port.Proxy)) } diff --git a/internal/route/stream.go b/internal/route/stream.go index 0c38ef74..7c97bef2 100755 --- a/internal/route/stream.go +++ b/internal/route/stream.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net" + "strings" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -30,7 +31,7 @@ func NewStreamRoute(base *Route) (types.Route, gperr.Error) { return &StreamRoute{ Route: base, l: log.With(). - Str("type", string(base.Scheme)). + Str("type", base.LisURL.Scheme). Str("name", base.Name()). 
Logger(), }, nil @@ -99,7 +100,9 @@ func (r *StreamRoute) LocalAddr() net.Addr { func (r *StreamRoute) initStream() (nettypes.Stream, error) { lurl, rurl := r.LisURL, r.ProxyURL - if lurl != nil && lurl.Scheme != rurl.Scheme { + // lurl scheme is either tcp4/tcp6 -> tcp, udp4/udp6 -> udp + // rurl scheme does not have the trailing 4/6 + if strings.TrimRight(lurl.Scheme, "46") != rurl.Scheme { return nil, fmt.Errorf("incoherent scheme is not yet supported: %s != %s", lurl.Scheme, rurl.Scheme) } @@ -110,9 +113,9 @@ func (r *StreamRoute) initStream() (nettypes.Stream, error) { switch rurl.Scheme { case "tcp": - return stream.NewTCPTCPStream(laddr, rurl.Host) + return stream.NewTCPTCPStream(r.LisURL.Scheme, laddr, rurl.Host) case "udp": - return stream.NewUDPUDPStream(laddr, rurl.Host) + return stream.NewUDPUDPStream(r.LisURL.Scheme, laddr, rurl.Host) } return nil, fmt.Errorf("unknown scheme: %s", rurl.Scheme) } diff --git a/internal/route/stream/tcp_tcp.go b/internal/route/stream/tcp_tcp.go index 1033d519..429c7cc4 100644 --- a/internal/route/stream/tcp_tcp.go +++ b/internal/route/stream/tcp_tcp.go @@ -14,6 +14,7 @@ import ( ) type TCPTCPStream struct { + network string listener net.Listener laddr *net.TCPAddr dst *net.TCPAddr @@ -24,21 +25,21 @@ type TCPTCPStream struct { closed atomic.Bool } -func NewTCPTCPStream(listenAddr, dstAddr string) (nettypes.Stream, error) { - dst, err := net.ResolveTCPAddr("tcp", dstAddr) +func NewTCPTCPStream(network, listenAddr, dstAddr string) (nettypes.Stream, error) { + dst, err := net.ResolveTCPAddr(network, dstAddr) if err != nil { return nil, err } - laddr, err := net.ResolveTCPAddr("tcp", listenAddr) + laddr, err := net.ResolveTCPAddr(network, listenAddr) if err != nil { return nil, err } - return &TCPTCPStream{laddr: laddr, dst: dst}, nil + return &TCPTCPStream{network: network, laddr: laddr, dst: dst}, nil } func (s *TCPTCPStream) ListenAndServe(ctx context.Context, preDial, onRead nettypes.HookFunc) { var err error - 
s.listener, err = net.ListenTCP("tcp", s.laddr) + s.listener, err = net.ListenTCP(s.network, s.laddr) if err != nil { logErr(s, err, "failed to listen") return diff --git a/internal/route/stream/udp_udp.go b/internal/route/stream/udp_udp.go index 20b813a9..8600e962 100644 --- a/internal/route/stream/udp_udp.go +++ b/internal/route/stream/udp_udp.go @@ -17,7 +17,7 @@ import ( ) type UDPUDPStream struct { - name string + network string listener net.PacketConn laddr *net.UDPAddr @@ -51,25 +51,26 @@ const ( var bufPool = synk.GetSizedBytesPool() -func NewUDPUDPStream(listenAddr, dstAddr string) (nettypes.Stream, error) { - dst, err := net.ResolveUDPAddr("udp", dstAddr) +func NewUDPUDPStream(network, listenAddr, dstAddr string) (nettypes.Stream, error) { + dst, err := net.ResolveUDPAddr(network, dstAddr) if err != nil { return nil, err } - laddr, err := net.ResolveUDPAddr("udp", listenAddr) + laddr, err := net.ResolveUDPAddr(network, listenAddr) if err != nil { return nil, err } return &UDPUDPStream{ - laddr: laddr, - dst: dst, - conns: make(map[string]*udpUDPConn), + network: network, + laddr: laddr, + dst: dst, + conns: make(map[string]*udpUDPConn), }, nil } func (s *UDPUDPStream) ListenAndServe(ctx context.Context, preDial, onRead nettypes.HookFunc) { var err error - s.listener, err = net.ListenUDP("udp", s.laddr) + s.listener, err = net.ListenUDP(s.network, s.laddr) if err != nil { logErr(s, err, "failed to listen") return @@ -114,9 +115,6 @@ func (s *UDPUDPStream) LocalAddr() net.Addr { func (s *UDPUDPStream) MarshalZerologObject(e *zerolog.Event) { e.Str("protocol", "udp-udp") - if s.name != "" { - e.Str("name", s.name) - } if s.dst != nil { e.Str("dst", s.dst.String()) } From 7bfb57ea30a2f9f495eb4037d15178b10ddea453 Mon Sep 17 00:00:00 2001 From: yusing Date: Wed, 7 Jan 2026 15:24:09 +0800 Subject: [PATCH 14/51] fix(stream): nil panic for excluded routes --- internal/route/stream.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff 
--git a/internal/route/stream.go b/internal/route/stream.go index 7c97bef2..f8dac179 100755 --- a/internal/route/stream.go +++ b/internal/route/stream.go @@ -28,13 +28,7 @@ type StreamRoute struct { func NewStreamRoute(base *Route) (types.Route, gperr.Error) { // TODO: support non-coherent scheme - return &StreamRoute{ - Route: base, - l: log.With(). - Str("type", base.LisURL.Scheme). - Str("name", base.Name()). - Logger(), - }, nil + return &StreamRoute{Route: base}, nil } func (r *StreamRoute) Stream() nettypes.Stream { @@ -43,6 +37,10 @@ func (r *StreamRoute) Stream() nettypes.Stream { // Start implements task.TaskStarter. func (r *StreamRoute) Start(parent task.Parent) gperr.Error { + if r.LisURL == nil { + return gperr.Errorf("listen URL is not set") + } + stream, err := r.initStream() if err != nil { return gperr.Wrap(err) @@ -71,7 +69,11 @@ func (r *StreamRoute) Start(parent task.Parent) gperr.Error { } r.ListenAndServe(r.task.Context(), nil, nil) - r.l = r.l.With().Stringer("rurl", r.ProxyURL).Stringer("laddr", r.LocalAddr()).Logger() + r.l = log.With(). + Str("type", r.LisURL.Scheme). + Str("name", r.Name()). + Stringer("rurl", r.ProxyURL). 
+ Stringer("laddr", r.LocalAddr()).Logger() r.l.Info().Msg("stream started") r.task.OnCancel("close_stream", func() { From 1ebba202167d093bd6a3c9e4569d25f2902a40f9 Mon Sep 17 00:00:00 2001 From: yusing Date: Wed, 7 Jan 2026 15:28:53 +0800 Subject: [PATCH 15/51] fix(docker): add TLS check; correct dial handling and reconnection for custom docker provider; modernize pointer arithmetic with unsafe.Add --- internal/docker/client.go | 22 ++++++++++++++++++++-- internal/watcher/docker_watcher.go | 2 +- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/internal/docker/client.go b/internal/docker/client.go index a1ac7612..eaaefe9d 100644 --- a/internal/docker/client.go +++ b/internal/docker/client.go @@ -6,6 +6,7 @@ import ( "maps" "net" "net/http" + "net/url" "reflect" "sync" "sync/atomic" @@ -169,9 +170,26 @@ func NewClient(cfg types.DockerProviderConfig, unique ...bool) (*SharedClient, e client.WithDialContext(helper.Dialer), } } else { + // connhelper.GetConnectionHelper already parsed the host without error + url, _ := url.Parse(host) opt = []client.Opt{ client.WithHost(host), } + switch url.Scheme { + case "", "tls", "http", "https": + if (url.Scheme == "https" || url.Scheme == "tls") && cfg.TLS == nil { + return nil, fmt.Errorf("TLS config is not set when using %s:// host", url.Scheme) + } + + dial = func(ctx context.Context) (net.Conn, error) { + var dialer net.Dialer + return dialer.DialContext(ctx, "tcp", url.Host) + } + + opt = append(opt, client.WithDialContext(func(ctx context.Context, _, _ string) (net.Conn, error) { + return dial(ctx) + })) + } } } @@ -212,7 +230,7 @@ func NewClient(cfg types.DockerProviderConfig, unique ...bool) (*SharedClient, e } func (c *SharedClient) GetHTTPClient() **http.Client { - return (**http.Client)(unsafe.Pointer(uintptr(unsafe.Pointer(c.Client)) + clientClientOffset)) + return (**http.Client)(unsafe.Add(unsafe.Pointer(c.Client), clientClientOffset)) } func (c *SharedClient) InterceptHTTPClient(intercept
httputils.InterceptFunc) { @@ -279,6 +297,6 @@ func (c *SharedClient) unotel() { log.Debug().Str("host", c.DaemonHost()).Msgf("docker client transport is not an otelhttp.Transport: %T", httpClient.Transport) return } - transport := *(*http.RoundTripper)(unsafe.Pointer(uintptr(unsafe.Pointer(otelTransport)) + otelRtOffset)) + transport := *(*http.RoundTripper)(unsafe.Add(unsafe.Pointer(otelTransport), otelRtOffset)) httpClient.Transport = transport } diff --git a/internal/watcher/docker_watcher.go b/internal/watcher/docker_watcher.go index c5359210..472d1fd9 100644 --- a/internal/watcher/docker_watcher.go +++ b/internal/watcher/docker_watcher.go @@ -177,7 +177,7 @@ func checkConnection(ctx context.Context, client *docker.SharedClient) bool { defer cancel() err := client.CheckConnection(ctx) if err != nil { - log.Debug().Err(err).Msg("docker watcher: connection failed") + log.Debug().Err(err).Str("host", client.Address()).Msg("docker watcher: connection failed") return false } return true From 9ea9e62ee8459785e5be6c6a2c00ac3dd15e0355 Mon Sep 17 00:00:00 2001 From: yusing Date: Wed, 7 Jan 2026 17:17:12 +0800 Subject: [PATCH 16/51] refactor: remove NoCopy struct; move RefCounter struct to goutils and update usage; remove internal/utils entirely --- goutils | 2 +- internal/idlewatcher/watcher.go | 2 - internal/logging/accesslog/file_logger.go | 6 +- internal/net/gphttp/loadbalancer/server.go | 3 - internal/route/route.go | 3 - internal/utils/nocopy.go | 8 --- internal/utils/ref_count.go | 54 --------------- internal/utils/ref_count_test.go | 78 ---------------------- 8 files changed, 4 insertions(+), 152 deletions(-) delete mode 100644 internal/utils/nocopy.go delete mode 100644 internal/utils/ref_count.go delete mode 100644 internal/utils/ref_count_test.go diff --git a/goutils b/goutils index 06f20f6b..4b046d27 160000 --- a/goutils +++ b/goutils @@ -1 +1 @@ -Subproject commit 06f20f6b8710dabab8287b128d109d065da3b281 +Subproject commit 
4b046d275fbfc49de3c02038983b775a95a3a8f6 diff --git a/internal/idlewatcher/watcher.go b/internal/idlewatcher/watcher.go index c2c28845..de19e3fe 100644 --- a/internal/idlewatcher/watcher.go +++ b/internal/idlewatcher/watcher.go @@ -19,7 +19,6 @@ import ( nettypes "github.com/yusing/godoxy/internal/net/types" "github.com/yusing/godoxy/internal/route/routes" "github.com/yusing/godoxy/internal/types" - U "github.com/yusing/godoxy/internal/utils" "github.com/yusing/godoxy/internal/watcher/events" "github.com/yusing/godoxy/internal/watcher/health/monitor" gperr "github.com/yusing/goutils/errs" @@ -48,7 +47,6 @@ type ( } Watcher struct { - _ U.NoCopy routeHelper l zerolog.Logger diff --git a/internal/logging/accesslog/file_logger.go b/internal/logging/accesslog/file_logger.go index 168ceb36..a845fb7c 100644 --- a/internal/logging/accesslog/file_logger.go +++ b/internal/logging/accesslog/file_logger.go @@ -8,7 +8,7 @@ import ( "sync" "github.com/rs/zerolog/log" - "github.com/yusing/godoxy/internal/utils" + "github.com/yusing/goutils/synk" ) type File struct { @@ -18,7 +18,7 @@ type File struct { // Store it for later delete from `openedFiles`. 
path string - refCount *utils.RefCount + refCount *synk.RefCount } var ( @@ -55,7 +55,7 @@ func NewFileIO(path string) (Writer, error) { if _, err := f.Seek(0, io.SeekEnd); err != nil { return nil, fmt.Errorf("access log seek error: %w", err) } - file = &File{f: f, path: path, refCount: utils.NewRefCounter()} + file = &File{f: f, path: path, refCount: synk.NewRefCounter()} openedFiles[path] = file go file.closeOnZero() return file, nil diff --git a/internal/net/gphttp/loadbalancer/server.go b/internal/net/gphttp/loadbalancer/server.go index b94aba09..26e4b800 100644 --- a/internal/net/gphttp/loadbalancer/server.go +++ b/internal/net/gphttp/loadbalancer/server.go @@ -7,12 +7,9 @@ import ( idlewatcher "github.com/yusing/godoxy/internal/idlewatcher/types" nettypes "github.com/yusing/godoxy/internal/net/types" "github.com/yusing/godoxy/internal/types" - U "github.com/yusing/godoxy/internal/utils" ) type server struct { - _ U.NoCopy - name string url *nettypes.URL weight int diff --git a/internal/route/route.go b/internal/route/route.go index f1f162db..99de1237 100644 --- a/internal/route/route.go +++ b/internal/route/route.go @@ -36,13 +36,10 @@ import ( "github.com/yusing/godoxy/internal/route/rules" rulepresets "github.com/yusing/godoxy/internal/route/rules/presets" route "github.com/yusing/godoxy/internal/route/types" - "github.com/yusing/godoxy/internal/utils" ) type ( Route struct { - _ utils.NoCopy - Alias string `json:"alias"` Scheme route.Scheme `json:"scheme,omitempty" swaggertype:"string" enums:"http,https,h2c,tcp,udp,fileserver"` Host string `json:"host,omitempty"` diff --git a/internal/utils/nocopy.go b/internal/utils/nocopy.go deleted file mode 100644 index f3443745..00000000 --- a/internal/utils/nocopy.go +++ /dev/null @@ -1,8 +0,0 @@ -package utils - -// empty struct that implements Locker interface -// for hinting that no copy should be performed. 
-type NoCopy struct{} - -func (*NoCopy) Lock() {} -func (*NoCopy) Unlock() {} diff --git a/internal/utils/ref_count.go b/internal/utils/ref_count.go deleted file mode 100644 index 782783ac..00000000 --- a/internal/utils/ref_count.go +++ /dev/null @@ -1,54 +0,0 @@ -package utils - -import ( - "sync/atomic" -) - -type RefCount struct { - _ NoCopy - - refCount uint32 - zeroCh chan struct{} -} - -func NewRefCounter() *RefCount { - rc := &RefCount{ - refCount: 1, - zeroCh: make(chan struct{}), - } - return rc -} - -func (rc *RefCount) Zero() <-chan struct{} { - return rc.zeroCh -} - -func (rc *RefCount) Add() { - // We add before checking to ensure proper ordering - newV := atomic.AddUint32(&rc.refCount, 1) - if newV == 1 { - // If it was 0 before we added, that means we're incrementing after a close - // This is a programming error - panic("RefCount.Add() called after count reached zero") - } -} - -func (rc *RefCount) Sub() { - // First read the current value - for { - current := atomic.LoadUint32(&rc.refCount) - if current == 0 { - // Already at zero, channel should be closed - return - } - - // Try to decrement, but only if the value hasn't changed - if atomic.CompareAndSwapUint32(&rc.refCount, current, current-1) { - if current == 1 { // Was this the last reference? 
- close(rc.zeroCh) - } - return - } - // If CAS failed, someone else modified the count, try again - } -} diff --git a/internal/utils/ref_count_test.go b/internal/utils/ref_count_test.go deleted file mode 100644 index a40cc75b..00000000 --- a/internal/utils/ref_count_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package utils - -import ( - "sync" - "testing" - "time" - - expect "github.com/yusing/goutils/testing" -) - -func TestRefCounterAddSub(t *testing.T) { - rc := NewRefCounter() // Count starts at 1 - - var wg sync.WaitGroup - - rc.Add() - for range 2 { - wg.Go(rc.Sub) - } - - wg.Wait() - expect.Equal(t, int(rc.refCount), 0) - - select { - case <-rc.Zero(): - // Expected behavior - case <-time.After(1 * time.Second): - t.Fatal("Expected Zero channel to close, but it didn't") - } -} - -func TestRefCounterMultipleAddSub(t *testing.T) { - rc := NewRefCounter() - - var wg sync.WaitGroup - numAdds := 5 - numSubs := 5 - wg.Add(numAdds) - - for range numAdds { - go func() { - defer wg.Done() - rc.Add() - }() - } - wg.Wait() - expect.Equal(t, int(rc.refCount), numAdds+1) - - wg.Add(numSubs) - for range numSubs { - go func() { - defer wg.Done() - rc.Sub() - }() - } - wg.Wait() - expect.Equal(t, int(rc.refCount), numAdds+1-numSubs) - - rc.Sub() - select { - case <-rc.Zero(): - // Expected behavior - case <-time.After(1 * time.Second): - t.Fatal("Expected Zero channel to close, but it didn't") - } -} - -func TestRefCounterOneInitially(t *testing.T) { - rc := NewRefCounter() - rc.Sub() // Bring count to zero - - select { - case <-rc.Zero(): - // Expected behavior - case <-time.After(1 * time.Second): - t.Fatal("Expected Zero channel to close, but it didn't") - } -} From 8b5cb947c804d485fb69e3614dec2a171e8ef7cf Mon Sep 17 00:00:00 2001 From: yusing Date: Thu, 8 Jan 2026 12:02:21 +0800 Subject: [PATCH 17/51] refactor(agent): extract agent pool and HTTP utilities to dedicated package Moved non-agent-specific logic from agent/pkg/agent/ to internal/agentpool/: - pool.go: Agent pool 
management (Get, Add, Remove, List, Iter, etc.) - http_requests.go: HTTP utilities (health checks, forwarding, websockets, reverse proxy) - agent.go: Agent struct with HTTP client management This separates general-purpose pool management from agent-specific configuration, improving code organization and making the agent package focused on agent config only. --- agent/go.mod | 6 +- agent/pkg/agent/agent_pool.go | 68 --------------- agent/pkg/agent/config.go | 83 +++++++++---------- internal/agentpool/agent.go | 54 ++++++++++++ .../agentpool}/http_requests.go | 48 ++++------- internal/agentpool/pool.go | 79 ++++++++++++++++++ internal/api/v1/agent/create.go | 3 +- internal/api/v1/agent/list.go | 8 +- internal/api/v1/agent/verify.go | 33 +++++--- internal/api/v1/metrics/all_system_info.go | 9 +- internal/api/v1/metrics/system_info.go | 5 +- internal/config/state.go | 8 +- internal/docker/client.go | 9 +- internal/docker/container.go | 3 +- internal/route/route.go | 10 +-- internal/types/docker.go | 4 +- internal/types/routes.go | 4 +- .../watcher/health/monitor/agent_proxied.go | 6 +- 18 files changed, 249 insertions(+), 191 deletions(-) delete mode 100644 agent/pkg/agent/agent_pool.go create mode 100644 internal/agentpool/agent.go rename {agent/pkg/agent => internal/agentpool}/http_requests.go (54%) create mode 100644 internal/agentpool/pool.go diff --git a/agent/go.mod b/agent/go.mod index 613b0f2e..543a696b 100644 --- a/agent/go.mod +++ b/agent/go.mod @@ -18,14 +18,11 @@ require ( github.com/bytedance/sonic v1.14.2 github.com/gin-gonic/gin v1.11.0 github.com/gorilla/websocket v1.5.3 - github.com/puzpuzpuz/xsync/v4 v4.2.0 github.com/rs/zerolog v1.34.0 github.com/stretchr/testify v1.11.1 - github.com/valyala/fasthttp v1.68.0 github.com/yusing/godoxy v0.0.0-00010101000000-000000000000 github.com/yusing/godoxy/socketproxy v0.0.0-00010101000000-000000000000 github.com/yusing/goutils v0.7.0 - github.com/yusing/goutils/http/reverseproxy v0.0.0-20260103043911-785deb23bd64 
github.com/yusing/goutils/server v0.0.0-20260103043911-785deb23bd64 ) @@ -90,6 +87,7 @@ require ( github.com/pires/go-proxyproto v0.8.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/puzpuzpuz/xsync/v4 v4.2.0 // indirect github.com/quic-go/qpack v0.6.0 // indirect github.com/quic-go/quic-go v0.58.0 // indirect github.com/samber/lo v1.52.0 // indirect @@ -103,9 +101,11 @@ require ( github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.3.1 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasthttp v1.68.0 // indirect github.com/vincent-petithory/dataurl v1.0.0 // indirect github.com/yusing/ds v0.3.1 // indirect github.com/yusing/gointernals v0.1.16 // indirect + github.com/yusing/goutils/http/reverseproxy v0.0.0-20260103043911-785deb23bd64 // indirect github.com/yusing/goutils/http/websocket v0.0.0-20260103043911-785deb23bd64 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect diff --git a/agent/pkg/agent/agent_pool.go b/agent/pkg/agent/agent_pool.go deleted file mode 100644 index a3793144..00000000 --- a/agent/pkg/agent/agent_pool.go +++ /dev/null @@ -1,68 +0,0 @@ -package agent - -import ( - "iter" - "os" - "strings" - - "github.com/puzpuzpuz/xsync/v4" -) - -var agentPool = xsync.NewMap[string, *AgentConfig](xsync.WithPresize(10)) - -func init() { - if strings.HasSuffix(os.Args[0], ".test") { - agentPool.Store("test-agent", &AgentConfig{ - Addr: "test-agent", - }) - } -} - -func GetAgent(agentAddrOrDockerHost string) (*AgentConfig, bool) { - if !IsDockerHostAgent(agentAddrOrDockerHost) { - return getAgentByAddr(agentAddrOrDockerHost) - } - return getAgentByAddr(GetAgentAddrFromDockerHost(agentAddrOrDockerHost)) -} - -func GetAgentByName(name string) (*AgentConfig, bool) { - for _, agent := range 
agentPool.Range { - if agent.Name == name { - return agent, true - } - } - return nil, false -} - -func AddAgent(agent *AgentConfig) { - agentPool.Store(agent.Addr, agent) -} - -func RemoveAgent(agent *AgentConfig) { - agentPool.Delete(agent.Addr) -} - -func RemoveAllAgents() { - agentPool.Clear() -} - -func ListAgents() []*AgentConfig { - agents := make([]*AgentConfig, 0, agentPool.Size()) - for _, agent := range agentPool.Range { - agents = append(agents, agent) - } - return agents -} - -func IterAgents() iter.Seq2[string, *AgentConfig] { - return agentPool.Range -} - -func NumAgents() int { - return agentPool.Size() -} - -func getAgentByAddr(addr string) (agent *AgentConfig, ok bool) { - agent, ok = agentPool.Load(addr) - return agent, ok -} diff --git a/agent/pkg/agent/config.go b/agent/pkg/agent/config.go index 95b4c28f..bf6b102b 100644 --- a/agent/pkg/agent/config.go +++ b/agent/pkg/agent/config.go @@ -6,6 +6,7 @@ import ( "crypto/x509" "errors" "fmt" + "io" "net" "net/http" "net/url" @@ -15,8 +16,8 @@ import ( "github.com/rs/zerolog" "github.com/rs/zerolog/log" - "github.com/valyala/fasthttp" "github.com/yusing/godoxy/agent/pkg/certs" + httputils "github.com/yusing/goutils/http" "github.com/yusing/goutils/version" ) @@ -26,10 +27,8 @@ type AgentConfig struct { Version version.Version `json:"version" swaggertype:"string"` Runtime ContainerRuntime `json:"runtime"` - httpClient *http.Client - fasthttpClientHealthCheck *fasthttp.Client - tlsConfig tls.Config - l zerolog.Logger + tlsConfig tls.Config + l zerolog.Logger } // @name Agent const ( @@ -85,7 +84,8 @@ func (cfg *AgentConfig) Parse(addr string) error { var serverVersion = version.Get() -func (cfg *AgentConfig) StartWithCerts(ctx context.Context, ca, crt, key []byte) error { +// InitWithCerts initializes the agent config with the given CA, certificate, and key. 
+func (cfg *AgentConfig) InitWithCerts(ctx context.Context, ca, crt, key []byte) error { clientCert, err := tls.X509KeyPair(crt, key) if err != nil { return err @@ -104,12 +104,6 @@ func (cfg *AgentConfig) StartWithCerts(ctx context.Context, ca, crt, key []byte) ServerName: CertsDNSName, } - // create transport and http client - cfg.httpClient = cfg.NewHTTPClient() - applyNormalTransportConfig(cfg.httpClient) - - cfg.fasthttpClientHealthCheck = cfg.NewFastHTTPHealthCheckClient() - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -163,7 +157,8 @@ func (cfg *AgentConfig) StartWithCerts(ctx context.Context, ca, crt, key []byte) return nil } -func (cfg *AgentConfig) Start(ctx context.Context) error { +// Init initializes the agent config with the given context. +func (cfg *AgentConfig) Init(ctx context.Context) error { filepath, ok := certs.AgentCertsFilepath(cfg.Addr) if !ok { return fmt.Errorf("invalid agent host: %s", cfg.Addr) @@ -179,32 +174,7 @@ func (cfg *AgentConfig) Start(ctx context.Context) error { return fmt.Errorf("failed to extract agent certs: %w", err) } - return cfg.StartWithCerts(ctx, ca, crt, key) -} - -func (cfg *AgentConfig) NewHTTPClient() *http.Client { - return &http.Client{ - Transport: cfg.Transport(), - } -} - -func (cfg *AgentConfig) NewFastHTTPHealthCheckClient() *fasthttp.Client { - return &fasthttp.Client{ - Dial: func(addr string) (net.Conn, error) { - if addr != AgentHost+":443" { - return nil, &net.AddrError{Err: "invalid address", Addr: addr} - } - return net.Dial("tcp", cfg.Addr) - }, - TLSConfig: &cfg.tlsConfig, - ReadTimeout: 5 * time.Second, - WriteTimeout: 3 * time.Second, - DisableHeaderNamesNormalizing: true, - DisablePathNormalizing: true, - NoDefaultUserAgentHeader: true, - ReadBufferSize: 1024, - WriteBufferSize: 1024, - } + return cfg.InitWithCerts(ctx, ca, crt, key) } func (cfg *AgentConfig) Transport() *http.Transport { @@ -222,6 +192,10 @@ func (cfg *AgentConfig) Transport() *http.Transport { } } 
+func (cfg *AgentConfig) TLSConfig() *tls.Config { + return &cfg.tlsConfig +} + var dialer = &net.Dialer{Timeout: 5 * time.Second} func (cfg *AgentConfig) DialContext(ctx context.Context) (net.Conn, error) { @@ -232,10 +206,29 @@ func (cfg *AgentConfig) String() string { return cfg.Name + "@" + cfg.Addr } -func applyNormalTransportConfig(client *http.Client) { - transport := client.Transport.(*http.Transport) - transport.MaxIdleConns = 100 - transport.MaxIdleConnsPerHost = 100 - transport.ReadBufferSize = 16384 - transport.WriteBufferSize = 16384 +func (cfg *AgentConfig) do(ctx context.Context, method, endpoint string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, method, APIBaseURL+endpoint, body) + if err != nil { + return nil, err + } + client := http.Client{ + Transport: cfg.Transport(), + } + return client.Do(req) +} + +func (cfg *AgentConfig) fetchString(ctx context.Context, endpoint string) (string, int, error) { + resp, err := cfg.do(ctx, "GET", endpoint, nil) + if err != nil { + return "", 0, err + } + defer resp.Body.Close() + + data, release, err := httputils.ReadAllBody(resp) + if err != nil { + return "", 0, err + } + ret := string(data) + release(data) + return ret, resp.StatusCode, nil } diff --git a/internal/agentpool/agent.go b/internal/agentpool/agent.go new file mode 100644 index 00000000..59fe1e77 --- /dev/null +++ b/internal/agentpool/agent.go @@ -0,0 +1,54 @@ +package agentpool + +import ( + "net" + "net/http" + "time" + + "github.com/valyala/fasthttp" + "github.com/yusing/godoxy/agent/pkg/agent" +) + +type Agent struct { + *agent.AgentConfig + + httpClient *http.Client + fasthttpHcClient *fasthttp.Client +} + +func newAgent(cfg *agent.AgentConfig) *Agent { + transport := cfg.Transport() + transport.MaxIdleConns = 100 + transport.MaxIdleConnsPerHost = 100 + transport.ReadBufferSize = 16384 + transport.WriteBufferSize = 16384 + + return &Agent{ + AgentConfig: cfg, + httpClient: &http.Client{ + 
Transport: transport, + }, + fasthttpHcClient: &fasthttp.Client{ + DialTimeout: func(addr string, timeout time.Duration) (net.Conn, error) { + if addr != agent.AgentHost+":443" { + return nil, &net.AddrError{Err: "invalid address", Addr: addr} + } + return net.DialTimeout("tcp", cfg.Addr, timeout) + }, + TLSConfig: cfg.TLSConfig(), + ReadTimeout: 5 * time.Second, + WriteTimeout: 3 * time.Second, + DisableHeaderNamesNormalizing: true, + DisablePathNormalizing: true, + NoDefaultUserAgentHeader: true, + ReadBufferSize: 1024, + WriteBufferSize: 1024, + }, + } +} + +func (agent *Agent) HTTPClient() *http.Client { + return &http.Client{ + Transport: agent.Transport(), + } +} diff --git a/agent/pkg/agent/http_requests.go b/internal/agentpool/http_requests.go similarity index 54% rename from agent/pkg/agent/http_requests.go rename to internal/agentpool/http_requests.go index aeae1221..6b5735cf 100644 --- a/agent/pkg/agent/http_requests.go +++ b/internal/agentpool/http_requests.go @@ -1,4 +1,4 @@ -package agent +package agentpool import ( "context" @@ -10,22 +10,22 @@ import ( "github.com/bytedance/sonic" "github.com/gorilla/websocket" "github.com/valyala/fasthttp" - httputils "github.com/yusing/goutils/http" + "github.com/yusing/godoxy/agent/pkg/agent" "github.com/yusing/goutils/http/reverseproxy" ) -func (cfg *AgentConfig) Do(ctx context.Context, method, endpoint string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequestWithContext(ctx, method, APIBaseURL+endpoint, body) +func (cfg *Agent) Do(ctx context.Context, method, endpoint string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, method, agent.APIBaseURL+endpoint, body) if err != nil { return nil, err } return cfg.httpClient.Do(req) } -func (cfg *AgentConfig) Forward(req *http.Request, endpoint string) (*http.Response, error) { - req.URL.Host = AgentHost +func (cfg *Agent) Forward(req *http.Request, endpoint string) (*http.Response, error) { + req.URL.Host 
= agent.AgentHost req.URL.Scheme = "https" - req.URL.Path = APIEndpointBase + endpoint + req.URL.Path = agent.APIEndpointBase + endpoint req.RequestURI = "" resp, err := cfg.httpClient.Do(req) if err != nil { @@ -40,20 +40,20 @@ type HealthCheckResponse struct { Latency time.Duration `json:"latency"` } -func (cfg *AgentConfig) DoHealthCheck(timeout time.Duration, query string) (ret HealthCheckResponse, err error) { +func (cfg *Agent) DoHealthCheck(timeout time.Duration, query string) (ret HealthCheckResponse, err error) { req := fasthttp.AcquireRequest() defer fasthttp.ReleaseRequest(req) resp := fasthttp.AcquireResponse() defer fasthttp.ReleaseResponse(resp) - req.SetRequestURI(APIBaseURL + EndpointHealth + "?" + query) + req.SetRequestURI(agent.APIBaseURL + agent.EndpointHealth + "?" + query) req.Header.SetMethod(fasthttp.MethodGet) req.Header.Set("Accept-Encoding", "identity") req.SetConnectionClose() start := time.Now() - err = cfg.fasthttpClientHealthCheck.DoTimeout(req, resp, timeout) + err = cfg.fasthttpHcClient.DoTimeout(req, resp, timeout) ret.Latency = time.Since(start) if err != nil { return ret, err @@ -71,30 +71,14 @@ func (cfg *AgentConfig) DoHealthCheck(timeout time.Duration, query string) (ret return ret, nil } -func (cfg *AgentConfig) fetchString(ctx context.Context, endpoint string) (string, int, error) { - resp, err := cfg.Do(ctx, "GET", endpoint, nil) - if err != nil { - return "", 0, err - } - defer resp.Body.Close() - - data, release, err := httputils.ReadAllBody(resp) - if err != nil { - return "", 0, err - } - ret := string(data) - release(data) - return ret, resp.StatusCode, nil -} - -func (cfg *AgentConfig) Websocket(ctx context.Context, endpoint string) (*websocket.Conn, *http.Response, error) { +func (cfg *Agent) Websocket(ctx context.Context, endpoint string) (*websocket.Conn, *http.Response, error) { transport := cfg.Transport() dialer := websocket.Dialer{ NetDialContext: transport.DialContext, NetDialTLSContext: 
transport.DialTLSContext, } - return dialer.DialContext(ctx, APIBaseURL+endpoint, http.Header{ - "Host": {AgentHost}, + return dialer.DialContext(ctx, agent.APIBaseURL+endpoint, http.Header{ + "Host": {agent.AgentHost}, }) } @@ -102,9 +86,9 @@ func (cfg *AgentConfig) Websocket(ctx context.Context, endpoint string) (*websoc // // It will create a new request with the same context, method, and body, but with the agent host and scheme, and the endpoint // If the request has a query, it will be added to the proxy request's URL -func (cfg *AgentConfig) ReverseProxy(w http.ResponseWriter, req *http.Request, endpoint string) { - rp := reverseproxy.NewReverseProxy("agent", AgentURL, cfg.Transport()) - req.URL.Host = AgentHost +func (cfg *Agent) ReverseProxy(w http.ResponseWriter, req *http.Request, endpoint string) { + rp := reverseproxy.NewReverseProxy("agent", agent.AgentURL, cfg.Transport()) + req.URL.Host = agent.AgentHost req.URL.Scheme = "https" req.URL.Path = endpoint req.RequestURI = "" diff --git a/internal/agentpool/pool.go b/internal/agentpool/pool.go new file mode 100644 index 00000000..ddd4e04f --- /dev/null +++ b/internal/agentpool/pool.go @@ -0,0 +1,79 @@ +package agentpool + +import ( + "iter" + "os" + "strings" + + "github.com/puzpuzpuz/xsync/v4" + "github.com/yusing/godoxy/agent/pkg/agent" +) + +var agentPool = xsync.NewMap[string, *Agent](xsync.WithPresize(10)) + +func init() { + if strings.HasSuffix(os.Args[0], ".test") { + agentPool.Store("test-agent", &Agent{ + AgentConfig: &agent.AgentConfig{ + Addr: "test-agent", + }, + }) + } +} + +func Get(agentAddrOrDockerHost string) (*Agent, bool) { + if !agent.IsDockerHostAgent(agentAddrOrDockerHost) { + return getAgentByAddr(agentAddrOrDockerHost) + } + return getAgentByAddr(agent.GetAgentAddrFromDockerHost(agentAddrOrDockerHost)) +} + +func GetAgent(name string) (*Agent, bool) { + for _, agent := range agentPool.Range { + if agent.Name == name { + return agent, true + } + } + return nil, false +} + +func 
Add(cfg *agent.AgentConfig) (added bool) { + _, loaded := agentPool.LoadOrCompute(cfg.Addr, func() (*Agent, bool) { + return newAgent(cfg), false + }) + return !loaded +} + +func Has(cfg *agent.AgentConfig) bool { + _, ok := agentPool.Load(cfg.Addr) + return ok +} + +func Remove(cfg *agent.AgentConfig) { + agentPool.Delete(cfg.Addr) +} + +func RemoveAll() { + agentPool.Clear() +} + +func List() []*Agent { + agents := make([]*Agent, 0, agentPool.Size()) + for _, agent := range agentPool.Range { + agents = append(agents, agent) + } + return agents +} + +func Iter() iter.Seq2[string, *Agent] { + return agentPool.Range +} + +func Num() int { + return agentPool.Size() +} + +func getAgentByAddr(addr string) (agent *Agent, ok bool) { + agent, ok = agentPool.Load(addr) + return agent, ok +} diff --git a/internal/api/v1/agent/create.go b/internal/api/v1/agent/create.go index 225240ea..6c38a562 100644 --- a/internal/api/v1/agent/create.go +++ b/internal/api/v1/agent/create.go @@ -9,6 +9,7 @@ import ( "github.com/gin-gonic/gin" "github.com/yusing/godoxy/agent/pkg/agent" + "github.com/yusing/godoxy/internal/agentpool" apitypes "github.com/yusing/goutils/apitypes" ) @@ -50,7 +51,7 @@ func Create(c *gin.Context) { } hostport := net.JoinHostPort(request.Host, strconv.Itoa(request.Port)) - if _, ok := agent.GetAgent(hostport); ok { + if _, ok := agentpool.Get(hostport); ok { c.JSON(http.StatusConflict, apitypes.Error("agent already exists")) return } diff --git a/internal/api/v1/agent/list.go b/internal/api/v1/agent/list.go index 73968fd7..1592a7b3 100644 --- a/internal/api/v1/agent/list.go +++ b/internal/api/v1/agent/list.go @@ -5,7 +5,7 @@ import ( "time" "github.com/gin-gonic/gin" - "github.com/yusing/godoxy/agent/pkg/agent" + "github.com/yusing/godoxy/internal/agentpool" "github.com/yusing/goutils/http/httpheaders" "github.com/yusing/goutils/http/websocket" @@ -19,15 +19,15 @@ import ( // @Tags agent,websocket // @Accept json // @Produce json -// @Success 200 {array} Agent +// 
@Success 200 {array} agent.AgentConfig // @Failure 403 {object} apitypes.ErrorResponse // @Router /agent/list [get] func List(c *gin.Context) { if httpheaders.IsWebsocket(c.Request.Header) { websocket.PeriodicWrite(c, 10*time.Second, func() (any, error) { - return agent.ListAgents(), nil + return agentpool.List(), nil }) } else { - c.JSON(http.StatusOK, agent.ListAgents()) + c.JSON(http.StatusOK, agentpool.List()) } } diff --git a/internal/api/v1/agent/verify.go b/internal/api/v1/agent/verify.go index 23473d66..4492c1db 100644 --- a/internal/api/v1/agent/verify.go +++ b/internal/api/v1/agent/verify.go @@ -8,6 +8,7 @@ import ( "github.com/gin-gonic/gin" "github.com/yusing/godoxy/agent/pkg/agent" "github.com/yusing/godoxy/agent/pkg/certs" + "github.com/yusing/godoxy/internal/agentpool" config "github.com/yusing/godoxy/internal/config/types" "github.com/yusing/godoxy/internal/route/provider" apitypes "github.com/yusing/goutils/apitypes" @@ -79,21 +80,28 @@ func Verify(c *gin.Context) { c.JSON(http.StatusOK, apitypes.Success(fmt.Sprintf("Added %d routes", nRoutesAdded))) } -func verifyNewAgent(host string, ca agent.PEMPair, client agent.PEMPair, containerRuntime agent.ContainerRuntime) (int, gperr.Error) { - cfgState := config.ActiveState.Load() - for _, a := range cfgState.Value().Providers.Agents { - if a.Addr == host { - return 0, gperr.New("agent already exists") - } - } +var errAgentAlreadyExists = gperr.New("agent already exists") +func verifyNewAgent(host string, ca agent.PEMPair, client agent.PEMPair, containerRuntime agent.ContainerRuntime) (int, gperr.Error) { var agentCfg agent.AgentConfig agentCfg.Addr = host agentCfg.Runtime = containerRuntime - err := agentCfg.StartWithCerts(cfgState.Context(), ca.Cert, client.Cert, client.Key) + // check if agent host exists in the config + cfgState := config.ActiveState.Load() + for _, a := range cfgState.Value().Providers.Agents { + if a.Addr == host { + return 0, errAgentAlreadyExists + } + } + // check if agent host 
exists in the agent pool + if agentpool.Has(&agentCfg) { + return 0, errAgentAlreadyExists + } + + err := agentCfg.InitWithCerts(cfgState.Context(), ca.Cert, client.Cert, client.Key) if err != nil { - return 0, gperr.Wrap(err, "failed to start agent") + return 0, gperr.Wrap(err, "failed to initialize agent config") } provider := provider.NewAgentProvider(&agentCfg) @@ -102,11 +110,14 @@ func verifyNewAgent(host string, ca agent.PEMPair, client agent.PEMPair, contain } // agent must be added before loading routes - agent.AddAgent(&agentCfg) + added := agentpool.Add(&agentCfg) + if !added { + return 0, errAgentAlreadyExists + } err = provider.LoadRoutes() if err != nil { cfgState.DeleteProvider(provider.String()) - agent.RemoveAgent(&agentCfg) + agentpool.Remove(&agentCfg) return 0, gperr.Wrap(err, "failed to load routes") } diff --git a/internal/api/v1/metrics/all_system_info.go b/internal/api/v1/metrics/all_system_info.go index 3e8ae474..fee4fc56 100644 --- a/internal/api/v1/metrics/all_system_info.go +++ b/internal/api/v1/metrics/all_system_info.go @@ -11,6 +11,7 @@ import ( "github.com/gin-gonic/gin" "github.com/rs/zerolog/log" "github.com/yusing/godoxy/agent/pkg/agent" + "github.com/yusing/godoxy/internal/agentpool" "github.com/yusing/godoxy/internal/metrics/period" "github.com/yusing/godoxy/internal/metrics/systeminfo" apitypes "github.com/yusing/goutils/apitypes" @@ -79,7 +80,7 @@ func AllSystemInfo(c *gin.Context) { } // leave 5 extra slots for buffering in case new agents are added. 
- dataCh := make(chan SystemInfoData, 1+agent.NumAgents()+5) + dataCh := make(chan SystemInfoData, 1+agentpool.Num()+5) defer close(dataCh) ticker := time.NewTicker(req.Interval) @@ -125,7 +126,7 @@ func AllSystemInfo(c *gin.Context) { return nil }) - for _, a := range agent.IterAgents() { + for _, a := range agentpool.Iter() { totalAgents++ errs.Go(func() error { @@ -175,7 +176,7 @@ func AllSystemInfo(c *gin.Context) { } } -func getAgentSystemInfo(ctx context.Context, a *agent.AgentConfig, query string) (bytesFromPool, error) { +func getAgentSystemInfo(ctx context.Context, a *agentpool.Agent, query string) (bytesFromPool, error) { ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -194,7 +195,7 @@ func getAgentSystemInfo(ctx context.Context, a *agent.AgentConfig, query string) return bytesFromPool{json.RawMessage(bytesBuf), release}, nil } -func getAgentSystemInfoWithRetry(ctx context.Context, a *agent.AgentConfig, query string) (bytesFromPool, error) { +func getAgentSystemInfoWithRetry(ctx context.Context, a *agentpool.Agent, query string) (bytesFromPool, error) { const maxRetries = 3 var lastErr error diff --git a/internal/api/v1/metrics/system_info.go b/internal/api/v1/metrics/system_info.go index 36b777f1..e415277d 100644 --- a/internal/api/v1/metrics/system_info.go +++ b/internal/api/v1/metrics/system_info.go @@ -7,6 +7,7 @@ import ( "github.com/gin-gonic/gin" agentPkg "github.com/yusing/godoxy/agent/pkg/agent" + "github.com/yusing/godoxy/internal/agentpool" "github.com/yusing/godoxy/internal/metrics/period" "github.com/yusing/godoxy/internal/metrics/systeminfo" apitypes "github.com/yusing/goutils/apitypes" @@ -49,9 +50,9 @@ func SystemInfo(c *gin.Context) { } c.Request.URL.RawQuery = query.Encode() - agent, ok := agentPkg.GetAgent(agentAddr) + agent, ok := agentpool.Get(agentAddr) if !ok { - agent, ok = agentPkg.GetAgentByName(agentName) + agent, ok = agentpool.GetAgent(agentName) } if !ok { c.JSON(http.StatusNotFound, 
apitypes.Error("agent_addr or agent_name not found")) diff --git a/internal/config/state.go b/internal/config/state.go index 9605356c..ef351e0b 100644 --- a/internal/config/state.go +++ b/internal/config/state.go @@ -18,8 +18,8 @@ import ( "github.com/goccy/go-yaml" "github.com/puzpuzpuz/xsync/v4" "github.com/rs/zerolog" - "github.com/yusing/godoxy/agent/pkg/agent" "github.com/yusing/godoxy/internal/acl" + "github.com/yusing/godoxy/internal/agentpool" "github.com/yusing/godoxy/internal/autocert" config "github.com/yusing/godoxy/internal/config/types" "github.com/yusing/godoxy/internal/entrypoint" @@ -332,7 +332,7 @@ func (state *state) loadRouteProviders() error { errs := gperr.NewGroup("route provider errors") results := gperr.NewGroup("loaded route providers") - agent.RemoveAllAgents() + agentpool.RemoveAll() numProviders := len(providers.Agents) + len(providers.Files) + len(providers.Docker) providersCh := make(chan types.RouteProvider, numProviders) @@ -352,11 +352,11 @@ func (state *state) loadRouteProviders() error { var providersProducer sync.WaitGroup for _, a := range providers.Agents { providersProducer.Go(func() { - if err := a.Start(state.task.Context()); err != nil { + if err := a.Init(state.task.Context()); err != nil { errs.Add(gperr.PrependSubject(a.String(), err)) return } - agent.AddAgent(a) + agentpool.Add(a) p := route.NewAgentProvider(a) providersCh <- p }) diff --git a/internal/docker/client.go b/internal/docker/client.go index eaaefe9d..de8669cb 100644 --- a/internal/docker/client.go +++ b/internal/docker/client.go @@ -17,6 +17,7 @@ import ( "github.com/moby/moby/client" "github.com/rs/zerolog/log" "github.com/yusing/godoxy/agent/pkg/agent" + "github.com/yusing/godoxy/internal/agentpool" "github.com/yusing/godoxy/internal/types" httputils "github.com/yusing/goutils/http" "github.com/yusing/goutils/task" @@ -149,16 +150,16 @@ func NewClient(cfg types.DockerProviderConfig, unique ...bool) (*SharedClient, e var dial func(ctx context.Context) 
(net.Conn, error) if agent.IsDockerHostAgent(host) { - cfg, ok := agent.GetAgent(host) + a, ok := agentpool.Get(host) if !ok { panic(fmt.Errorf("agent %q not found", host)) } opt = []client.Opt{ client.WithHost(agent.DockerHost), - client.WithHTTPClient(cfg.NewHTTPClient()), + client.WithHTTPClient(a.HTTPClient()), } - addr = "tcp://" + cfg.Addr - dial = cfg.DialContext + addr = "tcp://" + a.Addr + dial = a.DialContext } else { helper, err := connhelper.GetConnectionHelper(host) if err != nil { diff --git a/internal/docker/container.go b/internal/docker/container.go index 0eb3fbbf..a7f01665 100644 --- a/internal/docker/container.go +++ b/internal/docker/container.go @@ -15,6 +15,7 @@ import ( "github.com/moby/moby/api/types/container" "github.com/moby/moby/client" "github.com/yusing/godoxy/agent/pkg/agent" + "github.com/yusing/godoxy/internal/agentpool" "github.com/yusing/godoxy/internal/serialization" "github.com/yusing/godoxy/internal/types" gperr "github.com/yusing/goutils/errs" @@ -71,7 +72,7 @@ func FromDocker(c *container.Summary, dockerCfg types.DockerProviderConfig) (res if agent.IsDockerHostAgent(dockerCfg.URL) { var ok bool - res.Agent, ok = agent.GetAgent(dockerCfg.URL) + res.Agent, ok = agentpool.Get(dockerCfg.URL) if !ok { addError(res, fmt.Errorf("agent %q not found", dockerCfg.URL)) } diff --git a/internal/route/route.go b/internal/route/route.go index 99de1237..3d48f4e6 100644 --- a/internal/route/route.go +++ b/internal/route/route.go @@ -15,7 +15,7 @@ import ( "time" "github.com/rs/zerolog/log" - "github.com/yusing/godoxy/agent/pkg/agent" + "github.com/yusing/godoxy/internal/agentpool" config "github.com/yusing/godoxy/internal/config/types" "github.com/yusing/godoxy/internal/docker" "github.com/yusing/godoxy/internal/homepage" @@ -94,7 +94,7 @@ type ( provider types.RouteProvider - agent *agent.AgentConfig + agent *agentpool.Agent started chan struct{} onceStart sync.Once @@ -153,10 +153,10 @@ func (r *Route) validate() gperr.Error { } var ok bool 
// by agent address - r.agent, ok = agent.GetAgent(r.Agent) + r.agent, ok = agentpool.Get(r.Agent) if !ok { // fallback to get agent by name - r.agent, ok = agent.GetAgentByName(r.Agent) + r.agent, ok = agentpool.GetAgent(r.Agent) if !ok { return gperr.Errorf("agent %s not found", r.Agent) } @@ -510,7 +510,7 @@ func (r *Route) Type() route.RouteType { panic(fmt.Errorf("unexpected scheme %s for alias %s", r.Scheme, r.Alias)) } -func (r *Route) GetAgent() *agent.AgentConfig { +func (r *Route) GetAgent() *agentpool.Agent { if r.Container != nil && r.Container.Agent != nil { return r.Container.Agent } diff --git a/internal/types/docker.go b/internal/types/docker.go index d0cdc88d..b33f53e4 100644 --- a/internal/types/docker.go +++ b/internal/types/docker.go @@ -4,7 +4,7 @@ import ( "github.com/bytedance/sonic" "github.com/moby/moby/api/types/container" "github.com/yusing/ds/ordered" - "github.com/yusing/godoxy/agent/pkg/agent" + "github.com/yusing/godoxy/internal/agentpool" gperr "github.com/yusing/goutils/errs" ) @@ -20,7 +20,7 @@ type ( State container.ContainerState `json:"state"` - Agent *agent.AgentConfig `json:"agent"` + Agent *agentpool.Agent `json:"agent"` Labels map[string]string `json:"-"` // for creating routes ActualLabels map[string]string `json:"labels"` // for displaying in UI diff --git a/internal/types/routes.go b/internal/types/routes.go index bc8ac7c4..2d63215a 100644 --- a/internal/types/routes.go +++ b/internal/types/routes.go @@ -3,7 +3,7 @@ package types import ( "net/http" - "github.com/yusing/godoxy/agent/pkg/agent" + "github.com/yusing/godoxy/internal/agentpool" "github.com/yusing/godoxy/internal/homepage" nettypes "github.com/yusing/godoxy/internal/net/types" provider "github.com/yusing/godoxy/internal/route/provider/types" @@ -35,7 +35,7 @@ type ( DisplayName() string ContainerInfo() *Container - GetAgent() *agent.AgentConfig + GetAgent() *agentpool.Agent IsDocker() bool IsAgent() bool diff --git 
a/internal/watcher/health/monitor/agent_proxied.go b/internal/watcher/health/monitor/agent_proxied.go index 332d6d56..d9514a70 100644 --- a/internal/watcher/health/monitor/agent_proxied.go +++ b/internal/watcher/health/monitor/agent_proxied.go @@ -3,14 +3,14 @@ package monitor import ( "net/url" - agentPkg "github.com/yusing/godoxy/agent/pkg/agent" + "github.com/yusing/godoxy/internal/agentpool" "github.com/yusing/godoxy/internal/types" "github.com/yusing/goutils/synk" ) type ( AgentProxiedMonitor struct { - agent *agentPkg.AgentConfig + agent *agentpool.Agent query synk.Value[string] *monitor } @@ -45,7 +45,7 @@ func (target *AgentCheckHealthTarget) displayURL() *url.URL { } } -func NewAgentProxiedMonitor(agent *agentPkg.AgentConfig, config types.HealthCheckConfig, target *AgentCheckHealthTarget) *AgentProxiedMonitor { +func NewAgentProxiedMonitor(agent *agentpool.Agent, config types.HealthCheckConfig, target *AgentCheckHealthTarget) *AgentProxiedMonitor { mon := &AgentProxiedMonitor{ agent: agent, } From 243e7e9e95a93cbb114b74572cf331bed7bb3274 Mon Sep 17 00:00:00 2001 From: yusing Date: Thu, 8 Jan 2026 14:54:33 +0800 Subject: [PATCH 18/51] refactor(health): restructure health check implementations into dedicated check package - Move health check implementations from monitor/ to new check/ package - Add h2c, tcp4/6, udp4/6 scheme support to agent health check API - Add timeout URL parameter to agent health check endpoint - Remove unused agent dependencies (dnsproviders, lego, various cloud SDKs) - Use net.JoinHostPort instead of fmt.Sprintf for port joining --- agent/go.mod | 17 --- agent/go.sum | 103 ------------- agent/pkg/handler/check_health.go | 55 ++++--- internal/docker/client.go | 2 +- internal/watcher/docker_watcher.go | 4 +- internal/watcher/health/check/docker.go | 115 +++++++++++++++ internal/watcher/health/check/fileserver.go | 28 ++++ .../watcher/health/{monitor => check}/http.go | 139 +++++++----------- .../{monitor/raw.go => check/stream.go} | 27 
+--- .../watcher/health/monitor/agent_proxied.go | 71 --------- internal/watcher/health/monitor/docker.go | 139 ------------------ internal/watcher/health/monitor/fileserver.go | 36 ----- internal/watcher/health/monitor/monitor.go | 46 +++--- .../watcher/health/monitor/monitor_test.go | 37 ++--- internal/watcher/health/monitor/new.go | 96 ++++++++++++ 15 files changed, 380 insertions(+), 535 deletions(-) create mode 100644 internal/watcher/health/check/docker.go create mode 100644 internal/watcher/health/check/fileserver.go rename internal/watcher/health/{monitor => check}/http.go (69%) rename internal/watcher/health/{monitor/raw.go => check/stream.go} (53%) delete mode 100644 internal/watcher/health/monitor/agent_proxied.go delete mode 100644 internal/watcher/health/monitor/docker.go delete mode 100644 internal/watcher/health/monitor/fileserver.go create mode 100644 internal/watcher/health/monitor/new.go diff --git a/agent/go.mod b/agent/go.mod index 543a696b..a37eb1bc 100644 --- a/agent/go.mod +++ b/agent/go.mod @@ -31,7 +31,6 @@ require ( github.com/PuerkitoBio/goquery v1.11.0 // indirect github.com/andybalholm/brotli v1.2.0 // indirect github.com/andybalholm/cascadia v1.3.3 // indirect - github.com/buger/goterm v1.0.4 // indirect github.com/bytedance/gopkg v0.1.3 // indirect github.com/bytedance/sonic/loader v0.4.0 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect @@ -40,9 +39,7 @@ require ( github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/diskfs/go-diskfs v1.7.0 // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/djherbis/times v1.6.0 // indirect github.com/docker/cli v29.1.3+incompatible // indirect github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect @@ -50,31 +47,23 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect 
github.com/gabriel-vasile/mimetype v1.4.12 // indirect github.com/gin-contrib/sse v1.1.0 // indirect - github.com/go-acme/lego/v4 v4.30.1 // indirect - github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.30.1 // indirect - github.com/gobwas/glob v0.2.3 // indirect github.com/goccy/go-json v0.10.5 // indirect github.com/goccy/go-yaml v1.19.1 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/gotify/server/v2 v2.8.0 // indirect - github.com/jinzhu/copier v0.4.0 // indirect github.com/json-iterator/go v1.1.13-0.20220915233716-71ac16282d12 // indirect github.com/klauspost/compress v1.18.2 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/lithammer/fuzzysearch v1.1.8 // indirect github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect - github.com/luthermonson/go-proxmox v0.3.1 // indirect - github.com/magefile/mage v1.15.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/miekg/dns v1.1.69 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/moby/api v1.52.0 // indirect github.com/moby/moby/client v0.2.1 // indirect @@ -82,7 +71,6 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect - github.com/oschwald/maxminddb-golang v1.13.1 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pires/go-proxyproto v0.8.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -95,7 +83,6 @@ require ( 
github.com/samber/slog-zerolog/v2 v2.9.0 // indirect github.com/shirou/gopsutil/v4 v4.25.12 // indirect github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect - github.com/spf13/afero v1.15.0 // indirect github.com/tklauser/go-sysconf v0.3.16 // indirect github.com/tklauser/numcpus v0.11.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect @@ -115,13 +102,9 @@ require ( go.opentelemetry.io/otel/trace v1.39.0 // indirect golang.org/x/arch v0.23.0 // indirect golang.org/x/crypto v0.46.0 // indirect - golang.org/x/mod v0.31.0 // indirect golang.org/x/net v0.48.0 // indirect - golang.org/x/sync v0.19.0 // indirect golang.org/x/sys v0.39.0 // indirect golang.org/x/text v0.32.0 // indirect - golang.org/x/time v0.14.0 // indirect - golang.org/x/tools v0.40.0 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/agent/go.sum b/agent/go.sum index 573a3a70..26ba7305 100644 --- a/agent/go.sum +++ b/agent/go.sum @@ -1,38 +1,11 @@ -cloud.google.com/go/auth v0.18.0 h1:wnqy5hrv7p3k7cShwAU/Br3nzod7fxoqG+k0VZ+/Pk0= -cloud.google.com/go/auth v0.18.0/go.mod h1:wwkPM1AgE1f2u6dG443MiWoD8C3BtOywNsUMcUTVDRo= -cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= -cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= -cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= -cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= -github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 
h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns v1.2.0 h1:lpOxwrQ919lCZoNCd69rVt8u1eLZuMORrGXqy8sNf3c= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns v1.2.0/go.mod h1:fSvRkb8d26z9dbL40Uf/OO6Vo9iExtZK3D0ulRV+8M0= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.3.0 h1:yzrctSl9GMIQ5lHu7jc8olOsGjWDCsBpJhWqfGa/YIM= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.3.0/go.mod h1:GE4m0rnnfwLGX0Y9A9A25Zx5N/90jneT5ABevqzhuFQ= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.9.0 h1:zLzoX5+W2l95UJoVwiyNS4dX8vHyQ6x2xRLoBBL9wMk= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.9.0/go.mod h1:wVEOJfGTj0oPAUGA1JuRAvz/lxXQsWW16axmHPP47Bk= -github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= -github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/PuerkitoBio/goquery v1.11.0 h1:jZ7pwMQXIITcUXNH83LLk+txlaEy6NVOfTuP43xxfqw= github.com/PuerkitoBio/goquery v1.11.0/go.mod h1:wQHgxUOU3JGuj3oD/QFfxUdlzW6xPHfqyHre6VMY4DQ= -github.com/akamai/AkamaiOPEN-edgegrid-golang/v11 v11.1.0 h1:h/33OxYLqBk0BYmEbSUy7MlvgQR/m1w1/7OJFKoPL1I= -github.com/akamai/AkamaiOPEN-edgegrid-golang/v11 v11.1.0/go.mod 
h1:rvh3imDA6EaQi+oM/GQHkQAOHbXPKJ7EWJvfjuw141Q= -github.com/anchore/go-lzo v0.1.0 h1:NgAacnzqPeGH49Ky19QKLBZEuFRqtTG9cdaucc3Vncs= -github.com/anchore/go-lzo v0.1.0/go.mod h1:3kLx0bve2oN1iDwgM1U5zGku1Tfbdb0No5qp1eL1fIk= github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM= github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA= -github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= -github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/buger/goterm v1.0.4 h1:Z9YvGmOih81P0FbVtEYTFF6YsSgxSUKEhf/f9bTMXbY= github.com/buger/goterm v1.0.4/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE= github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M= @@ -72,8 +45,6 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A= github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= -github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab h1:h1UgjJdAAhj+uPL68n7XASS6bU+07ZX1WJvVS2eyoeY= -github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab/go.mod h1:GLo/8fDswSAniFG+BFIaiSPcK610jyzgEhWYPQwuQdw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= @@ -96,8 +67,6 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-ole/go-ole 
v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-ozzo/ozzo-validation/v4 v4.3.0 h1:byhDUpfEwjsVQb1vBunvIjh2BHQ9ead57VkAEY4V+Es= -github.com/go-ozzo/ozzo-validation/v4 v4.3.0/go.mod h1:2NKgrcHl3z6cJs+3Oo940FPRiTzuqKbvfrL2RxCj6Ew= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= @@ -106,10 +75,6 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.30.1 h1:f3zDSN/zOma+w6+1Wswgd9fLkdwy06ntQJp0BBvFG0w= github.com/go-playground/validator/v10 v10.30.1/go.mod h1:oSuBIQzuJxL//3MelwSLD5hc2Tu889bF0Idm9Dg26cM= -github.com/go-resty/resty/v2 v2.17.1 h1:x3aMpHK1YM9e4va/TMDRlusDDoZiQ+ViDu/WpA6xTM4= -github.com/go-resty/resty/v2 v2.17.1/go.mod h1:kCKZ3wWmwJaNc7S29BRtUhJwy7iqmn+2mLtQrOyQlVA= -github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= -github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= @@ -117,38 +82,20 @@ github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PU github.com/goccy/go-yaml v1.19.1 h1:3rG3+v8pkhRqoQ/88NYNMHYVGYztCOCIZ7UQhu7H+NE= github.com/goccy/go-yaml v1.19.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/godbus/dbus/v5 
v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw= -github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0= -github.com/google/go-querystring v1.2.0/go.mod h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= -github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= -github.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5E4Zd0Y= -github.com/googleapis/gax-go/v2 v2.16.0/go.mod h1:o1vfQjjNZn4+dPnRdl/4ZD7S9414Y4xA+a/6Icj6l14= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod 
h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gotify/server/v2 v2.8.0 h1:E3UDDn/3rFZi1sjZfbuhXNnxJP3ACZhdcw/iySegPRA= github.com/gotify/server/v2 v2.8.0/go.mod h1:6ci5adxcE2hf1v+2oowKiQmixOxXV8vU+CRLKP6sqZA= -github.com/h2non/gock v1.2.0 h1:K6ol8rfrRkUOefooBC8elXoaNGYkpp7y2qcxGG6BzUE= -github.com/h2non/gock v1.2.0/go.mod h1:tNhoxHYW2W42cYkYb1WqzdbYIieALC99kpYr7rH/BQk= -github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= -github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= -github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/json-iterator/go v1.1.13-0.20220915233716-71ac16282d12 h1:9Nu54bhS/H/Kgo2/7xNSUuC5G28VR8ljfrLKU2G4IjU= @@ -161,12 +108,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= -github.com/linode/linodego v1.63.0 
h1:MdjizfXNJDVJU6ggoJmMO5O9h4KGPGivNX0fzrAnstk= -github.com/linode/linodego v1.63.0/go.mod h1:GoiwLVuLdBQcAebxAVKVL3mMYUgJZR/puOUSla04xBE= github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4= github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4= github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 h1:PwQumkgq4/acIiZhtifTV5OUqqiP82UAl0h87xj/l9k= @@ -184,8 +127,6 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/miekg/dns v1.1.69 h1:Kb7Y/1Jo+SG+a2GtfoFUfDkG//csdRPwRLkCsxDG9Sc= github.com/miekg/dns v1.1.69/go.mod h1:7OyjD9nEba5OkqQ/hB4fy3PIoxafSZJtducccIelz3g= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/moby/api v1.52.0 h1:00BtlJY4MXkkt84WhUZPRqt5TvPbgig2FZvTbe3igYg= @@ -197,33 +138,17 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/nrdcg/goacmedns v0.2.0 h1:ADMbThobzEMnr6kg2ohs4KGa3LFqmgiBA22/6jUWJR0= -github.com/nrdcg/goacmedns v0.2.0/go.mod h1:T5o6+xvSLrQpugmwHvrSNkzWht0UGAwj2ACBMhh73Cg= -github.com/nrdcg/oci-go-sdk/common/v1065 v1065.105.2 h1:l0tH15ACQADZAzC+LZ+mo2tIX4H6uZu0ulrVmG5Tqz0= -github.com/nrdcg/oci-go-sdk/common/v1065 v1065.105.2/go.mod 
h1:Gcs8GCaZXL3FdiDWgdnMxlOLEdRprJJnPYB22TX1jw8= -github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.105.2 h1:gzB4c6ztb38C/jYiqEaFC+mCGcWFHDji9e6jwymY9d4= -github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.105.2/go.mod h1:l1qIPIq2uRV5WTSvkbhbl/ndbeOu7OCb3UZ+0+2ZSb8= -github.com/nrdcg/porkbun v0.4.0 h1:rWweKlwo1PToQ3H+tEO9gPRW0wzzgmI/Ob3n2Guticw= -github.com/nrdcg/porkbun v0.4.0/go.mod h1:/QMskrHEIM0IhC/wY7iTCUgINsxdT2WcOphktJ9+Q54= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/oschwald/maxminddb-golang v1.13.1 h1:G3wwjdN9JmIK2o/ermkHM+98oX5fS+k5MbwsmL4MRQE= github.com/oschwald/maxminddb-golang v1.13.1/go.mod h1:K4pgV9N/GcK694KSTmVSDTODk4IsCNThNdTmnaBZ/F8= -github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE= -github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0= github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
-github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE= -github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -246,20 +171,14 @@ github.com/samber/slog-common v0.19.0 h1:fNcZb8B2uOLooeYwFpAlKjkQTUafdjfqKcwcC89 github.com/samber/slog-common v0.19.0/go.mod h1:dTz+YOU76aH007YUU0DffsXNsGFQRQllPQh9XyNoA3M= github.com/samber/slog-zerolog/v2 v2.9.0 h1:6LkOabJmZdNLaUWkTC3IVVA+dq7b/V0FM6lz6/7+THI= github.com/samber/slog-zerolog/v2 v2.9.0/go.mod h1:gnQW9VnCfM34v2pRMUIGMsZOVbYLqY/v0Wxu6atSVGc= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36 h1:ObX9hZmK+VmijreZO/8x9pQ8/P/ToHD/bdSb4Eg4tUo= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36/go.mod h1:LEsDu4BubxK7/cWhtlQWfuxwL4rf/2UEpxXz1o1EMtM= github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sony/gobreaker v1.0.0 h1:feX5fGGXSl3dYd4aHZItw+FpHLvvoaqkawKjVNiFMNQ= -github.com/sony/gobreaker v1.0.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= 
-github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4= -github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -275,25 +194,17 @@ github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/ugorji/go/codec v1.3.1 h1:waO7eEiFDwidsBN6agj1vJQ4AG7lh2yqXyOXqhgQuyY= github.com/ugorji/go/codec v1.3.1/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4= -github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= -github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.68.0 h1:v12Nx16iepr8r9ySOwqI+5RBJ/DqTxhOy1HrHoDFnok= github.com/valyala/fasthttp v1.68.0/go.mod h1:5EXiRfYQAoiO/khu4oU9VISC/eVY6JqmSpPJoHCKsz4= github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI= github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= -github.com/vultr/govultr/v3 v3.26.1 h1:G/M0rMQKwVSmL+gb0UgETbW5mcQi0Vf/o/ZSGdBCxJw= -github.com/vultr/govultr/v3 v3.26.1/go.mod h1:9WwnWGCKnwDlNjHjtt+j+nP+0QWq6hQXzaHgddqrLWY= github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= -github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 
h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= -github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusing/ds v0.3.1 h1:mCqTgTQD8RhiBpcysvii5kZ7ZBmqcknVsFubNALGLbY= github.com/yusing/ds v0.3.1/go.mod h1:XhKV4l7cZwBbbl7lRzNC9zX27zvCM0frIwiuD40ULRk= -github.com/yusing/godoxy/internal/dnsproviders v0.0.0-20260104140148-1c2515cb298d h1:O6umnEZyKot6IqyOCuLMUuCT8/K8n+lKiQJ+UjmSfVc= -github.com/yusing/godoxy/internal/dnsproviders v0.0.0-20260104140148-1c2515cb298d/go.mod h1:84uz4o4GfD4FhXv3v7620Vj7LtXL0gnxDgL9LA+KmEI= github.com/yusing/gointernals v0.1.16 h1:GrhZZdxzA+jojLEqankctJrOuAYDb7kY1C93S1pVR34= github.com/yusing/gointernals v0.1.16/go.mod h1:B/0FVXt4WPmgzVy3ynzkqKi+BSGaJVmwCJBRXYapo34= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= @@ -316,8 +227,6 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= -go.uber.org/ratelimit v0.3.1 h1:K4qVE+byfv/B3tC+4nYWP7v/6SimcO7HzHekoMNBma0= -go.uber.org/ratelimit v0.3.1/go.mod h1:6euWsTB6U/Nb3X++xEUXA8ciPJvr19Q/0h1+oDcJhRk= golang.org/x/arch v0.23.0 h1:lKF64A2jF6Zd8L0knGltUnegD62JMFBiCPBmQpToHhg= golang.org/x/arch v0.23.0/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -361,10 +270,8 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -409,21 +316,11 @@ golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxb golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.258.0 h1:IKo1j5FBlN74fe5isA2PVozN3Y5pwNKriEgAXPOkDAc= -google.golang.org/api v0.258.0/go.mod h1:qhOMTQEZ6lUps63ZNq9jhODswwjkjYYguA7fA3TBFww= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= -google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= -google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod 
h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/agent/pkg/handler/check_health.go b/agent/pkg/handler/check_health.go index 16f82a15..d511e7de 100644 --- a/agent/pkg/handler/check_health.go +++ b/agent/pkg/handler/check_health.go @@ -1,16 +1,16 @@ package handler import ( - "context" - "fmt" + "net" "net/http" "net/url" - "os" + "strconv" "strings" + "time" "github.com/bytedance/sonic" "github.com/yusing/godoxy/internal/types" - "github.com/yusing/godoxy/internal/watcher/health/monitor" + healthcheck "github.com/yusing/godoxy/internal/watcher/health/check" ) func CheckHealth(w http.ResponseWriter, r *http.Request) { @@ -20,6 +20,7 @@ func CheckHealth(w http.ResponseWriter, r *http.Request) { http.Error(w, "missing scheme", http.StatusBadRequest) return } + timeout := parseMsOrDefault(query.Get("timeout")) var ( result types.HealthCheckResult @@ -32,24 +33,21 @@ func CheckHealth(w http.ResponseWriter, r *http.Request) { http.Error(w, "missing path", http.StatusBadRequest) return } - _, err := os.Stat(path) - result = types.HealthCheckResult{Healthy: err == nil} - if err != nil { - 
result.Detail = err.Error() - } - case "http", "https": // path is optional + result, err = healthcheck.FileServer(path) + case "http", "https", "h2c": // path is optional host := query.Get("host") path := query.Get("path") if host == "" { http.Error(w, "missing host", http.StatusBadRequest) return } - result, err = monitor.NewHTTPHealthMonitor(&url.URL{ - Scheme: scheme, - Host: host, - Path: path, - }, healthCheckConfigFromRequest(r)).CheckHealth() - case "tcp", "udp": + url := url.URL{Scheme: scheme, Host: host} + if scheme == "h2c" { + result, err = healthcheck.H2C(r.Context(), &url, http.MethodHead, path, timeout) + } else { + result, err = healthcheck.HTTP(&url, http.MethodHead, path, timeout) + } + case "tcp", "udp", "tcp4", "udp4", "tcp6", "udp6": host := query.Get("host") if host == "" { http.Error(w, "missing host", http.StatusBadRequest) @@ -62,12 +60,10 @@ func CheckHealth(w http.ResponseWriter, r *http.Request) { return } if port != "" { - host = fmt.Sprintf("%s:%s", host, port) + host = net.JoinHostPort(host, port) } - result, err = monitor.NewRawHealthMonitor(&url.URL{ - Scheme: scheme, - Host: host, - }, healthCheckConfigFromRequest(r)).CheckHealth() + url := url.URL{Scheme: scheme, Host: host} + result, err = healthcheck.Stream(r.Context(), &url, timeout) } if err != nil { @@ -80,12 +76,15 @@ func CheckHealth(w http.ResponseWriter, r *http.Request) { sonic.ConfigDefault.NewEncoder(w).Encode(result) } -func healthCheckConfigFromRequest(r *http.Request) types.HealthCheckConfig { - // we only need timeout and base context because it's one shot request - return types.HealthCheckConfig{ - Timeout: types.HealthCheckTimeoutDefault, - BaseContext: func() context.Context { - return r.Context() - }, +func parseMsOrDefault(msStr string) time.Duration { + if msStr == "" { + return types.HealthCheckTimeoutDefault } + + timeoutMs, _ := strconv.ParseInt(msStr, 10, 64) + if timeoutMs == 0 { + return types.HealthCheckTimeoutDefault + } + + return 
time.Duration(timeoutMs) * time.Millisecond } diff --git a/internal/docker/client.go b/internal/docker/client.go index de8669cb..428d6483 100644 --- a/internal/docker/client.go +++ b/internal/docker/client.go @@ -250,7 +250,7 @@ func (c *SharedClient) Key() string { return c.key } -func (c *SharedClient) Address() string { +func (c *SharedClient) DaemonHost() string { return c.addr } diff --git a/internal/watcher/docker_watcher.go b/internal/watcher/docker_watcher.go index 472d1fd9..77b8d2c7 100644 --- a/internal/watcher/docker_watcher.go +++ b/internal/watcher/docker_watcher.go @@ -104,7 +104,7 @@ func (w DockerWatcher) EventsWithOptions(ctx context.Context, options DockerList }() chs := client.Events(ctx, options) - defer log.Debug().Str("host", client.Address()).Msg("docker watcher closed") + defer log.Debug().Str("host", client.DaemonHost()).Msg("docker watcher closed") for { select { case <-ctx.Done(): @@ -177,7 +177,7 @@ func checkConnection(ctx context.Context, client *docker.SharedClient) bool { defer cancel() err := client.CheckConnection(ctx) if err != nil { - log.Debug().Err(err).Str("host", client.Address()).Msg("docker watcher: connection failed") + log.Debug().Err(err).Str("host", client.DaemonHost()).Msg("docker watcher: connection failed") return false } return true diff --git a/internal/watcher/health/check/docker.go b/internal/watcher/health/check/docker.go new file mode 100644 index 00000000..80567240 --- /dev/null +++ b/internal/watcher/health/check/docker.go @@ -0,0 +1,115 @@ +package healthcheck + +import ( + "context" + "errors" + "net/http" + "time" + + "github.com/bytedance/sonic" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/client" + "github.com/yusing/godoxy/internal/docker" + "github.com/yusing/godoxy/internal/types" + httputils "github.com/yusing/goutils/http" +) + +type DockerHealthcheckState struct { + client *docker.SharedClient + containerId string + + numDockerFailures int +} + +const dockerFailuresThreshold 
= 3 + +var errDockerHealthCheckFailedTooManyTimes = errors.New("docker health check failed too many times") + +func NewDockerHealthcheckState(client *docker.SharedClient, containerId string) *DockerHealthcheckState { + client.InterceptHTTPClient(interceptDockerInspectResponse) + return &DockerHealthcheckState{ + client: client, + containerId: containerId, + numDockerFailures: 0, + } +} + +func Docker(ctx context.Context, state *DockerHealthcheckState, containerId string, timeout time.Duration) (types.HealthCheckResult, error) { + if state.numDockerFailures > dockerFailuresThreshold { + return types.HealthCheckResult{}, errDockerHealthCheckFailedTooManyTimes + } + + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + // the actual inspect response is intercepted and returned as RequestInterceptedError + _, err := state.client.ContainerInspect(ctx, containerId, client.ContainerInspectOptions{}) + + var interceptedErr *httputils.RequestInterceptedError + if !httputils.AsRequestInterceptedError(err, &interceptedErr) { + state.numDockerFailures++ + return types.HealthCheckResult{}, err + } + + if interceptedErr == nil || interceptedErr.Data == nil { // should not happen + state.numDockerFailures++ + return types.HealthCheckResult{}, errors.New("intercepted error is nil or data is nil") + } + + containerState := interceptedErr.Data.(container.State) + + status := containerState.Status + switch status { + case "dead", "exited", "paused", "restarting", "removing": + state.numDockerFailures = 0 + return types.HealthCheckResult{ + Healthy: false, + Detail: "container is " + string(status), + }, nil + case "created": + state.numDockerFailures = 0 + return types.HealthCheckResult{ + Healthy: false, + Detail: "container is not started", + }, nil + } + + health := containerState.Health + if health == nil { + // no health check from docker, directly use fallback + state.numDockerFailures = dockerFailuresThreshold + 1 + return types.HealthCheckResult{}, 
errDockerHealthCheckFailedTooManyTimes + } + + state.numDockerFailures = 0 + result := types.HealthCheckResult{ + Healthy: health.Status == container.Healthy, + } + if len(health.Log) > 0 { + lastLog := health.Log[len(health.Log)-1] + result.Detail = lastLog.Output + result.Latency = lastLog.End.Sub(lastLog.Start) + } + return result, nil +} + +func interceptDockerInspectResponse(resp *http.Response) (intercepted bool, err error) { + if resp.StatusCode != http.StatusOK { + return false, nil + } + + body, release, err := httputils.ReadAllBody(resp) + resp.Body.Close() + if err != nil { + return false, err + } + + var state container.State + err = sonic.Unmarshal(body, &state) + release(body) + if err != nil { + return false, err + } + + return true, httputils.NewRequestInterceptedError(resp, state) +} diff --git a/internal/watcher/health/check/fileserver.go b/internal/watcher/health/check/fileserver.go new file mode 100644 index 00000000..ea16318f --- /dev/null +++ b/internal/watcher/health/check/fileserver.go @@ -0,0 +1,28 @@ +package healthcheck + +import ( + "os" + "time" + + "github.com/yusing/godoxy/internal/types" +) + +func FileServer(path string) (types.HealthCheckResult, error) { + start := time.Now() + _, err := os.Stat(path) + lat := time.Since(start) + + if err != nil { + if os.IsNotExist(err) { + return types.HealthCheckResult{ + Detail: err.Error(), + }, nil + } + return types.HealthCheckResult{}, err + } + + return types.HealthCheckResult{ + Healthy: true, + Latency: lat, + }, nil +} diff --git a/internal/watcher/health/monitor/http.go b/internal/watcher/health/check/http.go similarity index 69% rename from internal/watcher/health/monitor/http.go rename to internal/watcher/health/check/http.go index b286ca1d..44d69b84 100644 --- a/internal/watcher/health/monitor/http.go +++ b/internal/watcher/health/check/http.go @@ -1,4 +1,4 @@ -package monitor +package healthcheck import ( "context" @@ -15,11 +15,15 @@ import ( "golang.org/x/net/http2" ) -type 
HTTPHealthMonitor struct { - *monitor - method string +var h2cClient = &http.Client{ + Transport: &http2.Transport{ + AllowHTTP: true, + DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, addr) + }, + }, } - var pinger = &fasthttp.Client{ MaxConnDuration: 0, DisableHeaderNamesNormalizing: true, @@ -31,15 +35,56 @@ var pinger = &fasthttp.Client{ NoDefaultUserAgentHeader: true, } -func NewHTTPHealthMonitor(url *url.URL, config types.HealthCheckConfig) *HTTPHealthMonitor { - mon := new(HTTPHealthMonitor) - mon.monitor = newMonitor(url, config, mon.CheckHealth) - if config.UseGet { - mon.method = fasthttp.MethodGet +func HTTP(url *url.URL, method, path string, timeout time.Duration) (types.HealthCheckResult, error) { + req := fasthttp.AcquireRequest() + defer fasthttp.ReleaseRequest(req) + + resp := fasthttp.AcquireResponse() + defer fasthttp.ReleaseResponse(resp) + + req.SetRequestURI(url.JoinPath(path).String()) + req.Header.SetMethod(method) + setCommonHeaders(req.Header.Set) + req.SetConnectionClose() + + start := time.Now() + respErr := pinger.DoTimeout(req, resp, timeout) + lat := time.Since(start) + + return processHealthResponse(lat, respErr, resp.StatusCode) +} + +func H2C(ctx context.Context, url *url.URL, method, path string, timeout time.Duration) (types.HealthCheckResult, error) { + u := url.JoinPath(path) // JoinPath returns a copy of the URL with the path joined + u.Scheme = "http" + + ctx, cancel := context.WithTimeoutCause(ctx, timeout, errors.New("h2c health check timed out")) + defer cancel() + + var req *http.Request + var err error + if method == fasthttp.MethodGet { + req, err = http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) } else { - mon.method = fasthttp.MethodHead + req, err = http.NewRequestWithContext(ctx, http.MethodHead, u.String(), nil) } - return mon + if err != nil { + return types.HealthCheckResult{ + Detail: 
err.Error(), + }, nil + } + + setCommonHeaders(req.Header.Set) + + start := time.Now() + resp, err := h2cClient.Do(req) + lat := time.Since(start) + + if resp != nil { + defer resp.Body.Close() + } + + return processHealthResponse(lat, err, func() int { return resp.StatusCode }) } var userAgent = "GoDoxy/" + version.Get().String() @@ -76,73 +121,3 @@ func processHealthResponse(lat time.Duration, err error, getStatusCode func() in Healthy: true, }, nil } - -var h2cClient = &http.Client{ - Transport: &http2.Transport{ - AllowHTTP: true, - DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) { - var d net.Dialer - return d.DialContext(ctx, network, addr) - }, - }, -} - -func (mon *HTTPHealthMonitor) CheckHealth() (types.HealthCheckResult, error) { - if mon.url.Load().Scheme == "h2c" { - return mon.CheckHealthH2C() - } - return mon.CheckHealthHTTP() -} - -func (mon *HTTPHealthMonitor) CheckHealthHTTP() (types.HealthCheckResult, error) { - req := fasthttp.AcquireRequest() - defer fasthttp.ReleaseRequest(req) - - resp := fasthttp.AcquireResponse() - defer fasthttp.ReleaseResponse(resp) - - req.SetRequestURI(mon.url.Load().JoinPath(mon.config.Path).String()) - req.Header.SetMethod(mon.method) - setCommonHeaders(req.Header.Set) - req.SetConnectionClose() - - start := time.Now() - respErr := pinger.DoTimeout(req, resp, mon.config.Timeout) - lat := time.Since(start) - - return processHealthResponse(lat, respErr, resp.StatusCode) -} - -func (mon *HTTPHealthMonitor) CheckHealthH2C() (types.HealthCheckResult, error) { - u := mon.url.Load() - u = u.JoinPath(mon.config.Path) // JoinPath returns a copy of the URL with the path joined - u.Scheme = "http" - - ctx, cancel := mon.ContextWithTimeout("h2c health check timed out") - defer cancel() - - var req *http.Request - var err error - if mon.method == fasthttp.MethodGet { - req, err = http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) - } else { - req, err = 
http.NewRequestWithContext(ctx, http.MethodHead, u.String(), nil) - } - if err != nil { - return types.HealthCheckResult{ - Detail: err.Error(), - }, nil - } - - setCommonHeaders(req.Header.Set) - - start := time.Now() - resp, err := h2cClient.Do(req) - lat := time.Since(start) - - if resp != nil { - defer resp.Body.Close() - } - - return processHealthResponse(lat, err, func() int { return resp.StatusCode }) -} diff --git a/internal/watcher/health/monitor/raw.go b/internal/watcher/health/check/stream.go similarity index 53% rename from internal/watcher/health/monitor/raw.go rename to internal/watcher/health/check/stream.go index fcde7255..6f4c9a16 100644 --- a/internal/watcher/health/monitor/raw.go +++ b/internal/watcher/health/check/stream.go @@ -1,6 +1,7 @@ -package monitor +package healthcheck import ( + "context" "errors" "net" "net/url" @@ -10,30 +11,17 @@ import ( "github.com/yusing/godoxy/internal/types" ) -type ( - RawHealthMonitor struct { - *monitor - dialer *net.Dialer - } -) - -func NewRawHealthMonitor(url *url.URL, config types.HealthCheckConfig) *RawHealthMonitor { - mon := new(RawHealthMonitor) - mon.monitor = newMonitor(url, config, mon.CheckHealth) - mon.dialer = &net.Dialer{ - Timeout: config.Timeout, +func Stream(ctx context.Context, url *url.URL, timeout time.Duration) (types.HealthCheckResult, error) { + dialer := net.Dialer{ + Timeout: timeout, FallbackDelay: -1, } - return mon -} -func (mon *RawHealthMonitor) CheckHealth() (types.HealthCheckResult, error) { - ctx, cancel := mon.ContextWithTimeout("ping request timed out") + ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - url := mon.url.Load() start := time.Now() - conn, err := mon.dialer.DialContext(ctx, url.Scheme, url.Host) + conn, err := dialer.DialContext(ctx, url.Scheme, url.Host) lat := time.Since(start) if err != nil { if errors.Is(err, net.ErrClosed) || @@ -49,6 +37,7 @@ func (mon *RawHealthMonitor) CheckHealth() (types.HealthCheckResult, error) { } return 
types.HealthCheckResult{}, err } + defer conn.Close() return types.HealthCheckResult{ Latency: lat, diff --git a/internal/watcher/health/monitor/agent_proxied.go b/internal/watcher/health/monitor/agent_proxied.go deleted file mode 100644 index d9514a70..00000000 --- a/internal/watcher/health/monitor/agent_proxied.go +++ /dev/null @@ -1,71 +0,0 @@ -package monitor - -import ( - "net/url" - - "github.com/yusing/godoxy/internal/agentpool" - "github.com/yusing/godoxy/internal/types" - "github.com/yusing/goutils/synk" -) - -type ( - AgentProxiedMonitor struct { - agent *agentpool.Agent - query synk.Value[string] - *monitor - } - AgentCheckHealthTarget struct { - Scheme string - Host string - Path string - } -) - -func AgentTargetFromURL(url *url.URL) *AgentCheckHealthTarget { - return &AgentCheckHealthTarget{ - Scheme: url.Scheme, - Host: url.Host, - Path: url.Path, - } -} - -func (target *AgentCheckHealthTarget) buildQuery() string { - query := make(url.Values, 3) - query.Set("scheme", target.Scheme) - query.Set("host", target.Host) - query.Set("path", target.Path) - return query.Encode() -} - -func (target *AgentCheckHealthTarget) displayURL() *url.URL { - return &url.URL{ - Scheme: target.Scheme, - Host: target.Host, - Path: target.Path, - } -} - -func NewAgentProxiedMonitor(agent *agentpool.Agent, config types.HealthCheckConfig, target *AgentCheckHealthTarget) *AgentProxiedMonitor { - mon := &AgentProxiedMonitor{ - agent: agent, - } - mon.monitor = newMonitor(target.displayURL(), config, mon.CheckHealth) - mon.query.Store(target.buildQuery()) - return mon -} - -func (mon *AgentProxiedMonitor) CheckHealth() (types.HealthCheckResult, error) { - resp, err := mon.agent.DoHealthCheck(mon.config.Timeout, mon.query.Load()) - result := types.HealthCheckResult{ - Healthy: resp.Healthy, - Detail: resp.Detail, - Latency: resp.Latency, - } - return result, err -} - -func (mon *AgentProxiedMonitor) UpdateURL(url *url.URL) { - mon.monitor.UpdateURL(url) - newTarget := 
AgentTargetFromURL(url) - mon.query.Store(newTarget.buildQuery()) -} diff --git a/internal/watcher/health/monitor/docker.go b/internal/watcher/health/monitor/docker.go deleted file mode 100644 index 7e0c7d4e..00000000 --- a/internal/watcher/health/monitor/docker.go +++ /dev/null @@ -1,139 +0,0 @@ -package monitor - -import ( - "net/http" - "net/url" - - "github.com/bytedance/sonic" - "github.com/moby/moby/api/types/container" - "github.com/moby/moby/client" - "github.com/rs/zerolog/log" - "github.com/yusing/godoxy/internal/docker" - "github.com/yusing/godoxy/internal/types" - gperr "github.com/yusing/goutils/errs" - httputils "github.com/yusing/goutils/http" - "github.com/yusing/goutils/task" -) - -type DockerHealthMonitor struct { - *monitor - client *docker.SharedClient - containerID string - fallback types.HealthChecker - - numDockerFailures int -} - -const dockerFailuresThreshold = 3 - -func NewDockerHealthMonitor(client *docker.SharedClient, containerID, alias string, config types.HealthCheckConfig, fallback types.HealthChecker) *DockerHealthMonitor { - mon := new(DockerHealthMonitor) - mon.client = client - mon.containerID = containerID - mon.monitor = newMonitor(fallback.URL(), config, mon.CheckHealth) - mon.fallback = fallback - mon.service = alias - return mon -} - -func (mon *DockerHealthMonitor) Start(parent task.Parent) gperr.Error { - mon.client = mon.client.CloneUnique() - err := mon.monitor.Start(parent) - if err != nil { - return err - } - // zero port - if mon.monitor.task == nil { - return nil - } - mon.client.InterceptHTTPClient(mon.interceptInspectResponse) - mon.monitor.task.OnFinished("close docker client", mon.client.Close) - return nil -} - -func (mon *DockerHealthMonitor) UpdateURL(url *url.URL) { - mon.monitor.UpdateURL(url) - if mon.fallback != nil { - mon.fallback.UpdateURL(url) - } -} - -func (mon *DockerHealthMonitor) interceptInspectResponse(resp *http.Response) (intercepted bool, err error) { - if resp.StatusCode != http.StatusOK { - 
return false, nil - } - - body, release, err := httputils.ReadAllBody(resp) - resp.Body.Close() - if err != nil { - return false, err - } - - var state container.State - err = sonic.Unmarshal(body, &state) - release(body) - if err != nil { - return false, err - } - - return true, httputils.NewRequestInterceptedError(resp, state) -} - -func (mon *DockerHealthMonitor) CheckHealth() (types.HealthCheckResult, error) { - // if docker health check failed too many times, use fallback forever - if mon.numDockerFailures > dockerFailuresThreshold { - return mon.fallback.CheckHealth() - } - - ctx, cancel := mon.ContextWithTimeout("docker health check timed out") - defer cancel() - - // the actual inspect response is intercepted and returned as RequestInterceptedError - _, err := mon.client.ContainerInspect(ctx, mon.containerID, client.ContainerInspectOptions{}) - - var interceptedErr *httputils.RequestInterceptedError - if !httputils.AsRequestInterceptedError(err, &interceptedErr) { - mon.numDockerFailures++ - log.Debug().Err(err).Str("container_id", mon.containerID).Msg("docker health check failed, using fallback") - return mon.fallback.CheckHealth() - } - - if interceptedErr == nil || interceptedErr.Data == nil { // should not happen - log.Debug().Msgf("intercepted error is nil or data is nil, container_id: %s", mon.containerID) - mon.numDockerFailures++ - log.Debug().Err(err).Str("container_id", mon.containerID).Msg("docker health check failed, using fallback") - return mon.fallback.CheckHealth() - } - - state := interceptedErr.Data.(container.State) - status := state.Status - switch status { - case "dead", "exited", "paused", "restarting", "removing": - mon.numDockerFailures = 0 - return types.HealthCheckResult{ - Healthy: false, - Detail: "container is " + string(status), - }, nil - case "created": - mon.numDockerFailures = 0 - return types.HealthCheckResult{ - Healthy: false, - Detail: "container is not started", - }, nil - } - if state.Health == nil { // no health 
check from docker, directly use fallback starting from next check - mon.numDockerFailures = dockerFailuresThreshold + 1 - return mon.fallback.CheckHealth() - } - - mon.numDockerFailures = 0 - result := types.HealthCheckResult{ - Healthy: state.Health.Status == container.Healthy, - } - if len(state.Health.Log) > 0 { - lastLog := state.Health.Log[len(state.Health.Log)-1] - result.Detail = lastLog.Output - result.Latency = lastLog.End.Sub(lastLog.Start) - } - return result, nil -} diff --git a/internal/watcher/health/monitor/fileserver.go b/internal/watcher/health/monitor/fileserver.go deleted file mode 100644 index cd1d7568..00000000 --- a/internal/watcher/health/monitor/fileserver.go +++ /dev/null @@ -1,36 +0,0 @@ -package monitor - -import ( - "os" - "time" - - "github.com/yusing/godoxy/internal/types" -) - -type FileServerHealthMonitor struct { - *monitor - path string -} - -func NewFileServerHealthMonitor(config types.HealthCheckConfig, path string) *FileServerHealthMonitor { - mon := &FileServerHealthMonitor{path: path} - mon.monitor = newMonitor(nil, config, mon.CheckHealth) - return mon -} - -func (s *FileServerHealthMonitor) CheckHealth() (types.HealthCheckResult, error) { - start := time.Now() - _, err := os.Stat(s.path) - if err != nil { - if os.IsNotExist(err) { - return types.HealthCheckResult{ - Detail: err.Error(), - }, nil - } - return types.HealthCheckResult{}, err - } - return types.HealthCheckResult{ - Healthy: true, - Latency: time.Since(start), - }, nil -} diff --git a/internal/watcher/health/monitor/monitor.go b/internal/watcher/health/monitor/monitor.go index 91f41bfd..5f56ec40 100644 --- a/internal/watcher/health/monitor/monitor.go +++ b/internal/watcher/health/monitor/monitor.go @@ -21,7 +21,7 @@ import ( ) type ( - HealthCheckFunc func() (result types.HealthCheckResult, err error) + HealthCheckFunc func(url *url.URL) (result types.HealthCheckResult, err error) monitor struct { service string config types.HealthCheckConfig @@ -44,52 +44,56 @@ 
type ( var ErrNegativeInterval = gperr.New("negative interval") func NewMonitor(r types.Route) types.HealthMonCheck { + target := &r.TargetURL().URL + var mon types.HealthMonCheck if r.IsAgent() { - mon = NewAgentProxiedMonitor(r.GetAgent(), r.HealthCheckConfig(), AgentTargetFromURL(&r.TargetURL().URL)) + mon = NewAgentProxiedMonitor(r.HealthCheckConfig(), r.GetAgent(), target) } else { switch r := r.(type) { case types.ReverseProxyRoute: - mon = NewHTTPHealthMonitor(&r.TargetURL().URL, r.HealthCheckConfig()) + mon = NewHTTPHealthMonitor(r.HealthCheckConfig(), target) case types.FileServerRoute: mon = NewFileServerHealthMonitor(r.HealthCheckConfig(), r.RootPath()) case types.StreamRoute: - mon = NewRawHealthMonitor(&r.TargetURL().URL, r.HealthCheckConfig()) + mon = NewStreamHealthMonitor(r.HealthCheckConfig(), target) default: log.Panic().Msgf("unexpected route type: %T", r) } } if r.IsDocker() { cont := r.ContainerInfo() - client, err := docker.NewClient(cont.DockerCfg) + client, err := docker.NewClient(cont.DockerCfg, true) if err != nil { return mon } r.Task().OnCancel("close_docker_client", client.Close) - return NewDockerHealthMonitor(client, cont.ContainerID, r.Name(), r.HealthCheckConfig(), mon) + + fallback := mon + return NewDockerHealthMonitor(r.HealthCheckConfig(), client, cont.ContainerID, fallback) } return mon } -func newMonitor(u *url.URL, cfg types.HealthCheckConfig, healthCheckFunc HealthCheckFunc) *monitor { +func (mon *monitor) init(u *url.URL, cfg types.HealthCheckConfig, healthCheckFunc HealthCheckFunc) *monitor { if state := config.WorkingState.Load(); state != nil { cfg.ApplyDefaults(state.Value().Defaults.HealthCheck) } else { cfg.ApplyDefaults(types.HealthCheckConfig{}) // use defaults from constants } - mon := &monitor{ - config: cfg, - checkHealth: healthCheckFunc, - startTime: time.Now(), - notifyFunc: notif.Notify, - } - if u == nil { - u = &url.URL{} - } - mon.url.Store(u) + mon.config = cfg + mon.checkHealth = healthCheckFunc + 
mon.startTime = time.Now() + mon.notifyFunc = notif.Notify mon.status.Store(types.StatusHealthy) mon.lastResult.Store(types.HealthCheckResult{Healthy: true, Detail: "started"}) - return mon + + if u == nil { + mon.url.Store(&url.URL{}) + } else { + mon.url.Store(u) + } + return nil } func (mon *monitor) ContextWithTimeout(cause string) (ctx context.Context, cancel context.CancelFunc) { @@ -104,6 +108,10 @@ func (mon *monitor) ContextWithTimeout(cause string) (ctx context.Context, cance return context.WithTimeoutCause(ctx, mon.config.Timeout, gperr.New(cause)) } +func (mon *monitor) CheckHealth() (types.HealthCheckResult, error) { + return mon.checkHealth(mon.url.Load()) +} + // Start implements task.TaskStarter. func (mon *monitor) Start(parent task.Parent) gperr.Error { if mon.config.Interval <= 0 { @@ -242,7 +250,7 @@ func (mon *monitor) MarshalJSON() ([]byte, error) { func (mon *monitor) checkUpdateHealth() error { logger := log.With().Str("name", mon.Name()).Logger() - result, err := mon.checkHealth() + result, err := mon.checkHealth(mon.url.Load()) var lastStatus types.HealthStatus switch { diff --git a/internal/watcher/health/monitor/monitor_test.go b/internal/watcher/health/monitor/monitor_test.go index db4f1415..ad2d3e06 100644 --- a/internal/watcher/health/monitor/monitor_test.go +++ b/internal/watcher/health/monitor/monitor_test.go @@ -31,7 +31,8 @@ func (t *testNotificationTracker) getStats() (up, down int, last string) { func createTestMonitor(config types.HealthCheckConfig, checkFunc HealthCheckFunc) (*monitor, *testNotificationTracker) { testURL, _ := url.Parse("http://localhost:8080") - mon := newMonitor(testURL, config, checkFunc) + var mon monitor + mon.init(testURL, config, checkFunc) // Override notification functions to track calls instead of actually notifying tracker := &testNotificationTracker{} @@ -52,7 +53,7 @@ func createTestMonitor(config types.HealthCheckConfig, checkFunc HealthCheckFunc } } - return mon, tracker + return &mon, tracker } 
func TestNotification_ImmediateNotifyAfterZero(t *testing.T) { @@ -62,17 +63,17 @@ func TestNotification_ImmediateNotifyAfterZero(t *testing.T) { Retries: -1, // Immediate notification } - mon, tracker := createTestMonitor(config, func() (types.HealthCheckResult, error) { + mon, tracker := createTestMonitor(config, func(u *url.URL) (types.HealthCheckResult, error) { return types.HealthCheckResult{Healthy: true}, nil }) // Start with healthy service - result, err := mon.checkHealth() + result, err := mon.checkHealth(nil) require.NoError(t, err) require.True(t, result.Healthy) // Set to unhealthy - mon.checkHealth = func() (types.HealthCheckResult, error) { + mon.checkHealth = func(u *url.URL) (types.HealthCheckResult, error) { return types.HealthCheckResult{Healthy: false}, nil } @@ -97,7 +98,7 @@ func TestNotification_WithNotifyAfterThreshold(t *testing.T) { Retries: 2, // Notify after 2 consecutive failures } - mon, tracker := createTestMonitor(config, func() (types.HealthCheckResult, error) { + mon, tracker := createTestMonitor(config, func(u *url.URL) (types.HealthCheckResult, error) { return types.HealthCheckResult{Healthy: true}, nil }) @@ -105,7 +106,7 @@ func TestNotification_WithNotifyAfterThreshold(t *testing.T) { mon.status.Store(types.StatusHealthy) // Set to unhealthy - mon.checkHealth = func() (types.HealthCheckResult, error) { + mon.checkHealth = func(u *url.URL) (types.HealthCheckResult, error) { return types.HealthCheckResult{Healthy: false}, nil } @@ -136,7 +137,7 @@ func TestNotification_ServiceRecoversBeforeThreshold(t *testing.T) { Retries: 3, // Notify after 3 consecutive failures } - mon, tracker := createTestMonitor(config, func() (types.HealthCheckResult, error) { + mon, tracker := createTestMonitor(config, func(u *url.URL) (types.HealthCheckResult, error) { return types.HealthCheckResult{Healthy: true}, nil }) @@ -144,7 +145,7 @@ func TestNotification_ServiceRecoversBeforeThreshold(t *testing.T) { mon.status.Store(types.StatusHealthy) // 
Set to unhealthy - mon.checkHealth = func() (types.HealthCheckResult, error) { + mon.checkHealth = func(u *url.URL) (types.HealthCheckResult, error) { return types.HealthCheckResult{Healthy: false}, nil } @@ -162,7 +163,7 @@ func TestNotification_ServiceRecoversBeforeThreshold(t *testing.T) { require.Equal(t, 0, up) // Service recovers before third failure - mon.checkHealth = func() (types.HealthCheckResult, error) { + mon.checkHealth = func(u *url.URL) (types.HealthCheckResult, error) { return types.HealthCheckResult{Healthy: true}, nil } @@ -185,7 +186,7 @@ func TestNotification_ConsecutiveFailureReset(t *testing.T) { Retries: 2, // Notify after 2 consecutive failures } - mon, tracker := createTestMonitor(config, func() (types.HealthCheckResult, error) { + mon, tracker := createTestMonitor(config, func(u *url.URL) (types.HealthCheckResult, error) { return types.HealthCheckResult{Healthy: true}, nil }) @@ -193,7 +194,7 @@ func TestNotification_ConsecutiveFailureReset(t *testing.T) { mon.status.Store(types.StatusHealthy) // Set to unhealthy - mon.checkHealth = func() (types.HealthCheckResult, error) { + mon.checkHealth = func(u *url.URL) (types.HealthCheckResult, error) { return types.HealthCheckResult{Healthy: false}, nil } @@ -202,7 +203,7 @@ func TestNotification_ConsecutiveFailureReset(t *testing.T) { require.NoError(t, err) // Recover briefly - mon.checkHealth = func() (types.HealthCheckResult, error) { + mon.checkHealth = func(u *url.URL) (types.HealthCheckResult, error) { return types.HealthCheckResult{Healthy: true}, nil } @@ -215,7 +216,7 @@ func TestNotification_ConsecutiveFailureReset(t *testing.T) { require.Equal(t, 1, up) // Go down again - consecutive counter should start from 0 - mon.checkHealth = func() (types.HealthCheckResult, error) { + mon.checkHealth = func(u *url.URL) (types.HealthCheckResult, error) { return types.HealthCheckResult{Healthy: false}, nil } @@ -246,7 +247,7 @@ func TestNotification_ContextCancellation(t *testing.T) { Retries: 1, 
} - mon, tracker := createTestMonitor(config, func() (types.HealthCheckResult, error) { + mon, tracker := createTestMonitor(config, func(u *url.URL) (types.HealthCheckResult, error) { return types.HealthCheckResult{Healthy: true}, nil }) @@ -256,7 +257,7 @@ func TestNotification_ContextCancellation(t *testing.T) { // Start healthy, then go unhealthy mon.status.Store(types.StatusHealthy) - mon.checkHealth = func() (types.HealthCheckResult, error) { + mon.checkHealth = func(u *url.URL) (types.HealthCheckResult, error) { return types.HealthCheckResult{Healthy: false}, nil } @@ -285,7 +286,7 @@ func TestImmediateUpNotification(t *testing.T) { Retries: 2, // NotifyAfter should not affect up notifications } - mon, tracker := createTestMonitor(config, func() (types.HealthCheckResult, error) { + mon, tracker := createTestMonitor(config, func(u *url.URL) (types.HealthCheckResult, error) { return types.HealthCheckResult{Healthy: false}, nil }) @@ -293,7 +294,7 @@ func TestImmediateUpNotification(t *testing.T) { mon.status.Store(types.StatusUnhealthy) // Set to healthy - mon.checkHealth = func() (types.HealthCheckResult, error) { + mon.checkHealth = func(u *url.URL) (types.HealthCheckResult, error) { return types.HealthCheckResult{Healthy: true, Latency: 50 * time.Millisecond}, nil } diff --git a/internal/watcher/health/monitor/new.go b/internal/watcher/health/monitor/new.go new file mode 100644 index 00000000..9715cbf8 --- /dev/null +++ b/internal/watcher/health/monitor/new.go @@ -0,0 +1,96 @@ +package monitor + +import ( + "fmt" + "net/http" + "net/url" + "time" + + "github.com/rs/zerolog/log" + "github.com/yusing/godoxy/internal/agentpool" + "github.com/yusing/godoxy/internal/docker" + "github.com/yusing/godoxy/internal/types" + healthcheck "github.com/yusing/godoxy/internal/watcher/health/check" +) + +type Result = types.HealthCheckResult +type Monitor = types.HealthMonCheck + +func NewHTTPHealthMonitor(config types.HealthCheckConfig, u *url.URL) Monitor { + var method 
string + if config.UseGet { + method = http.MethodGet + } else { + method = http.MethodHead + } + + var mon monitor + mon.init(u, config, func(u *url.URL) (result Result, err error) { + if u.Scheme == "h2c" { + return healthcheck.H2C(mon.task.Context(), u, method, config.Path, config.Timeout) + } + return healthcheck.HTTP(u, method, config.Path, config.Timeout) + }) + return &mon +} + +func NewFileServerHealthMonitor(config types.HealthCheckConfig, path string) Monitor { + var mon monitor + mon.init(&url.URL{Scheme: "file", Host: path}, config, func(u *url.URL) (result Result, err error) { + return healthcheck.FileServer(path) + }) + return &mon +} + +func NewStreamHealthMonitor(config types.HealthCheckConfig, targetUrl *url.URL) Monitor { + var mon monitor + mon.init(targetUrl, config, func(u *url.URL) (result Result, err error) { + return healthcheck.Stream(mon.task.Context(), u, config.Timeout) + }) + return &mon +} + +func NewDockerHealthMonitor(config types.HealthCheckConfig, client *docker.SharedClient, containerId string, fallback Monitor) Monitor { + state := healthcheck.NewDockerHealthcheckState(client, containerId) + displayURL := &url.URL{ // only for display purposes, no actual request is made + Scheme: "docker", + Host: client.DaemonHost(), + Path: "/containers/" + containerId + "/json", + } + logger := log.With().Str("host", client.DaemonHost()).Str("container_id", containerId).Logger() + + var mon monitor + mon.init(displayURL, config, func(u *url.URL) (result Result, err error) { + result, err = healthcheck.Docker(mon.task.Context(), state, containerId, config.Timeout) + if err != nil { + logger.Err(err).Msg("docker health check failed, using fallback") + return fallback.CheckHealth() + } + return result, nil + }) + return &mon +} + +func NewAgentProxiedMonitor(config types.HealthCheckConfig, agent *agentpool.Agent, targetUrl *url.URL) Monitor { + var mon monitor + mon.init(targetUrl, config, func(u *url.URL) (result Result, err error) { + return 
CheckHealthAgentProxied(agent, config.Timeout, targetUrl) + }) + return &mon +} + +func CheckHealthAgentProxied(agent *agentpool.Agent, timeout time.Duration, targetUrl *url.URL) (Result, error) { + query := url.Values{ + "scheme": {targetUrl.Scheme}, + "host": {targetUrl.Host}, + "path": {targetUrl.Path}, + "timeout": {fmt.Sprintf("%d", timeout.Milliseconds())}, + } + resp, err := agent.DoHealthCheck(timeout, query.Encode()) + result := Result{ + Healthy: resp.Healthy, + Detail: resp.Detail, + Latency: resp.Latency, + } + return result, err +} From 581503e160adcf2ee61291635f1499547bc78986 Mon Sep 17 00:00:00 2001 From: yusing Date: Thu, 8 Jan 2026 15:08:02 +0800 Subject: [PATCH 19/51] refactor: move internal/watcher/health to internal/health --- agent/pkg/handler/check_health.go | 2 +- internal/autocert/setup_test.go | 18 +++++++++--------- internal/{watcher => }/health/check/docker.go | 0 .../{watcher => }/health/check/fileserver.go | 0 internal/{watcher => }/health/check/http.go | 0 internal/{watcher => }/health/check/stream.go | 0 .../{watcher => }/health/monitor/last_seen.go | 0 .../{watcher => }/health/monitor/monitor.go | 0 .../health/monitor/monitor_test.go | 0 internal/{watcher => }/health/monitor/new.go | 2 +- internal/idlewatcher/watcher.go | 2 +- internal/route/fileserver.go | 2 +- internal/route/reverse_proxy.go | 2 +- internal/route/route.go | 2 +- internal/route/stream.go | 2 +- 15 files changed, 16 insertions(+), 16 deletions(-) rename internal/{watcher => }/health/check/docker.go (100%) rename internal/{watcher => }/health/check/fileserver.go (100%) rename internal/{watcher => }/health/check/http.go (100%) rename internal/{watcher => }/health/check/stream.go (100%) rename internal/{watcher => }/health/monitor/last_seen.go (100%) rename internal/{watcher => }/health/monitor/monitor.go (100%) rename internal/{watcher => }/health/monitor/monitor_test.go (100%) rename internal/{watcher => }/health/monitor/new.go (97%) diff --git 
a/agent/pkg/handler/check_health.go b/agent/pkg/handler/check_health.go index d511e7de..848167b5 100644 --- a/agent/pkg/handler/check_health.go +++ b/agent/pkg/handler/check_health.go @@ -9,8 +9,8 @@ import ( "time" "github.com/bytedance/sonic" + healthcheck "github.com/yusing/godoxy/internal/health/check" "github.com/yusing/godoxy/internal/types" - healthcheck "github.com/yusing/godoxy/internal/watcher/health/check" ) func CheckHealth(w http.ResponseWriter, r *http.Request) { diff --git a/internal/autocert/setup_test.go b/internal/autocert/setup_test.go index 335c2e57..39cb12fb 100644 --- a/internal/autocert/setup_test.go +++ b/internal/autocert/setup_test.go @@ -50,16 +50,16 @@ extra: require.Equal(t, "certs/extra.crt", merged0.CertPath) require.Equal(t, "certs/extra.key", merged0.KeyPath) // Inherited fields from main config: - require.Equal(t, "test@example.com", merged0.Email) // inherited - require.Equal(t, "custom", merged0.Provider) // inherited - require.Equal(t, []string{"example.com"}, merged0.Domains) // inherited + require.Equal(t, "test@example.com", merged0.Email) // inherited + require.Equal(t, "custom", merged0.Provider) // inherited + require.Equal(t, []string{"example.com"}, merged0.Domains) // inherited require.Equal(t, "https://ca.example.com:9000/acme/acme/directory", merged0.CADirURL) // inherited - require.Equal(t, map[string]strutils.Redacted{"key": "value"}, merged0.Options) // inherited - require.Equal(t, []string{"8.8.8.8"}, merged0.Resolvers) // inherited - require.Equal(t, []string{"ca.crt"}, merged0.CACerts) // inherited - require.Equal(t, "eabKid", merged0.EABKid) // inherited - require.Equal(t, "eabHmac", merged0.EABHmac) // inherited - require.Equal(t, cfg.HTTPClient, merged0.HTTPClient) // inherited + require.Equal(t, map[string]strutils.Redacted{"key": "value"}, merged0.Options) // inherited + require.Equal(t, []string{"8.8.8.8"}, merged0.Resolvers) // inherited + require.Equal(t, []string{"ca.crt"}, merged0.CACerts) // inherited 
+ require.Equal(t, "eabKid", merged0.EABKid) // inherited + require.Equal(t, "eabHmac", merged0.EABHmac) // inherited + require.Equal(t, cfg.HTTPClient, merged0.HTTPClient) // inherited require.Nil(t, merged0.Extra) // Test: extra[1] overrides some fields, and inherits others. diff --git a/internal/watcher/health/check/docker.go b/internal/health/check/docker.go similarity index 100% rename from internal/watcher/health/check/docker.go rename to internal/health/check/docker.go diff --git a/internal/watcher/health/check/fileserver.go b/internal/health/check/fileserver.go similarity index 100% rename from internal/watcher/health/check/fileserver.go rename to internal/health/check/fileserver.go diff --git a/internal/watcher/health/check/http.go b/internal/health/check/http.go similarity index 100% rename from internal/watcher/health/check/http.go rename to internal/health/check/http.go diff --git a/internal/watcher/health/check/stream.go b/internal/health/check/stream.go similarity index 100% rename from internal/watcher/health/check/stream.go rename to internal/health/check/stream.go diff --git a/internal/watcher/health/monitor/last_seen.go b/internal/health/monitor/last_seen.go similarity index 100% rename from internal/watcher/health/monitor/last_seen.go rename to internal/health/monitor/last_seen.go diff --git a/internal/watcher/health/monitor/monitor.go b/internal/health/monitor/monitor.go similarity index 100% rename from internal/watcher/health/monitor/monitor.go rename to internal/health/monitor/monitor.go diff --git a/internal/watcher/health/monitor/monitor_test.go b/internal/health/monitor/monitor_test.go similarity index 100% rename from internal/watcher/health/monitor/monitor_test.go rename to internal/health/monitor/monitor_test.go diff --git a/internal/watcher/health/monitor/new.go b/internal/health/monitor/new.go similarity index 97% rename from internal/watcher/health/monitor/new.go rename to internal/health/monitor/new.go index 9715cbf8..3344c40a 
100644 --- a/internal/watcher/health/monitor/new.go +++ b/internal/health/monitor/new.go @@ -9,8 +9,8 @@ import ( "github.com/rs/zerolog/log" "github.com/yusing/godoxy/internal/agentpool" "github.com/yusing/godoxy/internal/docker" + healthcheck "github.com/yusing/godoxy/internal/health/check" "github.com/yusing/godoxy/internal/types" - healthcheck "github.com/yusing/godoxy/internal/watcher/health/check" ) type Result = types.HealthCheckResult diff --git a/internal/idlewatcher/watcher.go b/internal/idlewatcher/watcher.go index de19e3fe..2515806c 100644 --- a/internal/idlewatcher/watcher.go +++ b/internal/idlewatcher/watcher.go @@ -14,13 +14,13 @@ import ( "github.com/yusing/ds/ordered" config "github.com/yusing/godoxy/internal/config/types" "github.com/yusing/godoxy/internal/docker" + "github.com/yusing/godoxy/internal/health/monitor" "github.com/yusing/godoxy/internal/idlewatcher/provider" idlewatcher "github.com/yusing/godoxy/internal/idlewatcher/types" nettypes "github.com/yusing/godoxy/internal/net/types" "github.com/yusing/godoxy/internal/route/routes" "github.com/yusing/godoxy/internal/types" "github.com/yusing/godoxy/internal/watcher/events" - "github.com/yusing/godoxy/internal/watcher/health/monitor" gperr "github.com/yusing/goutils/errs" "github.com/yusing/goutils/http/reverseproxy" strutils "github.com/yusing/goutils/strings" diff --git a/internal/route/fileserver.go b/internal/route/fileserver.go index b6a97aea..b7b2f4ab 100644 --- a/internal/route/fileserver.go +++ b/internal/route/fileserver.go @@ -7,12 +7,12 @@ import ( "path/filepath" config "github.com/yusing/godoxy/internal/config/types" + "github.com/yusing/godoxy/internal/health/monitor" "github.com/yusing/godoxy/internal/logging/accesslog" gphttp "github.com/yusing/godoxy/internal/net/gphttp" "github.com/yusing/godoxy/internal/net/gphttp/middleware" "github.com/yusing/godoxy/internal/route/routes" "github.com/yusing/godoxy/internal/types" - 
"github.com/yusing/godoxy/internal/watcher/health/monitor" gperr "github.com/yusing/goutils/errs" "github.com/yusing/goutils/task" ) diff --git a/internal/route/reverse_proxy.go b/internal/route/reverse_proxy.go index 4bc37866..08225aa5 100755 --- a/internal/route/reverse_proxy.go +++ b/internal/route/reverse_proxy.go @@ -7,6 +7,7 @@ import ( "github.com/yusing/godoxy/agent/pkg/agent" "github.com/yusing/godoxy/agent/pkg/agentproxy" config "github.com/yusing/godoxy/internal/config/types" + "github.com/yusing/godoxy/internal/health/monitor" "github.com/yusing/godoxy/internal/idlewatcher" "github.com/yusing/godoxy/internal/logging/accesslog" gphttp "github.com/yusing/godoxy/internal/net/gphttp" @@ -16,7 +17,6 @@ import ( "github.com/yusing/godoxy/internal/route/routes" route "github.com/yusing/godoxy/internal/route/types" "github.com/yusing/godoxy/internal/types" - "github.com/yusing/godoxy/internal/watcher/health/monitor" gperr "github.com/yusing/goutils/errs" "github.com/yusing/goutils/http/reverseproxy" "github.com/yusing/goutils/task" diff --git a/internal/route/route.go b/internal/route/route.go index 3d48f4e6..52f93301 100644 --- a/internal/route/route.go +++ b/internal/route/route.go @@ -18,6 +18,7 @@ import ( "github.com/yusing/godoxy/internal/agentpool" config "github.com/yusing/godoxy/internal/config/types" "github.com/yusing/godoxy/internal/docker" + "github.com/yusing/godoxy/internal/health/monitor" "github.com/yusing/godoxy/internal/homepage" homepagecfg "github.com/yusing/godoxy/internal/homepage/types" netutils "github.com/yusing/godoxy/internal/net" @@ -25,7 +26,6 @@ import ( "github.com/yusing/godoxy/internal/proxmox" "github.com/yusing/godoxy/internal/serialization" "github.com/yusing/godoxy/internal/types" - "github.com/yusing/godoxy/internal/watcher/health/monitor" gperr "github.com/yusing/goutils/errs" strutils "github.com/yusing/goutils/strings" "github.com/yusing/goutils/task" diff --git a/internal/route/stream.go b/internal/route/stream.go 
index f8dac179..f337ff16 100755 --- a/internal/route/stream.go +++ b/internal/route/stream.go @@ -8,12 +8,12 @@ import ( "github.com/rs/zerolog" "github.com/rs/zerolog/log" + "github.com/yusing/godoxy/internal/health/monitor" "github.com/yusing/godoxy/internal/idlewatcher" nettypes "github.com/yusing/godoxy/internal/net/types" "github.com/yusing/godoxy/internal/route/routes" "github.com/yusing/godoxy/internal/route/stream" "github.com/yusing/godoxy/internal/types" - "github.com/yusing/godoxy/internal/watcher/health/monitor" gperr "github.com/yusing/goutils/errs" "github.com/yusing/goutils/task" ) From 7385761bdfdd38f99adb2a005020b8cad3b19f7c Mon Sep 17 00:00:00 2001 From: yusing Date: Thu, 8 Jan 2026 18:14:14 +0800 Subject: [PATCH 20/51] fix(health): correct context handling, move NewMonitor, and improve docker health check errors - Correct BaseContext nil check in Context() method - Move NewMonitor from monitor.go to new.go - Export ErrDockerHealthCheckFailedTooManyTimes and add ErrDockerHealthCheckNotAvailable - Return ErrDockerHealthCheckNotAvailable when container has no health check configured - Only log first docker health check failure and skip logging for ErrDockerHealthCheckNotAvailable - Use mon.Context() instead of mon.task.Context() to avoid nil panic --- internal/health/check/docker.go | 9 +++--- internal/health/monitor/monitor.go | 49 +++++------------------------ internal/health/monitor/new.go | 50 +++++++++++++++++++++++++++--- 3 files changed, 58 insertions(+), 50 deletions(-) diff --git a/internal/health/check/docker.go b/internal/health/check/docker.go index 80567240..49462827 100644 --- a/internal/health/check/docker.go +++ b/internal/health/check/docker.go @@ -23,7 +23,8 @@ type DockerHealthcheckState struct { const dockerFailuresThreshold = 3 -var errDockerHealthCheckFailedTooManyTimes = errors.New("docker health check failed too many times") +var ErrDockerHealthCheckFailedTooManyTimes = errors.New("docker health check failed too many times") 
+var ErrDockerHealthCheckNotAvailable = errors.New("docker health check not available") func NewDockerHealthcheckState(client *docker.SharedClient, containerId string) *DockerHealthcheckState { client.InterceptHTTPClient(interceptDockerInspectResponse) @@ -36,7 +37,7 @@ func NewDockerHealthcheckState(client *docker.SharedClient, containerId string) func Docker(ctx context.Context, state *DockerHealthcheckState, containerId string, timeout time.Duration) (types.HealthCheckResult, error) { if state.numDockerFailures > dockerFailuresThreshold { - return types.HealthCheckResult{}, errDockerHealthCheckFailedTooManyTimes + return types.HealthCheckResult{}, ErrDockerHealthCheckFailedTooManyTimes } ctx, cancel := context.WithTimeout(ctx, timeout) @@ -76,9 +77,9 @@ func Docker(ctx context.Context, state *DockerHealthcheckState, containerId stri health := containerState.Health if health == nil { - // no health check from docker, directly use fallback + // no health check from docker, return error to trigger fallback state.numDockerFailures = dockerFailuresThreshold + 1 - return types.HealthCheckResult{}, errDockerHealthCheckFailedTooManyTimes + return types.HealthCheckResult{}, ErrDockerHealthCheckNotAvailable } state.numDockerFailures = 0 diff --git a/internal/health/monitor/monitor.go b/internal/health/monitor/monitor.go index 5f56ec40..64bd4f62 100644 --- a/internal/health/monitor/monitor.go +++ b/internal/health/monitor/monitor.go @@ -11,7 +11,6 @@ import ( "github.com/rs/zerolog" "github.com/rs/zerolog/log" config "github.com/yusing/godoxy/internal/config/types" - "github.com/yusing/godoxy/internal/docker" "github.com/yusing/godoxy/internal/notif" "github.com/yusing/godoxy/internal/types" gperr "github.com/yusing/goutils/errs" @@ -43,38 +42,6 @@ type ( var ErrNegativeInterval = gperr.New("negative interval") -func NewMonitor(r types.Route) types.HealthMonCheck { - target := &r.TargetURL().URL - - var mon types.HealthMonCheck - if r.IsAgent() { - mon = 
NewAgentProxiedMonitor(r.HealthCheckConfig(), r.GetAgent(), target) - } else { - switch r := r.(type) { - case types.ReverseProxyRoute: - mon = NewHTTPHealthMonitor(r.HealthCheckConfig(), target) - case types.FileServerRoute: - mon = NewFileServerHealthMonitor(r.HealthCheckConfig(), r.RootPath()) - case types.StreamRoute: - mon = NewStreamHealthMonitor(r.HealthCheckConfig(), target) - default: - log.Panic().Msgf("unexpected route type: %T", r) - } - } - if r.IsDocker() { - cont := r.ContainerInfo() - client, err := docker.NewClient(cont.DockerCfg, true) - if err != nil { - return mon - } - r.Task().OnCancel("close_docker_client", client.Close) - - fallback := mon - return NewDockerHealthMonitor(r.HealthCheckConfig(), client, cont.ContainerID, fallback) - } - return mon -} - func (mon *monitor) init(u *url.URL, cfg types.HealthCheckConfig, healthCheckFunc HealthCheckFunc) *monitor { if state := config.WorkingState.Load(); state != nil { cfg.ApplyDefaults(state.Value().Defaults.HealthCheck) @@ -96,16 +63,14 @@ func (mon *monitor) init(u *url.URL, cfg types.HealthCheckConfig, healthCheckFun return nil } -func (mon *monitor) ContextWithTimeout(cause string) (ctx context.Context, cancel context.CancelFunc) { - switch { - case mon.config.BaseContext != nil: - ctx = mon.config.BaseContext() - case mon.task != nil: - ctx = mon.task.Context() - default: - ctx = context.Background() +func (mon *monitor) Context() context.Context { + if mon.config.BaseContext != nil { + return mon.config.BaseContext() } - return context.WithTimeoutCause(ctx, mon.config.Timeout, gperr.New(cause)) + if mon.task != nil { + return mon.task.Context() + } + return context.Background() } func (mon *monitor) CheckHealth() (types.HealthCheckResult, error) { diff --git a/internal/health/monitor/new.go b/internal/health/monitor/new.go index 3344c40a..d7e99eb5 100644 --- a/internal/health/monitor/new.go +++ b/internal/health/monitor/new.go @@ -1,6 +1,7 @@ package monitor import ( + "errors" "fmt" 
"net/http" "net/url" @@ -16,6 +17,41 @@ import ( type Result = types.HealthCheckResult type Monitor = types.HealthMonCheck +// NewMonitor creates a health monitor based on the route type and configuration. +// +// See internal/health/monitor/README.md for detailed health check flow and conditions. +func NewMonitor(r types.Route) Monitor { + target := &r.TargetURL().URL + + var mon Monitor + if r.IsAgent() { + mon = NewAgentProxiedMonitor(r.HealthCheckConfig(), r.GetAgent(), target) + } else { + switch r := r.(type) { + case types.ReverseProxyRoute: + mon = NewHTTPHealthMonitor(r.HealthCheckConfig(), target) + case types.FileServerRoute: + mon = NewFileServerHealthMonitor(r.HealthCheckConfig(), r.RootPath()) + case types.StreamRoute: + mon = NewStreamHealthMonitor(r.HealthCheckConfig(), target) + default: + log.Panic().Msgf("unexpected route type: %T", r) + } + } + if r.IsDocker() { + cont := r.ContainerInfo() + client, err := docker.NewClient(cont.DockerCfg, true) + if err != nil { + return mon + } + r.Task().OnCancel("close_docker_client", client.Close) + + fallback := mon + return NewDockerHealthMonitor(r.HealthCheckConfig(), client, cont.ContainerID, fallback) + } + return mon +} + func NewHTTPHealthMonitor(config types.HealthCheckConfig, u *url.URL) Monitor { var method string if config.UseGet { @@ -27,7 +63,7 @@ func NewHTTPHealthMonitor(config types.HealthCheckConfig, u *url.URL) Monitor { var mon monitor mon.init(u, config, func(u *url.URL) (result Result, err error) { if u.Scheme == "h2c" { - return healthcheck.H2C(mon.task.Context(), u, method, config.Path, config.Timeout) + return healthcheck.H2C(mon.Context(), u, method, config.Path, config.Timeout) } return healthcheck.HTTP(u, method, config.Path, config.Timeout) }) @@ -45,7 +81,7 @@ func NewFileServerHealthMonitor(config types.HealthCheckConfig, path string) Mon func NewStreamHealthMonitor(config types.HealthCheckConfig, targetUrl *url.URL) Monitor { var mon monitor mon.init(targetUrl, config, func(u 
*url.URL) (result Result, err error) { - return healthcheck.Stream(mon.task.Context(), u, config.Timeout) + return healthcheck.Stream(mon.Context(), u, config.Timeout) }) return &mon } @@ -58,12 +94,18 @@ func NewDockerHealthMonitor(config types.HealthCheckConfig, client *docker.Share Path: "/containers/" + containerId + "/json", } logger := log.With().Str("host", client.DaemonHost()).Str("container_id", containerId).Logger() + isFirstFailure := true var mon monitor mon.init(displayURL, config, func(u *url.URL) (result Result, err error) { - result, err = healthcheck.Docker(mon.task.Context(), state, containerId, config.Timeout) + result, err = healthcheck.Docker(mon.Context(), state, containerId, config.Timeout) if err != nil { - logger.Err(err).Msg("docker health check failed, using fallback") + if isFirstFailure { + isFirstFailure = false + if !errors.Is(err, healthcheck.ErrDockerHealthCheckNotAvailable) { + logger.Err(err).Msg("docker health check failed, using fallback") + } + } return fallback.CheckHealth() } return result, nil From 7556a067164d1cc796c0f775827bd23a9adff643 Mon Sep 17 00:00:00 2001 From: yusing Date: Thu, 8 Jan 2026 18:17:06 +0800 Subject: [PATCH 21/51] feat(scriptsi): add script to sync implementation docs with wiki - Introduced a new `update-wiki` script to automate the synchronization of implementation documentation from the repository to the wiki. - Added necessary configuration files including `package.json`, `tsconfig.json`, and `.gitignore` for the new script. - Updated the Makefile to include a target for running the `update-wiki` script. 
--- Makefile | 6 +- scripts/update-wiki/.gitignore | 1 + scripts/update-wiki/bun.lock | 26 +++++ scripts/update-wiki/main.ts | 175 ++++++++++++++++++++++++++++++ scripts/update-wiki/package.json | 10 ++ scripts/update-wiki/tsconfig.json | 29 +++++ 6 files changed, 246 insertions(+), 1 deletion(-) create mode 100644 scripts/update-wiki/.gitignore create mode 100644 scripts/update-wiki/bun.lock create mode 100644 scripts/update-wiki/main.ts create mode 100644 scripts/update-wiki/package.json create mode 100644 scripts/update-wiki/tsconfig.json diff --git a/Makefile b/Makefile index b5ca0f3e..5c0147ac 100755 --- a/Makefile +++ b/Makefile @@ -171,4 +171,8 @@ gen-api-types: gen-swagger # --disable-throw-on-error bunx --bun swagger-typescript-api generate --sort-types --generate-union-enums --axios --add-readonly --route-types \ --responses -o ${WEBUI_DIR}/lib -n api.ts -p internal/api/v1/docs/swagger.json - bunx --bun prettier --config ${WEBUI_DIR}/.prettierrc --write ${WEBUI_DIR}/lib/api.ts \ No newline at end of file + bunx --bun prettier --config ${WEBUI_DIR}/.prettierrc --write ${WEBUI_DIR}/lib/api.ts + +.PHONY: update-wiki +update-wiki: + DOCS_DIR=${DOCS_DIR} bun --bun scripts/update-wiki/main.ts diff --git a/scripts/update-wiki/.gitignore b/scripts/update-wiki/.gitignore new file mode 100644 index 00000000..b512c09d --- /dev/null +++ b/scripts/update-wiki/.gitignore @@ -0,0 +1 @@ +node_modules \ No newline at end of file diff --git a/scripts/update-wiki/bun.lock b/scripts/update-wiki/bun.lock new file mode 100644 index 00000000..f618b17a --- /dev/null +++ b/scripts/update-wiki/bun.lock @@ -0,0 +1,26 @@ +{ + "lockfileVersion": 1, + "configVersion": 1, + "workspaces": { + "": { + "name": "update-wiki", + "devDependencies": { + "@types/bun": "latest", + }, + "peerDependencies": { + "typescript": "^5", + }, + }, + }, + "packages": { + "@types/bun": ["@types/bun@1.3.5", "", { "dependencies": { "bun-types": "1.3.5" } }, 
"sha512-RnygCqNrd3srIPEWBd5LFeUYG7plCoH2Yw9WaZGyNmdTEei+gWaHqydbaIRkIkcbXwhBT94q78QljxN0Sk838w=="], + + "@types/node": ["@types/node@25.0.3", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-W609buLVRVmeW693xKfzHeIV6nJGGz98uCPfeXI1ELMLXVeKYZ9m15fAMSaUPBHYLGFsVRcMmSCksQOrZV9BYA=="], + + "bun-types": ["bun-types@1.3.5", "", { "dependencies": { "@types/node": "*" } }, "sha512-inmAYe2PFLs0SUbFOWSVD24sg1jFlMPxOjOSSCYqUgn4Hsc3rDc7dFvfVYjFPNHtov6kgUeulV4SxbuIV/stPw=="], + + "typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="], + + "undici-types": ["undici-types@7.16.0", "", {}, "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw=="], + } +} diff --git a/scripts/update-wiki/main.ts b/scripts/update-wiki/main.ts new file mode 100644 index 00000000..09918414 --- /dev/null +++ b/scripts/update-wiki/main.ts @@ -0,0 +1,175 @@ +import { Glob } from "bun"; +import { linkSync } from "fs"; +import { mkdir, readdir, readFile, rm, writeFile } from "fs/promises"; +import path from "path"; + +type ImplDoc = { + /** Directory path relative to this repo, e.g. "internal/health/check" */ + pkgPath: string; + /** File name in wiki `src/impl/`, e.g. "internal-health-check.md" */ + docFileName: string; + /** Absolute source README path */ + srcPathAbs: string; + /** Absolute destination doc path */ + dstPathAbs: string; +}; + +const START_MARKER = "// GENERATED-IMPL-SIDEBAR-START"; +const END_MARKER = "// GENERATED-IMPL-SIDEBAR-END"; + +function escapeRegex(s: string) { + return s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); +} + +function escapeSingleQuotedTs(s: string) { + return s.replace(/\\/g, "\\\\").replace(/'/g, "\\'"); +} + +function sanitizeFileStemFromPkgPath(pkgPath: string) { + // Convert a package path into a stable filename. 
+ // Example: "internal/go-oidc/example" -> "internal-go-oidc-example" + // Keep it readable and unique (uses full path). + const parts = pkgPath + .split("/") + .filter(Boolean) + .map((p) => p.replace(/[^A-Za-z0-9._-]+/g, "-")); + const joined = parts.join("-"); + return joined.replace(/-+/g, "-").replace(/^-|-$/g, ""); +} + +async function listRepoReadmes(repoRootAbs: string): Promise<string[]> { + const glob = new Glob("**/README.md"); + const readmes: string[] = []; + + for await (const rel of glob.scan({ + cwd: repoRootAbs, + onlyFiles: true, + dot: false, + })) { + // Bun returns POSIX-style rel paths. + if (rel === "README.md") continue; // exclude root README + if (rel.startsWith(".git/") || rel.includes("/.git/")) continue; + if (rel.startsWith("node_modules/") || rel.includes("/node_modules/")) + continue; + if (rel.startsWith("internal/go-oidc/")) continue; + if (rel.startsWith("internal/gopsutil/")) continue; + readmes.push(rel); + } + + // Deterministic order. + readmes.sort((a, b) => a.localeCompare(b)); + return readmes; +} + +async function ensureHardLink(srcAbs: string, dstAbs: string) { + await mkdir(path.dirname(dstAbs), { recursive: true }); + await rm(dstAbs, { force: true }); + // Prefer sync for better error surfaces in Bun on some platforms.
+ linkSync(srcAbs, dstAbs); +} + +async function syncImplDocs( + repoRootAbs: string, + wikiRootAbs: string +): Promise<ImplDoc[]> { + const implDirAbs = path.join(wikiRootAbs, "src", "impl"); + await mkdir(implDirAbs, { recursive: true }); + + const readmes = await listRepoReadmes(repoRootAbs); + const docs: ImplDoc[] = []; + const expectedFileNames = new Set<string>(); + + for (const readmeRel of readmes) { + const pkgPath = path.posix.dirname(readmeRel); + if (!pkgPath || pkgPath === ".") continue; + + const docStem = sanitizeFileStemFromPkgPath(pkgPath); + if (!docStem) continue; + const docFileName = `${docStem}.md`; + + const srcPathAbs = path.join(repoRootAbs, readmeRel); + const dstPathAbs = path.join(implDirAbs, docFileName); + + await ensureHardLink(srcPathAbs, dstPathAbs); + + docs.push({ pkgPath, docFileName, srcPathAbs, dstPathAbs }); + expectedFileNames.add(docFileName); + } + + // Clean orphaned impl docs. + const existing = await readdir(implDirAbs, { withFileTypes: true }); + for (const ent of existing) { + if (!ent.isFile()) continue; + if (!ent.name.endsWith(".md")) continue; + if (expectedFileNames.has(ent.name)) continue; + await rm(path.join(implDirAbs, ent.name), { force: true }); + } + + // Deterministic for sidebar. + docs.sort((a, b) => a.pkgPath.localeCompare(b.pkgPath)); + return docs; +} + +function renderSidebarItems(docs: ImplDoc[], indent: string) { + // link: '/impl/<docFileName>.md' because VitePress `srcDir = "src"`.
+ if (docs.length === 0) return ""; + return ( + docs + .map((d) => { + const text = escapeSingleQuotedTs(d.pkgPath); + const link = escapeSingleQuotedTs(`/impl/${d.docFileName}`); + return `${indent}{ text: '${text}', link: '${link}' },`; + }) + .join("\n") + "\n" + ); +} + +async function updateVitepressSidebar(wikiRootAbs: string, docs: ImplDoc[]) { + const configPathAbs = path.join(wikiRootAbs, ".vitepress", "config.mts"); + if (!(await Bun.file(configPathAbs).exists())) { + throw new Error(`vitepress config not found: ${configPathAbs}`); + } + + const original = await readFile(configPathAbs, "utf8"); + + // Replace between markers with generated items. + // We keep indentation based on the marker line. + const markerRe = new RegExp( + `(^[\\t ]*)${escapeRegex(START_MARKER)}[\\s\\S]*?\\n\\1${escapeRegex( + END_MARKER + )}`, + "m" + ); + + const m = original.match(markerRe); + if (!m) { + throw new Error( + `sidebar markers not found in ${configPathAbs}. Expected lines: ${START_MARKER} ... ${END_MARKER}` + ); + } + const indent = m[1] ?? ""; + const generated = `${indent}${START_MARKER}\n${renderSidebarItems( + docs, + indent + )}${indent}${END_MARKER}`; + + const updated = original.replace(markerRe, generated); + if (updated !== original) { + await writeFile(configPathAbs, updated); + } +} + +async function main() { + // This script lives in `scripts/update-wiki/`, so repo root is two levels up. + const repoRootAbs = path.resolve(import.meta.dir, "../.."); + + // Required by task, but allow overriding via env for convenience. + const wikiRootAbs = Bun.env.DOCS_DIR + ? 
path.resolve(repoRootAbs, Bun.env.DOCS_DIR) + : path.resolve(repoRootAbs, "..", "godoxy-webui", "wiki"); + + const docs = await syncImplDocs(repoRootAbs, wikiRootAbs); + await updateVitepressSidebar(wikiRootAbs, docs); +} + +await main(); diff --git a/scripts/update-wiki/package.json b/scripts/update-wiki/package.json new file mode 100644 index 00000000..b5aec0d0 --- /dev/null +++ b/scripts/update-wiki/package.json @@ -0,0 +1,10 @@ +{ + "name": "update-wiki", + "private": true, + "devDependencies": { + "@types/bun": "latest" + }, + "peerDependencies": { + "typescript": "^5" + } +} diff --git a/scripts/update-wiki/tsconfig.json b/scripts/update-wiki/tsconfig.json new file mode 100644 index 00000000..bfa0fead --- /dev/null +++ b/scripts/update-wiki/tsconfig.json @@ -0,0 +1,29 @@ +{ + "compilerOptions": { + // Environment setup & latest features + "lib": ["ESNext"], + "target": "ESNext", + "module": "Preserve", + "moduleDetection": "force", + "jsx": "react-jsx", + "allowJs": true, + + // Bundler mode + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "verbatimModuleSyntax": true, + "noEmit": true, + + // Best practices + "strict": true, + "skipLibCheck": true, + "noFallthroughCasesInSwitch": true, + "noUncheckedIndexedAccess": true, + "noImplicitOverride": true, + + // Some stricter flags (disabled by default) + "noUnusedLocals": false, + "noUnusedParameters": false, + "noPropertyAccessFromIndexSignature": false + } +} From 86f35878fb51ae1bb3eac1572e16d9f0a5c30636 Mon Sep 17 00:00:00 2001 From: yusing Date: Thu, 8 Jan 2026 18:18:17 +0800 Subject: [PATCH 22/51] feat(docs): add health check and monitor packages README; mermaid styling fix --- internal/autocert/README.md | 34 ++--- internal/health/check/README.md | 198 ++++++++++++++++++++++++++++++ internal/health/monitor/README.md | 33 +++++ 3 files changed, 248 insertions(+), 17 deletions(-) create mode 100644 internal/health/check/README.md create mode 100644 internal/health/monitor/README.md 
diff --git a/internal/autocert/README.md b/internal/autocert/README.md index 1b3f4a22..bc19feb3 100644 --- a/internal/autocert/README.md +++ b/internal/autocert/README.md @@ -71,10 +71,10 @@ flowchart TD T --> V[Update SNI Matcher] V --> G - style E fill:#90EE90 - style I fill:#FFD700 - style N fill:#90EE90 - style U fill:#FFA07A + style E fill:#22553F,color:#fff + style I fill:#8B8000,color:#fff + style N fill:#22553F,color:#fff + style U fill:#84261A,color:#fff ``` ## SNI Matching Flow @@ -97,9 +97,9 @@ flowchart LR F -->|No| G[Return default cert] end - style C fill:#90EE90 - style E fill:#87CEEB - style F fill:#FFD700 + style C fill:#27632A,color:#fff + style E fill:#18597A,color:#fff + style F fill:#836C03,color:#fff ``` ### Suffix Tree Structure @@ -280,7 +280,7 @@ autocert: email: admin@example.com domains: - example.com - - "*.example.com" + - '*.example.com' options: CF_API_TOKEN: your-api-token CF_ZONE_API_TOKEN: your-zone-token @@ -334,13 +334,13 @@ autocert: email: admin@example.com domains: - example.com - - "*.example.com" + - '*.example.com' cert_path: certs/example.com.crt key_path: certs/example.com.key extra: - domains: - api.example.com - - "*.api.example.com" + - '*.api.example.com' cert_path: certs/api.example.com.crt key_path: certs/api.example.com.key provider: cloudflare @@ -358,8 +358,8 @@ flowchart TD C --> D[Build SNI Matcher] D --> E[Register in SNI Tree] - style B fill:#87CEEB - style C fill:#FFD700 + style B fill:#1a2639,color:#fff + style C fill:#423300,color:#fff ``` ## Renewal Scheduling @@ -406,10 +406,10 @@ flowchart TD N --> D - style F fill:#FFD700 - style J fill:#FFD700 - style K fill:#90EE90 - style M fill:#FFA07A + style F fill:#423300,color:#fff + style J fill:#423300,color:#fff + style K fill:#174014,color:#fff + style M fill:#432829,color:#fff ``` **Notifications:** Renewal success/failure triggers system notifications with provider name. 
@@ -530,7 +530,7 @@ autocert: email: admin@example.com domains: - example.com - - "*.example.com" + - '*.example.com' options: CF_API_TOKEN: ${CF_API_TOKEN} resolvers: diff --git a/internal/health/check/README.md b/internal/health/check/README.md new file mode 100644 index 00000000..21a7d778 --- /dev/null +++ b/internal/health/check/README.md @@ -0,0 +1,198 @@ +# Health Check + +This package provides low-level health check implementations for different protocols and services in GoDoxy. + +## Health Check Types + +### Docker Health Check + +Checks the health status of Docker containers using the Docker API. + +**Flow:** + +```mermaid +flowchart TD + A[Docker Health Check] --> B{Docker Failures > Threshold?} + B -->|yes| C[Return Error: Too Many Failures] + B -->|no| D[Container Inspect API Call] + D --> E{Inspect Successful?} + E -->|no| F[Increment Failure Count] + E -->|yes| G[Parse Container State] + + G --> H{Container Status} + H -->|dead/exited/paused/restarting/removing| I[Unhealthy: Container State] + H -->|created| J[Unhealthy: Not Started] + H -->|running| K{Health Check Configured?} + + K -->|no| L[Return Error: No Health Check] + K -->|yes| M[Check Health Status] + M --> N{Health Status} + N -->|healthy| O[Healthy] + N -->|unhealthy| P[Unhealthy: Last Log Output] + + I --> Q[Reset Failure Count] + J --> Q + O --> Q + P --> Q +``` + +**Key Features:** + +- Intercepts Docker API responses to extract container state +- Tracks failure count with configurable threshold (3 failures) +- Supports containers with and without health check configurations +- Returns detailed error information from Docker health check logs + +### HTTP Health Check + +Performs HTTP/HTTPS health checks using fasthttp for optimal performance. 
+ +**Flow:** + +```mermaid +flowchart TD + A[HTTP Health Check] --> B[Create FastHTTP Request] + B --> C[Set Headers and Method] + C --> D[Execute Request with Timeout] + D --> E{Request Successful?} + + E -->|no| F{Error Type} + F -->|TLS Error| G[Healthy: TLS Error Ignored] + F -->|Other Error| H[Unhealthy: Error Details] + + E -->|yes| I{Status Code} + I -->|5xx| J[Unhealthy: Server Error] + I -->|Other| K[Healthy] + + G --> L[Return Result with Latency] + H --> L + J --> L + K --> L +``` + +**Key Features:** + +- Uses fasthttp for high-performance HTTP requests +- Supports both GET and HEAD methods +- Configurable timeout and path +- Handles TLS certificate verification errors gracefully +- Returns latency measurements + +### H2C Health Check + +Performs HTTP/2 cleartext (h2c) health checks for services that support HTTP/2 without TLS. + +**Flow:** + +```mermaid +flowchart TD + A[H2C Health Check] --> B[Create HTTP/2 Transport] + B --> C[Set AllowHTTP: true] + C --> D[Create HTTP Request] + D --> E[Set Headers and Method] + E --> F[Execute Request with Timeout] + F --> G{Request Successful?} + + G -->|no| H[Unhealthy: Error Details] + G -->|yes| I[Check Status Code] + I --> J{Status Code} + J -->|5xx| K[Unhealthy: Server Error] + J -->|Other| L[Healthy] + + H --> M[Return Result with Latency] + K --> M + L --> M +``` + +**Key Features:** + +- Uses HTTP/2 transport with cleartext support +- Supports both GET and HEAD methods +- Configurable timeout and path +- Returns latency measurements + +### FileServer Health Check + +Checks if a file server root directory exists and is accessible. 
+ +**Flow:** + +```mermaid +flowchart TD + A[FileServer Health Check] --> B[Start Timer] + B --> C[Stat Directory Path] + C --> D{Directory Exists?} + + D -->|no| E[Unhealthy: Path Not Found] + D -->|yes| F[Healthy: Directory Accessible] + D -->|error| G[Return Error] + + E --> H[Return Result with Latency] + F --> H + G --> I[Return Error] +``` + +**Key Features:** + +- Simple directory existence check +- Measures latency of filesystem operation +- Distinguishes between "not found" and other errors +- Returns detailed error information + +### Stream Health Check + +Checks stream endpoint connectivity by attempting to establish a network connection. + +**Flow:** + +```mermaid +flowchart TD + A[Stream Health Check] --> B[Create Dialer] + B --> C[Set Timeout and Fallback Delay] + C --> D[Start Timer] + D --> E[Dial Network Connection] + E --> F{Connection Successful?} + + F -->|no| G{Error Type} + G -->|Connection Errors| H[Unhealthy: Connection Failed] + G -->|Other Error| I[Return Error] + + F -->|yes| J[Close Connection] + J --> K[Healthy: Connection Established] + + H --> L[Return Result with Latency] + K --> L +``` + +**Key Features:** + +- Generic network connection check +- Supports any stream protocol (TCP, UDP, etc.) 
+- Handles common connection errors gracefully +- Measures connection establishment latency +- Automatically closes connections + +## Common Features + +### Error Handling + +All health checks implement consistent error handling: + +- **Temporary Errors**: Network timeouts, connection failures +- **Permanent Errors**: Invalid configurations, missing resources +- **Graceful Degradation**: Returns health status even when errors occur + +### Performance Monitoring + +- **Latency Measurement**: All checks measure execution time +- **Timeout Support**: Configurable timeouts prevent hanging +- **Resource Cleanup**: Proper cleanup of connections and resources + +### Integration + +These health checks are used by the monitor package to implement route-specific health monitoring: + +- HTTP/HTTPS routes use HTTP health checks +- File server routes use FileServer health checks +- Stream routes use Stream health checks +- Docker containers use Docker health checks with fallbacks diff --git a/internal/health/monitor/README.md b/internal/health/monitor/README.md new file mode 100644 index 00000000..ccd5dc26 --- /dev/null +++ b/internal/health/monitor/README.md @@ -0,0 +1,33 @@ +# Health Monitor + +This package provides health monitoring functionality for different types of routes in GoDoxy. 
+ +## Health Check Flow + +```mermaid +flowchart TD + A[NewMonitor route] --> B{IsAgent route} + B -->|true| C[NewAgentProxiedMonitor] + B -->|false| D{IsDocker route} + D -->|true| E[NewDockerHealthMonitor] + D -->|false| F[Route Type Switch] + + F --> G[HTTP Monitor] + F --> H[FileServer Monitor] + F --> I[Stream Monitor] + + E --> J[Selected Monitor] + + C --> K[Agent Health Check] + G --> L{Scheme h2c?} + L -->|true| M[H2C Health Check] + L -->|false| N[HTTP Health Check] + H --> O[FileServer Health Check] + I --> P[Stream Health Check] + + K --> Q{IsDocker route} + Q -->|true| R[NewDockerHealthMonitor with Agent as Fallback] + Q -->|false| K + + R --> K +``` From 13441286d1083dbc8d9d217c8f085f0f0d9863e6 Mon Sep 17 00:00:00 2001 From: yusing Date: Thu, 8 Jan 2026 20:31:44 +0800 Subject: [PATCH 23/51] docs(idlewatcher): update README to include loading page and SSE endpoint details - Added information about the loading page (HTML + JS + CSS) and the SSE endpoint for wake events. - Clarified the health monitor implementation and readiness tracking in the architecture overview. - Correct state machine syntax. --- internal/idlewatcher/README.md | 131 +++++++++++++++++++-------------- 1 file changed, 77 insertions(+), 54 deletions(-) diff --git a/internal/idlewatcher/README.md b/internal/idlewatcher/README.md index 3af170be..785d3295 100644 --- a/internal/idlewatcher/README.md +++ b/internal/idlewatcher/README.md @@ -2,6 +2,8 @@ Idlewatcher manages container lifecycle based on idle timeout. When a container is idle for a configured duration, it can be automatically stopped, paused, or killed. When a request comes in, the container is woken up automatically. +Idlewatcher also serves a small loading page (HTML + JS + CSS) and an SSE endpoint under [`internal/idlewatcher/types/paths.go`](internal/idlewatcher/types/paths.go:1) (prefixed with `/$godoxy/`) to provide wake events to browsers. 
+ ## Architecture Overview ```mermaid @@ -36,14 +38,13 @@ graph TB ``` idlewatcher/ -├── cmd # Command execution utilities ├── debug.go # Debug utilities for watcher inspection ├── errors.go # Error types and conversion ├── events.go # Wake event types and broadcasting ├── handle_http.go # HTTP request handling and loading page -├── handle_http_debug.go # Debug HTTP handler (dev only) +├── handle_http_debug.go # Debug HTTP handler (!production builds) ├── handle_stream.go # Stream connection handling -├── health.go # Health monitoring interface +├── health.go # Health monitor implementation + readiness tracking ├── loading_page.go # Loading page HTML/CSS/JS templates ├── state.go # Container state management ├── watcher.go # Core Watcher implementation @@ -51,7 +52,10 @@ idlewatcher/ │ ├── docker.go # Docker container management │ └── proxmox.go # Proxmox LXC management ├── types/ -│ └── provider.go # Provider interface definition +│ ├── container_status.go # ContainerStatus enum +│ ├── paths.go # Loading page + SSE paths +│ ├── provider.go # Provider interface definition +│ └── waker.go # Waker interface (http + stream + health) └── html/ ├── loading_page.html # Loading page template ├── style.css # Loading page styles @@ -76,6 +80,9 @@ classDiagram -healthTicker: *time.Ticker -state: synk.Value~*containerState~ -provider: synk.Value~Provider~ + -readyNotifyCh: chan struct{} + -eventChs: *xsync.Map~chan *WakeEvent, struct{}~ + -eventHistory: []WakeEvent -dependsOn: []*dependency } @@ -96,6 +103,11 @@ classDiagram Watcher --> dependency : depends on ``` +Package-level helpers: + +- `watcherMap` is a global registry of watchers keyed by [`types.IdlewatcherConfig.Key()`](internal/types/idlewatcher.go:60), guarded by `watcherMapMu`. +- `singleFlight` is a global `singleflight.Group` keyed by container name to prevent duplicate wake calls. 
+ ### Provider Interface Abstraction for different container backends: @@ -135,16 +147,26 @@ classDiagram ```mermaid stateDiagram-v2 - [*] --> Napping: Container stopped/paused - Napping --> Waking: Wake request - Waking --> Running: Container started - Running --> Starting: Container is running but not healthy - Starting --> Ready: Health check passes - Ready --> Napping: Idle timeout - Ready --> Error check fails: Health - Error --> Waking: Retry wake + [*] --> Napping: status=stopped|paused + + Napping --> Starting: provider start/unpause event + Starting --> Ready: health check passes + Starting --> Error: health check error / startup timeout + + Ready --> Napping: idle timeout (pause/stop/kill) + Ready --> Error: health check error + + Error --> Napping: provider stop/pause event + Error --> Starting: provider start/unpause event ``` +Implementation notes: + +- `Starting` is represented by `containerState{status: running, ready: false, startedAt: non-zero}`. +- `Ready` is represented by `containerState{status: running, ready: true}`. +- `Error` is represented by `containerState{status: error, err: non-nil}`. +- State is updated primarily from provider events in [`(*Watcher).watchUntilDestroy()`](internal/idlewatcher/watcher.go:553) and health checks in [`(*Watcher).checkUpdateState()`](internal/idlewatcher/health.go:104). 
+ ## Lifecycle Flow ### Wake Flow (HTTP) @@ -154,34 +176,26 @@ sequenceDiagram participant C as Client participant W as Watcher participant P as Provider - participant H as HealthChecker - participant SSE as SSE Events + participant SSE as SSE (/\$godoxy/wake-events) C->>W: HTTP Request W->>W: resetIdleTimer() + Note over W: Handles /favicon.ico and /\$godoxy/* assets first + alt Container already ready - W->>W: return true (proceed) + W->>C: Reverse-proxy upstream (same request) else - alt No loading page configured - W->>P: ContainerStart() - W->>H: Wait for healthy - H-->>W: Healthy - W->>C: Continue request - else Loading page enabled - W->>P: ContainerStart() - W->>SSE: Send WakeEventStarting - W->>C: Serve loading page - loop Health checks - H->>H: Check health - H-->>W: Not healthy yet - W->>SSE: Send progress - end - H-->>W: Healthy - W->>SSE: Send WakeEventReady - C->>W: SSE connection - W->>SSE: Events streamed - C->>W: Poll/retry request - W->>W: return true (proceed) + W->>W: Wake() (singleflight + deps) + + alt Non-HTML request OR NoLoadingPage=true + W->>C: 100 Continue + W->>W: waitForReady() (readyNotifyCh) + W->>C: Reverse-proxy upstream (same request) + else HTML + loading page + W->>C: Serve loading page (HTML) + C->>SSE: Connect (EventSource) + Note over SSE: Streams history + live wake events + C->>W: Retry original request when WakeEventReady end end ``` @@ -192,8 +206,6 @@ sequenceDiagram sequenceDiagram participant C as Client participant W as Watcher - participant P as Provider - participant H as HealthChecker C->>W: Connect to stream W->>W: preDial hook @@ -201,10 +213,9 @@ sequenceDiagram alt Container ready W->>W: Pass through else - W->>P: ContainerStart() - W->>W: waitStarted() - W->>H: Wait for healthy - H-->>W: Healthy + W->>W: Wake() (singleflight + deps) + W->>W: waitStarted() (wait for route to be started) + W->>W: waitForReady() (readyNotifyCh) W->>C: Stream connected end ``` @@ -293,17 +304,31 @@ classDiagram WakeEvent --> 
WakeEventType ``` +Notes: + +- The SSE endpoint is [`idlewatcher.WakeEventsPath`](internal/idlewatcher/types/paths.go:3). +- Each SSE subscriber gets a dedicated buffered channel; the watcher also keeps an in-memory `eventHistory` that is sent to new subscribers first. +- `eventHistory` is cleared when the container transitions to napping (stop/pause). + ## State Machine ```mermaid stateDiagram-v2 + Napping --> Starting: provider start/unpause event + Starting --> Ready: Health check passes + Starting --> Error: Health check fails / startup timeout + Error --> Napping: provider stop/pause event + Error --> Starting: provider start/unpause event + Ready --> Napping: Idle timeout + Ready --> Napping: Manual stop + note right of Napping Container is stopped or paused Idle timer stopped end note - note right of Waking - Container is starting + note right of Starting + Container is running but not ready Health checking active Events broadcasted end note @@ -312,13 +337,6 @@ stateDiagram-v2 Container healthy Idle timer running end note - - Napping --> Waking: Wake() - Waking --> Ready: Health check passes - Waking --> Error: Health check fails - Error --> Waking: Retry - Ready --> Napping: Idle timeout - Ready --> Napping: Manual stop ``` ## Key Files @@ -332,11 +350,11 @@ stateDiagram-v2 | `provider/proxmox.go` | Proxmox LXC container operations | | `state.go` | Container state transitions | | `events.go` | Event broadcasting via SSE | -| `health.go` | Health monitor interface implementation | +| `health.go` | Health monitor implementation + readiness tracking | ## Configuration -See `types.IdlewatcherConfig` for configuration options: +See [`types.IdlewatcherConfig`](internal/types/idlewatcher.go:27) for configuration options: - `IdleTimeout`: Duration before container is put to sleep - `StopMethod`: pause, stop, or kill @@ -344,12 +362,17 @@ See `types.IdlewatcherConfig` for configuration options: - `StopTimeout`: Timeout for stop operation - `WakeTimeout`: Timeout 
for wake operation - `DependsOn`: List of dependent containers -- `StartEndpoint`: Optional endpoint restriction for wake requests +- `StartEndpoint`: Optional HTTP path restriction for wake requests - `NoLoadingPage`: Skip loading page, wait directly +Provider config (exactly one must be set): + +- `Docker`: container id/name + docker connection info +- `Proxmox`: `node` + `vmid` + ## Thread Safety - Uses `synk.Value` for atomic state updates - Uses `xsync.Map` for SSE subscriber management -- Uses `sync.RWMutex` for watcher map access +- Uses `sync.RWMutex` for watcher map (`watcherMapMu`) and SSE event history (`eventHistoryMu`) - Uses `singleflight.Group` to prevent duplicate wake calls From e9d7edef12b91fc73357e01e7c94df0d1e724025 Mon Sep 17 00:00:00 2001 From: yusing Date: Thu, 8 Jan 2026 23:39:19 +0800 Subject: [PATCH 24/51] docs: add per package README for implementation details (AI generated with human review) --- agent/cmd/README.md | 52 ++ agent/pkg/agent/README.md | 108 +++ agent/pkg/agentproxy/README.md | 122 ++++ agent/pkg/certs/README.md | 131 ++++ agent/pkg/env/README.md | 52 ++ agent/pkg/handler/README.md | 127 ++++ cmd/README.md | 73 +++ internal/acl/README.md | 282 ++++++++ internal/agentpool/README.md | 281 ++++++++ internal/api/v1/README.md | 197 ++++++ .../api/v1/metrics/{upime.go => uptime.go} | 0 internal/auth/README.md | 349 ++++++++++ internal/autocert/README.md | 614 ++++++------------ internal/config/README.md | 316 +++++++++ internal/config/query/README.md | 226 +++++++ internal/dnsproviders/README.md | 257 ++++++++ internal/docker/README.md | 433 ++++++++++++ internal/entrypoint/README.md | 308 +++++++++ internal/health/check/README.md | 353 +++++++--- internal/health/monitor/README.md | 334 +++++++++- internal/homepage/README.md | 358 ++++++++++ .../integrations/qbittorrent/README.md | 227 +++++++ internal/homepage/widgets/README.md | 188 ++++++ internal/idlewatcher/README.md | 569 +++++++--------- 
internal/idlewatcher/provider/README.md | 219 +++++++ internal/jsonstore/README.md | 364 +++++++++++ internal/logging/README.md | 286 ++------ internal/logging/accesslog/README.md | 493 ++++++++++++++ internal/logging/memlogger/README.md | 330 ++++++++++ internal/maxmind/README.md | 337 ++++++++++ internal/metrics/README.md | 333 +++------- internal/metrics/period/README.md | 470 ++++++++++++++ internal/metrics/systeminfo/README.md | 439 +++++++++++++ internal/metrics/uptime/README.md | 402 ++++++++++++ internal/net/README.md | 144 ++++ internal/net/gphttp/README.md | 146 +++++ internal/net/gphttp/loadbalancer/README.md | 304 +++++++++ internal/net/gphttp/middleware/README.md | 336 ++++++++++ .../net/gphttp/middleware/captcha/README.md | 264 ++++++++ .../net/gphttp/middleware/errorpage/README.md | 301 +++++++++ internal/notif/README.md | 332 ++++++++++ internal/proxmox/README.md | 322 +++++++++ internal/route/README.md | 325 +++++++++ internal/route/provider/README.md | 318 +++++++++ internal/route/routes/README.md | 269 ++++++++ internal/route/rules/README.md | 360 ++++++++++ internal/route/rules/presets/README.md | 202 ++++++ internal/route/stream/README.md | 306 +++++++++ internal/serialization/README.md | 388 +++++------ internal/watcher/README.md | 332 ++++++++++ internal/watcher/events/README.md | 444 +++++++++++++ socket-proxy/cmd/README.md | 54 ++ socket-proxy/pkg/README.md | 100 +++ socket-proxy/pkg/reverseproxy/README.md | 73 +++ 54 files changed, 13431 insertions(+), 1519 deletions(-) create mode 100644 agent/cmd/README.md create mode 100644 agent/pkg/agent/README.md create mode 100644 agent/pkg/agentproxy/README.md create mode 100644 agent/pkg/certs/README.md create mode 100644 agent/pkg/env/README.md create mode 100644 agent/pkg/handler/README.md create mode 100644 cmd/README.md create mode 100644 internal/acl/README.md create mode 100644 internal/agentpool/README.md create mode 100644 internal/api/v1/README.md rename internal/api/v1/metrics/{upime.go 
=> uptime.go} (100%) create mode 100644 internal/auth/README.md create mode 100644 internal/config/README.md create mode 100644 internal/config/query/README.md create mode 100644 internal/dnsproviders/README.md create mode 100644 internal/docker/README.md create mode 100644 internal/entrypoint/README.md create mode 100644 internal/homepage/README.md create mode 100644 internal/homepage/integrations/qbittorrent/README.md create mode 100644 internal/homepage/widgets/README.md create mode 100644 internal/idlewatcher/provider/README.md create mode 100644 internal/jsonstore/README.md create mode 100644 internal/logging/accesslog/README.md create mode 100644 internal/logging/memlogger/README.md create mode 100644 internal/maxmind/README.md create mode 100644 internal/metrics/period/README.md create mode 100644 internal/metrics/systeminfo/README.md create mode 100644 internal/metrics/uptime/README.md create mode 100644 internal/net/README.md create mode 100644 internal/net/gphttp/README.md create mode 100644 internal/net/gphttp/loadbalancer/README.md create mode 100644 internal/net/gphttp/middleware/README.md create mode 100644 internal/net/gphttp/middleware/captcha/README.md create mode 100644 internal/net/gphttp/middleware/errorpage/README.md create mode 100644 internal/notif/README.md create mode 100644 internal/proxmox/README.md create mode 100644 internal/route/README.md create mode 100644 internal/route/provider/README.md create mode 100644 internal/route/routes/README.md create mode 100644 internal/route/rules/README.md create mode 100644 internal/route/rules/presets/README.md create mode 100644 internal/route/stream/README.md create mode 100644 internal/watcher/README.md create mode 100644 internal/watcher/events/README.md create mode 100644 socket-proxy/cmd/README.md create mode 100644 socket-proxy/pkg/README.md create mode 100644 socket-proxy/pkg/reverseproxy/README.md diff --git a/agent/cmd/README.md b/agent/cmd/README.md new file mode 100644 index 
00000000..de459b38 --- /dev/null +++ b/agent/cmd/README.md @@ -0,0 +1,52 @@ +# agent/cmd + +The main entry point for the GoDoxy Agent, a secure monitoring and proxy agent that runs alongside Docker containers. + +## Overview + +This package contains the `main.go` entry point for the GoDoxy Agent. The agent is a TLS-enabled server that provides: + +- Secure Docker socket proxying with client certificate authentication +- HTTP proxy capabilities for container traffic +- System metrics collection and monitoring +- Health check endpoints + +## Architecture + +```mermaid +graph TD + A[main] --> B[Logger Init] + A --> C[Load CA Certificate] + A --> D[Load Server Certificate] + A --> E[Log Version Info] + A --> F[Start Agent Server] + A --> G[Start Socket Proxy] + A --> H[Start System Info Poller] + A --> I[Wait Exit] + + F --> F1[TLS with mTLS] + F --> F2[Agent Handler] + G --> G1[Docker Socket Proxy] +``` + +## Main Function Flow + +1. **Logger Setup**: Configures zerolog with console output +1. **Certificate Loading**: Loads CA and server certificates for TLS/mTLS +1. **Version Logging**: Logs agent version and configuration +1. **Agent Server**: Starts the main HTTPS server with agent handlers +1. **Socket Proxy**: Starts Docker socket proxy if configured +1. **System Monitoring**: Starts system info polling +1. **Graceful Shutdown**: Waits for exit signal (3 second timeout) + +## Configuration + +See `agent/pkg/env/README.md` for configuration options. 
+ +## Dependencies + +- `agent/pkg/agent` - Core agent types and constants +- `agent/pkg/env` - Environment configuration +- `agent/pkg/server` - Server implementation +- `socketproxy/pkg` - Docker socket proxy +- `internal/metrics/systeminfo` - System metrics diff --git a/agent/pkg/agent/README.md b/agent/pkg/agent/README.md new file mode 100644 index 00000000..8d9800e4 --- /dev/null +++ b/agent/pkg/agent/README.md @@ -0,0 +1,108 @@ +# Agent Package + +The `agent` package provides the client-side implementation for interacting with GoDoxy agents. It handles agent configuration, secure communication via TLS, and provides utilities for agent deployment and management. + +## Architecture Overview + +```mermaid +graph TD + subgraph GoDoxy Server + AP[Agent Pool] --> AC[AgentConfig] + end + + subgraph Agent Communication + AC -->|HTTPS| AI[Agent Info API] + AC -->|TLS| ST[Stream Tunneling] + end + + subgraph Deployment + G[Generator] --> DC[Docker Compose] + G --> IS[Install Script] + end + + subgraph Security + NA[NewAgent] --> Certs[Certificates] + end +``` + +## File Structure + +| File | Purpose | +| -------------------------------------------------------- | --------------------------------------------------------- | +| [`config.go`](agent/pkg/agent/config.go) | Core configuration, initialization, and API client logic. | +| [`new_agent.go`](agent/pkg/agent/new_agent.go) | Agent creation and certificate generation logic. | +| [`docker_compose.go`](agent/pkg/agent/docker_compose.go) | Generator for agent Docker Compose configurations. | +| [`bare_metal.go`](agent/pkg/agent/bare_metal.go) | Generator for bare metal installation scripts. | +| [`env.go`](agent/pkg/agent/env.go) | Environment configuration types and constants. | +| [`common/`](agent/pkg/agent/common) | Shared constants and utilities for agents. | + +## Core Types + +### [`AgentConfig`](agent/pkg/agent/config.go:29) + +The primary struct used by the GoDoxy server to manage a connection to an agent. 
It stores the agent's address, metadata, and TLS configuration. + +### [`AgentInfo`](agent/pkg/agent/config.go:45) + +Contains basic metadata about the agent, including its version, name, and container runtime (Docker or Podman). + +### [`PEMPair`](agent/pkg/agent/new_agent.go:53) + +A utility struct for handling PEM-encoded certificate and key pairs, supporting encryption, decryption, and conversion to `tls.Certificate`. + +## Agent Creation and Certificate Management + +### Certificate Generation + +The [`NewAgent`](agent/pkg/agent/new_agent.go:147) function creates a complete certificate infrastructure for an agent: + +- **CA Certificate**: Self-signed root certificate with 1000-year validity. +- **Server Certificate**: For the agent's HTTPS server, signed by the CA. +- **Client Certificate**: For the GoDoxy server to authenticate with the agent. + +All certificates use ECDSA with P-256 curve and SHA-256 signatures. + +### Certificate Security + +- Certificates are encrypted using AES-GCM with a provided encryption key. +- The [`PEMPair`](agent/pkg/agent/new_agent.go:53) struct provides methods for encryption, decryption, and conversion to `tls.Certificate`. +- Base64 encoding is used for certificate storage and transmission. + +## Key Features + +### 1. Secure Communication + +All communication between the GoDoxy server and agents is secured using mutual TLS (mTLS). The [`AgentConfig`](agent/pkg/agent/config.go:29) handles the loading of CA and client certificates to establish secure connections. + +### 2. Agent Discovery and Initialization + +The [`Init`](agent/pkg/agent/config.go:231) and [`InitWithCerts`](agent/pkg/agent/config.go:110) methods allow the server to: + +- Fetch agent metadata (version, name, runtime). +- Verify compatibility between server and agent versions. +- Test support for TCP and UDP stream tunneling. + +### 3. 
Deployment Generators + +The package provides interfaces and implementations for generating deployment artifacts: + +- **Docker Compose**: Generates a `docker-compose.yml` for running the agent as a container via [`AgentComposeConfig.Generate()`](agent/pkg/agent/docker_compose.go:21). +- **Bare Metal**: Generates a shell script to install and run the agent as a systemd service via [`AgentEnvConfig.Generate()`](agent/pkg/agent/bare_metal.go:27). + +### 4. Fake Docker Host + +The package supports a "fake" Docker host scheme (`agent://`) to identify containers managed by an agent, allowing the GoDoxy server to route requests appropriately. See [`IsDockerHostAgent`](agent/pkg/agent/config.go:90) and [`GetAgentAddrFromDockerHost`](agent/pkg/agent/config.go:94). + +## Usage Example + +```go +cfg := &agent.AgentConfig{} +cfg.Parse("192.168.1.100:8081") + +ctx := context.Background() +if err := cfg.Init(ctx); err != nil { + log.Fatal(err) +} + +fmt.Printf("Connected to agent: %s (Version: %s)\n", cfg.Name, cfg.Version) +``` diff --git a/agent/pkg/agentproxy/README.md b/agent/pkg/agentproxy/README.md new file mode 100644 index 00000000..b95e54e8 --- /dev/null +++ b/agent/pkg/agentproxy/README.md @@ -0,0 +1,122 @@ +# agent/pkg/agentproxy + +Package for configuring HTTP proxy connections through the GoDoxy Agent using HTTP headers. + +## Overview + +This package provides types and functions for parsing and setting agent proxy configuration via HTTP headers. It supports both a modern base64-encoded JSON format and a legacy header-based format for backward compatibility. 
+ +## Architecture + +```mermaid +graph LR + A[HTTP Request] --> B[ConfigFromHeaders] + B --> C{Modern Format?} + C -->|Yes| D[Parse X-Proxy-Config Base64 JSON] + C -->|No| E[Parse Legacy Headers] + D --> F[Config] + E --> F + + F --> G[SetAgentProxyConfigHeaders] + G --> H[Modern Headers] + G --> I[Legacy Headers] +``` + +## Public Types + +### Config + +```go +type Config struct { + Scheme string // Proxy scheme (http or https) + Host string // Proxy host (hostname or hostname:port) + HTTPConfig // Extended HTTP configuration +} +``` + +The `HTTPConfig` embedded type (from `internal/route/types`) includes: + +- `NoTLSVerify` - Skip TLS certificate verification +- `ResponseHeaderTimeout` - Timeout for response headers +- `DisableCompression` - Disable gzip compression + +## Public Functions + +### ConfigFromHeaders + +```go +func ConfigFromHeaders(h http.Header) (Config, error) +``` + +Parses proxy configuration from HTTP request headers. Tries modern format first, falls back to legacy format if not present. + +### proxyConfigFromHeaders + +```go +func proxyConfigFromHeaders(h http.Header) (Config, error) +``` + +Parses the modern base64-encoded JSON format from `X-Proxy-Config` header. + +### proxyConfigFromHeadersLegacy + +```go +func proxyConfigFromHeadersLegacy(h http.Header) Config +``` + +Parses the legacy header format: + +- `X-Proxy-Host` - Proxy host +- `X-Proxy-Https` - Whether to use HTTPS +- `X-Proxy-Skip-Tls-Verify` - Skip TLS verification +- `X-Proxy-Response-Header-Timeout` - Response timeout in seconds + +### SetAgentProxyConfigHeaders + +```go +func (cfg *Config) SetAgentProxyConfigHeaders(h http.Header) +``` + +Sets headers for modern format with base64-encoded JSON config. + +### SetAgentProxyConfigHeadersLegacy + +```go +func (cfg *Config) SetAgentProxyConfigHeadersLegacy(h http.Header) +``` + +Sets headers for legacy format with individual header fields. 
+ +## Header Constants + +Modern headers: + +- `HeaderXProxyScheme` - Proxy scheme +- `HeaderXProxyHost` - Proxy host +- `HeaderXProxyConfig` - Base64-encoded JSON config + +Legacy headers (deprecated): + +- `HeaderXProxyHTTPS` +- `HeaderXProxySkipTLSVerify` +- `HeaderXProxyResponseHeaderTimeout` + +## Usage Example + +```go +// Reading configuration from incoming request headers +func handleRequest(w http.ResponseWriter, r *http.Request) { + cfg, err := agentproxy.ConfigFromHeaders(r.Header) + if err != nil { + http.Error(w, "Invalid proxy config", http.StatusBadRequest) + return + } + + // Use cfg.Scheme and cfg.Host to proxy the request + // ... +} +``` + +## Integration + +This package is used by `agent/pkg/handler/proxy_http.go` to configure reverse proxy connections based on request headers. diff --git a/agent/pkg/certs/README.md b/agent/pkg/certs/README.md new file mode 100644 index 00000000..058cd62c --- /dev/null +++ b/agent/pkg/certs/README.md @@ -0,0 +1,131 @@ +# agent/pkg/certs + +Certificate management package for creating and extracting certificate archives. + +## Overview + +This package provides utilities for packaging SSL certificates into ZIP archives and extracting them. It is used by the GoDoxy Agent to distribute certificates to clients in a convenient format. 
+ +## Architecture + +```mermaid +graph LR + A[Raw Certs] --> B[ZipCert] + B --> C[ZIP Archive] + C --> D[ca.pem] + C --> E[cert.pem] + C --> F[key.pem] + + G[ZIP Archive] --> H[ExtractCert] + H --> I[ca, crt, key] +``` + +## Public Functions + +### ZipCert + +```go +func ZipCert(ca, crt, key []byte) ([]byte, error) +``` + +Creates a ZIP archive containing three PEM files: + +- `ca.pem` - CA certificate +- `cert.pem` - Server/client certificate +- `key.pem` - Private key + +**Parameters:** + +- `ca` - CA certificate in PEM format +- `crt` - Certificate in PEM format +- `key` - Private key in PEM format + +**Returns:** + +- ZIP archive bytes +- Error if packing fails + +### ExtractCert + +```go +func ExtractCert(data []byte) (ca, crt, key []byte, err error) +``` + +Extracts certificates from a ZIP archive created by `ZipCert`. + +**Parameters:** + +- `data` - ZIP archive bytes + +**Returns:** + +- `ca` - CA certificate bytes +- `crt` - Certificate bytes +- `key` - Private key bytes +- Error if extraction fails + +### AgentCertsFilepath + +```go +func AgentCertsFilepath(host string) (filepathOut string, ok bool) +``` + +Generates the file path for storing agent certificates. + +**Parameters:** + +- `host` - Agent hostname + +**Returns:** + +- Full file path within `certs/` directory +- `false` if host is invalid (contains path separators or special characters) + +### isValidAgentHost + +```go +func isValidAgentHost(host string) bool +``` + +Validates that a host string is safe for use in file paths. + +## Constants + +```go +const AgentCertsBasePath = "certs" +``` + +Base directory for storing certificate archives. 
+ +```go +package main + +import ( + "os" + "github.com/yusing/godoxy/agent/pkg/certs" +) + +func main() { + // Read certificate files + caData, _ := os.ReadFile("ca.pem") + certData, _ := os.ReadFile("cert.pem") + keyData, _ := os.ReadFile("key.pem") + + // Create ZIP archive + zipData, err := certs.ZipCert(caData, certData, keyData) + if err != nil { + panic(err) + } + + // Save to file + os.WriteFile("agent-certs.zip", zipData, 0644) + + // Extract from archive + ca, crt, key, err := certs.ExtractCert(zipData) + // ... +} +``` + +## File Format + +The ZIP archive uses `zip.Store` compression (no compression) for fast creation and extraction. Each file is stored with its standard name (`ca.pem`, `cert.pem`, `key.pem`). diff --git a/agent/pkg/env/README.md b/agent/pkg/env/README.md new file mode 100644 index 00000000..be610be6 --- /dev/null +++ b/agent/pkg/env/README.md @@ -0,0 +1,52 @@ +# agent/pkg/env + +Environment configuration package for the GoDoxy Agent. + +## Overview + +This package manages environment variable parsing and provides a centralized location for all agent configuration options. It is automatically initialized on import. 
+ +## Variables + +| Variable | Type | Default | Description | +| -------------------------- | ---------------- | ---------------------- | --------------------------------------- | +| `DockerSocket` | string | `/var/run/docker.sock` | Path to Docker socket | +| `AgentName` | string | System hostname | Agent identifier | +| `AgentPort` | int | `8890` | Agent server port | +| `AgentSkipClientCertCheck` | bool | `false` | Skip mTLS certificate verification | +| `AgentCACert` | string | (empty) | Base64 Encoded CA certificate + key | +| `AgentSSLCert` | string | (empty) | Base64 Encoded server certificate + key | +| `Runtime` | ContainerRuntime | `docker` | Container runtime (docker or podman) | + +## ContainerRuntime Type + +```go +type ContainerRuntime string + +const ( + ContainerRuntimeDocker ContainerRuntime = "docker" + ContainerRuntimePodman ContainerRuntime = "podman" +) +``` + +## Public Functions + +### DefaultAgentName + +```go +func DefaultAgentName() string +``` + +Returns the system hostname as the default agent name. Falls back to `"agent"` if hostname cannot be determined. + +### Load + +```go +func Load() +``` + +Reloads all environment variables from the environment. Called automatically on package init, but can be called again to refresh configuration. + +## Validation + +The `Load()` function validates that `Runtime` is either `docker` or `podman`. An invalid runtime causes a fatal error. diff --git a/agent/pkg/handler/README.md b/agent/pkg/handler/README.md new file mode 100644 index 00000000..1b887f4d --- /dev/null +++ b/agent/pkg/handler/README.md @@ -0,0 +1,127 @@ +# agent/pkg/handler + +HTTP request handler package for the GoDoxy Agent. 
+ +## Overview + +This package provides the HTTP handler for the GoDoxy Agent server, including endpoints for: + +- Version information +- Agent name and runtime +- Health checks +- System metrics (via SSE) +- HTTP proxy routing +- Docker socket proxying + +## Architecture + +```mermaid +graph TD + A[HTTP Request] --> B[NewAgentHandler] + B --> C{ServeMux Router} + + C --> D[GET /version] + C --> E[GET /name] + C --> F[GET /runtime] + C --> G[GET /health] + C --> H[GET /system-info] + C --> I[GET /proxy/http/#123;path...#125;] + C --> J[ /#42; Docker Socket] + + H --> K[Gin Router] + K --> L[WebSocket Upgrade] + L --> M[SystemInfo Poller] +``` + +## Public Types + +### ServeMux + +```go +type ServeMux struct{ *http.ServeMux } +``` + +Wrapper around `http.ServeMux` with agent-specific endpoint helpers. + +**Methods:** + +- `HandleEndpoint(method, endpoint string, handler http.HandlerFunc)` - Registers handler with API base path +- `HandleFunc(endpoint string, handler http.HandlerFunc)` - Registers GET handler with API base path + +## Public Functions + +### NewAgentHandler + +```go +func NewAgentHandler() http.Handler +``` + +Creates and configures the HTTP handler for the agent server. 
Sets up: + +- Gin-based metrics handler with WebSocket support for SSE +- All standard agent endpoints +- HTTP proxy endpoint +- Docker socket proxy fallback + +## Endpoints + +| Endpoint | Method | Description | +| ----------------------- | -------- | ------------------------------------ | +| `/version` | GET | Returns agent version | +| `/name` | GET | Returns agent name | +| `/runtime` | GET | Returns container runtime | +| `/health` | GET | Health check with scheme query param | +| `/system-info` | GET | System metrics via SSE or WebSocket | +| `/proxy/http/{path...}` | GET/POST | HTTP proxy with config from headers | +| `/*` | \* | Docker socket proxy | + +## Sub-packages + +### proxy_http.go + +Handles HTTP proxy requests by reading configuration from request headers and proxying to the configured upstream. + +**Key Function:** + +- `ProxyHTTP(w, r)` - Proxies HTTP requests based on `X-Proxy-*` headers + +### check_health.go + +Handles health check requests for various schemes. + +**Key Function:** + +- `CheckHealth(w, r)` - Performs health checks with configurable scheme + +**Supported Schemes:** + +- `http`, `https` - HTTP health check +- `h2c` - HTTP/2 cleartext health check +- `tcp`, `udp`, `tcp4`, `udp4`, `tcp6`, `udp6` - TCP/UDP health check +- `fileserver` - File existence check + +## Usage Example + +```go +package main + +import ( + "net/http" + "github.com/yusing/godoxy/agent/pkg/handler" +) + +func main() { + mux := http.NewServeMux() + mux.Handle("/", handler.NewAgentHandler()) + + http.ListenAndServe(":8890", mux) +} +``` + +## WebSocket Support + +The handler includes a permissive WebSocket upgrader for internal use (no origin check). This enables real-time system metrics streaming via Server-Sent Events (SSE). + +## Docker Socket Integration + +All unmatched requests fall through to the Docker socket handler, allowing the agent to proxy Docker API calls when configured. 
diff --git a/cmd/README.md b/cmd/README.md new file mode 100644 index 00000000..60606cc0 --- /dev/null +++ b/cmd/README.md @@ -0,0 +1,73 @@ +# cmd + +Main entry point package for GoDoxy, a lightweight reverse proxy with WebUI for Docker containers. + +## Overview + +This package contains the `main.go` entry point that initializes and starts the GoDoxy server. It coordinates the initialization of all core components including configuration loading, API server, authentication, and monitoring services. + +## Architecture + +```mermaid +graph TD + A[main] --> B[Init Profiling] + A --> C[Init Logger] + A --> D[Parallel Init] + D --> D1[DNS Providers] + D --> D2[Icon Cache] + D --> D3[System Info Poller] + D --> D4[Middleware Compose Files] + A --> E[JWT Secret Setup] + A --> F[Create Directories] + A --> G[Load Config] + A --> H[Start Proxy Servers] + A --> I[Init Auth] + A --> J[Start API Server] + A --> K[Debug Server] + A --> L[Uptime Poller] + A --> M[Watch Changes] + A --> N[Wait Exit] +``` + +## Main Function Flow + +The `main()` function performs the following initialization steps: + +1. **Profiling Setup**: Initializes pprof endpoints for performance monitoring +1. **Logger Initialization**: Configures zerolog with memory logging +1. **Parallel Initialization**: Starts DNS providers, icon cache, system info poller, and middleware +1. **JWT Secret**: Ensures API JWT secret is set (generates random if not provided) +1. **Directory Preparation**: Creates required directories for logs, certificates, etc. +1. **Configuration Loading**: Loads YAML configuration and reports any errors +1. **Proxy Servers**: Starts HTTP/HTTPS proxy servers based on configuration +1. **Authentication**: Initializes authentication system with access control +1. **API Server**: Starts the REST API server with all configured routes +1. **Debug Server**: Starts the debug page server (development mode) +1. **Monitoring**: Starts uptime and system info polling +1. 
**Change Watcher**: Starts watching for Docker container and configuration changes +1. **Graceful Shutdown**: Waits for exit signal with configured timeout + +## Configuration + +The main configuration is loaded from `config/config.yml`. Required directories include: + +- `logs/` - Log files +- `config/` - Configuration directory +- `certs/` - SSL certificates +- `proxy/` - Proxy-related files + +## Environment Variables + +- `API_JWT_SECRET` - Secret key for JWT authentication (optional, auto-generated if not set) + +## Dependencies + +- `internal/api` - REST API handlers +- `internal/auth` - Authentication and ACL +- `internal/config` - Configuration management +- `internal/dnsproviders` - DNS provider integration +- `internal/homepage` - WebUI dashboard +- `internal/logging` - Logging infrastructure +- `internal/metrics` - System metrics collection +- `internal/route` - HTTP routing and middleware +- `github.com/yusing/goutils/task` - Task lifecycle management diff --git a/internal/acl/README.md b/internal/acl/README.md new file mode 100644 index 00000000..91426367 --- /dev/null +++ b/internal/acl/README.md @@ -0,0 +1,282 @@ +# ACL (Access Control List) + +Access control at the TCP connection level with IP/CIDR, timezone, and country-based filtering. + +## Overview + +The ACL package provides network-level access control by wrapping TCP listeners and validating incoming connections against configurable allow/deny rules. It integrates with MaxMind GeoIP for geographic-based filtering and supports access logging with notification batching. + +### Primary consumers + +- `internal/entrypoint` - Wraps the main TCP listener for connection filtering +- Operators - Configure rules via YAML configuration + +### Non-goals + +- HTTP request-level filtering (handled by middleware) +- Authentication or authorization (see `internal/auth`) +- VPN or tunnel integration + +### Stability + +Stable internal package. The public API is the `Config` struct and its methods. 
+ +## Public API + +### Exported types + +```go +type Config struct { + Default string // "allow" or "deny" (default: "allow") + AllowLocal *bool // Allow private/loopback IPs (default: true) + Allow Matchers // Allow rules + Deny Matchers // Deny rules + Log *accesslog.ACLLoggerConfig // Access logging configuration + + Notify struct { + To []string // Notification providers + Interval time.Duration // Notification frequency (default: 1m) + IncludeAllowed *bool // Include allowed in notifications (default: false) + } +} +``` + +```go +type Matcher struct { + match MatcherFunc +} +``` + +```go +type Matchers []Matcher +``` + +### Exported functions and methods + +```go +func (c *Config) Validate() gperr.Error +``` + +Validates configuration and sets defaults. Must be called before `Start`. + +```go +func (c *Config) Start(parent task.Parent) gperr.Error +``` + +Initializes the ACL, starts the logger and notification goroutines. + +```go +func (c *Config) IPAllowed(ip net.IP) bool +``` + +Returns true if the IP is allowed based on configured rules. Performs caching and GeoIP lookup if needed. + +```go +func (c *Config) WrapTCP(lis net.Listener) net.Listener +``` + +Wraps a `net.Listener` to filter connections by IP. + +```go +func (matcher *Matcher) Parse(s string) error +``` + +Parses a matcher string in the format `{type}:{value}`. Supported types: `ip`, `cidr`, `tz`, `country`. 
+ +## Architecture + +### Core components + +```mermaid +graph TD + A[TCP Listener] --> B[TCPListener Wrapper] + B --> C{IP Allowed?} + C -->|Yes| D[Accept Connection] + C -->|No| E[Close Connection] + + F[Config] --> G[Validate] + G --> H[Start] + H --> I[Matcher Evaluation] + I --> C + + J[MaxMind] -.-> K[IP Lookup] + K -.-> I + + L[Access Logger] -.-> M[Log & Notify] + M -.-> B +``` + +### Connection filtering flow + +```mermaid +sequenceDiagram + participant Client + participant TCPListener + participant Config + participant MaxMind + participant Logger + + Client->>TCPListener: Connection Request + TCPListener->>Config: IPAllowed(clientIP) + + alt Loopback IP + Config-->>TCPListener: true + else Private IP (allow_local) + Config-->>TCPListener: true + else Cached Result + Config-->>TCPListener: Cached Result + else Evaluate Allow Rules + Config->>Config: Check Allow list + alt Matches + Config->>Config: Cache true + Config-->>TCPListener: Allowed + else Evaluate Deny Rules + Config->>Config: Check Deny list + alt Matches + Config->>Config: Cache false + Config-->>TCPListener: Denied + else Default Action + Config->>MaxMind: Lookup GeoIP + MaxMind-->>Config: IPInfo + Config->>Config: Apply default rule + Config->>Config: Cache result + Config-->>TCPListener: Result + end + end + end + + alt Logging enabled + Config->>Logger: Log access attempt + end +``` + +### Matcher types + +| Type | Format | Example | +| -------- | ----------------- | --------------------- | +| IP | `ip:address` | `ip:192.168.1.1` | +| CIDR | `cidr:network` | `cidr:192.168.0.0/16` | +| TimeZone | `tz:timezone` | `tz:Asia/Shanghai` | +| Country | `country:ISOCode` | `country:GB` | + +## Configuration Surface + +### Config sources + +Configuration is loaded from `config/config.yml` under the `acl` key. 
+ +### Schema + +```yaml +acl: + default: "allow" # "allow" or "deny" + allow_local: true # Allow private/loopback IPs + log: + log_allowed: false # Log allowed connections + notify: + to: ["gotify"] # Notification providers + interval: "1m" # Notification interval + include_allowed: false # Include allowed in notifications +``` + +### Hot-reloading + +Configuration requires restart. The ACL does not support dynamic rule updates. + +## Dependency and Integration Map + +### Internal dependencies + +- `internal/maxmind` - IP geolocation lookup +- `internal/logging/accesslog` - Access logging +- `internal/notif` - Notifications +- `internal/task/task.go` - Lifetime management + +### Integration points + +```go +// Entrypoint uses ACL to wrap the TCP listener +aclListener := config.ACL.WrapTCP(listener) +http.Server.Serve(aclListener, entrypoint) +``` + +## Observability + +### Logs + +- `ACL started` - Configuration summary on start +- `log_notify_loop` - Access attempts (allowed/denied) + +Log levels: `Info` for startup, `Debug` for client closure. + +### Metrics + +No metrics are currently exposed. 
+ +## Security Considerations + +- Loopback and private IPs are always allowed unless explicitly denied +- Cache TTL is 1 minute to limit memory usage +- Notification channel has a buffer of 100 to prevent blocking +- Failed connections are immediately closed without response + +## Failure Modes and Recovery + +| Failure                           | Behavior                              | Recovery                                      | +| --------------------------------- | ------------------------------------- | --------------------------------------------- | +| Invalid matcher syntax            | Validation fails on startup           | Fix configuration syntax                      | +| MaxMind database unavailable      | GeoIP lookups return unknown location | Default action applies; cache hit still works | +| Notification provider unavailable | Notification dropped                  | Error logged, continues operation             | +| Cache full                        | No eviction, uses Go map              | No action needed                              | + +## Usage Examples + +### Basic configuration + +```go +aclConfig := &acl.Config{ + Default: "allow", + AllowLocal: ptr(true), + Allow: acl.Matchers{ + {match: matchIP(net.ParseIP("192.168.1.1"))}, + }, + Deny: acl.Matchers{ + {match: matchISOCode("CN")}, + }, +} +if err := aclConfig.Validate(); err != nil { + log.Fatal(err) +} +if err := aclConfig.Start(parent); err != nil { + log.Fatal(err) +} +``` + +### Wrapping a TCP listener + +```go +listener, err := net.Listen("tcp", ":443") +if err != nil { + log.Fatal(err) +} + +// Wrap with ACL +aclListener := aclConfig.WrapTCP(listener) + +// Use with HTTP server +server := &http.Server{} +server.Serve(aclListener) +``` + +### Creating custom matchers + +```go +matcher := &acl.Matcher{} +err := matcher.Parse("country:US") +if err != nil { + log.Fatal(err) +} + +// Use the matcher +allowed := matcher.match(ipInfo) +``` diff --git a/internal/agentpool/README.md b/internal/agentpool/README.md new file mode 100644 index 00000000..08b260a8 --- /dev/null +++ b/internal/agentpool/README.md @@ -0,0 +1,281 @@ +# Agent Pool + +Thread-safe pool for managing remote Docker agent connections. 
+ +## Overview + +The agentpool package provides a centralized pool for storing and retrieving remote agent configurations. It enables GoDoxy to connect to Docker hosts via agent connections instead of direct socket access, enabling secure remote container management. + +### Primary consumers + +- `internal/route/provider` - Creates agent-based route providers +- `internal/docker` - Manages agent-based Docker client connections +- Configuration loading during startup + +### Non-goals + +- Agent lifecycle management (handled by `agent/pkg/agent`) +- Agent health monitoring +- Agent authentication/authorization + +### Stability + +Stable internal package. The pool uses `xsync.Map` for lock-free concurrent access. + +## Public API + +### Exported types + +```go +type Agent struct { + *agent.AgentConfig + httpClient *http.Client + fasthttpHcClient *fasthttp.Client +} +``` + +### Exported functions + +```go +func Add(cfg *agent.AgentConfig) (added bool) +``` + +Adds an agent to the pool. Returns `true` if added, `false` if already exists. Uses `LoadOrCompute` to prevent duplicates. + +```go +func Has(cfg *agent.AgentConfig) bool +``` + +Checks if an agent exists in the pool. + +```go +func Remove(cfg *agent.AgentConfig) +``` + +Removes an agent from the pool. + +```go +func RemoveAll() +``` + +Removes all agents from the pool. Called during configuration reload. + +```go +func Get(agentAddrOrDockerHost string) (*Agent, bool) +``` + +Retrieves an agent by address or Docker host URL. Automatically detects if the input is an agent address or Docker host URL and resolves accordingly. + +```go +func GetAgent(name string) (*Agent, bool) +``` + +Retrieves an agent by name. O(n) iteration over pool contents. + +```go +func List() []*Agent +``` + +Returns all agents as a slice. Creates a new copy for thread safety. + +```go +func Iter() iter.Seq2[string, *Agent] +``` + +Returns an iterator over all agents. Uses `xsync.Map.Range`. 
+ +```go +func Num() int +``` + +Returns the number of agents in the pool. + +```go +func (agent *Agent) HTTPClient() *http.Client +``` + +Returns an HTTP client configured for the agent. + +## Architecture + +### Core components + +```mermaid +graph TD + A[Agent Config] --> B[Add to Pool] + B --> C[xsync.Map Storage] + C --> D{Get Request} + D -->|By Address| E[Load from map] + D -->|By Docker Host| F[Resolve agent addr] + D -->|By Name| G[Iterate & match] + + H[Docker Client] --> I[Get Agent] + I --> C + I --> J[HTTP Client] + J --> K[Agent Connection] + + L[Route Provider] --> M[List Agents] + M --> C +``` + +### Thread safety model + +The pool uses `xsync.Map[string, *Agent]` for concurrent-safe operations: + +- `Add`: `LoadOrCompute` prevents race conditions and duplicates +- `Get`: Lock-free read operations +- `Iter`: Consistent snapshot iteration via `Range` +- `Remove`: Thread-safe deletion + +### Test mode + +When running tests (binary ends with `.test`), a test agent is automatically added: + +```go +func init() { + if strings.HasSuffix(os.Args[0], ".test") { + agentPool.Store("test-agent", &Agent{ + AgentConfig: &agent.AgentConfig{ + Addr: "test-agent", + }, + }) + } +} +``` + +## Configuration Surface + +No direct configuration. 
Agents are added via configuration loading from `config/config.yml`: + +```yaml +providers: + agents: + - addr: agent.example.com:443 + name: remote-agent + tls: + ca_file: /path/to/ca.pem + cert_file: /path/to/cert.pem + key_file: /path/to/key.pem +``` + +## Dependency and Integration Map + +### Internal dependencies + +- `agent/pkg/agent` - Agent configuration and connection settings +- `xsync/v4` - Concurrent map implementation + +### External dependencies + +- `valyala/fasthttp` - Fast HTTP client for agent communication + +### Integration points + +```go +// Docker package uses agent pool for remote connections +if agent.IsDockerHostAgent(host) { + a, ok := agentpool.Get(host) + if !ok { + panic(fmt.Errorf("agent %q not found", host)) + } + opt := []client.Opt{ + client.WithHost(agent.DockerHost), + client.WithHTTPClient(a.HTTPClient()), + } +} +``` + +## Observability + +### Logs + +No specific logging in the agentpool package. Client creation/destruction is logged in the docker package. + +### Metrics + +No metrics are currently exposed. 
+ +## Security Considerations + +- TLS configuration is loaded from agent configuration +- Connection credentials are not stored in the pool after agent creation +- HTTP clients are created per-request to ensure credential freshness + +## Failure Modes and Recovery + +| Failure | Behavior | Recovery | +| -------------------- | -------------------- | ---------------------------- | +| Agent not found | Returns `nil, false` | Add agent to pool before use | +| Duplicate add | Returns `false` | Existing agent is preserved | +| Test mode activation | Test agent added | Only during test binaries | + +## Performance Characteristics + +- O(1) lookup by address +- O(n) iteration for name-based lookup +- Pre-sized to 10 entries via `xsync.WithPresize(10)` +- No locks required for read operations +- HTTP clients are created per-call to ensure fresh connections + +## Usage Examples + +### Adding an agent + +```go +agentConfig := &agent.AgentConfig{ + Addr: "agent.example.com:443", + Name: "my-agent", +} + +added := agentpool.Add(agentConfig) +if !added { + log.Println("Agent already exists") +} +``` + +### Retrieving an agent + +```go +// By address +agent, ok := agentpool.Get("agent.example.com:443") +if !ok { + log.Fatal("Agent not found") +} + +// By Docker host URL +agent, ok := agentpool.Get("http://docker-host:2375") +if !ok { + log.Fatal("Agent not found") +} + +// By name +agent, ok := agentpool.GetAgent("my-agent") +if !ok { + log.Fatal("Agent not found") +} +``` + +### Iterating over all agents + +```go +for addr, agent := range agentpool.Iter() { + log.Printf("Agent: %s at %s", agent.Name, addr) +} +``` + +### Using with Docker client + +```go +// When creating a Docker client with an agent host +if agent.IsDockerHostAgent(host) { + a, ok := agentpool.Get(host) + if !ok { + panic(fmt.Errorf("agent %q not found", host)) + } + opt := []client.Opt{ + client.WithHost(agent.DockerHost), + client.WithHTTPClient(a.HTTPClient()), + } + dockerClient, err := client.New(opt...) 
+} +``` diff --git a/internal/api/v1/README.md b/internal/api/v1/README.md new file mode 100644 index 00000000..658e3d4e --- /dev/null +++ b/internal/api/v1/README.md @@ -0,0 +1,197 @@ +# API v1 Package + +Implements the v1 REST API handlers for GoDoxy, exposing endpoints for managing routes, Docker containers, certificates, metrics, and system configuration. + +## Overview + +The `internal/api/v1` package implements the HTTP handlers that power GoDoxy's REST API. It uses the Gin web framework and provides endpoints for route management, container operations, certificate handling, system metrics, and configuration. + +### Primary Consumers + +- **WebUI**: The homepage dashboard and admin interface consume these endpoints + +### Non-goals + +- Authentication and authorization logic (delegated to `internal/auth`) +- Route proxying and request handling (handled by `internal/route`) +- Docker container lifecycle management (delegated to `internal/docker`) +- Certificate issuance and storage (handled by `internal/autocert`) + +### Stability + +This package is stable. Public API endpoints follow semantic versioning for request/response contracts. Internal implementation may change between minor versions. 
+ +## Public API + +### Exported Types + +Types are defined in `goutils/apitypes`: + +| Type | Purpose | +| -------------------------- | -------------------------------- | +| `apitypes.ErrorResponse` | Standard error response format | +| `apitypes.SuccessResponse` | Standard success response format | + +### Handler Subpackages + +| Package | Purpose | +| ---------- | ---------------------------------------------- | +| `route` | Route listing, details, and playground testing | +| `docker` | Docker container management and monitoring | +| `cert` | Certificate information and renewal | +| `metrics` | System metrics and uptime information | +| `homepage` | Homepage items and category management | +| `file` | Configuration file read/write operations | +| `auth` | Authentication and session management | +| `agent` | Remote agent creation and management | + +## Architecture + +### Handler Organization + +Package structure mirrors the API endpoint paths (e.g., `auth/login.go` handles `/auth/login`). + +### Request Flow + +```mermaid +sequenceDiagram + participant Client + participant GinRouter + participant Handler + participant Service + participant Response + + Client->>GinRouter: HTTP Request + GinRouter->>Handler: Route to handler + Handler->>Service: Call service layer + Service-->>Handler: Data or error + Handler->>Response: Format JSON response + Response-->>Client: JSON or redirect +``` + +## Configuration Surface + +API listening address is configured with `GODOXY_API_ADDR` environment variable. 
+ +## Dependency and Integration Map + +### Internal Dependencies + +| Package | Purpose | +| ----------------------- | --------------------------- | +| `internal/route/routes` | Route storage and iteration | +| `internal/docker` | Docker client management | +| `internal/config` | Configuration access | +| `internal/metrics` | System metrics collection | +| `internal/homepage` | Homepage item generation | +| `internal/agentpool` | Remote agent management | +| `internal/auth` | Authentication services | + +### External Dependencies + +| Package | Purpose | +| ------------------------------ | --------------------------- | +| `github.com/gin-gonic/gin` | HTTP routing and middleware | +| `github.com/gorilla/websocket` | WebSocket support | +| `github.com/moby/moby/client` | Docker API client | + +## Observability + +### Logs + +Handlers log at `INFO` level for requests and `ERROR` level for failures. Logs include: + +- Request path and method +- Response status code +- Error details (when applicable) + +### Metrics + +No dedicated metrics exposed by handlers. Request metrics collected by middleware. 
+ +## Security Considerations + +- All endpoints (except `/api/v1/version`) require authentication +- Input validation using Gin binding tags +- Path traversal prevention in file operations +- WebSocket connections use same auth middleware as HTTP + +## Failure Modes and Recovery + +| Failure | Behavior | +| ----------------------------------- | ------------------------------------------ | +| Docker host unreachable | Returns partial results with errors logged | +| Certificate provider not configured | Returns 404 | +| Invalid request body | Returns 400 with error details | +| Authentication failure | Returns 302 redirect to login | +| Agent not found | Returns 404 | + +## Usage Examples + +### Listing All Routes via WebSocket + +```go +import ( + "github.com/gorilla/websocket" +) + +func watchRoutes(provider string) error { + url := "ws://localhost:8888/api/v1/route/list" + if provider != "" { + url += "?provider=" + provider + } + + conn, _, err := websocket.DefaultDialer.Dial(url, nil) + if err != nil { + return err + } + defer conn.Close() + + for { + _, message, err := conn.ReadMessage() + if err != nil { + return err + } + // message contains JSON array of routes + processRoutes(message) + } +} +``` + +### Getting Container Status + +```go +import ( + "encoding/json" + "net/http" +) + +type Container struct { + Server string `json:"server"` + Name string `json:"name"` + ID string `json:"id"` + Image string `json:"image"` +} + +func listContainers() ([]Container, error) { + resp, err := http.Get("http://localhost:8888/api/v1/docker/containers") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var containers []Container + if err := json.NewDecoder(resp.Body).Decode(&containers); err != nil { + return nil, err + } + return containers, nil +} +``` + +### Health Check + +```bash +curl http://localhost:8888/health +``` + +) diff --git a/internal/api/v1/metrics/upime.go b/internal/api/v1/metrics/uptime.go similarity index 100% rename from 
internal/api/v1/metrics/upime.go rename to internal/api/v1/metrics/uptime.go diff --git a/internal/auth/README.md b/internal/auth/README.md new file mode 100644 index 00000000..aa340207 --- /dev/null +++ b/internal/auth/README.md @@ -0,0 +1,349 @@ +# Authentication + +Authentication providers supporting OIDC and username/password authentication with JWT-based sessions. + +## Overview + +The auth package implements authentication middleware and login handlers that integrate with GoDoxy's HTTP routing system. It provides flexible authentication that can be enabled/disabled based on configuration and supports multiple authentication providers. + +### Primary consumers + +- `internal/route/rules` - Authentication middleware for routes +- `internal/api/v1/auth` - Login and session management endpoints +- `internal/homepage` - WebUI login page + +### Non-goals + +- ACL or authorization (see `internal/acl`) +- User management database +- Multi-factor authentication +- Rate limiting (basic OIDC rate limiting only) + +### Stability + +Stable internal package. Public API consists of the `Provider` interface and initialization functions. + +## Public API + +### Exported types + +```go +type Provider interface { + CheckToken(r *http.Request) error + LoginHandler(w http.ResponseWriter, r *http.Request) + PostAuthCallbackHandler(w http.ResponseWriter, r *http.Request) + LogoutHandler(w http.ResponseWriter, r *http.Request) +} +``` + +### OIDC Provider + +```go +type OIDCProvider struct { + oauthConfig *oauth2.Config + oidcProvider *oidc.Provider + oidcVerifier *oidc.IDTokenVerifier + endSessionURL *url.URL + allowedUsers []string + allowedGroups []string + rateLimit *rate.Limiter +} +``` + +### Username/Password Provider + +```go +type UserPassAuth struct { + username string + pwdHash []byte + secret []byte + tokenTTL time.Duration +} +``` + +### Exported functions + +```go +func Initialize() error +``` + +Sets up authentication providers based on environment configuration. 
Returns error if OIDC issuer is configured but cannot be reached. + +```go +func IsEnabled() bool +``` + +Returns whether authentication is enabled. Checks `DEBUG_DISABLE_AUTH`, `API_JWT_SECRET`, and `OIDC_ISSUER_URL`. + +```go +func IsOIDCEnabled() bool +``` + +Returns whether OIDC authentication is configured. + +```go +func GetDefaultAuth() Provider +``` + +Returns the configured authentication provider. + +```go +func AuthCheckHandler(w http.ResponseWriter, r *http.Request) +``` + +HTTP handler that checks if the request has a valid token. Returns 200 if valid, invokes login handler otherwise. + +```go +func AuthOrProceed(w http.ResponseWriter, r *http.Request) bool +``` + +Authenticates request or proceeds if valid. Returns `false` if login handler was invoked, `true` if authenticated. + +```go +func ProceedNext(w http.ResponseWriter, r *http.Request) +``` + +Continues to the next handler after successful authentication. + +```go +func NewUserPassAuth(username, password string, secret []byte, tokenTTL time.Duration) (*UserPassAuth, error) +``` + +Creates a new username/password auth provider with bcrypt password hashing. + +```go +func NewUserPassAuthFromEnv() (*UserPassAuth, error) +``` + +Creates username/password auth from environment variables `API_USER`, `API_PASSWORD`, `API_JWT_SECRET`. + +```go +func NewOIDCProvider(issuerURL, clientID, clientSecret string, allowedUsers, allowedGroups []string) (*OIDCProvider, error) +``` + +Creates a new OIDC provider. Returns error if issuer cannot be reached or no allowed users/groups are configured. + +```go +func NewOIDCProviderFromEnv() (*OIDCProvider, error) +``` + +Creates OIDC provider from environment variables `OIDC_ISSUER_URL`, `OIDC_CLIENT_ID`, `OIDC_CLIENT_SECRET`, etc. 
+ +## Architecture + +### Core components + +```mermaid +graph TD + A[HTTP Request] --> B{Auth Enabled?} + B -->|No| C[Proceed Direct] + B -->|Yes| D[Check Token] + D -->|Valid| E[Proceed] + D -->|Invalid| F[Login Handler] + + G[OIDC Provider] --> H[Token Validation] + I[UserPass Provider] --> J[Credential Check] + + F --> K{OIDC Configured?} + K -->|Yes| G + K -->|No| I + + subgraph Cookie Management + L[Token Cookie] + M[State Cookie] + N[Session Cookie] + end +``` + +### OIDC authentication flow + +```mermaid +sequenceDiagram + participant User + participant App + participant IdP + + User->>App: Access Protected Resource + App->>App: Check Token + alt No valid token + App-->>User: Redirect to /auth/ + User->>IdP: Login & Authorize + IdP-->>User: Redirect with Code + User->>App: /auth/callback?code=... + App->>IdP: Exchange Code for Token + IdP-->>App: Access Token + ID Token + App->>App: Validate Token + App->>App: Check allowed users/groups + App-->>User: Protected Resource + else Valid token exists + App-->>User: Protected Resource + end +``` + +### Username/password flow + +```mermaid +sequenceDiagram + participant User + participant App + + User->>App: POST /auth/callback + App->>App: Validate credentials + alt Valid + App->>App: Generate JWT + App-->>User: Set token cookie, redirect to / + else Invalid + App-->>User: 401 Unauthorized + end +``` + +## Configuration Surface + +### Environment variables + +| Variable | Description | +| ------------------------ | ----------------------------------------------------------- | +| `DEBUG_DISABLE_AUTH` | Set to "true" to disable auth for debugging | +| `API_JWT_SECRET` | Secret key for JWT token validation (enables userpass auth) | +| `API_USER` | Username for userpass authentication | +| `API_PASSWORD` | Password for userpass authentication | +| `API_JWT_TOKEN_TTL` | Token TTL duration (default: 24h) | +| `OIDC_ISSUER_URL` | OIDC provider URL (enables OIDC) | +| `OIDC_CLIENT_ID` | OIDC client ID | +| 
`OIDC_CLIENT_SECRET` | OIDC client secret | +| `OIDC_REDIRECT_URL` | OIDC redirect URL | +| `OIDC_ALLOWED_USERS` | Comma-separated list of allowed users | +| `OIDC_ALLOWED_GROUPS` | Comma-separated list of allowed groups | +| `OIDC_SCOPES` | Comma-separated OIDC scopes (default: openid,profile,email) | +| `OIDC_RATE_LIMIT` | Rate limit requests (default: 10) | +| `OIDC_RATE_LIMIT_PERIOD` | Rate limit period (default: 1m) | + +### Hot-reloading + +Authentication configuration requires restart. No dynamic reconfiguration is supported. + +## Dependency and Integration Map + +### Internal dependencies + +- `internal/common` - Environment variable access + +### External dependencies + +- `golang.org/x/crypto/bcrypt` - Password hashing +- `github.com/coreos/go-oidc/v3/oidc` - OIDC protocol +- `golang.org/x/oauth2` - OAuth2/OIDC implementation +- `github.com/golang-jwt/jwt/v5` - JWT token handling +- `golang.org/x/time/rate` - OIDC rate limiting + +### Integration points + +```go +// Route middleware uses AuthOrProceed +routeHandler := func(w http.ResponseWriter, r *http.Request) { + if !auth.AuthOrProceed(w, r) { + return // Auth failed, login handler was invoked + } + // Continue with authenticated request +} +``` + +## Observability + +### Logs + +- OIDC provider initialization errors +- Token validation failures +- Rate limit exceeded events + +### Metrics + +No metrics are currently exposed. 
+ +## Security Considerations + +- JWT tokens use HS512 signing for userpass auth +- OIDC tokens are validated against the issuer +- Session tokens are scoped by client ID to prevent conflicts +- Passwords are hashed with bcrypt (cost 10) +- OIDC rate limiting prevents brute-force attacks +- State parameter prevents CSRF attacks +- Refresh tokens are stored and invalidated on logout + +## Failure Modes and Recovery + +| Failure | Behavior | Recovery | +| ------------------------ | ------------------------------ | ----------------------------- | +| OIDC issuer unreachable | Initialize returns error | Fix network/URL configuration | +| Invalid JWT secret | Initialize uses API_JWT_SECRET | Provide correct secret | +| Token expired | CheckToken returns error | User must re-authenticate | +| User not in allowed list | Returns ErrUserNotAllowed | Add user to allowed list | +| Rate limit exceeded | Returns 429 Too Many Requests | Wait for rate limit reset | + +## Usage Examples + +### Basic setup + +```go +// Initialize authentication during startup +err := auth.Initialize() +if err != nil { + log.Fatal(err) +} + +// Check if auth is enabled +if auth.IsEnabled() { + log.Println("Authentication is enabled") +} + +// Check OIDC status +if auth.IsOIDCEnabled() { + log.Println("OIDC authentication configured") +} +``` + +### Using AuthOrProceed middleware + +```go +func protectedHandler(w http.ResponseWriter, r *http.Request) { + if !auth.AuthOrProceed(w, r) { + return // Auth failed, login handler was invoked + } + // Continue with authenticated request +} +``` + +### Using AuthCheckHandler + +```go +http.HandleFunc("/api/", auth.AuthCheckHandler(apiHandler)) +``` + +### Custom OIDC provider + +```go +provider, err := auth.NewOIDCProvider( + "https://your-idp.com", + "your-client-id", + "your-client-secret", + []string{"user1", "user2"}, + []string{"group1"}, +) +if err != nil { + log.Fatal(err) +} +``` + +### Custom userpass provider + +```go +provider, err := 
auth.NewUserPassAuth( + "admin", + "password123", + []byte("jwt-secret-key"), + 24*time.Hour, +) +if err != nil { + log.Fatal(err) +} +``` diff --git a/internal/autocert/README.md b/internal/autocert/README.md index bc19feb3..c69d3fde 100644 --- a/internal/autocert/README.md +++ b/internal/autocert/README.md @@ -2,42 +2,116 @@ Automated SSL certificate management using the ACME protocol (Let's Encrypt and compatible CAs). -## Architecture Overview +## Overview -``` -┌────────────────────────────────────────────────────────────────────────────┐ -│ GoDoxy Proxy │ -├────────────────────────────────────────────────────────────────────────────┤ -│ ┌──────────────────────┐ ┌─────────────────────────────────────────┐ │ -│ │ Config.State │────▶│ autocert.Provider │ │ -│ │ (config loading) │ │ ┌───────────────────────────────────┐ │ │ -│ └──────────────────────┘ │ │ main Provider │ │ │ -│ │ │ - Primary certificate │ │ │ -│ │ │ - SNI matcher │ │ │ -│ │ │ - Renewal scheduler │ │ │ -│ │ └───────────────────────────────────┘ │ │ -│ │ ┌───────────────────────────────────┐ │ │ -│ │ │ extraProviders[] │ │ │ -│ │ │ - Additional certifictes │ │ │ -│ │ │ - Different domains/A │ │ │ -│ │ └───────────────────────────────────┘ │ │ -│ └─────────────────────────────────────────┘ │ -│ │ │ -│ ▼ │ -│ ┌────────────────────────────────┐ │ -│ │ TLS Handshake │ │ -│ │ GetCert(ClientHelloInf) │ │ -│ └────────────────────────────────┘ │ -└────────────────────────────────────────────────────────────────────────────┘ +### Purpose + +This package provides complete SSL certificate lifecycle management: + +- ACME account registration and management +- Certificate issuance via DNS-01 challenge +- Automatic renewal scheduling (30 days before expiry) +- SNI-based certificate selection for multi-domain setups + +### Primary Consumers + +- `internal/net/gphttp/` - TLS handshake certificate provider +- `internal/api/v1/cert/` - REST API for certificate management +- Configuration loading via 
`internal/config/` + +### Non-goals + +- HTTP-01 challenge support +- Certificate transparency log monitoring +- OCSP stapling +- Private CA support (except via custom CADirURL) + +### Stability + +Internal package with stable public APIs. ACME protocol compliance depends on lego library. + +## Public API + +### Config (`config.go`) + +```go +type Config struct { + Email string // ACME account email + Domains []string // Domains to certify + CertPath string // Output cert path + KeyPath string // Output key path + Extra []ConfigExtra // Additional cert configs + ACMEKeyPath string // ACME account private key + Provider string // DNS provider name + Options map[string]strutils.Redacted // Provider options + Resolvers []string // DNS resolvers + CADirURL string // Custom ACME CA directory + CACerts []string // Custom CA certificates + EABKid string // External Account Binding Key ID + EABHmac string // External Account Binding HMAC +} + +// Merge extra config with main provider +func MergeExtraConfig(mainCfg *Config, extraCfg *ConfigExtra) ConfigExtra ``` -## Certificate Lifecycle +### Provider (`provider.go`) + +```go +type Provider struct { + logger zerolog.Logger + cfg *Config + user *User + legoCfg *lego.Config + client *lego.Client + lastFailure time.Time + legoCert *certificate.Resource + tlsCert *tls.Certificate + certExpiries CertExpiries + extraProviders []*Provider + sniMatcher sniMatcher +} + +// Create new provider (initializes extras atomically) +func NewProvider(cfg *Config, user *User, legoCfg *lego.Config) (*Provider, error) + +// TLS certificate getter for SNI +func (p *Provider) GetCert(hello *tls.ClientHelloInfo) (*tls.Certificate, error) + +// Certificate info for API +func (p *Provider) GetCertInfos() ([]CertInfo, error) + +// Provider name ("main" or "extra[N]") +func (p *Provider) GetName() string + +// Obtain certificate if not exists +func (p *Provider) ObtainCertIfNotExistsAll() error + +// Force immediate renewal +func (p *Provider) 
ForceExpiryAll() bool + +// Schedule automatic renewal +func (p *Provider) ScheduleRenewalAll(parent task.Parent) + +// Print expiry dates +func (p *Provider) PrintCertExpiriesAll() +``` + +### User (`user.go`) + +```go +type User struct { + Email string // Account email + Registration *registration.Resource // ACME registration + Key crypto.PrivateKey // Account key +} +``` + +## Architecture + +### Certificate Lifecycle ```mermaid ---- -config: - theme: redux-dark-color ---- flowchart TD A[Start] --> B[Load Existing Cert] B --> C{Cert Exists?} @@ -70,16 +144,9 @@ flowchart TD T --> V[Update SNI Matcher] V --> G - - style E fill:#22553F,color:#fff - style I fill:#8B8000,color:#fff - style N fill:#22553F,color:#fff - style U fill:#84261A,color:#fff ``` -## SNI Matching Flow - -When a TLS client connects with Server Name Indication (SNI), the proxy needs to select the correct certificate. +### SNI Matching Flow ```mermaid flowchart LR @@ -96,183 +163,48 @@ flowchart LR F -->|Yes| D F -->|No| G[Return default cert] end - - style C fill:#27632A,color:#fff - style E fill:#18597A,color:#fff - style F fill:#836C03,color:#fff ``` ### Suffix Tree Structure -The `sniMatcher` uses an optimized suffix tree for efficient wildcard matching: - ``` Certificate: *.example.com, example.com, *.api.example.com exact: - "example.com" → Provider_A + "example.com" -> Provider_A root: └── "com" └── "example" - ├── "*" → Provider_A [wildcard at *.example.com] + ├── "*" -> Provider_A [wildcard at *.example.com] └── "api" - └── "*" → Provider_B [wildcard at *.api.example.com] + └── "*" -> Provider_B [wildcard at *.api.example.com] ``` -## Key Components +## Configuration Surface -### Config +### Provider Types -Configuration for certificate management, loaded from `config/autocert.yml`. 
+| Type | Description | Use Case | +| -------------- | ---------------------------- | ------------------------- | +| `local` | No ACME, use existing cert | Pre-existing certificates | +| `pseudo` | Mock provider for testing | Development | +| ACME providers | Let's Encrypt, ZeroSSL, etc. | Production | -```go -type Config struct { - Email string // ACME account email - Domains []string // Domains to certifiy - CertPath string // Output cert path - KeyPath string // Output key path - Extra []ConfigExtra // Additional cert configs - ACMEKeyPath string // ACME account private key (shared by all extras) - Provider string // DNS provider name - Options map[string]strutils.Redacted // Provider-specific options - Resolvers []string // DNS resolvers for DNS-01 - CADirURL string // Custom ACME CA directory - CACerts []string // Custom CA certificates - EABKid string // External Account Binding Key ID - EABHmac string // External Account Binding HMAC +### Supported DNS Providers - idx int // 0: main, 1+: extra[i] -} +| Provider | Name | Required Options | +| ------------ | -------------- | ----------------------------------- | +| Cloudflare | `cloudflare` | `CF_API_TOKEN` | +| Route 53 | `route53` | AWS credentials | +| DigitalOcean | `digitalocean` | `DO_API_TOKEN` | +| GoDaddy | `godaddy` | `GD_API_KEY`, `GD_API_SECRET` | +| OVH | `ovh` | `OVH_ENDPOINT`, `OVH_APP_KEY`, etc. | +| CloudDNS | `clouddns` | GCP credentials | +| AzureDNS | `azuredns` | Azure credentials | +| DuckDNS | `duckdns` | `DUCKDNS_TOKEN` | -type ConfigExtra Config -``` - -**Extra Provider Merging:** Extra configurations are merged with the main config using `MergeExtraConfig()`, inheriting most settings from the main provider while allowing per-certificate overrides for `Provider`, `Email`, `Domains`, `Options`, `Resolvers`, `CADirURL`, `CACerts`, `EABKid`, `EABHmac`, and `HTTPClient`. The `ACMEKeyPath` is shared across all providers. 
- -**Validation:** - -- Extra configs must have unique `cert_path` and `key_path` values (no duplicates across main or any extra provider) - -### ConfigExtra - -Extra certificate configuration type. Uses `MergeExtraConfig()` to inherit settings from the main provider: - -```go -func MergeExtraConfig(mainCfg *Config, extraCfg *ConfigExtra) ConfigExtra -``` - -Fields that can be overridden per extra provider: - -- `Provider` - DNS provider name -- `Email` - ACME account email -- `Domains` - Certificate domains -- `Options` - Provider-specific options -- `Resolvers` - DNS resolvers -- `CADirURL` - Custom ACME CA directory -- `CACerts` - Custom CA certificates -- `EABKid` / `EABHmac` - External Account Binding credentials -- `HTTPClient` - Custom HTTP client - -Fields inherited from main config (shared): - -- `ACMEKeyPath` - ACME account private key (same for all) - -**Provider Types:** - -- `local` - No ACME, use existing certificate (default) -- `pseudo` - Mock provider for testing -- `custom` - Custom ACME CA with `CADirURL` - -### Provider - -Main certificate management struct that handles: - -- Certificate issuance and renewal -- Loading certificates from disk -- SNI-based certificate selection -- Renewal scheduling - -```go -type Provider struct { - logger zerolog.Logger // Provider-scoped logger - - cfg *Config // Configuration - user *User // ACME account - legoCfg *lego.Config // LEGO client config - client *lego.Client // ACME client - lastFailure time.Time // Last renewal failure - legoCert *certificate.Resource // Cached cert resource - tlsCert *tls.Certificate // Parsed TLS certificate - certExpiries CertExpiries // Domain → expiry map - extraProviders []*Provider // Additional certificates - sniMatcher sniMatcher // SNI → Provider mapping - forceRenewalCh chan struct{} // Force renewal trigger channel - scheduleRenewalOnce sync.Once // Prevents duplicate renewal scheduling -} -``` - -**Logging:** Each provider has a scoped logger with provider name 
("main" or "extra[N]") for consistent log context. - -**Key Methods:** - -- `NewProvider(cfg *Config, user *User, legoCfg *lego.Config) (*Provider, error)` - Creates provider and initializes extra providers atomically -- `GetCert(hello *tls.ClientHelloInfo)` - Returns certificate for TLS handshake -- `GetName()` - Returns provider name ("main" or "extra[N]") -- `ObtainCert()` - Obtains new certificate via ACME -- `ObtainCertAll()` - Renews/obtains certificates for main and all extra providers -- `ObtainCertIfNotExistsAll()` - Obtains certificates only if they don't exist on disk -- `ForceExpiryAll()` - Triggers forced certificate renewal for main and all extra providers -- `ScheduleRenewalAll(parent task.Parent)` - Schedules automatic renewal for all providers -- `PrintCertExpiriesAll()` - Logs certificate expiry dates for all providers - -### User - -ACME account representation implementing lego's `acme.User` interface. - -```go -type User struct { - Email string // Account email - Registration *registration.Resource // ACME registration - Key crypto.PrivateKey // Account key -} -``` - -### sniMatcher - -Efficient SNI-to-Provider lookup with exact and wildcard matching. 
- -```go -type sniMatcher struct { - exact map[string]*Provider // Exact domain matches - root sniTreeNode // Wildcard suffix tree -} - -type sniTreeNode struct { - children map[string]*sniTreeNode // DNS label → child node - wildcard *Provider // Wildcard match at this level -} -``` - -## DNS Providers - -Supported DNS providers for DNS-01 challenge validation: - -| Provider | Name | Description | -| ------------ | -------------- | ---------------------------------------- | -| Cloudflare | `cloudflare` | Cloudflare DNS | -| Route 53 | `route53` | AWS Route 53 | -| DigitalOcean | `digitalocean` | DigitalOcean DNS | -| GoDaddy | `godaddy` | GoDaddy DNS | -| OVH | `ovh` | OVHcloud DNS | -| CloudDNS | `clouddns` | Google Cloud DNS | -| AzureDNS | `azuredns` | Azure DNS | -| DuckDNS | `duckdns` | DuckDNS | -| and more... | | See `internal/dnsproviders/providers.go` | - -### Provider Configuration - -Each provider accepts configuration via the `options` map: +### Example Configuration ```yaml autocert: @@ -280,53 +212,14 @@ autocert: email: admin@example.com domains: - example.com - - '*.example.com' + - "*.example.com" options: - CF_API_TOKEN: your-api-token - CF_ZONE_API_TOKEN: your-zone-token + auth_token: ${CF_API_TOKEN} resolvers: - 1.1.1.1:53 ``` -## ACME Integration - -### Account Registration - -```mermaid -flowchart TD - A[Load or Generate ACME Key] --> B[Init LEGO Client] - B --> C[Resolve Account by Key] - C --> D{Account Exists?} - D -->|Yes| E[Continue with existing] - D -->|No| F{Has EAB?} - F -->|Yes| G[Register with EAB] - F -->|No| H[Register with TOS Agreement] - G --> I[Save Registration] - H --> I -``` - -### DNS-01 Challenge - -```mermaid -sequenceDiagram - participant C as ACME CA - participant P as GoDoxy - participant D as DNS Provider - - P->>C: Request certificate for domain - C->>P: Present DNS-01 challenge - P->>D: Create TXT record _acme-challenge.domain - D-->>P: Record created - P->>C: Challenge ready - C->>D: Verify DNS TXT record - 
D-->>C: Verification success - C->>P: Issue certificate - P->>D: Clean up TXT record -``` - -## Multi-Certificate Support - -The package supports multiple certificates through the `extra` configuration: +### Extra Providers ```yaml autocert: @@ -334,212 +227,81 @@ autocert: email: admin@example.com domains: - example.com - - '*.example.com' + - "*.example.com" cert_path: certs/example.com.crt key_path: certs/example.com.key + options: + auth_token: ${CF_API_TOKEN} extra: - domains: - api.example.com - - '*.api.example.com' + - "*.api.example.com" cert_path: certs/api.example.com.crt key_path: certs/api.example.com.key - provider: cloudflare - email: admin@api.example.com ``` -### Extra Provider Setup +## Dependency and Integration Map -Extra providers are initialized atomically within `NewProvider()`: +### External Dependencies -```mermaid -flowchart TD - A[NewProvider] --> B{Merge Config with Extra} - B --> C[Create Provider per Extra] - C --> D[Build SNI Matcher] - D --> E[Register in SNI Tree] +- `github.com/go-acme/lego/v4` - ACME protocol implementation +- `github.com/rs/zerolog` - Structured logging - style B fill:#1a2639,color:#fff - style C fill:#423300,color:#fff -``` +### Internal Dependencies -## Renewal Scheduling +- `internal/task/task.go` - Lifetime management +- `internal/notif/` - Renewal notifications +- `internal/config/` - Configuration loading +- `internal/dnsproviders/` - DNS provider implementations -### Renewal Timing +## Observability -- **Initial Check**: Certificate expiry is checked at startup -- **Renewal Window**: Renewal scheduled for 1 month before expiry -- **Cooldown on Failure**: 1-hour cooldown after failed renewal -- **Request Cooldown**: 15-second cooldown after startup (prevents rate limiting) -- **Force Renewal**: `forceRenewalCh` channel allows triggering immediate renewal +### Logs -### Force Renewal +| Level | When | +| ------- | ----------------------------- | +| `Info` | Certificate obtained/renewed | +| `Info` | 
Registration reused | +| `Warn` | Renewal failure | +| `Error` | Certificate retrieval failure | -The `forceRenewalCh` channel (buffered size 1) enables immediate certificate renewal on demand: +### Notifications -```go -// Trigger forced renewal for main and all extra providers -provider.ForceExpiryAll() -``` +- Certificate renewal success/failure +- Service startup with expiry dates -```mermaid -flowchart TD - A[Start] --> B[Calculate Renewal Time] - B --> C[expiry - 30 days] - C --> D[Start Timer] +## Security Considerations - D --> E{Event?} - E -->|forceRenewalCh| F[Force Renewal] - E -->|Timer| G[Check Failure Cooldown] - E -->|Context Done| H[Exit] +- Account private key stored at `certs/acme.key` (mode 0600) +- Certificate private keys stored at configured paths (mode 0600) +- Certificate files world-readable (mode 0644) +- ACME account email used for Let's Encrypt ToS +- EAB credentials for zero-touch enrollment - G --> H1{Recently Failed?} - H1 -->|Yes| I[Skip, Wait Next Event] - H1 -->|No| J[Attempt Renewal] +## Failure Modes and Recovery - J --> K{Renewal Success?} - K -->|Yes| L[Reset Failure, Notify Success] - K -->|No| M[Update Failure Time, Notify Failure] - - L --> N[Reset Timer] - I --> N - M --> D - - N --> D - - style F fill:#423300,color:#fff - style J fill:#423300,color:#fff - style K fill:#174014,color:#fff - style M fill:#432829,color:#fff -``` - -**Notifications:** Renewal success/failure triggers system notifications with provider name. 
- -### CertState - -Certificate state tracking: - -```go -const ( - CertStateValid // Certificate is valid and up-to-date - CertStateExpired // Certificate has expired or needs renewal - CertStateMismatch // Certificate domains don't match config -) -``` - -### RenewMode - -Controls renewal behavior: - -```go -const ( - renewModeForce // Force renewal, bypass cooldown and state check - renewModeIfNeeded // Renew only if expired or domain mismatch -) -``` - -## File Structure - -``` -internal/autocert/ -├── README.md # This file -├── config.go # Config struct and validation -├── provider.go # Provider implementation -├── setup.go # Extra provider setup -├── sni_matcher.go # SNI matching logic -├── providers.go # DNS provider registration -├── state.go # Certificate state enum -├── user.go # ACME user/account -├── paths.go # Default paths -└── types/ - └── provider.go # Provider interface -``` - -## Default Paths - -| Constant | Default Value | Description | -| -------------------- | ---------------- | ------------------------ | -| `CertFileDefault` | `certs/cert.crt` | Default certificate path | -| `KeyFileDefault` | `certs/priv.key` | Default private key path | -| `ACMEKeyFileDefault` | `certs/acme.key` | Default ACME account key | - -Failure tracking file is generated per-certificate: `/.last_failure-` - -## Error Handling - -The package uses structured error handling with `gperr`: - -- **ErrMissingField** - Required configuration field missing -- **ErrDuplicatedPath** - Duplicate certificate/key paths in extras -- **ErrInvalidDomain** - Invalid domain format -- **ErrUnknownProvider** - Unknown DNS provider -- **ErrGetCertFailure** - Certificate retrieval failed - -**Error Context:** All errors are prefixed with provider name ("main" or "extra[N]") via `fmtError()` for clear attribution. 
+| Failure Mode                   | Impact                     | Recovery                      |
+| ------------------------------ | -------------------------- | ----------------------------- |
+| DNS-01 challenge timeout       | Certificate issuance fails | Check DNS provider API        |
+| Rate limiting (too many certs) | 1-hour cooldown            | Wait or use different account |
+| DNS provider API error         | Renewal fails              | 1-hour cooldown, retry        |
+| Certificate domains mismatch   | Must re-obtain             | Force renewal via API         |
+| Account key corrupted          | Must register new account  | New key, may lose certs       |
 
 ### Failure Tracking
 
-Last failure is persisted per-certificate to prevent rate limiting:
+Last failure persisted per-certificate to prevent rate limiting:
+
+```
+File: <cert dir>/.last_failure-<hash>
+Where hash = SHA256(certPath|keyPath)[:6]
+```
+
+## Usage Examples
+
+### Initial Setup
 
 ```go
-// File: /.last_failure- where hash is SHA256(certPath|keyPath)[:6]
-```
-
-**Cooldown Checks:** Last failure is checked in `obtainCertIfNotExists()` (15-second startup cooldown) and `renew()` (1-hour failure cooldown). The `renewModeForce` bypasses cooldown checks entirely.
-
-## Integration with GoDoxy
-
-The autocert package integrates with GoDoxy's configuration system:
-
-```mermaid
-flowchart LR
-    subgraph Config
-        direction TB
-        A[config.yml] --> B[Parse Config]
-        B --> C[AutoCert Config]
-    end
-
-    subgraph State
-        C --> D[NewProvider]
-        D --> E[Schedule Renewal]
-        E --> F[Set Active Provider]
-    end
-
-    subgraph Server
-        F --> G[TLS Handshake]
-        G --> H[GetCert via SNI]
-        H --> I[Return Certificate]
-    end
-```
-
-### REST API
-
-Force certificate renewal via WebSocket endpoint:
-
-| Endpoint             | Method | Description                               |
-| -------------------- | ------ | ----------------------------------------- |
-| `/api/v1/cert/renew` | GET    | Triggers `ForceExpiryAll()` via WebSocket |
-
-The endpoint streams live logs during the renewal process.
- -## Usage Example - -```yaml -# config/config.yml -autocert: - provider: cloudflare - email: admin@example.com - domains: - - example.com - - '*.example.com' - options: - CF_API_TOKEN: ${CF_API_TOKEN} - resolvers: - - 1.1.1.1:53 - - 8.8.8.8:53 -``` - -```go -// In config initialization autocertCfg := state.AutoCert user, legoCfg, err := autocertCfg.GetLegoConfig() if err != nil { @@ -558,3 +320,21 @@ if err := provider.ObtainCertIfNotExistsAll(); err != nil { provider.ScheduleRenewalAll(state.Task()) provider.PrintCertExpiriesAll() ``` + +### Force Renewal via API + +```go +// WebSocket endpoint: GET /api/v1/cert/renew +if provider.ForceExpiryAll() { + // Wait for renewal to complete + provider.WaitRenewalDone(ctx) +} +``` + +## Testing Notes + +- `config_test.go` - Configuration validation +- `provider_test/` - Provider functionality tests +- `sni_test.go` - SNI matching tests +- `multi_cert_test.go` - Extra provider tests +- Integration tests require mock DNS provider diff --git a/internal/config/README.md b/internal/config/README.md new file mode 100644 index 00000000..5ad73378 --- /dev/null +++ b/internal/config/README.md @@ -0,0 +1,316 @@ +# Configuration Management + +Centralized YAML configuration management with thread-safe state access and provider initialization. + +## Overview + +The config package implements the core configuration management system for GoDoxy, handling YAML configuration loading, provider initialization, route loading, and state transitions. It uses atomic pointers for thread-safe state access and integrates all configuration components. 
+ +### Primary consumers + +- `cmd/main.go` - Initializes configuration state on startup +- `internal/route/provider` - Accesses configuration for route creation +- `internal/api/v1` - Exposes configuration via REST API +- All packages that need to access active configuration + +### Non-goals + +- Dynamic provider registration after initialization (require config reload) + +### Stability + +Stable internal package. Public API consists of `State` interface and state management functions. + +## Public API + +### Exported types + +```go +type Config struct { + ACL *acl.Config + AutoCert *autocert.Config + Entrypoint entrypoint.Config + Providers Providers + MatchDomains []string + Homepage homepage.Config + Defaults Defaults + TimeoutShutdown int +} + +type Providers struct { + Files []string + Docker map[string]types.DockerProviderConfig + Agents []*agent.AgentConfig + Notification []*notif.NotificationConfig + Proxmox []proxmox.Config + MaxMind *maxmind.Config +} +``` + +### State interface + +```go +type State interface { + Task() *task.Task + Context() context.Context + Value() *Config + EntrypointHandler() http.Handler + ShortLinkMatcher() config.ShortLinkMatcher + AutoCertProvider() server.CertProvider + LoadOrStoreProvider(key string, value types.RouteProvider) (actual types.RouteProvider, loaded bool) + DeleteProvider(key string) + IterProviders() iter.Seq2[string, types.RouteProvider] + StartProviders() error + NumProviders() int +} +``` + +### Exported functions + +```go +func NewState() config.State +``` + +Creates a new configuration state with empty providers map. + +```go +func GetState() config.State +``` + +Returns the active configuration state. Thread-safe via atomic load. + +```go +func SetState(state config.State) +``` + +Sets the active configuration state. Also updates active configs for ACL, entrypoint, homepage, and autocert. + +```go +func HasState() bool +``` + +Returns true if a state is currently active. 
+ +```go +func Value() *config.Config +``` + +Returns the current configuration values. + +```go +func (state *state) InitFromFile(filename string) error +``` + +Initializes state from a YAML file. Uses default config if file doesn't exist. + +```go +func (state *state) Init(data []byte) error +``` + +Initializes state from raw YAML data. Validates, then initializes MaxMind, Proxmox, providers, AutoCert, notifications, access logger, and entrypoint. + +```go +func (state *state) StartProviders() error +``` + +Starts all route providers concurrently. + +```go +func (state *state) IterProviders() iter.Seq2[string, types.RouteProvider] +``` + +Returns an iterator over all providers. + +## Architecture + +### Core components + +```mermaid +graph TD + A[config.yml] --> B[State] + B --> C{Initialize} + C --> D[Validate YAML] + C --> E[Init MaxMind] + C --> F[Init Proxmox] + C --> G[Load Route Providers] + C --> H[Init AutoCert] + C --> I[Init Notifications] + C --> J[Init Entrypoint] + + K[ActiveConfig] -.-> B + + subgraph Providers + G --> L[Docker Provider] + G --> M[File Provider] + G --> N[Agent Provider] + end + + subgraph State Management + B --> O[xsync.Map Providers] + B --> P[Entrypoint] + B --> Q[AutoCert Provider] + B --> R[task.Task] + end +``` + +### Initialization pipeline + +```mermaid +sequenceDiagram + participant YAML + participant State + participant MaxMind + participant Proxmox + participant Providers + participant AutoCert + participant Notif + participant Entrypoint + + YAML->>State: Parse & Validate + par Initialize in parallel + State->>MaxMind: Initialize + State->>Proxmox: Initialize + and + State->>Providers: Load Route Providers + Providers->>State: Store Providers + end + State->>AutoCert: Initialize + State->>Notif: Initialize + State->>Entrypoint: Configure + State->>State: Start Providers +``` + +### Thread safety model + +```go +var stateMu sync.RWMutex + +func GetState() config.State { + return config.ActiveState.Load() +} + +func 
SetState(state config.State) { + stateMu.Lock() + defer stateMu.Unlock() + config.ActiveState.Store(state) +} +``` + +Uses `sync.RWMutex` for write synchronization and `sync/atomic` for read operations. + +## Configuration Surface + +### Config sources + +Configuration is loaded from `config/config.yml`. + +### Hot-reloading + +Configuration supports hot-reloading via editing `config/config.yml`. + +## Dependency and Integration Map + +### Internal dependencies + +- `internal/acl` - Access control configuration +- `internal/autocert` - SSL certificate management +- `internal/entrypoint` - HTTP entrypoint setup +- `internal/route/provider` - Route providers (Docker, file, agent) +- `internal/maxmind` - GeoIP configuration +- `internal/notif` - Notification providers +- `internal/proxmox` - LXC container management +- `internal/homepage/types` - Dashboard configuration +- `github.com/yusing/goutils/task` - Object lifecycle management + +### External dependencies + +- `github.com/goccy/go-yaml` - YAML parsing +- `github.com/puzpuzpuz/xsync/v4` - Concurrent map + +### Integration points + +```go +// API uses config/query to access state +providers := statequery.RouteProviderList() + +// Route providers access config state +for _, p := range config.GetState().IterProviders() { + // Process provider +} +``` + +## Observability + +### Logs + +- Configuration parsing and validation errors +- Provider initialization results +- Route loading summary +- Full configuration dump (at debug level) + +### Metrics + +No metrics are currently exposed. 
+ +## Security Considerations + +- Configuration file permissions should be restricted (contains secrets) +- TLS certificates are loaded from files specified in config +- Agent credentials are passed via configuration +- No secrets are logged (except in debug mode with full config dump) + +## Failure Modes and Recovery + +| Failure | Behavior | Recovery | +| ----------------------------- | ----------------------------------- | -------------------------- | +| Invalid YAML | Init returns error | Fix YAML syntax | +| Missing required fields | Validation fails | Add required fields | +| Provider initialization fails | Error aggregated and returned | Fix provider configuration | +| Duplicate provider key | Error logged, first provider kept | Rename provider | +| Route loading fails | Error aggregated, other routes load | Fix route configuration | + +## Performance Characteristics + +- Providers are loaded concurrently +- Routes are loaded concurrently per provider +- State access is lock-free for reads +- Atomic pointer for state swap + +## Usage Examples + +### Loading configuration + +```go +state := config.NewState() +err := state.InitFromFile("config.yml") +if err != nil { + log.Fatal(err) +} + +config.SetState(state) +``` + +### Accessing configuration + +```go +if config.HasState() { + cfg := config.Value() + log.Printf("Entrypoint middleware count: %d", len(cfg.Entrypoint.Middlewares)) + log.Printf("Docker providers: %d", len(cfg.Providers.Docker)) +} +``` + +### Iterating providers + +```go +for name, provider := range config.GetState().IterProviders() { + log.Printf("Provider: %s, Routes: %d", name, provider.NumRoutes()) +} +``` + +### Accessing entrypoint handler + +```go +state := config.GetState() +http.Handle("/", state.EntrypointHandler()) +``` diff --git a/internal/config/query/README.md b/internal/config/query/README.md new file mode 100644 index 00000000..52427323 --- /dev/null +++ b/internal/config/query/README.md @@ -0,0 +1,226 @@ +# Configuration 
Query + +Read-only access to the active configuration state, including route providers and system statistics. + +## Overview + +The `internal/config/query` package offers read-only access to the active configuration state. It provides functions to dump route providers, list providers, search for routes, and retrieve system statistics. This package is primarily used by the API layer to expose configuration information. + +### Primary consumers + +- `internal/api/v1` - REST API endpoints for configuration queries +- `internal/homepage` - Dashboard statistics display +- Operators - CLI tools and debugging interfaces + +### Non-goals + +- Configuration modification (see `internal/config`) +- Provider lifecycle management +- Dynamic state updates + +### Stability + +Stable internal package. Functions are simple read-only accessors. + +## Public API + +### Exported types + +```go +type RouteProviderListResponse struct { + ShortName string `json:"short_name"` + FullName string `json:"full_name"` +} +``` + +```go +type Statistics struct { + Total uint16 `json:"total"` + ReverseProxies types.RouteStats `json:"reverse_proxies"` + Streams types.RouteStats `json:"streams"` + Providers map[string]types.ProviderStats `json:"providers"` +} +``` + +### Exported functions + +```go +func DumpRouteProviders() map[string]types.RouteProvider +``` + +Returns all route providers as a map keyed by their short name. Thread-safe access via `config.ActiveState.Load()`. + +```go +func RouteProviderList() []RouteProviderListResponse +``` + +Returns a list of route providers with their short and full names. Useful for API responses. + +```go +func SearchRoute(alias string) types.Route +``` + +Searches for a route by alias across all providers. Returns `nil` if not found. + +```go +func GetStatistics() Statistics +``` + +Aggregates statistics from all route providers, including total routes, reverse proxies, streams, and per-provider stats. 
+ +## Architecture + +### Core components + +``` +config/query/ +├── query.go # Provider and route queries +└── stats.go # Statistics aggregation +``` + +### Data flow + +```mermaid +graph TD + A[API Request] --> B[config/query Functions] + B --> C{Query Type} + C -->|Provider List| D[ActiveState.Load] + C -->|Route Search| E[Iterate Providers] + C -->|Statistics| F[Aggregate from All Providers] + D --> G[Return Provider Data] + E --> H[Return Found Route or nil] + F --> I[Return Statistics] +``` + +### Thread safety model + +All functions use `config.ActiveState.Load()` for thread-safe read access: + +```go +func DumpRouteProviders() map[string]types.RouteProvider { + state := config.ActiveState.Load() + entries := make(map[string]types.RouteProvider, state.NumProviders()) + for _, p := range state.IterProviders() { + entries[p.ShortName()] = p + } + return entries +} +``` + +## Configuration Surface + +No configuration. This package only reads from the active state. + +## Dependency and Integration Map + +### Internal dependencies + +- `internal/config/types` - `ActiveState` atomic pointer and `State` interface +- `internal/types` - Route provider and route types + +### Integration points + +```go +// API endpoint uses query functions +func ListProviders(w http.ResponseWriter, r *http.Request) { + providers := statequery.RouteProviderList() + json.NewEncoder(w).Encode(providers) +} +``` + +## Observability + +### Logs + +No logging in the query package itself. + +### Metrics + +No metrics are currently exposed. 
+ +## Security Considerations + +- Read-only access prevents state corruption +- No sensitive data is exposed beyond what the configuration already contains +- Caller should handle nil state gracefully + +## Failure Modes and Recovery + +| Failure | Behavior | Recovery | +| -------------------- | -------------------------- | ------------------------------ | +| No active state | Functions return empty/nil | Initialize config first | +| Provider returns nil | Skipped in iteration | Provider should not return nil | +| Route not found | Returns nil | Expected behavior | + +## Performance Characteristics + +- O(n) where n is number of providers for provider queries +- O(n * m) where m is routes per provider for route search +- O(n) for statistics aggregation +- No locking required (uses atomic load) + +## Usage Examples + +### Listing all providers + +```go +providers := statequery.RouteProviderList() +for _, p := range providers { + fmt.Printf("Short: %s, Full: %s\n", p.ShortName, p.FullName) +} +``` + +### Getting all providers as a map + +```go +providers := statequery.DumpRouteProviders() +for shortName, provider := range providers { + fmt.Printf("%s: %s\n", shortName, provider.String()) +} +``` + +### Searching for a route + +```go +route := statequery.SearchRoute("my-service") +if route != nil { + fmt.Printf("Found route: %s\n", route.Alias()) +} +``` + +### Getting system statistics + +```go +stats := statequery.GetStatistics() +fmt.Printf("Total routes: %d\n", stats.Total) +fmt.Printf("Reverse proxies: %d\n", stats.ReverseProxies.Total) +for name, providerStats := range stats.Providers { + fmt.Printf("Provider %s: %d routes\n", name, providerStats.RPs.Total) +} +``` + +### Integration with API + +```go +func handleGetProviders(w http.ResponseWriter, r *http.Request) { + providers := statequery.RouteProviderList() + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(providers) +} + +func handleGetStats(w http.ResponseWriter, r 
*http.Request) { + stats := statequery.GetStatistics() + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(stats) +} + +func handleFindRoute(w http.ResponseWriter, r *http.Request) { + alias := r.URL.Query().Get("alias") + route := statequery.SearchRoute(alias) + if route == nil { + http.NotFound(w, r) + return + } + json.NewEncoder(w).Encode(route) +} +``` diff --git a/internal/dnsproviders/README.md b/internal/dnsproviders/README.md new file mode 100644 index 00000000..0d79a718 --- /dev/null +++ b/internal/dnsproviders/README.md @@ -0,0 +1,257 @@ +# DNS Providers + +DNS provider integrations for Let's Encrypt certificate management via the lego library. + +## Overview + +The dnsproviders package registers and initializes DNS providers supported by the ACME protocol implementation (lego). It provides a unified interface for configuring DNS-01 challenge providers for SSL certificate issuance. + +### Primary consumers + +- `internal/autocert` - Uses registered providers for certificate issuance +- Operators - Configure DNS providers via YAML + +### Non-goals + +- DNS zone management +- Record creation/deletion outside ACME challenges +- Provider-specific features beyond DNS-01 + +### Stability + +Stable internal package. Provider registry is extensible. + +## Public API + +### Exported constants + +```go +const ( + Local = "local" // Dummy local provider for static certificates + Pseudo = "pseudo" // Pseudo provider for testing +) +``` + +### Exported functions + +```go +func InitProviders() +``` + +Registers all available DNS providers with the autocert package. Called during initialization. + +```go +func NewDummyDefaultConfig() *Config +``` + +Creates a dummy default config for testing providers. + +```go +func NewDummyDNSProviderConfig() map[string]any +``` + +Creates a dummy provider configuration for testing. 
+
+## Architecture
+
+### Core components
+
+```mermaid
+graph TD
+    A[AutoCert] --> B[DNS Provider Registry]
+    B --> C[Provider Factory]
+    C --> D[Lego DNS Provider]
+
+    subgraph Supported Providers
+        E[Cloudflare]
+        F[AWS Route53]
+        G[DigitalOcean]
+        H[Google Cloud DNS]
+        I[And 20+ more...]
+    end
+
+    B --> E
+    B --> F
+    B --> G
+    B --> H
+    B --> I
+```
+
+### Supported providers
+
+| Provider       | Key             | Description           |
+| -------------- | --------------- | --------------------- |
+| ACME DNS       | `acmedns`       | ACME DNS server       |
+| Azure DNS      | `azuredns`      | Microsoft Azure DNS   |
+| Cloudflare     | `cloudflare`    | Cloudflare DNS        |
+| CloudNS        | `cloudns`       | ClouDNS               |
+| CloudDNS       | `clouddns`      | vshosting CloudDNS    |
+| DigitalOcean   | `digitalocean`  | DigitalOcean DNS      |
+| DuckDNS        | `duckdns`       | DuckDNS               |
+| EdgeDNS        | `edgedns`       | Akamai EdgeDNS        |
+| GoDaddy        | `godaddy`       | GoDaddy DNS           |
+| Google Domains | `googledomains` | Google Domains DNS    |
+| Hetzner        | `hetzner`       | Hetzner DNS           |
+| Hostinger      | `hostinger`     | Hostinger DNS         |
+| HTTP Request   | `httpreq`       | Generic HTTP provider |
+| INWX           | `inwx`          | INWX DNS              |
+| IONOS          | `ionos`         | IONOS DNS             |
+| Linode         | `linode`        | Linode DNS            |
+| Namecheap      | `namecheap`     | Namecheap DNS         |
+| Netcup         | `netcup`        | netcup DNS            |
+| Netlify        | `netlify`       | Netlify DNS           |
+| OVH            | `ovh`           | OVHcloud DNS          |
+| Oracle Cloud   | `oraclecloud`   | Oracle Cloud DNS      |
+| Porkbun        | `porkbun`       | Porkbun DNS           |
+| RFC 2136       | `rfc2136`       | BIND/named (RFC 2136) |
+| Scaleway       | `scaleway`      | Scaleway DNS          |
+| SpaceShip      | `spaceship`     | SpaceShip DNS         |
+| Timeweb Cloud  | `timewebcloud`  | Timeweb Cloud DNS     |
+| Vercel         | `vercel`        | Vercel DNS            |
+| Vultr          | `vultr`         | Vultr DNS             |
+| Google Cloud   | `gcloud`        | Google Cloud DNS      |
+
+## Configuration Surface
+
+### Config sources
+
+Configuration is loaded from `config/config.yml` under the `autocert` key.
+ +### Schema + +```yaml +autocert: + provider: cloudflare + email: admin@example.com + domains: + - example.com + - "*.example.com" + options: # provider-specific options + auth_token: your-api-token +``` + +### Hot-reloading + +Supports hot-reloading via editing `config/config.yml`. + +## Dependency and Integration Map + +### Internal dependencies + +- `internal/autocert` - Provider registry and certificate issuance + +### External dependencies + +- `github.com/go-acme/lego/v4/providers/dns/*` - All lego DNS providers + +### Integration points + +```go +// In autocert package +var Providers = map[string]DNSProvider{ + "local": dnsproviders.NewDummyDefaultConfig, + "pseudo": dnsproviders.NewDummyDefaultConfig, + // ... registered providers +} + +type DNSProvider func(*any, ...any) (provider.Config, error) +``` + +## Observability + +### Logs + +- Provider initialization messages from lego +- DNS challenge validation attempts +- Certificate issuance progress + +### Metrics + +No metrics are currently exposed. 
+ +## Security Considerations + +- API credentials are passed to provider configuration +- Credentials are stored in configuration files (should be protected) +- DNS-01 challenge requires TXT record creation capability +- Provider should have minimal DNS permissions (only TXT records) + +## Failure Modes and Recovery + +| Failure | Behavior | Recovery | +| --------------------- | --------------------------- | -------------------------------------- | +| Invalid credentials | Provider returns error | Verify credentials | +| DNS propagation delay | Challenge fails temporarily | Retry with longer propagation time | +| Provider unavailable | Certificate issuance fails | Use alternative provider | +| Unsupported provider | Key not found in registry | Register provider or use supported one | + +## Performance Characteristics + +- Provider initialization is O(1) per provider +- DNS-01 challenge depends on DNS propagation time +- Certificate issuance may take several seconds + +## Usage Examples + +### Initialization + +```go +import "github.com/yusing/godoxy/internal/dnsproviders" + +func init() { + dnsproviders.InitProviders() +} +``` + +### Using with AutoCert + +```go +import "github.com/yusing/godoxy/internal/autocert" + +// Providers are automatically registered +providers := autocert.Providers + +provider, ok := providers["cloudflare"] +if !ok { + log.Fatal("Cloudflare provider not available") +} + +config := provider.DefaultConfig() +``` + +### Getting provider configuration + +```go +// Access registered providers +for name, factory := range autocert.Providers { + cfg := factory.DefaultConfig() + log.Printf("Provider %s: %+v", name, cfg) +} +``` + +### Certificate issuance flow + +```mermaid +sequenceDiagram + participant User + participant AutoCert + participant DNSProvider + participant DNS + participant LetsEncrypt + + User->>AutoCert: Request Certificate + AutoCert->>DNSProvider: Get DNS Config + DNSProvider-->>AutoCert: Provider Config + + 
AutoCert->>LetsEncrypt: DNS-01 Challenge + LetsEncrypt->>DNS: Verify TXT Record + DNS-->>LetsEncrypt: Verification Result + + alt Verification Successful + LetsEncrypt-->>AutoCert: Certificate + AutoCert-->>User: TLS Certificate + else Verification Failed + LetsEncrypt-->>AutoCert: Error + AutoCert-->>User: Error + end +``` diff --git a/internal/docker/README.md b/internal/docker/README.md new file mode 100644 index 00000000..3cb2e89f --- /dev/null +++ b/internal/docker/README.md @@ -0,0 +1,433 @@ +# Docker Integration + +Docker container discovery, connection management, and label-based route configuration. + +## Overview + +The docker package implements Docker container integration, providing shared client connections, container parsing from Docker API responses, label processing for route configuration, and container filtering capabilities. + +### Primary consumers + +- `internal/route/provider` - Creates Docker-based route providers +- `internal/idlewatcher` - Container idle detection +- Operators - Configure routes via Docker labels + +### Non-goals + +- Docker image building or management +- Container lifecycle operations (start/stop) +- Volume management +- Docker Swarm orchestration + +### Stability + +Stable internal package. Public API consists of client management and container parsing functions. 
+ +## Public API + +### Exported types + +```go +type SharedClient struct { + *client.Client + cfg types.DockerProviderConfig + refCount atomic.Int32 + closedOn atomic.Int64 + key string + addr string + dial func(ctx context.Context) (net.Conn, error) + unique bool +} +``` + +```go +type Container struct { + DockerCfg types.DockerProviderConfig + Image Image + ContainerName string + ContainerID string + Labels map[string]string + ActualLabels map[string]string + Mounts []Mount + Network string + PublicPortMapping map[int]PortSummary + PrivatePortMapping map[int]PortSummary + Aliases []string + IsExcluded bool + IsExplicit bool + IsHostNetworkMode bool + Running bool + State string + PublicHostname string + PrivateHostname string + Agent *agentpool.Agent + IdlewatcherConfig *IdlewatcherConfig +} +``` + +### Exported functions + +```go +func NewClient(cfg types.DockerProviderConfig, unique ...bool) (*SharedClient, error) +``` + +Creates or returns a Docker client. Reuses existing clients for the same URL. Thread-safe. + +```go +func Clients() map[string]*SharedClient +``` + +Returns all currently connected clients. Callers must close returned clients. + +```go +func FromDocker(c *container.Summary, dockerCfg types.DockerProviderConfig) *types.Container +``` + +Converts Docker API container summary to internal container type. Parses labels for route configuration. + +```go +func UpdatePorts(ctx context.Context, c *Container) error +``` + +Refreshes port mappings from container inspect. + +```go +func DockerComposeProject(c *Container) string +``` + +Returns the Docker Compose project name. + +```go +func DockerComposeService(c *Container) string +``` + +Returns the Docker Compose service name. + +```go +func Dependencies(c *Container) []string +``` + +Returns container dependencies from labels. + +```go +func IsBlacklisted(c *Container) bool +``` + +Checks if container should be excluded from routing. 
+ +## Architecture + +### Core components + +```mermaid +graph TD + A[Docker API] --> B[SharedClient Pool] + B --> C{Client Request} + C -->|New Client| D[Create Connection] + C -->|Existing| E[Increment RefCount] + + F[Container List] --> G[FromDocker Parser] + G --> H[Container Struct] + H --> I[Route Builder] + + J[Container Labels] --> K[Label Parser] + K --> L[Route Config] + + subgraph Client Pool + B --> M[clientMap] + N[Cleaner Goroutine] + end +``` + +### Client lifecycle + +```mermaid +stateDiagram-v2 + [*] --> New: NewClient() called + New --> Shared: Refcount = 1, stored in pool + Shared --> Shared: Same URL, increment refcount + Shared --> Idle: Close() called, refcount = 0 + Idle --> Closed: 10s timeout elapsed + Idle --> Shared: NewClient() for same URL + Closed --> [*]: Client closed + Unique --> [*]: Close() immediately +``` + +### Container parsing flow + +```mermaid +sequenceDiagram + participant Provider + participant SharedClient + participant DockerAPI + participant ContainerParser + participant RouteBuilder + + Provider->>SharedClient: NewClient(cfg) + SharedClient->>SharedClient: Check Pool + alt Existing Client + SharedClient->>SharedClient: Increment RefCount + else New Client + SharedClient->>DockerAPI: Connect + DockerAPI-->>SharedClient: Client + end + + Provider->>SharedClient: ListContainers() + SharedClient->>DockerAPI: GET /containers/json + DockerAPI-->>SharedClient: Container List + SharedClient-->>Provider: Container List + + loop For Each Container + Provider->>ContainerParser: FromDocker() + ContainerParser->>ContainerParser: Parse Labels + ContainerParser->>ContainerParser: Resolve Hostnames + ContainerParser-->>Provider: *Container + end + + Provider->>RouteBuilder: Create Routes + RouteBuilder-->>Provider: Routes +``` + +### Client pool management + +The docker package maintains a pool of shared clients: + +```go +var ( + clientMap = make(map[string]*SharedClient, 10) + clientMapMu sync.RWMutex +) + +func initClientCleaner() 
{
+	cleaner := task.RootTask("docker_clients_cleaner", true)
+	go func() {
+		ticker := time.NewTicker(cleanInterval)
+		for {
+			select {
+			case <-ticker.C:
+				closeTimedOutClients()
+			case <-cleaner.Context().Done():
+				// Cleanup all clients
+			}
+		}
+	}()
+}
+```
+
+## Configuration Surface
+
+### Docker provider configuration
+
+```yaml
+providers:
+  docker:
+    local: ${DOCKER_HOST}
+    remote1:
+      scheme: tcp
+      host: docker1.local
+      port: 2375
+    remote2:
+      scheme: tls
+      host: docker2.local
+      port: 2375
+      tls:
+        ca_file: /path/to/ca.pem
+        cert_file: /path/to/cert.pem
+        key_file: /path/to/key.pem
+```
+
+### Route configuration labels
+
+Route labels use the format `proxy.<alias>.<field>` where `<alias>` is the route alias (or `*` for wildcard). The base labels apply to all routes.
+
+| Label                  | Description                     | Example                         |
+| ---------------------- | ------------------------------- | ------------------------------- |
+| `proxy.aliases`        | Route aliases (comma-separated) | `proxy.aliases: www,app`        |
+| `proxy.exclude`        | Exclude from routing            | `proxy.exclude: true`           |
+| `proxy.network`        | Docker network                  | `proxy.network: frontend`       |
+| `proxy.<alias>.host`   | Override hostname               | `proxy.app.host: 192.168.1.100` |
+| `proxy.<alias>.port`   | Target port                     | `proxy.app.port: 8080`          |
+| `proxy.<alias>.scheme` | HTTP scheme                     | `proxy.app.scheme: https`       |
+| `proxy.<alias>.*`      | Any route-specific setting      | `proxy.app.no_tls_verify: true` |
+
+#### Wildcard alias
+
+Use `proxy.*.<field>` to apply settings to all routes:
+
+```yaml
+labels:
+  proxy.aliases: app1,app2
+  proxy.*.scheme: https
+  proxy.app1.port: 3000 # overrides wildcard
+```
+
+### Idle watcher labels
+
+| Label                   | Description                     | Example                            |
+| ----------------------- | ------------------------------- | ---------------------------------- |
+| `proxy.idle_timeout`    | Idle timeout duration           | `proxy.idle_timeout: 30m`          |
+| `proxy.wake_timeout`    | Max time to wait for wake       | `proxy.wake_timeout: 10s`          |
+| `proxy.stop_method`     | Stop method (pause, stop, kill) | `proxy.stop_method: stop`          |
+| 
`proxy.stop_signal` | Signal to send (e.g., SIGTERM) | `proxy.stop_signal: SIGTERM` | +| `proxy.stop_timeout` | Stop timeout in seconds | `proxy.stop_timeout: 30` | +| `proxy.depends_on` | Container dependencies | `proxy.depends_on: database` | +| `proxy.start_endpoint` | Optional path restriction | `proxy.start_endpoint: /api/ready` | +| `proxy.no_loading_page` | Skip loading page | `proxy.no_loading_page: true` | + +### Docker Compose labels + +Those are created by Docker Compose. + +| Label | Description | +| ------------------------------- | -------------------- | +| `com.docker.compose.project` | Compose project name | +| `com.docker.compose.service` | Service name | +| `com.docker.compose.depends_on` | Dependencies | + +## Dependency and Integration Map + +### Internal dependencies + +- `internal/agentpool` - Agent-based Docker host connections +- `internal/maxmind` - Container geolocation +- `internal/types` - Container and provider types +- `internal/task/task.go` - Lifetime management + +### External dependencies + +- `github.com/docker/cli/cli/connhelper` - Connection helpers +- `github.com/moby/moby/client` - Docker API client +- `github.com/docker/go-connections/nat` - Port parsing + +### Integration points + +```go +// Route provider uses docker for container discovery +client, err := docker.NewClient(cfg) +containers, err := client.ContainerList(ctx, container.ListOptions{}) + +for _, c := range containers { + container := docker.FromDocker(c, cfg) + // Create routes from container +} +``` + +## Observability + +### Logs + +- Client initialization and cleanup +- Connection errors +- Container parsing errors + +### Metrics + +No metrics are currently exposed. 
+ +## Security Considerations + +- Docker socket access requires proper permissions +- TLS certificates for remote connections +- Agent-based connections are authenticated via TLS +- Database containers are automatically blacklisted + +### Blacklist detection + +Containers are automatically blacklisted if they: + +- Mount database directories: + - `/var/lib/postgresql/data` + - `/var/lib/mysql` + - `/var/lib/mongodb` + - `/var/lib/mariadb` + - `/var/lib/memcached` + - `/var/lib/rabbitmq` +- Expose database ports: + - 5432 (PostgreSQL) + - 3306 (MySQL/MariaDB) + - 6379 (Redis) + - 11211 (Memcached) + - 27017 (MongoDB) + +## Failure Modes and Recovery + +| Failure | Behavior | Recovery | +| -------------------------- | ---------------------------- | ------------------------ | +| Docker socket inaccessible | NewClient returns error | Fix socket permissions | +| Remote connection failed | NewClient returns error | Check network/tls config | +| Container inspect failed | UpdatePorts returns error | Container may be stopped | +| Invalid labels | Container created with error | Fix label syntax | +| Agent not found | Panic during client creation | Add agent to pool | + +## Performance Characteristics + +- Client pooling reduces connection overhead +- Reference counting prevents premature cleanup +- Background cleaner removes idle clients after 10s +- O(n) container parsing where n is container count + +## Usage Examples + +### Creating a Docker client + +```go +dockerCfg := types.DockerProviderConfig{ + URL: "unix:///var/run/docker.sock", +} + +client, err := docker.NewClient(dockerCfg) +if err != nil { + log.Fatal(err) +} +defer client.Close() +``` + +### Using unique client + +```go +// Create a unique client that won't be shared +client, err := docker.NewClient(cfg, true) +if err != nil { + log.Fatal(err) +} +// Remember to close when done +client.Close() +``` + +### Getting all clients + +```go +clients := docker.Clients() +for host, client := range clients { + 
log.Printf("Connected to: %s", host) +} +// Use clients... +// Close all clients when done +for _, client := range clients { + client.Close() +} +``` + +### Parsing containers + +```go +containers, err := dockerClient.ContainerList(ctx, container.ListOptions{}) +for _, c := range containers { + container := docker.FromDocker(c, dockerCfg) + if container.Errors != nil { + log.Printf("Container %s has errors: %v", container.ContainerName, container.Errors) + continue + } + log.Printf("Container: %s, Aliases: %v", container.ContainerName, container.Aliases) +} +``` + +### Checking if container is blacklisted + +```go +container := docker.FromDocker(c, dockerCfg) +if docker.IsBlacklisted(container) { + log.Printf("Container %s is blacklisted, skipping", container.ContainerName) + continue +} +``` diff --git a/internal/entrypoint/README.md b/internal/entrypoint/README.md new file mode 100644 index 00000000..bef905a1 --- /dev/null +++ b/internal/entrypoint/README.md @@ -0,0 +1,308 @@ +# Entrypoint + +The entrypoint package provides the main HTTP entry point for GoDoxy, handling domain-based routing, middleware application, short link matching, and access logging. + +## Overview + +The entrypoint package implements the primary HTTP handler that receives all incoming requests, determines the target route based on hostname, applies middleware, and forwards requests to the appropriate route handler. 
+ +### Key Features + +- Domain-based route lookup with subdomain support +- Short link (`go/` domain) handling +- Middleware chain application +- Access logging for all requests +- Configurable not-found handling +- Per-domain route resolution + +## Architecture + +```mermaid +graph TD + A[HTTP Request] --> B[Entrypoint Handler] + B --> C{Access Logger?} + C -->|Yes| D[Wrap Response Recorder] + C -->|No| E[Skip Logging] + + D --> F[Find Route by Host] + E --> F + + F --> G{Route Found?} + G -->|Yes| H{Middleware?} + G -->|No| I{Short Link?} + I -->|Yes| J[Short Link Handler] + I -->|No| K{Not Found Handler?} + K -->|Yes| L[Not Found Handler] + K -->|No| M[Serve 404] + + H -->|Yes| N[Apply Middleware] + H -->|No| O[Direct Route] + N --> O + + O --> P[Route ServeHTTP] + P --> Q[Response] + + L --> R[404 Response] + J --> Q + M --> R +``` + +## Core Components + +### Entrypoint Structure + +```go +type Entrypoint struct { + middleware *middleware.Middleware + notFoundHandler http.Handler + accessLogger accesslog.AccessLogger + findRouteFunc func(host string) types.HTTPRoute + shortLinkTree *ShortLinkMatcher +} +``` + +### Active Config + +```go +var ActiveConfig atomic.Pointer[entrypoint.Config] +``` + +## Public API + +### Creation + +```go +// NewEntrypoint creates a new entrypoint instance. +func NewEntrypoint() Entrypoint +``` + +### Configuration + +```go +// SetFindRouteDomains configures domain-based route lookup. +func (ep *Entrypoint) SetFindRouteDomains(domains []string) + +// SetMiddlewares loads and configures middleware chain. +func (ep *Entrypoint) SetMiddlewares(mws []map[string]any) error + +// SetNotFoundRules configures the not-found handler. +func (ep *Entrypoint) SetNotFoundRules(rules rules.Rules) + +// SetAccessLogger initializes access logging. +func (ep *Entrypoint) SetAccessLogger(parent task.Parent, cfg *accesslog.RequestLoggerConfig) error + +// ShortLinkMatcher returns the short link matcher. 
+func (ep *Entrypoint) ShortLinkMatcher() *ShortLinkMatcher +``` + +### Request Handling + +```go +// ServeHTTP is the main HTTP handler. +func (ep *Entrypoint) ServeHTTP(w http.ResponseWriter, r *http.Request) + +// FindRoute looks up a route by hostname. +func (ep *Entrypoint) FindRoute(s string) types.HTTPRoute +``` + +## Usage + +### Basic Setup + +```go +ep := entrypoint.NewEntrypoint() + +// Configure domain matching +ep.SetFindRouteDomains([]string{".example.com", "example.com"}) + +// Configure middleware +err := ep.SetMiddlewares([]map[string]any{ + {"rate_limit": map[string]any{"requests_per_second": 100}}, +}) +if err != nil { + log.Fatal(err) +} + +// Configure access logging +err = ep.SetAccessLogger(parent, &accesslog.RequestLoggerConfig{ + Path: "/var/log/godoxy/access.log", +}) +if err != nil { + log.Fatal(err) +} + +// Start server +http.ListenAndServe(":80", &ep) +``` + +### Route Lookup Logic + +The entrypoint uses multiple strategies to find routes: + +1. **Subdomain Matching**: For `sub.domain.com`, looks for `sub` +1. **Exact Match**: Looks for the full hostname +1. 
**Port Stripping**: Strips port from host if present + +```go +func findRouteAnyDomain(host string) types.HTTPRoute { + // Try subdomain (everything before first dot) + idx := strings.IndexByte(host, '.') + if idx != -1 { + target := host[:idx] + if r, ok := routes.HTTP.Get(target); ok { + return r + } + } + + // Try exact match + if r, ok := routes.HTTP.Get(host); ok { + return r + } + + // Try stripping port + if before, _, ok := strings.Cut(host, ":"); ok { + if r, ok := routes.HTTP.Get(before); ok { + return r + } + } + + return nil +} +``` + +### Short Links + +Short links use a special `.short` domain: + +```go +// Request to: https://abc.short.example.com +// Looks for route with alias "abc" +if strings.EqualFold(host, common.ShortLinkPrefix) { + // Handle short link + ep.shortLinkTree.ServeHTTP(w, r) +} +``` + +## Data Flow + +```mermaid +sequenceDiagram + participant Client + participant Entrypoint + participant Middleware + participant Route + participant Logger + + Client->>Entrypoint: GET /path + Entrypoint->>Entrypoint: FindRoute(host) + alt Route Found + Entrypoint->>Logger: Get ResponseRecorder + Logger-->>Entrypoint: Recorder + Entrypoint->>Middleware: ServeHTTP(routeHandler) + alt Has Middleware + Middleware->>Middleware: Process Chain + end + Middleware->>Route: Forward Request + Route-->>Middleware: Response + Middleware-->>Entrypoint: Response + else Short Link + Entrypoint->>ShortLinkTree: Match short code + ShortLinkTree-->>Entrypoint: Redirect + else Not Found + Entrypoint->>NotFoundHandler: Serve 404 + NotFoundHandler-->>Entrypoint: 404 Page + end + + Entrypoint->>Logger: Log Request + Logger-->>Entrypoint: Complete + Entrypoint-->>Client: Response +``` + +## Not-Found Handling + +When no route is found, the entrypoint: + +1. Attempts to serve a static error page file +1. Logs the 404 request +1. Falls back to the configured error page +1. 
Returns 404 status code + +```go +func (ep *Entrypoint) serveNotFound(w http.ResponseWriter, r *http.Request) { + if served := middleware.ServeStaticErrorPageFile(w, r); !served { + log.Error(). + Str("method", r.Method). + Str("url", r.URL.String()). + Str("remote", r.RemoteAddr). + Msgf("not found: %s", r.Host) + + errorPage, ok := errorpage.GetErrorPageByStatus(http.StatusNotFound) + if ok { + w.WriteHeader(http.StatusNotFound) + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.Write(errorPage) + } else { + http.NotFound(w, r) + } + } +} +``` + +## Configuration Structure + +```go +type Config struct { + Middlewares []map[string]any `json:"middlewares"` + Rules rules.Rules `json:"rules"` + AccessLog *accesslog.RequestLoggerConfig `json:"access_log"` +} +``` + +## Middleware Integration + +The entrypoint supports middleware chains configured via YAML: + +```yaml +entrypoint: + middlewares: + - use: rate_limit + average: 100 + burst: 200 + bypass: + - remote 192.168.1.0/24 + - use: redirect_http +``` + +## Access Logging + +Access logging wraps the response recorder to capture: + +- Request method and URL +- Response status code +- Response size +- Request duration +- Client IP address + +```go +func (ep *Entrypoint) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if ep.accessLogger != nil { + rec := accesslog.GetResponseRecorder(w) + w = rec + defer func() { + ep.accessLogger.Log(r, rec.Response()) + accesslog.PutResponseRecorder(rec) + }() + } + // ... 
handle request +} +``` + +## Integration Points + +The entrypoint integrates with: + +- **Route Registry**: HTTP route lookup +- **Middleware**: Request processing chain +- **AccessLog**: Request logging +- **ErrorPage**: 404 error pages +- **ShortLink**: Short link handling diff --git a/internal/health/check/README.md b/internal/health/check/README.md index 21a7d778..bd27f498 100644 --- a/internal/health/check/README.md +++ b/internal/health/check/README.md @@ -1,14 +1,128 @@ -# Health Check +# Health Check Package -This package provides low-level health check implementations for different protocols and services in GoDoxy. +Low-level health check implementations for different protocols and services in GoDoxy. -## Health Check Types +## Overview -### Docker Health Check +### Purpose -Checks the health status of Docker containers using the Docker API. +This package provides health check implementations for various protocols: -**Flow:** +- **HTTP/HTTPS** - Standard HTTP health checks with fasthttp +- **H2C** - HTTP/2 cleartext health checks +- **Docker** - Container health status via Docker API +- **FileServer** - Directory accessibility checks +- **Stream** - Generic network connection checks + +### Primary Consumers + +- `internal/health/monitor/` - Route health monitoring +- `internal/metrics/uptime/` - Uptime poller integration + +### Non-goals + +- Complex health check logic (response body validation, etc.) +- Authentication/authorization in health checks +- Multi-step health checks (login then check) + +### Stability + +Internal package. Public functions are stable but may be extended with new parameters. 
+ +## Public API + +### HTTP Health Check (`http.go`) + +```go +func HTTP( + url *url.URL, + method string, + path string, + timeout time.Duration, +) (types.HealthCheckResult, error) +``` + +### H2C Health Check (`http.go`) + +```go +func H2C( + ctx context.Context, + url *url.URL, + method string, + path string, + timeout time.Duration, +) (types.HealthCheckResult, error) +``` + +### Docker Health Check (`docker.go`) + +```go +func Docker( + ctx context.Context, + containerID string, +) (types.HealthCheckResult, error) +``` + +### FileServer Health Check (`fileserver.go`) + +```go +func FileServer( + url *url.URL, +) (types.HealthCheckResult, error) +``` + +### Stream Health Check (`stream.go`) + +```go +func Stream( + url *url.URL, +) (types.HealthCheckResult, error) +``` + +### Common Types (`internal/types/`) + +```go +type HealthCheckResult struct { + Healthy bool + Latency time.Duration + Detail string +} + +type HealthStatus int + +const ( + StatusHealthy HealthStatus = 0 + StatusUnhealthy HealthStatus = 1 + StatusError HealthStatus = 2 +) +``` + +## Architecture + +### HTTP Health Check Flow + +```mermaid +flowchart TD + A[HTTP Health Check] --> B[Create FastHTTP Request] + B --> C[Set Headers and Method] + C --> D[Execute Request with Timeout] + D --> E{Request Successful?} + + E -->|no| F{Error Type} + F -->|TLS Error| G[Healthy: TLS Error Ignored] + F -->|Other Error| H[Unhealthy: Error Details] + + E -->|yes| I{Status Code} + I -->|5xx| J[Unhealthy: Server Error] + I -->|Other| K[Healthy] + + G --> L[Return Result with Latency] + H --> L + J --> L + K --> L +``` + +### Docker Health Check Flow ```mermaid flowchart TD @@ -36,53 +150,7 @@ flowchart TD P --> Q ``` -**Key Features:** - -- Intercepts Docker API responses to extract container state -- Tracks failure count with configurable threshold (3 failures) -- Supports containers with and without health check configurations -- Returns detailed error information from Docker health check logs - -### HTTP 
Health Check - -Performs HTTP/HTTPS health checks using fasthttp for optimal performance. - -**Flow:** - -```mermaid -flowchart TD - A[HTTP Health Check] --> B[Create FastHTTP Request] - B --> C[Set Headers and Method] - C --> D[Execute Request with Timeout] - D --> E{Request Successful?} - - E -->|no| F{Error Type} - F -->|TLS Error| G[Healthy: TLS Error Ignored] - F -->|Other Error| H[Unhealthy: Error Details] - - E -->|yes| I{Status Code} - I -->|5xx| J[Unhealthy: Server Error] - I -->|Other| K[Healthy] - - G --> L[Return Result with Latency] - H --> L - J --> L - K --> L -``` - -**Key Features:** - -- Uses fasthttp for high-performance HTTP requests -- Supports both GET and HEAD methods -- Configurable timeout and path -- Handles TLS certificate verification errors gracefully -- Returns latency measurements - -### H2C Health Check - -Performs HTTP/2 cleartext (h2c) health checks for services that support HTTP/2 without TLS. - -**Flow:** +### H2C Health Check Flow ```mermaid flowchart TD @@ -104,18 +172,7 @@ flowchart TD L --> M ``` -**Key Features:** - -- Uses HTTP/2 transport with cleartext support -- Supports both GET and HEAD methods -- Configurable timeout and path -- Returns latency measurements - -### FileServer Health Check - -Checks if a file server root directory exists and is accessible. - -**Flow:** +### FileServer Health Check Flow ```mermaid flowchart TD @@ -132,18 +189,7 @@ flowchart TD G --> I[Return Error] ``` -**Key Features:** - -- Simple directory existence check -- Measures latency of filesystem operation -- Distinguishes between "not found" and other errors -- Returns detailed error information - -### Stream Health Check - -Checks stream endpoint connectivity by attempting to establish a network connection. 
- -**Flow:** +### Stream Health Check Flow ```mermaid flowchart TD @@ -164,35 +210,144 @@ flowchart TD K --> L ``` -**Key Features:** +## Configuration Surface -- Generic network connection check -- Supports any stream protocol (TCP, UDP, etc.) -- Handles common connection errors gracefully -- Measures connection establishment latency -- Automatically closes connections +No explicit configuration per health check. Parameters are passed directly: -## Common Features +| Check Type | Parameters | +| ---------- | ----------------------------------- | +| HTTP | URL, Method, Path, Timeout | +| H2C | Context, URL, Method, Path, Timeout | +| Docker | Context, ContainerID | +| FileServer | URL (path component used) | +| Stream | URL (scheme, host, port used) | -### Error Handling +### HTTP Headers -All health checks implement consistent error handling: +All HTTP/H2C checks set: -- **Temporary Errors**: Network timeouts, connection failures -- **Permanent Errors**: Invalid configurations, missing resources -- **Graceful Degradation**: Returns health status even when errors occur +- `User-Agent: GoDoxy/` +- `Accept: text/plain,text/html,*/*;q=0.8` +- `Accept-Encoding: identity` +- `Cache-Control: no-cache` +- `Pragma: no-cache` -### Performance Monitoring +## Dependency and Integration Map -- **Latency Measurement**: All checks measure execution time -- **Timeout Support**: Configurable timeouts prevent hanging -- **Resource Cleanup**: Proper cleanup of connections and resources +### External Dependencies -### Integration +- `github.com/valyala/fasthttp` - High-performance HTTP client +- `golang.org/x/net/http2` - HTTP/2 transport +- Docker socket (for Docker health check) -These health checks are used by the monitor package to implement route-specific health monitoring: +### Internal Dependencies -- HTTP/HTTPS routes use HTTP health checks -- File server routes use FileServer health checks -- Stream routes use Stream health checks -- Docker containers use Docker health 
checks with fallbacks +- `internal/types/` - Health check result types +- `goutils/version/` - User-Agent version + +## Observability + +### Logs + +No direct logging in health check implementations. Errors are returned as part of `HealthCheckResult.Detail`. + +### Metrics + +- Check latency (returned in result) +- Success/failure rates (tracked by caller) + +## Security Considerations + +- TLS certificate verification skipped (`InsecureSkipVerify: true`) +- Docker socket access required for Docker health check +- No authentication in health check requests +- User-Agent identifies GoDoxy for server-side filtering + +## Failure Modes and Recovery + +### HTTP/H2C + +| Failure Mode | Result | Notes | +| --------------------- | --------- | ------------------------------- | +| Connection timeout | Unhealthy | Detail: timeout message | +| TLS certificate error | Healthy | Handled gracefully | +| 5xx response | Unhealthy | Detail: status text | +| 4xx response | Healthy | Client error considered healthy | + +### Docker + +| Failure Mode | Result | Notes | +| -------------------------- | --------- | ------------------------------ | +| API call failure | Error | Throws error to caller | +| Container not running | Unhealthy | State: "Not Started" | +| Container dead/exited | Unhealthy | State logged | +| No health check configured | Error | Requires health check in image | + +### FileServer + +| Failure Mode | Result | Notes | +| ----------------- | --------- | ------------------------ | +| Path not found | Unhealthy | Detail: "path not found" | +| Permission denied | Error | Returned to caller | +| Other OS error | Error | Returned to caller | + +### Stream + +| Failure Mode | Result | Notes | +| ---------------------- | --------- | --------------------- | +| Connection refused | Unhealthy | Detail: error message | +| Network unreachable | Unhealthy | Detail: error message | +| DNS resolution failure | Unhealthy | Detail: error message | +| Context deadline | Unhealthy | 
Detail: timeout | + +## Usage Examples + +### HTTP Health Check + +```go +url, _ := url.Parse("http://localhost:8080/health") +result, err := healthcheck.HTTP(url, "GET", "/health", 10*time.Second) +if err != nil { + fmt.Printf("Error: %v\n", err) +} +fmt.Printf("Healthy: %v, Latency: %v, Detail: %s\n", + result.Healthy, result.Latency, result.Detail) +``` + +### H2C Health Check + +```go +ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) +defer cancel() + +url, _ := url.Parse("h2c://localhost:8080") +result, err := healthcheck.H2C(ctx, url, "GET", "/health", 10*time.Second) +``` + +### Docker Health Check + +```go +ctx := context.Background() +result, err := healthcheck.Docker(ctx, "abc123def456") +``` + +### FileServer Health Check + +```go +url, _ := url.Parse("file:///var/www/html") +result, err := healthcheck.FileServer(url) +``` + +### Stream Health Check + +```go +url, _ := url.Parse("tcp://localhost:5432") +result, err := healthcheck.Stream(url) +``` + +## Testing Notes + +- Unit tests for each health check type +- Mock Docker server for Docker health check tests +- Integration tests require running services +- Timeout handling tests diff --git a/internal/health/monitor/README.md b/internal/health/monitor/README.md index ccd5dc26..51841d89 100644 --- a/internal/health/monitor/README.md +++ b/internal/health/monitor/README.md @@ -1,33 +1,317 @@ -# Health Monitor +# Health Monitor Package -This package provides health monitoring functionality for different types of routes in GoDoxy. +Route health monitoring with configurable check intervals, retry policies, and notification integration. 
-## Health Check Flow +## Overview + +### Purpose + +This package provides health monitoring for different route types in GoDoxy: + +- Monitors service health via configurable check functions +- Tracks consecutive failures with configurable thresholds +- Sends notifications on status changes +- Provides last-seen tracking for idle detection + +### Primary Consumers + +- `internal/route/` - Route health monitoring +- `internal/api/v1/metrics/` - Uptime poller integration +- WebUI - Health status display + +### Non-goals + +- Health check execution itself (delegated to `internal/health/check/`) +- Alert routing (handled by `internal/notif/`) +- Automatic remediation + +### Stability + +Internal package with stable public interfaces. `HealthMonitor` interface is stable. + +## Public API + +### Types + +```go +type HealthCheckFunc func(url *url.URL) (result types.HealthCheckResult, err error) +``` + +### HealthMonitor Interface + +```go +type HealthMonitor interface { + Start(parent task.Parent) gperr.Error + Task() *task.Task + Finish(reason any) + UpdateURL(url *url.URL) + URL() *url.URL + Config() *types.HealthCheckConfig + Status() types.HealthStatus + Uptime() time.Duration + Latency() time.Duration + Detail() string + Name() string + String() string + CheckHealth() (types.HealthCheckResult, error) +} +``` + +### Monitor Creation (`new.go`) + +```go +// Create monitor for agent-proxied routes +func NewAgentProxiedMonitor( + ctx context.Context, + cfg types.HealthCheckConfig, + url *url.URL, +) (HealthMonitor, error) + +// Create monitor for Docker containers +func NewDockerHealthMonitor( + ctx context.Context, + cfg types.HealthCheckConfig, + url *url.URL, + containerID string, +) (HealthMonitor, error) + +// Create monitor for HTTP routes +func NewHTTPMonitor( + ctx context.Context, + cfg types.HealthCheckConfig, + url *url.URL, +) HealthMonitor + +// Create monitor for H2C (HTTP/2 cleartext) routes +func NewH2CMonitor( + ctx context.Context, + cfg 
types.HealthCheckConfig, + url *url.URL, +) HealthMonitor + +// Create monitor for file server routes +func NewFileServerMonitor( + cfg types.HealthCheckConfig, + url *url.URL, +) HealthMonitor + +// Create monitor for stream routes +func NewStreamMonitor( + cfg types.HealthCheckConfig, + url *url.URL, +) HealthMonitor + +// Unified monitor factory (routes to appropriate type) +func NewMonitor( + ctx context.Context, + cfg types.HealthCheckConfig, + url *url.URL, +) (HealthMonitor, error) +``` + +## Architecture + +### Monitor Selection Flow ```mermaid flowchart TD - A[NewMonitor route] --> B{IsAgent route} + A[NewMonitor route] --> B{IsAgent route?} B -->|true| C[NewAgentProxiedMonitor] - B -->|false| D{IsDocker route} + B -->|false| D{IsDocker route?} D -->|true| E[NewDockerHealthMonitor] - D -->|false| F[Route Type Switch] - - F --> G[HTTP Monitor] - F --> H[FileServer Monitor] - F --> I[Stream Monitor] - - E --> J[Selected Monitor] - - C --> K[Agent Health Check] - G --> L{Scheme h2c?} - L -->|true| M[H2C Health Check] - L -->|false| N[HTTP Health Check] - H --> O[FileServer Health Check] - I --> P[Stream Health Check] - - K --> Q{IsDocker route} - Q -->|true| R[NewDockerHealthMonitor with Agent as Fallback] - Q -->|false| K - - R --> K + D -->|false| F{Has h2c scheme?} + F -->|true| G[NewH2CMonitor] + F -->|false| H{Has http/https scheme?} + H -->|true| I[NewHTTPMonitor] + H -->|false| J{Is file:// scheme?} + J -->|true| K[NewFileServerMonitor] + J -->|false| L[NewStreamMonitor] ``` + +### Monitor State Machine + +```mermaid +stateDiagram-v2 + [*] --> Starting: First check + Starting --> Healthy: Check passes + Starting --> Unhealthy: Check fails + Healthy --> Unhealthy: 5 consecutive failures + Healthy --> Error: Check error + Error --> Healthy: Check passes + Error --> Unhealthy: 5 consecutive failures + Unhealthy --> Healthy: Check passes + Unhealthy --> Error: Check error + [*] --> Stopped: Task cancelled +``` + +### Component Structure + +```mermaid 
+classDiagram
+    class monitor {
+        -service string
+        -config types.HealthCheckConfig
+        -url synk.Value~*url.URL~
+        -status synk.Value~HealthStatus~
+        -lastResult synk.Value~HealthCheckResult~
+        -checkHealth HealthCheckFunc
+        -startTime time.Time
+        -task *task.Task
+        +Start(parent task.Parent)
+        +CheckHealth() (HealthCheckResult, error)
+        +Status() HealthStatus
+        +Uptime() time.Duration
+        +Latency() time.Duration
+        +Detail() string
+    }
+
+    class HealthMonitor {
+        <<interface>>
+        +Start(parent task.Parent)
+        +Task() *task.Task
+        +Status() HealthStatus
+    }
+```
+
+## Configuration Surface
+
+### HealthCheckConfig
+
+```go
+type HealthCheckConfig struct {
+    Interval time.Duration // Check interval (default: 30s)
+    Timeout time.Duration // Check timeout (default: 10s)
+    Path string // Health check path
+    Method string // HTTP method (GET/HEAD)
+    Retries int // Consecutive failures before notification (-1 for immediate)
+    BaseContext func() context.Context
+}
+```
+
+### Defaults
+
+| Field | Default |
+| -------- | ------- |
+| Interval | 30s |
+| Timeout | 10s |
+| Method | GET |
+| Path | "/" |
+| Retries | 3 |
+
+### Applying Defaults
+
+```go
+cfg.ApplyDefaults(state.Value().Defaults.HealthCheck)
+```
+
+## Dependency and Integration Map
+
+### Internal Dependencies
+
+- `internal/task/task.go` - Lifetime management
+- `internal/notif/` - Status change notifications
+- `internal/health/check/` - Health check implementations
+- `internal/types/` - Health status types
+- `internal/config/types/` - Working state
+
+### External Dependencies
+
+- `github.com/puzpuzpuz/xsync/v4` - Atomic values
+
+## Observability
+
+### Logs
+
+| Level | When |
+| ------- | ------------------------------ |
+| `Info` | Service comes up |
+| `Warn` | Service goes down |
+| `Error` | Health check error |
+| `Error` | Monitor stopped after 5 trials |
+
+### Notifications
+
+- Service up notification (with latency)
+- Service down notification (with last seen time)
+- Immediate notification when
`Retries < 0` + +### Metrics + +- Consecutive failure count +- Last check latency +- Monitor uptime + +## Failure Modes and Recovery + +| Failure Mode | Impact | Recovery | +| --------------------------- | -------------------------------------- | ----------------------- | +| 5 consecutive check errors | Monitor enters Error state, task stops | Manual restart required | +| Health check function panic | Monitor crashes | Automatic cleanup | +| Context cancellation | Monitor stops gracefully | Stopped state | +| URL update to invalid | Check will fail | Manual URL fix | + +### Status Transitions + +| From | To | Condition | +| --------- | --------- | ------------------------------ | +| Starting | Healthy | Check passes | +| Starting | Unhealthy | Check fails | +| Healthy | Unhealthy | `Retries` consecutive failures | +| Healthy | Error | Check returns error | +| Unhealthy | Healthy | Check passes | +| Error | Healthy | Check passes | + +## Usage Examples + +### Creating an HTTP Monitor + +```go +cfg := types.HealthCheckConfig{ + Interval: 15 * time.Second, + Timeout: 5 * time.Second, + Path: "/health", + Retries: 3, +} +url, _ := url.Parse("http://localhost:8080") + +monitor := monitor.NewHTTPMonitor(context.Background(), cfg, url) +if err := monitor.Start(parent); err != nil { + return err +} + +// Check status +fmt.Printf("Status: %s\n", monitor.Status()) +fmt.Printf("Latency: %v\n", monitor.Latency()) +``` + +### Creating a Docker Monitor + +```go +monitor, err := monitor.NewDockerHealthMonitor( + context.Background(), + cfg, + url, + containerID, +) +if err != nil { + return err +} +monitor.Start(parent) +``` + +### Unified Factory + +```go +monitor, err := monitor.NewMonitor(ctx, cfg, url) +if err != nil { + return err +} +monitor.Start(parent) +``` + +## Testing Notes + +- `monitor_test.go` - Monitor lifecycle tests +- Mock health check functions for deterministic testing +- Status transition coverage tests +- Notification trigger tests diff --git 
a/internal/homepage/README.md b/internal/homepage/README.md new file mode 100644 index 00000000..86642102 --- /dev/null +++ b/internal/homepage/README.md @@ -0,0 +1,358 @@ +# Homepage + +The homepage package provides the GoDoxy WebUI dashboard with support for categories, favorites, widgets, and dynamic item configuration. + +## Overview + +The homepage package implements the WebUI dashboard, managing homepage items, categories, sorting methods, and widget integration for monitoring container status and providing interactive features. + +### Key Features + +- Dynamic homepage item management +- Category-based organization (All, Favorites, Hidden, Others) +- Multiple sort methods (clicks, alphabetical, custom) +- Widget support for live data display +- Icon URL handling with favicon integration +- Item override configuration +- Click tracking and statistics + +## Architecture + +```mermaid +graph TD + A[HomepageMap] --> B{Category Management} + B --> C[All] + B --> D[Favorites] + B --> E[Hidden] + B --> F[Others] + + G[Item] --> H[ItemConfig] + H --> I[Widget Config] + H --> J[Icon] + H --> K[Category] + + L[Widgets] --> M[HTTP Widget] + N[Sorting] --> O[Clicks] + N --> P[Alphabetical] + N --> Q[Custom] +``` + +## Core Types + +### Homepage Structure + +```go +type HomepageMap struct { + ordered.Map[string, *Category] +} + +type Homepage []*Category + +type Category struct { + Items []*Item + Name string +} + +type Item struct { + ItemConfig + SortOrder int + FavSortOrder int + AllSortOrder int + Clicks int + Widgets []Widget + Alias string + Provider string + OriginURL string + ContainerID string +} + +type ItemConfig struct { + Show bool + Name string + Icon *IconURL + Category string + Description string + URL string + Favorite bool + WidgetConfig *widgets.Config +} +``` + +### Sort Methods + +```go +const ( + SortMethodClicks = "clicks" + SortMethodAlphabetical = "alphabetical" + SortMethodCustom = "custom" +) +``` + +### Categories + +```go +const ( + 
CategoryAll = "All" + CategoryFavorites = "Favorites" + CategoryHidden = "Hidden" + CategoryOthers = "Others" +) +``` + +## Public API + +### Creation + +```go +// NewHomepageMap creates a new homepage map with default categories. +func NewHomepageMap(total int) *HomepageMap +``` + +### Item Management + +```go +// Add adds an item to appropriate categories. +func (c *HomepageMap) Add(item *Item) + +// GetOverride returns the override configuration for an item. +func (cfg Item) GetOverride() Item +``` + +### Sorting + +```go +// Sort sorts a category by the specified method. +func (c *Category) Sort(method SortMethod) +``` + +## Usage + +### Creating a Homepage Map + +```go +homepageMap := homepage.NewHomepageMap(100) // Reserve space for 100 items +``` + +### Adding Items + +```go +item := &homepage.Item{ + Alias: "my-app", + Provider: "docker", + OriginURL: "http://myapp.local", + ItemConfig: homepage.ItemConfig{ + Name: "My Application", + Show: true, + Favorite: true, + Category: "Docker", + Description: "My Docker application", + }, +} + +homepageMap.Add(item) +``` + +### Sorting Categories + +```go +allCategory := homepageMap.Get(homepage.CategoryAll) +if allCategory != nil { + allCategory.Sort(homepage.SortMethodClicks) +} +``` + +### Filtering by Category + +```go +favorites := homepageMap.Get(homepage.CategoryFavorites) +for _, item := range favorites.Items { + fmt.Printf("Favorite: %s\n", item.Name) +} +``` + +## Widgets + +The homepage supports widgets for each item: + +```go +type Widget struct { + Label string + Value string +} + +type Config struct { + // Widget configuration +} +``` + +### Widget Types + +Widgets can display various types of information: + +- **Status**: Container health status +- **Stats**: Usage statistics +- **Links**: Quick access links +- **Custom**: Provider-specific data + +## Icon Handling + +Icons are handled via `IconURL` type: + +```go +type IconURL struct { + // Icon URL with various sources +} + +// Automatic favicon 
fetching from item URL +``` + +## Categories + +### Default Categories + +| Category | Description | +| --------- | ------------------------ | +| All | Contains all items | +| Favorites | User-favorited items | +| Hidden | Items with `Show: false` | +| Others | Uncategorized items | + +### Custom Categories + +Custom categories are created dynamically: + +```go +// Adding to custom category +item := &homepage.Item{ + ItemConfig: homepage.ItemConfig{ + Name: "App", + Category: "Development", + }, +} +homepageMap.Add(item) +// "Development" category is auto-created +``` + +## Override Configuration + +Items can have override configurations for customization: + +```go +// GetOverride returns the effective configuration +func (cfg Item) GetOverride() Item { + return overrideConfigInstance.GetOverride(cfg) +} +``` + +## Sorting Methods + +### Clicks Sort + +Sorts by click count (most clicked first): + +```go +func (c *Category) sortByClicks() { + slices.SortStableFunc(c.Items, func(a, b *Item) int { + if a.Clicks > b.Clicks { + return -1 + } + if a.Clicks < b.Clicks { + return 1 + } + return strings.Compare(title(a.Name), title(b.Name)) + }) +} +``` + +### Alphabetical Sort + +Sorts alphabetically by name: + +```go +func (c *Category) sortByAlphabetical() { + slices.SortStableFunc(c.Items, func(a, b *Item) int { + return strings.Compare(title(a.Name), title(b.Name)) + }) +} +``` + +### Custom Sort + +Sorts by predefined sort order: + +```go +func (c *Category) sortByCustom() { + // Uses SortOrder, FavSortOrder, AllSortOrder fields +} +``` + +## Data Flow + +```mermaid +sequenceDiagram + participant RouteProvider + participant HomepageMap + participant Category + participant Widget + + RouteProvider->>HomepageMap: Add(Item) + HomepageMap->>HomepageMap: Add to All + HomepageMap->>HomepageMap: Add to Category + alt Item.Favorite + HomepageMap->>CategoryFavorites: Add item + else !Item.Show + HomepageMap->>CategoryHidden: Add item + end + + User->>HomepageMap: Get Category 
+ HomepageMap-->>User: Items + + User->>Category: Sort(method) + Category-->>User: Sorted Items + + User->>Item: Get Widgets + Item->>Widget: Fetch Data + Widget-->>Item: Widget Data + Item-->>User: Display Widgets +``` + +## Integration Points + +The homepage package integrates with: + +- **Route Provider**: Item discovery from routes +- **Container**: Container status and metadata +- **Widgets**: Live data display +- **API**: Frontend data API +- **Configuration**: Default and override configs + +## Configuration + +### Active Configuration + +```go +var ActiveConfig atomic.Pointer[Config] +``` + +### Configuration Structure + +```go +type Config struct { + UseDefaultCategories bool + // ... other options +} +``` + +## Serialization + +The package registers default value factories for serialization: + +```go +func init() { + serialization.RegisterDefaultValueFactory(func() *ItemConfig { + return &ItemConfig{ + Show: true, + } + }) +} +``` diff --git a/internal/homepage/integrations/qbittorrent/README.md b/internal/homepage/integrations/qbittorrent/README.md new file mode 100644 index 00000000..5d06ee7a --- /dev/null +++ b/internal/homepage/integrations/qbittorrent/README.md @@ -0,0 +1,227 @@ +# qBittorrent Integration Package + +This package provides a qBittorrent widget for the GoDoxy homepage dashboard, enabling real-time monitoring of torrent status and transfer statistics. + +> [!WARNING] +> +> This package is a work in progress and is not stable. + +## Overview + +The `internal/homepage/integrations/qbittorrent` package implements the `widgets.Widget` interface for qBittorrent. It provides functionality to connect to a qBittorrent instance and fetch transfer information. 
+ +## Architecture + +### Core Components + +``` +integrations/qbittorrent/ +├── client.go # Client and API methods +├── transfer_info.go # Transfer info widget data +├── version.go # Version checking +└── logs.go # Log fetching +``` + +### Main Types + +```go +type Client struct { + URL string + Username string + Password string +} +``` + +## API Reference + +### Client Methods + +#### Initialize + +Connects to the qBittorrent API and verifies authentication. + +```go +func (c *Client) Initialize(ctx context.Context, url string, cfg map[string]any) error +``` + +**Parameters:** + +- `ctx` - Context for the HTTP request +- `url` - Base URL of the qBittorrent instance +- `cfg` - Configuration map containing `username` and `password` + +**Returns:** + +- `error` - Connection or authentication error + +**Example:** + +```go +client := &qbittorrent.Client{} +err := client.Initialize(ctx, "http://localhost:8080", map[string]any{ + "username": "admin", + "password": "your-password", +}) +if err != nil { + log.Fatalf("Failed to connect: %v", err) +} +``` + +#### Data + +Returns current transfer statistics as name-value pairs. + +```go +func (c *Client) Data(ctx context.Context) ([]widgets.NameValue, error) +``` + +**Returns:** + +- `[]widgets.NameValue` - Transfer statistics +- `error` - API request error + +**Example:** + +```go +data, err := client.Data(ctx) +if err != nil { + log.Fatal(err) +} +for _, nv := range data { + fmt.Printf("%s: %s\n", nv.Name, nv.Value) +} +// Output: +// Status: connected +// Download: 1.5 GB +// Upload: 256 MB +// Download Speed: 5.2 MB/s +// Upload Speed: 1.1 MB/s +``` + +### Internal Methods + +#### doRequest + +Performs an HTTP request to the qBittorrent API. + +```go +func (c *Client) doRequest(ctx context.Context, method, endpoint string, query url.Values, body io.Reader) (*http.Response, error) +``` + +#### jsonRequest + +Performs a JSON API request and unmarshals the response. 
+ +```go +func jsonRequest[T any](ctx context.Context, client *Client, endpoint string, query url.Values) (result T, err error) +``` + +## Data Types + +### TransferInfo + +Represents transfer statistics from qBittorrent. + +```go +type TransferInfo struct { + ConnectionStatus string `json:"connection_status"` + SessionDownloads uint64 `json:"dl_info_data"` + SessionUploads uint64 `json:"up_info_data"` + DownloadSpeed uint64 `json:"dl_info_speed"` + UploadSpeed uint64 `json:"up_info_speed"` +} +``` + +## API Endpoints + +| Endpoint | Method | Description | +| ----------------------- | ------ | ----------------------- | +| `/api/v2/transfer/info` | GET | Get transfer statistics | +| `/api/v2/app/version` | GET | Get qBittorrent version | + +## Usage Example + +### Complete Widget Usage + +```go +package main + +import ( + "context" + "fmt" + "github.com/yusing/godoxy/internal/homepage/integrations/qbittorrent" + "github.com/yusing/godoxy/internal/homepage/widgets" +) + +func main() { + ctx := context.Background() + + // Create and initialize client + client := &qbittorrent.Client{} + err := client.Initialize(ctx, "http://localhost:8080", map[string]any{ + "username": "admin", + "password": "password123", + }) + if err != nil { + fmt.Printf("Connection failed: %v\n", err) + return + } + + // Get transfer data + data, err := client.Data(ctx) + if err != nil { + fmt.Printf("Failed to get data: %v\n", err) + return + } + + // Display in dashboard format + fmt.Println("qBittorrent Status:") + fmt.Println(strings.Repeat("-", 30)) + for _, nv := range data { + fmt.Printf(" %-15s %s\n", nv.Name+":", nv.Value) + } +} +``` + +## Integration with Homepage Widgets + +```mermaid +graph TD + A[Homepage Dashboard] --> B[Widget Config] + B --> C{qBittorrent Provider} + C --> D[Create Client] + D --> E[Initialize with credentials] + E --> F[Fetch Transfer Info] + F --> G[Format as NameValue pairs] + G --> H[Render in UI] +``` + +### Widget Configuration + +```yaml +widgets: + - 
provider: qbittorrent + config: + url: http://localhost:8080 + username: admin + password: password123 +``` + +## Error Handling + +```go +// Handle HTTP errors +resp, err := client.doRequest(ctx, http.MethodGet, endpoint, query, body) +if err != nil { + return nil, err +} +if resp.StatusCode != http.StatusOK { + return nil, widgets.ErrHTTPStatus.Subject(resp.Status) +} +``` + +## Related Packages + +- `internal/homepage/widgets` - Widget framework and interface +- `github.com/bytedance/sonic` - JSON serialization +- `github.com/yusing/goutils/strings` - String utilities for formatting diff --git a/internal/homepage/widgets/README.md b/internal/homepage/widgets/README.md new file mode 100644 index 00000000..c5aefa39 --- /dev/null +++ b/internal/homepage/widgets/README.md @@ -0,0 +1,188 @@ +# Homepage Widgets Package + +> [!WARNING] +> +> This package is a work in progress and is not stable. + +This package provides a widget framework for the GoDoxy homepage dashboard, enabling integration with various service providers to display real-time data. + +## Overview + +The `internal/homepage/widgets` package defines the widget interface and common utilities for building homepage widgets. It provides a standardized way to integrate external services into the homepage dashboard. 
+ +## Architecture + +### Core Components + +``` +widgets/ +├── widgets.go # Widget interface and config +└── http.go # HTTP client and error definitions +``` + +### Data Types + +```go +type Config struct { + Provider string `json:"provider"` + Config Widget `json:"config"` +} + +type Widget interface { + Initialize(ctx context.Context, url string, cfg map[string]any) error + Data(ctx context.Context) ([]NameValue, error) +} + +type NameValue struct { + Name string `json:"name"` + Value string `json:"value"` +} +``` + +### Constants + +```go +const ( + WidgetProviderQbittorrent = "qbittorrent" +) +``` + +### Errors + +```go +var ErrInvalidProvider = gperr.New("invalid provider") +var ErrHTTPStatus = gperr.New("http status") +``` + +## API Reference + +### Widget Interface + +```go +type Widget interface { + // Initialize sets up the widget with connection configuration + Initialize(ctx context.Context, url string, cfg map[string]any) error + + // Data returns current widget data as name-value pairs + Data(ctx context.Context) ([]NameValue, error) +} +``` + +### Configuration + +#### Config.UnmarshalMap + +Parses widget configuration from a map. 
+ +```go +func (cfg *Config) UnmarshalMap(m map[string]any) error +``` + +**Parameters:** + +- `m` - Map containing `provider` and `config` keys + +**Returns:** + +- `error` - Parsing or validation error + +**Example:** + +```go +widgetCfg := widgets.Config{} +err := widgetCfg.UnmarshalMap(map[string]any{ + "provider": "qbittorrent", + "config": map[string]any{ + "username": "admin", + "password": "password123", + }, +}) +``` + +### HTTP Client + +```go +var HTTPClient = &http.Client{ + Timeout: 10 * time.Second, +} +``` + +### Available Providers + +- **qbittorrent** - qBittorrent torrent client integration (WIP) + +## Usage Example + +### Creating a Custom Widget + +```go +package mywidget + +import ( + "context" + "github.com/yusing/godoxy/internal/homepage/widgets" +) + +type MyWidget struct { + URL string + APIKey string +} + +func (m *MyWidget) Initialize(ctx context.Context, url string, cfg map[string]any) error { + m.URL = url + m.APIKey = cfg["api_key"].(string) + return nil +} + +func (m *MyWidget) Data(ctx context.Context) ([]widgets.NameValue, error) { + // Fetch data and return as name-value pairs + return []widgets.NameValue{ + {Name: "Status", Value: "Online"}, + {Name: "Uptime", Value: "24h"}, + }, nil +} +``` + +### Registering the Widget + +```go +// In widgets initialization +widgetProviders["mywidget"] = struct{}{} +``` + +### Using the Widget in Homepage + +```go +// Fetch widget data +widget := getWidget("qbittorrent") +data, err := widget.Data(ctx) +if err != nil { + log.Fatal(err) +} + +// Display data +for _, nv := range data { + fmt.Printf("%s: %s\n", nv.Name, nv.Value) +} +``` + +## Integration with Homepage + +```mermaid +graph TD + A[Homepage Dashboard] --> B[Widget Config] + B --> C[Widget Factory] + C --> D{Provider Type} + D -->|qbittorrent| E[qBittorrent Widget] + D -->|custom| F[Custom Widget] + E --> G[Initialize] + F --> G + G --> H[Data Fetch] + H --> I[Render UI] +``` + +## Related Packages + +- 
`internal/homepage/integrations/qbittorrent` - qBittorrent widget implementation +- `internal/serialization` - Configuration unmarshaling utilities +- `github.com/yusing/goutils/errs` - Error handling diff --git a/internal/idlewatcher/README.md b/internal/idlewatcher/README.md index 785d3295..56af1d86 100644 --- a/internal/idlewatcher/README.md +++ b/internal/idlewatcher/README.md @@ -1,378 +1,293 @@ # Idlewatcher -Idlewatcher manages container lifecycle based on idle timeout. When a container is idle for a configured duration, it can be automatically stopped, paused, or killed. When a request comes in, the container is woken up automatically. +Manages container lifecycle based on idle timeout, automatically stopping/pausing containers and waking them on request. -Idlewatcher also serves a small loading page (HTML + JS + CSS) and an SSE endpoint under [`internal/idlewatcher/types/paths.go`](internal/idlewatcher/types/paths.go:1) (prefixed with `/$godoxy/`) to provide wake events to browsers. +## Overview -## Architecture Overview +The `internal/idlewatcher` package implements idle-based container lifecycle management for GoDoxy. When a container is idle for a configured duration, it can be automatically stopped, paused, or killed. When a request arrives, the container is woken up automatically. 
-```mermaid -graph TB - subgraph Request Flow - HTTP[HTTP Request] -->|Intercept| W[Watcher] - Stream[Stream Request] -->|Intercept| W - end +### Primary Consumers - subgraph Wake Process - W -->|Wake| Wake[Wake Container] - Wake -->|Check Status| State[Container State] - Wake -->|Wait Ready| Health[Health Check] - Wake -->|Events| SSE[SSE Events] - end +- **Route layer**: Routes with idlewatcher config integrate with this package to manage container lifecycle +- **HTTP handlers**: Serve loading pages and SSE events during wake-up +- **Stream handlers**: Handle stream connections with idle detection - subgraph Idle Management - Timer[Idle Timer] -->|Timeout| Stop[Stop Container] - State -->|Running| Timer - State -->|Stopped| Timer - end +### Non-goals - subgraph Providers - Docker[DockerProvider] --> DockerAPI[Docker API] - Proxmox[ProxmoxProvider] --> ProxmoxAPI[Proxmox API] - end +- Does not implement container runtime operations directly (delegates to providers) +- Does not manage container dependencies beyond wake ordering +- Does not provide health checking (delegates to `internal/health/monitor`) - W -->|Uses| Providers +### Stability + +Internal package with stable public API. Changes to exported types require backward compatibility. 
+ +## Public API + +### Exported Types + +```go +// Watcher manages lifecycle of a single container +type Watcher struct { + // Embedded route helper for proxy/stream/health + routeHelper + + cfg *types.IdlewatcherConfig + + // Thread-safe state containers + provider synk.Value[idlewatcher.Provider] + state synk.Value[*containerState] + lastReset synk.Value[time.Time] + + // Timers and channels + idleTicker *time.Ticker + healthTicker *time.Ticker + readyNotifyCh chan struct{} + + // SSE event broadcasting (HTTP routes only) + eventChs *xsync.Map[chan *WakeEvent, struct{}] + eventHistory []WakeEvent +} ``` -## Directory Structure - -``` -idlewatcher/ -├── debug.go # Debug utilities for watcher inspection -├── errors.go # Error types and conversion -├── events.go # Wake event types and broadcasting -├── handle_http.go # HTTP request handling and loading page -├── handle_http_debug.go # Debug HTTP handler (!production builds) -├── handle_stream.go # Stream connection handling -├── health.go # Health monitor implementation + readiness tracking -├── loading_page.go # Loading page HTML/CSS/JS templates -├── state.go # Container state management -├── watcher.go # Core Watcher implementation -├── provider/ # Container provider implementations -│ ├── docker.go # Docker container management -│ └── proxmox.go # Proxmox LXC management -├── types/ -│ ├── container_status.go # ContainerStatus enum -│ ├── paths.go # Loading page + SSE paths -│ ├── provider.go # Provider interface definition -│ └── waker.go # Waker interface (http + stream + health) -└── html/ - ├── loading_page.html # Loading page template - ├── style.css # Loading page styles - └── loading.js # Loading page JavaScript +```go +// WakeEvent is broadcast via SSE during wake-up +type WakeEvent struct { + Type WakeEventType + Message string + Timestamp time.Time + Error string +} ``` -## Core Components +### Exported Functions/Methods -### Watcher +```go +// NewWatcher creates or reuses a watcher for the given route 
and config +func NewWatcher(parent task.Parent, r types.Route, cfg *types.IdlewatcherConfig) (*Watcher, error) -The main component that manages a single container's lifecycle: +// Wake wakes the container, blocking until ready +func (w *Watcher) Wake(ctx context.Context) error + +// Start begins the idle watcher loop +func (w *Watcher) Start(parent task.Parent) gperr.Error + +// ServeHTTP serves the loading page and SSE events +func (w *Watcher) ServeHTTP(rw http.ResponseWriter, r *http.Request) + +// ListenAndServe handles stream connections with idle detection +func (w *Watcher) ListenAndServe(ctx context.Context, preDial, onRead nettypes.HookFunc) + +// Key returns the unique key for this watcher +func (w *Watcher) Key() string +``` + +### Package-level Variables + +```go +var ( + // watcherMap is a global registry keyed by config.Key() + watcherMap map[string]*Watcher + watcherMapMu sync.RWMutex + + // singleFlight prevents duplicate wake calls for the same container + singleFlight singleflight.Group +) +``` + +## Architecture + +### Core Components ```mermaid classDiagram class Watcher { - +string Key() string - +Wake(ctx context.Context) error - +Start(parent task.Parent) gperr.Error - +ServeHTTP(rw ResponseWriter, r *Request) - +ListenAndServe(ctx context.Context, predial, onRead HookFunc) - -idleTicker: *time.Ticker - -healthTicker: *time.Ticker - -state: synk.Value~*containerState~ - -provider: synk.Value~Provider~ - -readyNotifyCh: chan struct{} - -eventChs: *xsync.Map~chan *WakeEvent, struct{}~ - -eventHistory: []WakeEvent - -dependsOn: []*dependency + +Wake(ctx) error + +Start(parent) gperr.Error + +ServeHTTP(ResponseWriter, *Request) + +ListenAndServe(ctx, preDial, onRead) + +Key() string } class containerState { - +status: ContainerStatus - +ready: bool - +err: error - +startedAt: time.Time - +healthTries: int + status ContainerStatus + ready bool + err error + startedAt time.Time + healthTries int } - class dependency { - +*Watcher - +waitHealthy: 
bool + class idlewatcher.Provider { + <> + +ContainerPause(ctx) error + +ContainerStart(ctx) error + +ContainerStop(ctx, signal, timeout) error + +ContainerStatus(ctx) (ContainerStatus, error) + +Watch(ctx) (eventCh, errCh) } Watcher --> containerState : manages - Watcher --> dependency : depends on + Watcher --> idlewatcher.Provider : uses ``` -Package-level helpers: - -- `watcherMap` is a global registry of watchers keyed by [`types.IdlewatcherConfig.Key()`](internal/types/idlewatcher.go:60), guarded by `watcherMapMu`. -- `singleFlight` is a global `singleflight.Group` keyed by container name to prevent duplicate wake calls. - -### Provider Interface - -Abstraction for different container backends: +### Component Interactions ```mermaid -classDiagram - class Provider { - <> - +ContainerPause(ctx) error - +ContainerUnpause(ctx) error - +ContainerStart(ctx) error - +ContainerStop(ctx, signal, timeout) error - +ContainerKill(ctx, signal) error - +ContainerStatus(ctx) (ContainerStatus, error) - +Watch(ctx) (eventCh, errCh) - +Close() - } - - class DockerProvider { - +client: *docker.SharedClient - +watcher: watcher.DockerWatcher - +containerID: string - } - - class ProxmoxProvider { - +*proxmox.Node - +vmid: int - +lxcName: string - +running: bool - } - - Provider <|-- DockerProvider - Provider <|-- ProxmoxProvider +flowchart TD + A[HTTP Request] --> B{Container Ready?} + B -->|Yes| C[Proxy Request] + B -->|No| D[Wake Container] + D --> E[SingleFlight Check] + E --> F[Wake Dependencies] + F --> G[Start Container] + G --> H[Health Check] + H -->|Pass| I[Notify Ready] + I --> J[SSE Event] + J --> K[Loading Page] + K --> L[Retry Request] ``` -### Container Status +### State Machine ```mermaid stateDiagram-v2 - [*] --> Napping: status=stopped|paused + [*] --> Napping: Container stopped/paused - Napping --> Starting: provider start/unpause event - Starting --> Ready: health check passes - Starting --> Error: health check error / startup timeout - - Ready --> Napping: idle 
timeout (pause/stop/kill) - Ready --> Error: health check error - - Error --> Napping: provider stop/pause event - Error --> Starting: provider start/unpause event -``` - -Implementation notes: - -- `Starting` is represented by `containerState{status: running, ready: false, startedAt: non-zero}`. -- `Ready` is represented by `containerState{status: running, ready: true}`. -- `Error` is represented by `containerState{status: error, err: non-nil}`. -- State is updated primarily from provider events in [`(*Watcher).watchUntilDestroy()`](internal/idlewatcher/watcher.go:553) and health checks in [`(*Watcher).checkUpdateState()`](internal/idlewatcher/health.go:104). - -## Lifecycle Flow - -### Wake Flow (HTTP) - -```mermaid -sequenceDiagram - participant C as Client - participant W as Watcher - participant P as Provider - participant SSE as SSE (/\$godoxy/wake-events) - - C->>W: HTTP Request - W->>W: resetIdleTimer() - Note over W: Handles /favicon.ico and /\$godoxy/* assets first - - alt Container already ready - W->>C: Reverse-proxy upstream (same request) - else - W->>W: Wake() (singleflight + deps) - - alt Non-HTML request OR NoLoadingPage=true - W->>C: 100 Continue - W->>W: waitForReady() (readyNotifyCh) - W->>C: Reverse-proxy upstream (same request) - else HTML + loading page - W->>C: Serve loading page (HTML) - C->>SSE: Connect (EventSource) - Note over SSE: Streams history + live wake events - C->>W: Retry original request when WakeEventReady - end - end -``` - -### Stream Wake Flow - -```mermaid -sequenceDiagram - participant C as Client - participant W as Watcher - - C->>W: Connect to stream - W->>W: preDial hook - W->>W: wakeFromStream() - alt Container ready - W->>W: Pass through - else - W->>W: Wake() (singleflight + deps) - W->>W: waitStarted() (wait for route to be started) - W->>W: waitForReady() (readyNotifyCh) - W->>C: Stream connected - end -``` - -### Idle Timeout Flow - -```mermaid -sequenceDiagram - participant Client as Client - participant T as 
Idle Timer - participant W as Watcher - participant P as Provider - participant D as Dependencies - - loop Every request - Client->>W: HTTP/Stream - W->>W: resetIdleTimer() - end - - T->>W: Timeout - W->>W: stopByMethod() - alt stop method = pause - W->>P: ContainerPause() - else stop method = stop - W->>P: ContainerStop(signal, timeout) - else kill method = kill - W->>P: ContainerKill(signal) - end - P-->>W: Result - W->>D: Stop dependencies - D-->>W: Done -``` - -## Dependency Management - -Watchers can depend on other containers being started first: - -```mermaid -graph LR - A[App] -->|depends on| B[Database] - A -->|depends on| C[Redis] - B -->|depends on| D[Cache] -``` - -```mermaid -sequenceDiagram - participant A as App Watcher - participant B as DB Watcher - participant P as Provider - - A->>B: Wake() - Note over B: SingleFlight prevents
duplicate wake - B->>P: ContainerStart() - P-->>B: Started - B->>B: Wait healthy - B-->>A: Ready - A->>P: ContainerStart() - P-->>A: Started -``` - -## Event System - -Wake events are broadcast via Server-Sent Events (SSE): - -```mermaid -classDiagram - class WakeEvent { - +Type: WakeEventType - +Message: string - +Timestamp: time.Time - +Error: string - +WriteSSE(w io.Writer) error - } - - class WakeEventType { - <> - WakeEventStarting - WakeEventWakingDep - WakeEventDepReady - WakeEventContainerWoke - WakeEventWaitingReady - WakeEventReady - WakeEventError - } - - WakeEvent --> WakeEventType -``` - -Notes: - -- The SSE endpoint is [`idlewatcher.WakeEventsPath`](internal/idlewatcher/types/paths.go:3). -- Each SSE subscriber gets a dedicated buffered channel; the watcher also keeps an in-memory `eventHistory` that is sent to new subscribers first. -- `eventHistory` is cleared when the container transitions to napping (stop/pause). - -## State Machine - -```mermaid -stateDiagram-v2 - Napping --> Starting: provider start/unpause event + Napping --> Starting: Wake() called Starting --> Ready: Health check passes - Starting --> Error: Health check fails / startup timeout - Error --> Napping: provider stop/pause event - Error --> Starting: provider start/unpause event + Starting --> Error: Health check fails / timeout + Ready --> Napping: Idle timeout Ready --> Napping: Manual stop - note right of Napping - Container is stopped or paused - Idle timer stopped - end note - - note right of Starting - Container is running but not ready - Health checking active - Events broadcasted - end note - - note right of Ready - Container healthy - Idle timer running - end note + Error --> Starting: Retry wake + Error --> Napping: Container stopped externally ``` -## Key Files +## Configuration Surface -| File | Purpose | -| --------------------- | ----------------------------------------------------- | -| `watcher.go` | Core Watcher implementation with lifecycle management | -| 
`handle_http.go` | HTTP interception and loading page serving | -| `handle_stream.go` | Stream connection wake handling | -| `provider/docker.go` | Docker container operations | -| `provider/proxmox.go` | Proxmox LXC container operations | -| `state.go` | Container state transitions | -| `events.go` | Event broadcasting via SSE | -| `health.go` | Health monitor implementation + readiness tracking | +Configuration is defined in `types.IdlewatcherConfig`: -## Configuration +```go +type IdlewatcherConfig struct { + IdlewatcherConfigBase + Docker *types.DockerProviderConfig // Exactly one required + Proxmox *types.ProxmoxProviderConfig // Exactly one required +} -See [`types.IdlewatcherConfig`](internal/types/idlewatcher.go:27) for configuration options: +type IdlewatcherConfigBase struct { + IdleTimeout time.Duration // Duration before container is stopped + StopMethod types.ContainerMethod // pause, stop, or kill + StopSignal types.ContainerSignal // Signal to send + StopTimeout int // Timeout in seconds + WakeTimeout time.Duration // Max time to wait for wake + DependsOn []string // Container dependencies + StartEndpoint string // Optional path restriction + NoLoadingPage bool // Skip loading page +} +``` -- `IdleTimeout`: Duration before container is put to sleep -- `StopMethod`: pause, stop, or kill -- `StopSignal`: Signal to send when stopping -- `StopTimeout`: Timeout for stop operation -- `WakeTimeout`: Timeout for wake operation -- `DependsOn`: List of dependent containers -- `StartEndpoint`: Optional HTTP path restriction for wake requests -- `NoLoadingPage`: Skip loading page, wait directly +### Docker Labels -Provider config (exactly one must be set): +```yaml +labels: + proxy.idle_timeout: 5m + proxy.idle_stop_method: stop + proxy.idle_depends_on: database:redis +``` -- `Docker`: container id/name + docker connection info -- `Proxmox`: `node` + `vmid` +### Path Constants -## Thread Safety +```go +const ( + LoadingPagePath = "/$godoxy/loading" + 
WakeEventsPath = "/$godoxy/wake-events" +) +``` -- Uses `synk.Value` for atomic state updates -- Uses `xsync.Map` for SSE subscriber management -- Uses `sync.RWMutex` for watcher map (`watcherMapMu`) and SSE event history (`eventHistoryMu`) -- Uses `singleflight.Group` to prevent duplicate wake calls +## Dependency and Integration Map + +| Dependency | Purpose | +| -------------------------------- | --------------------------- | +| `internal/health/monitor` | Health checking during wake | +| `internal/route/routes` | Route registry lookup | +| `internal/docker` | Docker client connection | +| `internal/proxmox` | Proxmox LXC management | +| `internal/watcher/events` | Container event watching | +| `pkg/gperr` | Error handling | +| `xsync/v4` | Concurrent maps | +| `golang.org/x/sync/singleflight` | Duplicate wake suppression | + +## Observability + +### Logs + +- **INFO**: Wake start, container started, ready notification +- **DEBUG**: State transitions, health check details +- **ERROR**: Wake failures, health check errors + +Log context includes: `alias`, `key`, `provider`, `method` + +### Metrics + +No metrics exposed directly; health check metrics available via `internal/health/monitor`. 
+ +## Security Considerations + +- Loading page and SSE endpoints are mounted under `/$godoxy/` path +- No authentication on loading page; assumes internal network trust +- SSE event history may contain container names (visible to connected clients) + +## Failure Modes and Recovery + +| Failure | Behavior | Recovery | +| ----------------------------- | -------------------------------------------------- | ------------------------------ | +| Wake timeout | Returns error, container remains in current state | Retry wake with longer timeout | +| Health check fails repeatedly | Container marked as error, retries on next request | External fix required | +| Provider connection lost | SSE disconnects, next request retries wake | Reconnect on next request | +| Dependencies fail to start | Wake fails with dependency error | Fix dependency container | + +## Usage Examples + +### Basic HTTP Route with Idlewatcher + +```go +route := &route.Route{ + Alias: "myapp", + Idlewatcher: &types.IdlewatcherConfig{ + IdlewatcherConfigBase: types.IdlewatcherConfigBase{ + IdleTimeout: 5 * time.Minute, + StopMethod: types.ContainerMethodStop, + StopTimeout: 30, + }, + Docker: &types.DockerProviderConfig{ + ContainerID: "abc123", + }, + }, +} + +w, err := idlewatcher.NewWatcher(parent, route, route.Idlewatcher) +if err != nil { + return err +} +return w.Start(parent) +``` + +### Watching Wake Events + +```javascript +// Events are automatically served at /$godoxy/wake-events +// Client connects via EventSource: + +const eventSource = new EventSource("/$godoxy/wake-events"); +eventSource.onmessage = (e) => { + const event = JSON.parse(e.data); + console.log(`Wake event: ${event.type}`, event.message); +}; +``` + +## Testing Notes + +- Unit tests cover state machine transitions +- Integration tests with Docker daemon for provider operations +- Mock provider for testing wake flow without real containers diff --git a/internal/idlewatcher/provider/README.md new 
file mode 100644 index 00000000..5224098e --- /dev/null +++ b/internal/idlewatcher/provider/README.md @@ -0,0 +1,219 @@ +# Idlewatcher Provider + +Implements container runtime abstractions for Docker and Proxmox LXC backends. + +## Overview + +The `internal/idlewatcher/provider` package implements the `idlewatcher.Provider` interface for different container runtimes. It enables the idlewatcher to manage containers regardless of the underlying runtime (Docker or Proxmox LXC). + +### Primary Consumers + +- **idlewatcher.Watcher**: Uses providers to perform container lifecycle operations +- **Package tests**: Verify provider contract compliance + +### Non-goals + +- Does not implement idle detection logic +- Does not manage route configuration +- Does not handle health checking + +### Stability + +Internal package implementing stable `idlewatcher.Provider` interface. + +## Public API + +### Provider Interface + +```go +type Provider interface { + // Lifecycle operations + ContainerPause(ctx context.Context) error + ContainerUnpause(ctx context.Context) error + ContainerStart(ctx context.Context) error + ContainerStop(ctx context.Context, signal types.ContainerSignal, timeout int) error + ContainerKill(ctx context.Context, signal types.ContainerSignal) error + + // Status and monitoring + ContainerStatus(ctx context.Context) (ContainerStatus, error) + Watch(ctx context.Context) (eventCh <-chan events.Event, errCh <-chan gperr.Error) + + // Cleanup + Close() +} +``` + +### Container Status + +```go +type ContainerStatus string + +const ( + ContainerStatusRunning ContainerStatus = "running" + ContainerStatusStopped ContainerStatus = "stopped" + ContainerStatusPaused ContainerStatus = "paused" + ContainerStatusError ContainerStatus = "error" +) +``` + +### Exported Functions + +```go +// NewDockerProvider creates a provider for Docker containers +func NewDockerProvider(dockerCfg types.DockerProviderConfig, containerID string) (idlewatcher.Provider, error) + +// 
NewProxmoxProvider creates a provider for Proxmox LXC containers
+func NewProxmoxProvider(ctx context.Context, nodeName string, vmid int) (idlewatcher.Provider, error)
+```
+
+## Architecture
+
+### Core Components
+
+```mermaid
+classDiagram
+    class Provider {
+        <<interface>>
+        +ContainerPause(ctx) error
+        +ContainerStart(ctx) error
+        +ContainerStop(ctx, signal, timeout) error
+        +ContainerStatus(ctx) (ContainerStatus, error)
+        +Watch(ctx) (eventCh, errCh)
+        +Close()
+    }
+
+    class DockerProvider {
+        +client *docker.SharedClient
+        +watcher watcher.DockerWatcher
+        +containerID string
+        +ContainerPause(ctx) error
+        +ContainerStart(ctx) error
+        +ContainerStatus(ctx) (ContainerStatus, error)
+    }
+
+    class ProxmoxProvider {
+        +*proxmox.Node
+        +vmid int
+        +lxcName string
+        +running bool
+        +ContainerStart(ctx) error
+        +ContainerStop(ctx, signal, timeout) error
+    }
+
+    Provider <|-- DockerProvider
+    Provider <|-- ProxmoxProvider
+```
+
+### Component Interactions
+
+```mermaid
+flowchart TD
+    A[Watcher] --> B{Provider Type}
+    B -->|Docker| C[DockerProvider]
+    B -->|Proxmox| D[ProxmoxProvider]
+
+    C --> E[Docker API]
+    D --> F[Proxmox API]
+
+    E --> G[Container Events]
+    F --> H[LXC Events]
+
+    G --> A
+    H --> A
+```
+
+## Configuration Surface
+
+### Docker Provider Config
+
+```go
+type DockerProviderConfig struct {
+    URL        string // Docker socket URL (unix:///var/run/docker.sock)
+    SocketPath string // Alternative socket path
+}
+```
+
+### Proxmox Provider Config
+
+Provided via `NewProxmoxProvider` parameters:
+
+- `nodeName`: Proxmox node name
+- `vmid`: LXC container ID
+
+## Dependency and Integration Map
+
+| Dependency                | Purpose                                |
+| ------------------------- | -------------------------------------- |
+| `internal/docker`         | Docker client and container operations |
+| `internal/proxmox`        | Proxmox API client                     |
+| `internal/watcher`        | Event watching for container changes   |
+| `internal/watcher/events` | Event types                            |
+| `pkg/gperr`               | Error handling                         |
+
+## Observability
+
+### 
Logs + +- **DEBUG**: API calls and responses +- **ERROR**: Operation failures with context + +Log context includes: `container`, `vmid`, `action` + +## Security Considerations + +- Docker provider requires access to Docker socket +- Proxmox provider requires API credentials +- Both handle sensitive container operations + +## Failure Modes and Recovery + +| Failure | Behavior | Recovery | +| ------------------------- | ------------------------ | --------------------------- | +| Docker socket unavailable | Returns connection error | Fix socket permissions/path | +| Container not found | Returns not found error | Verify container ID | +| Proxmox node unavailable | Returns API error | Check network/node | +| Operation timeout | Returns timeout error | Increase timeout or retry | + +## Usage Examples + +### Creating a Docker Provider + +```go +provider, err := provider.NewDockerProvider(types.DockerProviderConfig{ + SocketPath: "/var/run/docker.sock", +}, "abc123def456") +if err != nil { + return err +} +defer provider.Close() + +// Check container status +status, err := provider.ContainerStatus(ctx) +if err != nil { + return err +} + +// Start container if stopped +if status == idlewatcher.ContainerStatusStopped { + if err := provider.ContainerStart(ctx); err != nil { + return err + } +} +``` + +### Watching for Container Events + +```go +eventCh, errCh := provider.Watch(ctx) + +for { + select { + case <-ctx.Done(): + return + case event := <-eventCh: + log.Printf("Container %s: %s", event.ActorName, event.Action) + case err := <-errCh: + log.Printf("Watch error: %v", err) + } +} +``` diff --git a/internal/jsonstore/README.md b/internal/jsonstore/README.md new file mode 100644 index 00000000..7c091173 --- /dev/null +++ b/internal/jsonstore/README.md @@ -0,0 +1,364 @@ +# JSON Store + +The jsonstore package provides persistent JSON storage with namespace support, using thread-safe concurrent maps and automatic loading/saving. 
+ +## Overview + +The jsonstore package implements a simple yet powerful JSON storage system for GoDoxy, supporting both key-value stores (MapStore) and single object stores (ObjectStore) with automatic persistence to JSON files. + +### Key Features + +- Namespace-based storage +- Thread-safe concurrent map operations (xsync) +- Automatic JSON loading on initialization +- Automatic JSON saving on program exit +- Generic type support +- Marshal/Unmarshal integration + +## Architecture + +```mermaid +graph TD + A[JSON Store] --> B{Namespace} + B --> C[MapStore] + B --> D[ObjectStore] + + C --> E[xsync.Map] + D --> F[Single Object] + + G[Storage File] --> H[Load on Init] + H --> I[Parse JSON] + I --> J[xsync.Map or Object] + + K[Program Exit] --> L[Save All] + L --> M[Serialize to JSON] + M --> N[Write Files] +``` + +## Core Components + +### MapStore + +```go +type MapStore[VT any] struct { + *xsync.Map[string, VT] +} + +// Implements: +// - Initialize() - initializes the internal map +// - MarshalJSON() - serializes to JSON +// - UnmarshalJSON() - deserializes from JSON +``` + +### ObjectStore + +```go +type ObjectStore[Pointer Initializer] struct { + ptr Pointer +} + +// Initializer interface requires: +// - Initialize() +``` + +### Store Interface + +```go +type store interface { + Initialize() + json.Marshaler + json.Unmarshaler +} +``` + +## Public API + +### MapStore Creation + +```go +// Store creates a new namespace map store. +func Store[VT any](namespace namespace) MapStore[VT] +``` + +### ObjectStore Creation + +```go +// Object creates a new namespace object store. 
+func Object[Ptr Initializer](namespace namespace) Ptr +``` + +## Usage + +### MapStore Example + +```go +// Define a namespace +type UserID string + +// Create a store for user sessions +var sessions = jsonstore.Store[UserID]("sessions") + +// Store a value +sessions.Store("user123", "session-token-abc") + +// Load a value +token, ok := sessions.Load("user123") +if ok { + fmt.Println("Session:", token) +} + +// Iterate over all entries +for id, token := range sessions.Range { + fmt.Printf("%s: %s\n", id, token) +} + +// Delete a value +sessions.Delete("user123") +``` + +### ObjectStore Example + +```go +// Define a struct that implements Initialize +type AppConfig struct { + Name string + Version int +} + +func (c *AppConfig) Initialize() { + c.Name = "MyApp" + c.Version = 1 +} + +// Create an object store +var config = jsonstore.Object[*AppConfig]("app_config") + +// Access the object +fmt.Printf("App: %s v%d\n", config.Name, config.Version) + +// Modify and save (automatic on exit) +config.Version = 2 +``` + +### Complete Example + +```go +package main + +import ( + "encoding/json" + "github.com/yusing/godoxy/internal/jsonstore" +) + +type Settings struct { + Theme string + Lang string +} + +func (s *Settings) Initialize() { + s.Theme = "dark" + s.Lang = "en" +} + +func main() { + // Create namespace type + type SettingsKey string + + // Create stores + var settings = jsonstore.Object[*Settings]("settings") + var cache = jsonstore.Store[string]("cache") + + // Use stores + settings.Theme = "light" + cache.Store("key1", "value1") + + // On program exit, all stores are automatically saved +} +``` + +## Data Flow + +```mermaid +sequenceDiagram + participant Application + participant Store + participant xsync.Map + participant File + + Application->>Store: Store(key, value) + Store->>xsync.Map: Store(key, value) + xsync.Map-->>Store: Done + + Application->>Store: Load(key) + Store->>xsync.Map: Load(key) + xsync.Map-->>Store: value + Store-->>Application: value + + 
Application->>Store: Save() + Store->>File: Marshal JSON + File-->>Store: Success + + Note over Store,File: On program exit + Store->>File: Save all stores + File-->>Store: Complete +``` + +## Namespace + +Namespaces are string identifiers for different storage areas: + +```go +type namespace string + +// Create namespaces +var ( + users = jsonstore.Store[User]("users") + sessions = jsonstore.Store[Session]("sessions") + config = jsonstore.Object[*Config]("config") + metadata = jsonstore.Store[string]("metadata") +) +``` + +### Reserved Names + +None + +## File Storage + +### File Location + +```go +var storesPath = common.DataDir // Typically ./data/.{namespace}.json +``` + +### File Format + +Stores are saved as `{namespace}.json`: + +```json +{ + "key1": "value1", + "key2": "value2" +} +``` + +### Automatic Loading + +On initialization, stores are loaded from disk: + +```go +func loadNS[T store](ns namespace) T { + store := reflect.New(reflect.TypeFor[T]().Elem()).Interface().(T) + store.Initialize() + + path := filepath.Join(storesPath, string(ns)+".json") + file, err := os.Open(path) + if err != nil { + if !os.IsNotExist(err) { + log.Err(err).Msg("failed to load store") + } + return store + } + defer file.Close() + + if err := sonic.ConfigDefault.NewDecoder(file).Decode(&store); err != nil { + log.Err(err).Msg("failed to decode store") + } + + stores[ns] = store + return store +} +``` + +### Automatic Saving + +On program exit, all stores are saved: + +```go +func init() { + task.OnProgramExit("save_stores", func() { + if err := save(); err != nil { + log.Error().Err(err).Msg("failed to save stores") + } + }) +} + +func save() error { + for ns, store := range stores { + path := filepath.Join(storesPath, string(ns)+".json") + if err := serialization.SaveJSON(path, &store, 0644); err != nil { + return err + } + } + return nil +} +``` + +## Thread Safety + +The MapStore uses `xsync.Map` for thread-safe operations: + +```go +type MapStore[VT any] struct { + 
*xsync.Map[string, VT] +} + +// All operations are safe: +// - Load, Store, Delete +// - Range iteration +// - LoadAndDelete +// - LoadOrCompute +``` + +## JSON Serialization + +### MarshalJSON + +```go +func (s MapStore[VT]) MarshalJSON() ([]byte, error) { + return sonic.Marshal(xsync.ToPlainMap(s.Map)) +} +``` + +### UnmarshalJSON + +```go +func (s *MapStore[VT]) UnmarshalJSON(data []byte) error { + tmp := make(map[string]VT) + if err := sonic.Unmarshal(data, &tmp); err != nil { + return err + } + s.Map = xsync.NewMap[string, VT](xsync.WithPresize(len(tmp))) + for k, v := range tmp { + s.Store(k, v) + } + return nil +} +``` + +## Integration Points + +The jsonstore package integrates with: + +- **Serialization**: JSON marshaling/unmarshaling +- **Task Management**: Program exit callbacks +- **Common**: Data directory configuration + +## Error Handling + +Errors are logged but don't prevent store usage: + +```go +if err := sonic.Unmarshal(data, &tmp); err != nil { + log.Err(err). + Str("path", path). + Msg("failed to load store") +} +``` + +## Performance Considerations + +- Uses `xsync.Map` for lock-free reads +- Presizes maps based on input data +- Sonic library for fast JSON parsing +- Background save on program exit (non-blocking) diff --git a/internal/logging/README.md b/internal/logging/README.md index 457a6386..b9c204fb 100644 --- a/internal/logging/README.md +++ b/internal/logging/README.md @@ -1,30 +1,46 @@ # Logging Package -This package provides structured logging capabilities for GoDoxy, including application logging, HTTP access logging, and in-memory log streaming. +Structured logging capabilities for GoDoxy, including application logging, HTTP access logging, and in-memory log streaming. 
-## Structure +## Overview -``` -internal/logging/ -├── logging.go # Main logger initialization using zerolog -├── accesslog/ # HTTP access logging with rotation and filtering -│ ├── access_logger.go # Core logging logic and buffering -│ ├── multi_access_logger.go # Fan-out to multiple writers -│ ├── config.go # Configuration types and defaults -│ ├── formatter.go # Log format implementations -│ ├── file_logger.go # File I/O with reference counting -│ ├── rotate.go # Log rotation based on retention policy -│ ├── writer.go # Buffered/unbuffered writer abstractions -│ ├── back_scanner.go # Backward line scanning for rotation -│ ├── filter.go # Request filtering by status/method/header -│ ├── retention.go # Retention policy definitions -│ ├── response_recorder.go # HTTP response recording middleware -│ └── ... # Tests and utilities -└── memlogger/ # In-memory circular buffer with WebSocket streaming - └── mem_logger.go # Ring buffer with WebSocket event notifications -``` +This package provides structured logging for GoDoxy with three distinct subsystems: -## Architecture Overview +- **Application Logger**: Zerolog-based console logger with level-aware formatting +- **Access Logger**: HTTP request/response logging with configurable formats, filters, and destinations +- **In-Memory Logger**: Circular buffer with WebSocket streaming for real-time log viewing + +### Primary Consumers + +- `internal/api/` - HTTP request logging +- `internal/route/` - Route-level access logging +- WebUI - Real-time log streaming via WebSocket + +### Non-goals + +- Log aggregation across multiple GoDoxy instances +- Persistent storage of application logs (access logs only) +- Structured logging output to external systems (Datadog, etc.) + +### Stability + +Internal package with stable APIs. Exported interfaces (`AccessLogger`, `MemLogger`) are stable. + +## Packages + +### `accesslog/` + +HTTP request/response logging with configurable formats, filters, and destinations. 
+ +See [accesslog/README.md](./accesslog/README.md) for full documentation. + +### `memlogger/` + +In-memory circular buffer with WebSocket streaming for real-time log viewing. + +See [memlogger/README.md](./memlogger/README.md) for full documentation. + +## Architecture ```mermaid graph TB @@ -43,13 +59,6 @@ graph TB W --> S[Stdout] end - subgraph "Log Rotation" - B --> RT[Rotate Timer] - RT --> BS[BackScanner] - BS --> T[Truncate/Move] - T --> F1 - end - subgraph "In-Memory Logger" WB[Write Buffer] WB --> RB[Circular Buffer
16KB max] @@ -58,206 +67,51 @@ graph TB end ``` -## Components +## Configuration Surface -### 1. Application Logger (`logging.go`) +### Access Log Configuration -Initializes a zerolog-based console logger with level-aware formatting: +See [accesslog/README.md](./accesslog/README.md) for configuration options. -- **Levels**: Trace → Debug → Info (determined by `common.IsTrace`/`common.IsDebug`) -- **Time Format**: 04:05 (trace) or 01-02 15:04 (debug/info) -- **Multi-line Handling**: Automatically indents continuation lines +### In-Memory Logger -```go -// Auto-initialized on import -func InitLogger(out ...io.Writer) +See [memlogger/README.md](./memlogger/README.md) for configuration options. -// Create logger with fixed level -NewLoggerWithFixedLevel(level zerolog.Level, out ...io.Writer) -``` +## Dependency and Integration Map -### 2. Access Logging (`accesslog/`) +### Internal Dependencies -Logs HTTP requests/responses with configurable formats, filters, and destinations. +- `internal/task/task.go` - Lifetime management +- `internal/maxmind/` - IP geolocation for ACL logging +- `pkg/gperr` - Error handling -#### Core Interface +### External Dependencies -```go -type AccessLogger interface { - Log(req *http.Request, res *http.Response) - LogError(req *http.Request, err error) - LogACL(info *maxmind.IPInfo, blocked bool) - Config() *Config - Flush() - Close() error -} -``` +- `github.com/rs/zerolog` - Structured logging +- `github.com/puzpuzpuz/xsync/v4` - Concurrent maps +- `golang.org/x/time/rate` - Error rate limiting -#### Log Formats +## Observability -| Format | Description | -| ---------- | --------------------------------- | -| `common` | Basic Apache Common format | -| `combined` | Common + Referer + User-Agent | -| `json` | Structured JSON with full details | +### Logs -#### Example Output +| Level | When | +| ------- | ---------------------------------------- | +| `Debug` | Buffer size adjustments, rotation checks | +| `Info` | Log rotation events, file 
opens/closes | +| `Error` | Write failures (rate-limited) | -``` -common: localhost 127.0.0.1 - - [01-04 10:30:45] "GET /api HTTP/1.1" 200 1234 -combined: localhost 127.0.0.1 - - [01-04 10:30:45] "GET /api HTTP/1.1" 200 1234 "https://example.com" "Mozilla/5.0" -json: {"time":"04/Jan/2025:10:30:45 +0000","ip":"127.0.0.1","method":"GET",...} -``` +## Failure Modes and Recovery -#### Filters +| Failure Mode | Impact | Recovery | +| --------------------------- | ------------------------ | ----------------------------------------------------------- | +| File write failure | Log entries dropped | Rate-limited error logging; task termination after 5 errors | +| Disk full | Rotation fails | Continue logging until space available | +| WebSocket client disconnect | Client misses logs | Client reconnects to receive new logs | +| Buffer overflow (memlogger) | Oldest entries truncated | Automatic truncation at 50% threshold | -Filter incoming requests before logging: +## Testing Notes -- **StatusCodes**: Keep/drop by HTTP status code range -- **Method**: Keep/drop by HTTP method -- **Headers**: Match header existence or value -- **CIDR**: Match client IP against CIDR ranges - -#### Multi-Destination Support - -```mermaid -graph LR - A[Request] --> B[MultiAccessLogger] - B --> C[AccessLogger 1] --> F[File] - B --> D[AccessLogger 2] --> S[Stdout] -``` - -### 3. File Management (`file_logger.go`) - -- **Reference Counting**: Multiple loggers can share the same file -- **Auto-Close**: File closes when ref count reaches zero -- **Thread-Safe**: Shared mutex per file path - -### 4. Log Rotation (`rotate.go`) - -Rotates logs based on retention policy: - -| Policy | Description | -| ---------- | ----------------------------------- | -| `Days` | Keep logs within last N days | -| `Last` | Keep last N log lines | -| `KeepSize` | Keep last N bytes (simple truncate) | - -**Algorithm** (for Days/Last): - -1. Scan file backward line-by-line using `BackScanner` -2. 
Parse timestamps to find cutoff point -3. Move retained lines to file front -4. Truncate excess - -```mermaid -flowchart LR - A[File End] --> B[BackScanner] - B --> C{Valid timestamp?} - C -->|No| D[Skip line] - C -->|Yes| E{Within retention?} - E -->|No| F[Keep line] - E -->|Yes| G[Stop scanning] - F --> H[Move to front] - G --> I[Truncate rest] -``` - -### 5. Buffering (`access_logger.go`) - -- **Dynamic Sizing**: Adjusts buffer size based on write throughput -- **Initial**: 4KB → **Max**: 8MB -- **Adjustment**: Every 5 seconds based on writes-per-second - -### 6. In-Memory Logger (`memlogger/`) - -Circular buffer for real-time log streaming via WebSocket: - -- **Size**: 16KB maximum, auto-truncates old entries -- **Streaming**: WebSocket connection receives live updates -- **Events API**: Subscribe to log events - -```go -// HTTP handler for WebSocket streaming -HandlerFunc() gin.HandlerFunc - -// Subscribe to log events -Events() (<-chan []byte, func()) - -// Write to buffer (implements io.Writer) -Write(p []byte) (n int, err error) -``` - -## Configuration - -```yaml -access_log: - path: /var/log/godoxy/access.log # File path (optional) - stdout: true # Also log to stdout (optional) - format: combined # common | combined | json - rotate_interval: 1h # How often to check rotation - retention: - days: 30 # Keep last 30 days - # OR - last: 10000 # Keep last 10000 lines - # OR - keep_size: 100MB # Keep last 100MB - filters: - status_codes: [400-599] # Only log errors - method: [GET, POST] - headers: - - name: X-Internal - value: "true" - cidr: - - 10.0.0.0/8 - fields: - headers: drop # keep | drop | redacted - query: keep # keep | drop | redacted - cookies: drop # keep | drop | redacted -``` - -## Data Flow - -```mermaid -sequenceDiagram - participant C as Client - participant M as Middleware - participant R as ResponseRecorder - participant F as Formatter - participant B as BufferedWriter - participant W as Writer - - C->>M: HTTP Request - M->>R: Capture request 
- R-->>M: Continue - - M->>M: Process request - - C->>M: HTTP Response - M->>R: Capture response - R->>F: Format log line - F->>B: Write formatted line - B->>W: Flush when needed - - par File Writer - W->>File: Append line - and Stdout Writer - W->>Stdout: Print line - end - - Note over B,W: Periodic rotation check - W->>File: Rotate if needed -``` - -## Key Design Patterns - -1. **Interface Segregation**: Small, focused interfaces (`AccessLogger`, `Writer`, `BufferedWriter`) - -2. **Dependency Injection**: Writers injected at creation for flexibility - -3. **Reference Counting**: Shared file handles prevent too-many-open-files - -4. **Dynamic Buffering**: Adapts to write throughput automatically - -5. **Backward Scanning**: Efficient rotation without loading entire file - -6. **Zero-Allocation Formatting**: Build log lines in pre-allocated buffers +- `access_logger_test.go` - Integration tests with mock file system +- `file_logger_test.go` - Reference counting tests +- `back_scanner_test.go` - Rotation boundary tests diff --git a/internal/logging/accesslog/README.md b/internal/logging/accesslog/README.md new file mode 100644 index 00000000..66ee8511 --- /dev/null +++ b/internal/logging/accesslog/README.md @@ -0,0 +1,493 @@ +# Access Logging + +Provides HTTP access logging with file rotation, log filtering, and multiple output formats for request and ACL event logging. + +## Overview + +The accesslog package captures HTTP request/response information and writes it to files or stdout. It includes configurable log formats, filtering rules, and automatic log rotation with retention policies. 
+ +### Primary Consumers + +- `internal/route` - Middleware for logging proxied requests +- `internal/acl` - ACL decision logging +- `internal/api` - Request audit trails + +### Non-goals + +- Does not provide log parsing or analysis +- Does not implement log aggregation across services +- Does not provide log shipping to external systems +- Does not implement access control (use `internal/acl`) + +### Stability + +Internal package. Public interfaces are stable. + +## Public API + +### Exported Types + +#### AccessLogger Interface + +```go +type AccessLogger interface { + // Log records an HTTP request and response + Log(req *http.Request, res *http.Response) + + // LogError logs a request with an error status code + LogError(req *http.Request, err error) + + // LogACL logs an ACL block/allow event + LogACL(info *maxmind.IPInfo, blocked bool) + + // Config returns the logger configuration + Config() *Config + + // Flush forces any buffered log data to be written + Flush() + + // Close closes the logger and releases resources + Close() error +} +``` + +Main interface for logging HTTP requests and ACL events. + +#### Writer Interface + +```go +type Writer interface { + io.WriteCloser + ShouldBeBuffered() bool + Name() string // file name or path +} +``` + +Output destination interface. + +#### Format Type + +```go +type Format string + +const ( + FormatCommon Format = "common" + FormatCombined Format = "combined" + FormatJSON Format = "json" +) +``` + +Log format constants. + +### Configuration Types + +#### RequestLoggerConfig + +```go +type RequestLoggerConfig struct { + ConfigBase + Format Format `json:"format" validate:"oneof=common combined json"` + Filters Filters `json:"filters"` + Fields Fields `json:"fields"` +} +``` + +Configuration for request/response logging. + +#### ACLLoggerConfig + +```go +type ACLLoggerConfig struct { + ConfigBase + LogAllowed bool `json:"log_allowed"` +} +``` + +Configuration for ACL event logging. 
+ +#### ConfigBase + +```go +type ConfigBase struct { + B int `json:"buffer_size"` // Deprecated: buffer size is adjusted dynamically + Path string `json:"path"` + Stdout bool `json:"stdout"` + Retention *Retention `json:"retention" aliases:"keep"` + RotateInterval time.Duration `json:"rotate_interval,omitempty" swaggertype:"primitive,integer"` +} +``` + +Common configuration for all loggers. + +#### Filters + +```go +type Filters struct { + StatusCodes LogFilter[*StatusCodeRange] `json:"status_codes"` + Method LogFilter[HTTPMethod] `json:"method"` + Host LogFilter[Host] `json:"host"` + Headers LogFilter[*HTTPHeader] `json:"headers"` + CIDR LogFilter[*CIDR] `json:"cidr"` +} +``` + +Filtering rules for what to log. + +#### Fields + +```go +type Fields struct { + Headers FieldConfig `json:"headers" aliases:"header"` + Query FieldConfig `json:"query" aliases:"queries"` + Cookies FieldConfig `json:"cookies" aliases:"cookie"` +} +``` + +Field configuration for what data to include. + +### Exported Functions + +#### Constructor + +```go +func NewAccessLogger(parent task.Parent, cfg AnyConfig) (AccessLogger, error) +func NewMockAccessLogger(parent task.Parent, cfg *RequestLoggerConfig) AccessLogger +func NewAccessLoggerWithIO(parent task.Parent, writer Writer, anyCfg AnyConfig) AccessLogger +``` + +Create access loggers from configurations. + +#### Default Configurations + +```go +func DefaultRequestLoggerConfig() *RequestLoggerConfig +func DefaultACLLoggerConfig() *ACLLoggerConfig +``` + +Returns default configurations. 
+ +## Architecture + +### Core Components + +```mermaid +graph TD + subgraph Request Flow + Req[HTTP Request] -->|Passed to| Log[AccessLogger.Log] + Res[HTTP Response] -->|Passed to| Log + Log -->|Formats| Fmt[RequestFormatter] + Fmt -->|Writes to| Writer[BufferedWriter] + Writer -->|Outputs to| Output[File/Stdout] + end + + subgraph Background Tasks + Rotator[Rotation Task] -->|Triggers| Rotate[ShouldRotate] + Adjuster[Buffer Adjuster] -->|Adjusts| Buffer[Buffer Size] + end +``` + +| Component | Responsibility | +| ------------------ | ------------------------------------ | +| `AccessLogger` | Main logging interface | +| `RequestFormatter` | Formats request/response logs | +| `ACLFormatter` | Formats ACL decision logs | +| `Writer` | Output destination (file/stdout) | +| `BufferedWriter` | Efficient I/O with dynamic buffering | + +### Log Flow + +```mermaid +sequenceDiagram + participant Request + participant AccessLogger + participant Formatter + participant BufferedWriter + participant File + + Request->>AccessLogger: Log(req, res) + AccessLogger->>AccessLogger: shouldLog() filter check + alt Passes filters + AccessLogger->>Formatter: AppendRequestLog(line, req, res) + Formatter->>AccessLogger: Formatted line + AccessLogger->>BufferedWriter: Write(line) + BufferedWriter->>BufferedWriter: Buffer if needed + BufferedWriter->>File: Flush when full/rotating + else Fails filters + AccessLogger->>Request: Skip logging + end +``` + +### Buffer Management + +The logger dynamically adjusts buffer size based on write throughput: + +| Parameter | Value | +| ------------------- | --------- | +| Initial Buffer Size | 4 KB | +| Maximum Buffer Size | 8 MB | +| Adjustment Interval | 5 seconds | + +Buffer size adjustment formula: + +```go +newBufSize = origBufSize +/- step +step = max(|wps - origBufSize|/2, wps/2) +``` + +### Rotation Logic + +```mermaid +stateDiagram-v2 + [*] --> Logging + Logging --> Logging: Normal writes + Logging --> Rotating: Interval reached + Rotating 
--> Logging: New file created + Rotating --> [*]: Logger closed +``` + +Rotation checks: + +1. Is rotation enabled (supportRotate + valid retention)? +1. Is retention period valid? +1. Create new file with timestamp suffix +1. Delete old files beyond retention + +## Log Formats + +### Common Format + +``` +127.0.0.1 - - [10/Jan/2024:12:00:00 +0000] "GET /api HTTP/1.1" 200 1234 +``` + +### Combined Format + +``` +127.0.0.1 - - [10/Jan/2024:12:00:00 +0000] "GET /api HTTP/1.1" 200 1234 "https://example.com" "Mozilla/5.0" +``` + +### JSON Format + +```json +{ + "level": "info", + "time": "10/Jan/2024:12:00:00 +0000", + "ip": "127.0.0.1", + "method": "GET", + "scheme": "http", + "host": "example.com", + "path": "/api", + "protocol": "HTTP/1.1", + "status": 200, + "type": "application/json", + "size": 1234, + "referer": "https://example.com", + "useragent": "Mozilla/5.0" +} +``` + +## Configuration Surface + +### YAML Configuration + +```yaml +access_log: + path: /var/log/godoxy/access.log + stdout: false + rotate_interval: 1h + retention: + days: 30 + format: combined + filters: + status_codes: + keep: + - min: 200 + max: 599 + method: + keep: + - GET + - POST + headers: + keep: + - name: Authorization +``` + +### Configuration Fields + +| Field | Type | Default | Description | +| ---------------------- | -------- | -------- | ------------------- | +| `path` | string | - | Log file path | +| `stdout` | bool | false | Also log to stdout | +| `rotate_interval` | duration | 1h | Rotation interval | +| `retention.days` | int | 30 | Days to retain logs | +| `format` | string | combined | Log format | +| `filters.status_codes` | range[] | all | Status code filter | +| `filters.method` | string[] | all | HTTP method filter | +| `filters.cidr` | CIDR[] | none | IP range filter | + +### Reloading + +Configuration is fixed at construction time. Create a new logger to apply changes. 
+ +## Dependency and Integration Map + +### Internal Dependencies + +| Package | Purpose | +| ------------------------ | ---------------------------------- | +| `internal/maxmind/types` | IP geolocation for ACL logs | +| `internal/serialization` | Default value factory registration | + +### External Dependencies + +| Dependency | Purpose | +| -------------------------------- | --------------------------- | +| `github.com/rs/zerolog` | JSON formatting and logging | +| `github.com/yusing/goutils/task` | Lifetime management | +| `github.com/puzpuzpuz/xsync/v4` | Concurrent map operations | +| `golang.org/x/time/rate` | Error rate limiting | + +## Observability + +### Logs + +| Level | When | +| ----- | ----------------------------- | +| Debug | Buffer size adjustments | +| Info | Log file rotation | +| Error | Write failures (rate limited) | + +### Metrics + +None exposed directly. Write throughput tracked internally. + +## Security Considerations + +- Log files should have appropriate permissions (644) +- Sensitive headers can be filtered via `Filters.Headers` +- Query parameters and cookies are configurable via `Fields` +- Rate limiting prevents error log flooding + +## Failure Modes and Recovery + +| Failure | Detection | Recovery | +| ----------------------- | ------------------------ | -------------------------------------- | +| Write error | `Write()` returns error | Rate-limited logging, then task finish | +| File deleted while open | Write failure | Logger continues with error | +| Disk full | Write failure | Error logged, may terminate | +| Rotation error | `Rotate()` returns error | Continue with current file | + +### Error Rate Limiting + +```go +const ( + errRateLimit = 200 * time.Millisecond + errBurst = 5 +) +``` + +Errors are rate-limited to prevent log flooding. After burst exceeded, task is finished. 
+ +## Usage Examples + +### Basic Request Logger + +```go +import "github.com/yusing/godoxy/internal/logging/accesslog" + +cfg := accesslog.DefaultRequestLoggerConfig() +cfg.Path = "/var/log/godoxy/access.log" +cfg.RotateInterval = time.Hour +cfg.Retention = &accesslog.Retention{Days: 30} + +logger, err := accesslog.NewAccessLogger(parent, cfg) +if err != nil { + log.Fatal(err) +} +defer logger.Close() + +// Log a request +logger.Log(req, res) +``` + +### JSON Format with Filters + +```go +cfg := accesslog.RequestLoggerConfig{ + ConfigBase: accesslog.ConfigBase{ + Path: "/var/log/godoxy/requests.json.log", + Retention: &accesslog.Retention{Days: 7}, + }, + Format: accesslog.FormatJSON, + Filters: accesslog.Filters{ + StatusCodes: accesslog.LogFilter[*accesslog.StatusCodeRange]{ + Keep: []accesslog.StatusCodeRange{{Min: 400, Max: 599}}, + }, + }, +} + +logger := accesslog.NewAccessLogger(parent, &cfg) +``` + +### ACL Logger + +```go +aclCfg := accesslog.DefaultACLLoggerConfig() +aclCfg.Path = "/var/log/godoxy/acl.log" +aclCfg.LogAllowed = false // Only log blocked requests + +aclLogger, err := accesslog.NewAccessLogger(parent, aclCfg) +if err != nil { + log.Fatal(err) +} + +// Log ACL decision +aclLogger.LogACL(ipInfo, true) // blocked +aclLogger.LogACL(ipInfo, false) // allowed (if LogAllowed is true) +``` + +### Custom Writer + +```go +type customWriter struct { + *os.File +} + +func (w *customWriter) ShouldBeBuffered() bool { return true } +func (w *customWriter) Name() string { return "custom" } + +writer := &customWriter{File: myFile} +logger := accesslog.NewAccessLoggerWithIO(parent, writer, cfg) +``` + +### Integration with Route Middleware + +```go +func accessLogMiddleware(logger accesslog.AccessLogger) gin.HandlerFunc { + return func(c *gin.Context) { + c.Next() + logger.Log(c.Request, c.Writer.Result()) + } +} +``` + +## Performance Characteristics + +- Buffered writes reduce I/O operations +- Dynamic buffer sizing adapts to throughput +- Per-writer locks 
allow parallel writes to different files +- Byte pools reduce GC pressure +- Efficient log rotation with back scanning + +## Testing Notes + +- `NewMockAccessLogger` for testing without file I/O +- Mock file implementation via `NewMockFile` +- Filter tests verify predicate logic +- Rotation tests verify retention cleanup + +## Related Packages + +- `internal/route` - Route middleware integration +- `internal/acl` - ACL decision logging +- `internal/maxmind` - IP geolocation for ACL logs diff --git a/internal/logging/memlogger/README.md b/internal/logging/memlogger/README.md new file mode 100644 index 00000000..980b6bd1 --- /dev/null +++ b/internal/logging/memlogger/README.md @@ -0,0 +1,330 @@ +# In-Memory Logger + +Provides a thread-safe in-memory circular buffer logger with WebSocket-based real-time streaming for log data. + +## Overview + +The memlogger package implements a thread-safe in-memory log buffer with WebSocket streaming capabilities. It stores log data in memory and pushes new entries to connected WebSocket clients and event subscribers. + +### Primary Consumers + +- `internal/api/v1/cert/renew` - Provides WebSocket endpoint for certificate renewal logs +- Diagnostic and debugging interfaces + +### Non-goals + +- Does not persist logs to disk +- Does not provide log rotation or retention policies +- Does not support structured/log levels +- Does not provide authentication for WebSocket connections + +### Stability + +Internal package. Public interfaces are stable. + +## Public API + +### Exported Types + +#### MemLogger Interface + +```go +type MemLogger io.Writer +``` + +The `MemLogger` is an `io.Writer` interface. Any data written to it is stored in the circular buffer and broadcast to subscribers. + +### Exported Functions + +#### GetMemLogger + +```go +func GetMemLogger() MemLogger +``` + +Returns the global singleton `MemLogger` instance. 
+ +**Example:** + +```go +logger := memlogger.GetMemLogger() +logger.Write([]byte("log message")) +``` + +#### HandlerFunc + +```go +func HandlerFunc() gin.HandlerFunc +``` + +Returns a Gin middleware handler that upgrades HTTP connections to WebSocket and streams log data. + +**Example:** + +```go +router.GET("/logs/ws", memlogger.HandlerFunc()) +``` + +#### Events + +```go +func Events() (<-chan []byte, func()) +``` + +Returns a channel for receiving log events and a cancel function to unsubscribe. + +**Returns:** + +- `<-chan []byte` - Channel receiving log entry slices +- `func()` - Cleanup function that unsubscribes and closes the channel + +**Example:** + +```go +ch, cancel := memlogger.Events() +defer cancel() + +for event := range ch { + fmt.Println(string(event)) +} +``` + +## Architecture + +### Core Components + +```mermaid +flowchart LR + subgraph In-Memory Buffer + LB[bytes.Buffer] -->|Stores| Logs[Log Entries 16KB cap] + end + + subgraph Notification System + Notify[notifyWS] -->|Notifies| WS[WebSocket Clients] + Notify -->|Notifies| Ch[Event Channels] + end + + subgraph External Clients + HTTP[HTTP Request] -->|Upgrades to| WS + API[Events API] -->|Subscribes to| Ch + end +``` + +| Component | Responsibility | +| -------------- | ------------------------------------------------ | +| `memLogger` | Main struct holding buffer and subscription maps | +| `bytes.Buffer` | Circular buffer for log storage (16KB max) | +| `connChans` | xsync.Map of WebSocket channels | +| `listeners` | xsync.Map of event channels | + +### Write Flow + +```mermaid +sequenceDiagram + participant Writer + participant MemLogger + participant Buffer + participant Subscribers + + Writer->>MemLogger: Write(p) + MemLogger->>Buffer: truncateIfNeeded(n) + Buffer->>Buffer: Truncate to 8KB if needed + Buffer->>Buffer: Write(p) + MemLogger->>MemLogger: writeBuf returns position + MemLogger->>Subscribers: notifyWS(pos, n) + Subscribers->>Subscribers: Send to WebSocket/Listeners +``` + +### 
Buffer Behavior + +The circular buffer has fixed maximum size: + +| Property | Value | +| ------------------ | ---------- | +| Maximum Size | 16 KB | +| Truncate Threshold | 8 KB (50%) | +| Write Chunk Size | 4 KB | +| Write Timeout | 10 seconds | + +**Truncation Logic:** +When the buffer exceeds the maximum size: + +1. The buffer is truncated to 8 KB (half the maximum) +1. Oldest entries are removed first +1. Recent logs are always preserved + +### Thread Safety + +Multiple synchronization mechanisms ensure thread safety: + +| Field | Mutex Type | Purpose | +| ------------ | -------------- | ------------------------------------- | +| `Buffer` | `sync.RWMutex` | Protecting buffer operations | +| `notifyLock` | `sync.RWMutex` | Protecting notification maps | +| `connChans` | `xsync.Map` | Thread-safe WebSocket channel storage | +| `listeners` | `xsync.Map` | Thread-safe event listener storage | + +## Configuration Surface + +No explicit configuration. The singleton instance uses fixed constants: + +```go +const ( + maxMemLogSize = 16 * 1024 // 16KB buffer + truncateSize = maxMemLogSize / 2 // 8KB + initialWriteChunkSize = 4 * 1024 + writeTimeout = 10 * time.Second +) +``` + +## Dependency and Integration Map + +### Internal Dependencies + +| Dependency | Purpose | +| ------------------------------------------ | -------------------- | +| `github.com/yusing/goutils/http/websocket` | WebSocket management | + +### External Dependencies + +| Dependency | Purpose | +| ------------------------------- | ------------------------- | +| `github.com/gin-gonic/gin` | HTTP/WebSocket handling | +| `github.com/puzpuzpuz/xsync/v4` | Concurrent map operations | + +## Observability + +### Logs + +No logging in this package. Errors are returned via WebSocket write failures. + +### Metrics + +None exposed. 
+
+## Failure Modes and Recovery
+
+| Failure                 | Detection                    | Recovery                  |
+| ----------------------- | ---------------------------- | ------------------------- |
+| WebSocket write timeout | `writeTimeout` (10s) elapsed | Skip subscriber, continue |
+| Buffer write error      | `writeBuf` returns error     | Logged but not returned   |
+| Subscriber channel full | Channel send timeout         | Skip subscriber, continue |
+| Buffer exceeds max size | `truncateIfNeeded`           | Truncate to 8KB           |
+
+### Concurrency Guarantees
+
+- Multiple goroutines can write concurrently
+- Multiple WebSocket connections supported
+- Subscriptions can be added/removed during operation
+- Buffer truncation is atomic
+
+## Usage Examples
+
+### Basic Log Writing
+
+```go
+import "github.com/yusing/godoxy/internal/logging/memlogger"
+
+logger := memlogger.GetMemLogger()
+
+// Write a simple message
+logger.Write([]byte("Application started\n"))
+
+// Write formatted logs
+logger.Write([]byte(fmt.Sprintf("[INFO] Request received: %s\n", path)))
+```
+
+### WebSocket Endpoint
+
+```go
+import (
+    "github.com/gin-gonic/gin"
+    "github.com/yusing/godoxy/internal/logging/memlogger"
+)
+
+func setupRouter(r *gin.Engine) {
+    // Real-time log streaming via WebSocket
+    r.GET("/api/logs/stream", memlogger.HandlerFunc())
+}
+```
+
+### Subscribing to Log Events
+
+```go
+func monitorLogs(ctx context.Context) {
+    ch, cancel := memlogger.Events()
+    defer cancel()
+
+    for {
+        select {
+        case <-ctx.Done():
+            return
+        case event := <-ch:
+            processLogEvent(event)
+        }
+    }
+}
+
+func processLogEvent(event []byte) {
+    // Handle the log event
+    fmt.Printf("Log: %s", string(event))
+}
+```
+
+### WebSocket Client
+
+```javascript
+// Client-side JavaScript
+const ws = new WebSocket("ws://localhost:8080/api/logs/stream");
+
+ws.onmessage = (event) => {
+  console.log("New log entry:", event.data);
+};
+
+ws.onclose = () => {
+  console.log("Log stream disconnected");
+};
+
+ws.onerror = (error) => {
+  console.error("Log stream error:", error);
+};
+```
+
+### Complete Integration
+
+```go
+// MemLogger is an interface (io.Writer), so it is passed and returned
+// by value — never as *memlogger.MemLogger.
+func setupLogging(r *gin.Engine) memlogger.MemLogger {
+    logger := memlogger.GetMemLogger()
+
+    // WebSocket endpoint for real-time logs
+    r.GET("/ws/logs", memlogger.HandlerFunc())
+
+    return logger
+}
+
+// Elsewhere in the application
+func recordRequest(logger memlogger.MemLogger, path string, status int) {
+    logger.Write([]byte(fmt.Sprintf("[%s] %s - %d\n",
+        time.Now().Format(time.RFC3339), path, status)))
+}
+```
+
+## Performance Characteristics
+
+- O(1) write operations (amortized)
+- O(n) for truncation where n is buffer size
+- WebSocket notifications are non-blocking (bounded by the 10-second `writeTimeout`)
+- Memory usage is bounded at 16KB
+
+## Testing Notes
+
+- Mock by providing a custom `io.Writer` implementation
+- Test concurrent writes with goroutines
+- Verify truncation behavior
+- Test WebSocket upgrade failures
+
+## Related Packages
+
+- `internal/api` - HTTP API endpoints
+- `github.com/gin-gonic/gin` - HTTP framework
+- `github.com/yusing/goutils/http/websocket` - WebSocket utilities
diff --git a/internal/maxmind/README.md b/internal/maxmind/README.md
new file mode 100644
index 00000000..2791e799
--- /dev/null
+++ b/internal/maxmind/README.md
@@ -0,0 +1,337 @@
+# MaxMind
+
+The maxmind package provides MaxMind GeoIP database integration for IP geolocation, including automatic database downloading and updates.
+
+## Overview
+
+The maxmind package implements MaxMind GeoIP database management, providing IP geolocation lookups for country and city information. It supports automatic database downloading, scheduled updates, and thread-safe access.
+ +### Key Features + +- MaxMind GeoIP database loading +- Automatic database downloading from MaxMind +- Scheduled updates every 24 hours +- City lookup with cache support +- IP geolocation (country, city, timezone) +- Thread-safe access + +## Architecture + +```mermaid +graph TD + A[MaxMind Config] --> B[Load Database] + B --> C{Exists?} + C -->|No| D[Download] + C -->|Yes| E[Load] + D --> F[Extract from TarGz] + E --> G[Open Reader] + + H[IP Lookup] --> I[City Lookup] + I --> J{Cache Hit?} + J -->|Yes| K[Return Cached] + J -->|No| L[Database Query] + L --> M[Cache Result] + M --> K + + N[Update Scheduler] --> O[Check Daily] + O --> P{Different?} + P -->|Yes| Q[Download Update] + P -->|No| O +``` + +## Core Components + +### MaxMind Structure + +```go +type MaxMind struct { + *Config + lastUpdate time.Time + db struct { + *maxminddb.Reader + sync.RWMutex + } +} +``` + +### Configuration + +```go +type Config struct { + Database string // Database type (GeoLite2 or GeoIP2) + AccountID int + LicenseKey Secret +} +``` + +### IP Information + +```go +type IPInfo struct { + IP net.IP + Str string + Country *Country + City *City + Location *Location +} + +type Country struct { + IsoCode string + Name string +} + +type City struct { + Country *Country + Name string + Location *Location +} + +type Location struct { + TimeZone string + Latitude float64 + Longitude float64 +} +``` + +## Public API + +### Initialization + +```go +// LoadMaxMindDB loads or downloads the MaxMind database. +func (cfg *MaxMind) LoadMaxMindDB(parent task.Parent) gperr.Error +``` + +### Lookup + +```go +// LookupCity looks up city information for an IP. 
+func LookupCity(info *IPInfo) (city *City, ok bool) +``` + +## Usage + +### Basic Setup + +```go +maxmindCfg := &maxmind.Config{ + Database: maxmind.MaxMindGeoLite, + AccountID: 123456, + LicenseKey: "your-license-key", +} + +err := maxmindCfg.LoadMaxMindDB(parent) +if err != nil { + log.Fatal(err) +} +``` + +### IP Lookup + +```go +// Create IP info +ipInfo := &maxmind.IPInfo{ + IP: net.ParseIP("8.8.8.8"), + Str: "8.8.8.8", +} + +// Lookup city +city, ok := maxmind.LookupCity(ipInfo) +if ok { + fmt.Printf("Country: %s\n", city.Country.IsoCode) + fmt.Printf("City: %s\n", city.Name) + fmt.Printf("Timezone: %s\n", city.Location.TimeZone) +} +``` + +### Database Types + +```go +const ( + MaxMindGeoLite = "GeoLite2-Country" + MaxMindGeoIP2 = "GeoIP2-Country" +) +``` + +## Data Flow + +```mermaid +sequenceDiagram + participant Config + participant MaxMind + participant Database + participant Cache + participant UpdateScheduler + + Config->>MaxMind: LoadMaxMindDB() + MaxMind->>Database: Open() + alt Database Missing + MaxMind->>MaxMind: Download() + MaxMind->>Database: Extract & Create + end + Database-->>MaxMind: Reader + + Note over MaxMind: Start Update Scheduler + + loop Every 24 Hours + UpdateScheduler->>MaxMind: Check Update + MaxMind->>MaxMind: Check Last-Modified + alt Update Available + MaxMind->>MaxMind: Download + MaxMind->>Database: Replace + end + end + + participant Lookup + Lookup->>MaxMind: LookupCity(ip) + MaxMind->>Cache: Check + alt Cache Hit + Cache-->>Lookup: City Info + else Cache Miss + MaxMind->>Database: Query + Database-->>MaxMind: City Info + MaxMind->>Cache: Store + MaxMind-->>Lookup: City Info + end +``` + +## Database Download + +### Download Process + +```go +func (cfg *MaxMind) download() error { + resp, err := cfg.doReq(http.MethodGet) + if err != nil { + return err + } + + // Read response + databaseGZ, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + // Extract from tar.gz + err = extractFileFromTarGz(databaseGZ, 
cfg.dbFilename(), tmpDBPath) + if err != nil { + return err + } + + // Validate + db, err := maxmindDBOpen(tmpDBPath) + if err != nil { + os.Remove(tmpDBPath) + return err + } + db.Close() + + // Rename to final location + os.Rename(tmpDBPath, dbFile) + return nil +} +``` + +### Security Checks + +The download process includes tar bomb protection: + +```go +sumSize := int64(0) +for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + sumSize += hdr.Size + if sumSize > 30*1024*1024 { + return errors.New("file size exceeds 30MB") + } +} +``` + +## Update Scheduling + +```go +func (cfg *MaxMind) scheduleUpdate(parent task.Parent) { + task := parent.Subtask("maxmind_schedule_update", true) + ticker := time.NewTicker(updateInterval) // 24 hours + + cfg.loadLastUpdate() + cfg.update() + + for { + select { + case <-task.Context().Done(): + return + case <-ticker.C: + cfg.update() + } + } +} +``` + +## Thread Safety + +The database uses a read-write mutex: + +```go +type MaxMind struct { + *Config + db struct { + *maxminddb.Reader + sync.RWMutex + } +} + +// Lookups use RLock +func (cfg *MaxMind) lookup(ip net.IP) (*maxminddb.City, error) { + cfg.db.RLock() + defer cfg.db.RUnlock() + return cfg.db.Lookup(ip) +} +``` + +## Configuration + +### Environment Variables + +| Variable | Description | +| --------------------- | ------------------- | +| `MAXMIND_ACCOUNT_ID` | MaxMind account ID | +| `MAXMIND_LICENSE_KEY` | MaxMind license key | + +### YAML Configuration + +```yaml +providers: + maxmind: + database: geolite2 + account_id: 123456 + license_key: your-license-key +``` + +## Integration Points + +The maxmind package integrates with: + +- **ACL**: IP-based access control (country/timezone matching) +- **Config**: Configuration management +- **Logging**: Update notifications +- **City Cache**: IP geolocation caching + +## Error Handling + +```go +var ( + ErrResponseNotOK = gperr.New("response not OK") + ErrDownloadFailure = gperr.New("download failure") +) +``` + 
+## Performance Considerations + +- 24-hour update interval reduces unnecessary downloads +- Database size ~10-30MB +- City lookup cache reduces database queries +- RLock for concurrent reads diff --git a/internal/metrics/README.md b/internal/metrics/README.md index a6ab5aa6..60b3a740 100644 --- a/internal/metrics/README.md +++ b/internal/metrics/README.md @@ -1,285 +1,118 @@ # Metrics Package -System monitoring and metrics collection for GoDoxy. +System monitoring and metrics collection for GoDoxy with time-series storage and REST/WebSocket APIs. ## Overview -This package provides a unified metrics collection system that polls system and route data at regular intervals, stores historical data across multiple time periods, and exposes both REST and WebSocket APIs for consumption. +This package provides a unified metrics collection system that: + +- Polls system and route data at regular intervals +- Stores historical data across multiple time periods +- Exposes both REST and WebSocket APIs for consumption + +### Primary Consumers + +- `internal/api/v1/metrics/` - REST API endpoints +- WebUI - Real-time charts +- `internal/metrics/uptime/` - Route health monitoring + +### Non-goals + +- Metric aggregation from external sources +- Alerting (handled by `internal/notif/`) +- Long-term storage (30-day retention only) + +### Stability + +Internal package. See `internal/metrics/period/README.md` for the core framework documentation. + +## Packages + +### `period/` + +Generic time-bucketed metrics storage framework with: + +- `Period[T]` - Multi-timeframe container +- `Poller[T, A]` - Background data collector +- `Entries[T]` - Circular buffer for time-series data + +See [period/README.md](./period/README.md) for full documentation. + +### `uptime/` + +Route health status monitoring using the period framework. + +### `systeminfo/` + +System metrics collection (CPU, memory, disk, network, sensors) using the period framework. 
## Architecture ```mermaid graph TB - subgraph "Core Framework" - P[Period Generic] - E[Entries Ring Buffer] - PL[Poller Orchestrator] - end - subgraph "Data Sources" SI[SystemInfo Poller] UP[Uptime Poller] end - subgraph "Utilities" - UT[Utils] + subgraph "Period Framework" + P[Period Generic] + E[Entries Ring Buffer] + PL[Poller Orchestrator] + H[Handler HTTP API] + end + + subgraph "Storage" + JSON[(data/metrics/*.json)] end P --> E PL --> P PL --> SI PL --> UP - UT -.-> PL - UT -.-> SI - UT -.-> UP + H --> PL + PL --> JSON ``` -## Directory Structure +## Configuration Surface -``` -internal/metrics/ -├── period/ # Core polling and storage framework -│ ├── period.go # Period[T] - multi-timeframe container -│ ├── entries.go # Entries[T] - ring buffer implementation -│ ├── poller.go # Poller[T, A] - orchestration and lifecycle -│ └── handler.go # HTTP handler for data access -├── systeminfo/ # System metrics (CPU, memory, disk, network, sensors) -├── uptime/ # Route health and uptime monitoring -└── utils/ # Shared utilities (query parsing, pagination) -``` +No explicit configuration. Pollers respect `common.MetricsDisable*` flags: -## Core Components +| Flag | Disables | +| ----------------------- | ------------------------- | +| `MetricsDisableCPU` | CPU percentage collection | +| `MetricsDisableMemory` | Memory statistics | +| `MetricsDisableDisk` | Disk usage and I/O | +| `MetricsDisableNetwork` | Network counters | +| `MetricsDisableSensors` | Temperature sensors | -### 1. Period[T] (`period/period.go`) +## Dependency and Integration Map -A generic container that manages multiple time periods for the same data type. 
+### Internal Dependencies -```go -type Period[T any] struct { - Entries map[Filter]*Entries[T] // 5m, 15m, 1h, 1d, 1mo - mu sync.RWMutex -} -``` +- `github.com/yusing/goutils/task` - Lifetime management +- `internal/types` - Health check types -**Time Periods:** +### External Dependencies -| Filter | Duration | Entries | Interval | -| ------ | -------- | ------- | -------- | -| `5m` | 5 min | 100 | 3s | -| `15m` | 15 min | 100 | 9s | -| `1h` | 1 hour | 100 | 36s | -| `1d` | 1 day | 100 | 14.4m | -| `1mo` | 30 days | 100 | 7.2h | +- `github.com/shirou/gopsutil/v4` - System metrics collection +- `github.com/puzpuzpuz/xsync/v4` - Atomic value storage +- `github.com/bytedance/sonic` - JSON serialization -### 2. Entries[T] (`period/entries.go`) +## Observability -A fixed-size ring buffer (100 entries) with time-aware sampling. +### Logs -```go -type Entries[T any] struct { - entries [100]T // Fixed-size array - index int // Current position - count int // Number of entries - interval time.Duration // Sampling interval - lastAdd time.Time // Last write timestamp -} -``` +| Level | When | +| ------- | ------------------------------------------- | +| `Debug` | Poller start, data load/save | +| `Error` | Data source failures (aggregated every 30s) | -**Features:** +## Failure Modes and Recovery -- Circular buffer for efficient memory usage -- Rate-limited adds (respects configured interval) -- JSON serialization/deserialization with temporal spacing - -### 3. Poller[T, A] (`period/poller.go`) - -The orchestrator that ties together polling, storage, and HTTP serving. 
- -```go -type Poller[T any, A any] struct { - name string - poll PollFunc[T] // Data collection - aggregate AggregateFunc[T, A] // Data aggregation - resultFilter FilterFunc[T] // Query filtering - period *Period[T] // Data storage - lastResult synk.Value[T] // Latest snapshot -} -``` - -**Poll Cycle (1 second interval):** - -```mermaid -sequenceDiagram - participant T as Task - participant P as Poller - participant D as Data Source - participant S as Storage (Period) - participant F as File - - T->>P: Start() - P->>F: Load historical data - F-->>P: Period[T] state - - loop Every 1 second - P->>D: Poll(ctx, lastResult) - D-->>P: New data point - P->>S: Add to all periods - P->>P: Update lastResult - - alt Every 30 seconds - P->>P: Gather & log errors - end - - alt Every 5 minutes - P->>F: Persist to JSON - end - end -``` - -### 4. HTTP Handler (`period/handler.go`) - -Provides REST and WebSocket endpoints for data access. - -**Endpoints:** - -- `GET /metrics?period=5m&aggregate=cpu_average` - Historical data -- `WS /metrics?period=5m&interval=5s` - Streaming updates - -**Query Parameters:** -| Parameter | Type | Default | Description | -|-----------|------|---------|-------------| -| `period` | Filter | (none) | Time range (5m, 15m, 1h, 1d, 1mo) | -| `aggregate` | string | (varies) | Aggregation mode | -| `interval` | duration | 1s | WebSocket update interval | -| `limit` | int | 0 | Max results (0 = all) | -| `offset` | int | 0 | Pagination offset | -| `keyword` | string | "" | Fuzzy search filter | - -## Implementations - -### SystemInfo Poller - -Collects system metrics using `gopsutil`: - -```go -type SystemInfo struct { - Timestamp int64 - CPUAverage *float64 - Memory mem.VirtualMemoryStat - Disks map[string]disk.UsageStat - DisksIO map[string]*disk.IOCountersStat - Network net.IOCountersStat - Sensors Sensors -} -``` - -**Aggregation Modes:** - -- `cpu_average` - CPU usage percentage -- `memory_usage` - Memory used in bytes -- `memory_usage_percent` - Memory 
usage percentage -- `disks_read_speed` - Disk read speed (bytes/s) -- `disks_write_speed` - Disk write speed (bytes/s) -- `disks_iops` - Disk I/O operations per second -- `disk_usage` - Disk usage in bytes -- `network_speed` - Upload/download speed (bytes/s) -- `network_transfer` - Total bytes transferred -- `sensor_temperature` - Temperature sensor readings - -### Uptime Poller - -Monitors route health and calculates uptime statistics: - -```go -type RouteAggregate struct { - Alias string - DisplayName string - Uptime float32 // Percentage healthy - Downtime float32 // Percentage unhealthy - Idle float32 // Percentage napping/starting - AvgLatency float32 // Average latency in ms - CurrentStatus HealthStatus - Statuses []Status // Historical statuses -} -``` - -## Data Flow - -```mermaid -flowchart TD - A[Data Source] -->|PollFunc| B[Poller] - B -->|Add| C[Period.Entries] - C -->|Ring Buffer| D[(Memory)] - D -->|Every 5min| E[(data/metrics/*.json)] - - B -->|HTTP Request| F[ServeHTTP] - F -->|Filter| G[Get] - G -->|Aggregate| H[Response] - - F -->|WebSocket| I[PeriodicWrite] - I -->|interval| J[Push Updates] -``` - -## Persistence - -Data is persisted to `data/metrics/` as JSON files: - -```json -{ - "entries": { - "5m": { - "entries": [...], - "interval": "3s" - }, - "15m": {...}, - "1h": {...}, - "1d": {...}, - "1mo": {...} - } -} -``` - -**On Load:** - -- Validates and fixes interval mismatches -- Reconstructs temporal spacing for historical entries - -## Thread Safety - -- `Period[T]` uses `sync.RWMutex` for concurrent access -- `Entries[T]` is append-only (safe for single writer) -- `Poller` uses `synk.Value[T]` for atomic last result storage - -## Creating a New Poller - -```go -type MyData struct { - Value int -} - -type MyAggregate struct { - Values []int -} - -var MyPoller = period.NewPoller( - "my_poll_name", - func(ctx context.Context, last *MyData) (*MyData, error) { - // Fetch data - return &MyData{Value: 42}, nil - }, - func(entries []*MyData, query 
url.Values) (int, MyAggregate) { - // Aggregate for API response - return len(entries), MyAggregate{Values: [...]} - }, -) - -func init() { - MyPoller.Start() -} -``` - -## Error Handling - -- Poll errors are aggregated over 30-second windows -- Errors are logged with frequency counts -- Individual sensor warnings (e.g., ENODATA) are suppressed gracefully +| Failure Mode | Impact | Recovery | +| ------------------------- | -------------------- | -------------------------------- | +| Data source timeout | Missing data point | Logged, aggregated, continues | +| Disk read failure | No historical data | Starts fresh, warns | +| Disk write failure | Data loss on restart | Continues, retries next interval | +| Memory allocation failure | OOM risk | Go runtime handles | diff --git a/internal/metrics/period/README.md b/internal/metrics/period/README.md new file mode 100644 index 00000000..c2058808 --- /dev/null +++ b/internal/metrics/period/README.md @@ -0,0 +1,470 @@ +# Period Metrics + +Provides time-bucketed metrics storage with configurable periods, enabling historical data aggregation and real-time streaming. + +## Overview + +The period package implements a generic metrics collection system with time-bucketed storage. It collects data points at regular intervals and stores them in predefined time windows (5m, 15m, 1h, 1d, 1mo) with automatic persistence and HTTP/WebSocket APIs. + +### Primary Consumers + +- `internal/metrics/uptime` - Route health status storage +- `internal/metrics/systeminfo` - System metrics storage +- `internal/api/v1/metrics` - HTTP API endpoints + +### Non-goals + +- Does not provide data visualization +- Does not implement alerting or anomaly detection +- Does not support custom time periods (fixed set only) +- Does not provide data aggregation across multiple instances + +### Stability + +Internal package. Public interfaces are stable. 
+ +## Public API + +### Exported Types + +#### Period[T] Struct + +```go +type Period[T any] struct { + Entries map[Filter]*Entries[T] + mu sync.RWMutex +} +``` + +Container for all time-bucketed entries. Maps each filter to its corresponding `Entries`. + +**Methods:** + +- `Add(info T)` - Adds a data point to all periods +- `Get(filter Filter) ([]T, bool)` - Gets entries for a specific period +- `Total() int` - Returns total number of entries across all periods +- `ValidateAndFixIntervals()` - Validates and fixes intervals after loading + +#### Entries[T] Struct + +```go +type Entries[T any] struct { + entries [maxEntries]T + index int + count int + interval time.Duration + lastAdd time.Time +} +``` + +Circular buffer holding up to 100 entries for a single time period. + +**Methods:** + +- `Add(now time.Time, info T)` - Adds an entry with interval checking +- `Get() []T` - Returns all entries in chronological order + +#### Filter Type + +```go +type Filter string +``` + +Time period filter. + +```go +const ( + MetricsPeriod5m Filter = "5m" + MetricsPeriod15m Filter = "15m" + MetricsPeriod1h Filter = "1h" + MetricsPeriod1d Filter = "1d" + MetricsPeriod1mo Filter = "1mo" +) +``` + +#### Poller[T, A] Struct + +```go +type Poller[T any, A any] struct { + name string + poll PollFunc[T] + aggregate AggregateFunc[T, A] + resultFilter FilterFunc[T] + period *Period[T] + lastResult synk.Value[T] + errs []pollErr +} +``` + +Generic poller that collects data at regular intervals. + +**Type Aliases:** + +```go +type PollFunc[T any] func(ctx context.Context, lastResult T) (T, error) +type AggregateFunc[T any, A any] func(entries []T, query url.Values) (total int, result A) +type FilterFunc[T any] func(entries []T, keyword string) (filtered []T) +``` + +#### ResponseType[AggregateT] + +```go +type ResponseType[AggregateT any] struct { + Total int `json:"total"` + Data AggregateT `json:"data"` +} +``` + +Standard response format for API endpoints. 
+ +### Exported Functions + +#### Period Constructors + +```go +func NewPeriod[T any]() *Period[T] +``` + +Creates a new `Period[T]` with all time buckets initialized. + +#### Poller Constructors + +```go +func NewPoller[T any, A any]( + name string, + poll PollFunc[T], + aggregator AggregateFunc[T, A], +) *Poller[T, A] +``` + +Creates a new poller with the specified name, poll function, and aggregator. + +```go +func (p *Poller[T, A]) WithResultFilter(filter FilterFunc[T]) *Poller[T, A] +``` + +Adds a result filter to the poller for keyword-based filtering. + +#### Poller Methods + +```go +func (p *Poller[T, A]) Get(filter Filter) ([]T, bool) +``` + +Gets entries for a specific time period. + +```go +func (p *Poller[T, A]) GetLastResult() T +``` + +Gets the most recently collected data point. + +```go +func (p *Poller[T, A]) Start() +``` + +Starts the poller. Launches a background goroutine that: + +1. Polls for data at 1-second intervals +1. Stores data in all time buckets +1. Saves data to disk every 5 minutes +1. Reports errors every 30 seconds + +```go +func (p *Poller[T, A]) ServeHTTP(c *gin.Context) +``` + +HTTP handler for data retrieval. 
+ +## Architecture + +### Core Components + +```mermaid +flowchart TD + subgraph Poller + Poll[PollFunc] -->|Collects| Data[Data Point T] + Data -->|Adds to| Period[Period T] + Period -->|Stores in| Buckets[Time Buckets] + end + + subgraph Time Buckets + Bucket5m[5m Bucket] -->|Holds| Entries5m[100 Entries] + Bucket15m[15m Bucket] -->|Holds| Entries15m[100 Entries] + Bucket1h[1h Bucket] -->|Holds| Entries1h[100 Entries] + Bucket1d[1d Bucket] -->|Holds| Entries1d[100 Entries] + Bucket1mo[1mo Bucket] -->|Holds| Entries1mo[100 Entries] + end + + subgraph API + Handler[ServeHTTP] -->|Queries| Period + Period -->|Returns| Aggregate[Aggregated Data] + WebSocket[WebSocket] -->|Streams| Periodic[Periodic Updates] + end + + subgraph Persistence + Save[save] -->|Writes| File[JSON File] + File -->|Loads| Load[load] + end +``` + +### Data Flow + +```mermaid +sequenceDiagram + participant Collector + participant Poller + participant Period + participant Entries as Time Bucket + participant Storage + + Poller->>Poller: Start background goroutine + + loop Every 1 second + Poller->>Collector: poll(ctx, lastResult) + Collector-->>Poller: data, error + Poller->>Period: Add(data) + Period->>Entries: Add(now, data) + Entries->>Entries: Circular buffer write + + Poller->>Poller: Check save interval (every 5min) + alt Save interval reached + Poller->>Storage: Save to JSON + end + + alt Error interval reached (30s) + Poller->>Poller: Gather and log errors + end + end +``` + +### Time Periods + +| Filter | Duration | Interval | Max Entries | +| ------ | ---------- | ------------ | ----------- | +| `5m` | 5 minutes | 3 seconds | 100 | +| `15m` | 15 minutes | 9 seconds | 100 | +| `1h` | 1 hour | 36 seconds | 100 | +| `1d` | 1 day | 14.4 minutes | 100 | +| `1mo` | 30 days | 7.2 hours | 100 | + +### Circular Buffer Behavior + +```mermaid +stateDiagram-v2 + [*] --> Empty: NewEntries() + Empty --> Filling: Add(entry 1) + Filling --> Filling: Add(entry 2..N) + Filling --> Full: count == 
maxEntries + Full --> Overwrite: Add(new entry) + Overwrite --> Overwrite: index = (index + 1) % max +``` + +When full, new entries overwrite oldest entries (FIFO). + +## Configuration Surface + +### Poller Configuration + +| Parameter | Type | Default | Description | +| -------------------- | ------------- | -------------- | -------------------------- | +| `PollInterval` | time.Duration | 1s | How often to poll for data | +| `saveInterval` | time.Duration | 5m | How often to save to disk | +| `gatherErrsInterval` | time.Duration | 30s | Error aggregation interval | +| `saveBaseDir` | string | `data/metrics` | Persistence directory | + +### HTTP Query Parameters + +| Parameter | Description | +| ------------------ | ----------------------------------- | +| `period` | Time filter (5m, 15m, 1h, 1d, 1mo) | +| `aggregate` | Aggregation mode (package-specific) | +| `interval` | WebSocket update interval | +| `limit` / `offset` | Pagination parameters | + +## Dependency and Integration Map + +### Internal Dependencies + +None. + +### External Dependencies + +| Dependency | Purpose | +| ------------------------------------------ | ------------------------ | +| `github.com/gin-gonic/gin` | HTTP handling | +| `github.com/yusing/goutils/http/websocket` | WebSocket streaming | +| `github.com/bytedance/sonic` | JSON serialization | +| `github.com/yusing/goutils/task` | Lifetime management | +| `github.com/puzpuzpuz/xsync/v4` | Concurrent value storage | + +### Integration Points + +- Poll function collects data from external sources +- Aggregate function transforms data for visualization +- Filter function enables keyword-based filtering +- HTTP handler provides REST/WebSocket endpoints + +## Observability + +### Logs + +| Level | When | +| ----- | ------------------------------------- | +| Debug | Poller start/stop, buffer adjustments | +| Error | Load/save failures | +| Info | Data loaded from disk | + +### Metrics + +None exposed directly. 
Poll errors are accumulated and logged periodically. + +## Security Considerations + +- HTTP endpoint should be protected via authentication +- Data files contain potentially sensitive metrics +- No input validation beyond basic query parsing +- WebSocket connections have configurable intervals + +## Failure Modes and Recovery + +| Failure | Detection | Recovery | +| -------------------- | ---------------------- | ----------------------------------- | +| Poll function error | `poll()` returns error | Error accumulated, logged every 30s | +| JSON load failure | `os.ReadFile` error | Continue with empty period | +| JSON save failure | `Encode` error | Error accumulated, logged | +| Context cancellation | `<-ctx.Done()` | Goroutine exits, final save | +| Disk full | Write error | Error logged, continue | + +### Persistence Behavior + +1. On startup, attempts to load existing data from `data/metrics/{name}.json` +1. If file doesn't exist, starts with empty data +1. On load, validates and fixes intervals +1. Saves every 5 minutes during operation +1. 
Final save on goroutine exit + +## Usage Examples + +### Defining a Custom Poller + +```go +import "github.com/yusing/godoxy/internal/metrics/period" + +type CustomMetric struct { + Timestamp int64 `json:"timestamp"` + Value float64 `json:"value"` + Name string `json:"name"` +} + +func pollCustomMetric(ctx context.Context, last CustomMetric) (CustomMetric, error) { + return CustomMetric{ + Timestamp: time.Now().Unix(), + Value: readSensorValue(), + Name: "sensor_1", + }, nil +} + +func aggregateCustomMetric(entries []CustomMetric, query url.Values) (int, Aggregated) { + // Aggregate logic here + return len(aggregated), aggregated +} + +var CustomPoller = period.NewPoller("custom", pollCustomMetric, aggregateCustomMetric) +``` + +### Starting the Poller + +```go +// In your main initialization +CustomPoller.Start() +``` + +### Accessing Data + +```go +// Get all entries from the last hour +entries, ok := CustomPoller.Get(period.MetricsPeriod1h) +if ok { + for _, entry := range entries { + fmt.Printf("Value: %.2f at %d\n", entry.Value, entry.Timestamp) + } +} + +// Get the most recent value +latest := CustomPoller.GetLastResult() +``` + +### HTTP Integration + +```go +import "github.com/gin-gonic/gin" + +func setupMetricsAPI(r *gin.Engine) { + r.GET("/api/metrics/custom", CustomPoller.ServeHTTP) +} +``` + +**API Examples:** + +```bash +# Get last collected data +GET /api/metrics/custom + +# Get 1-hour history +GET /api/metrics/custom?period=1h + +# Get 1-day history with aggregation +GET /api/metrics/custom?period=1d&aggregate=cpu_average +``` + +### WebSocket Integration + +```go +// WebSocket connections automatically receive updates +// at the specified interval +ws, _, _ := websocket.DefaultDialer.Dial("ws://localhost/api/metrics/custom?interval=5s", nil) + +for { + _, msg, _ := ws.ReadMessage() + // Process the update +} +``` + +### Data Persistence Format + +```json +{ + "entries": { + "5m": { + "entries": [...], + "interval": 3000000000 + }, + "15m": {...}, + 
"1h": {...}, + "1d": {...}, + "1mo": {...} + } +} +``` + +## Performance Characteristics + +- O(1) add to circular buffer +- O(1) get (returns slice view) +- O(n) serialization where n = total entries +- Memory: O(5 * 100 * sizeof(T)) = fixed overhead +- JSON load/save: O(n) where n = total entries + +## Testing Notes + +- Test circular buffer overflow behavior +- Test interval validation after load +- Test aggregation with various query parameters +- Test concurrent access to period +- Test error accumulation and reporting + +## Related Packages + +- `internal/metrics/uptime` - Uses period for health status +- `internal/metrics/systeminfo` - Uses period for system metrics diff --git a/internal/metrics/systeminfo/README.md b/internal/metrics/systeminfo/README.md new file mode 100644 index 00000000..7573d668 --- /dev/null +++ b/internal/metrics/systeminfo/README.md @@ -0,0 +1,439 @@ +# System Info + +Collects and aggregates system metrics including CPU, memory, disk, network, and sensor data with configurable aggregation modes. + +## Overview + +The systeminfo package a custom fork of the [gopsutil](https://github.com/shirou/gopsutil) library to collect system metrics and integrates with the `period` package for time-bucketed storage. It supports collecting CPU, memory, disk, network, and sensor data with configurable collection intervals and aggregation modes for visualization. + +### Primary Consumers + +- `internal/api/v1/metrics` - HTTP endpoint for system metrics +- `internal/homepage` - Dashboard system monitoring widgets +- Monitoring and alerting systems + +### Non-goals + +- Does not provide alerting on metric thresholds +- Does not persist metrics beyond the period package retention +- Does not provide data aggregation across multiple instances +- Does not support custom metric collectors + +### Stability + +Internal package. Data format and API are stable. 
+ +## Public API + +### Exported Types + +#### SystemInfo Struct + +```go +type SystemInfo struct { + Timestamp int64 `json:"timestamp"` + CPUAverage *float64 `json:"cpu_average"` + Memory mem.VirtualMemoryStat `json:"memory"` + Disks map[string]disk.UsageStat `json:"disks"` + DisksIO map[string]*disk.IOCountersStat `json:"disks_io"` + Network net.IOCountersStat `json:"network"` + Sensors Sensors `json:"sensors"` +} +``` + +Container for all system metrics at a point in time. + +**Fields:** + +- `Timestamp` - Unix timestamp of collection +- `CPUAverage` - Average CPU usage percentage (0-100) +- `Memory` - Virtual memory statistics (used, total, percent, etc.) +- `Disks` - Disk usage by partition mountpoint +- `DisksIO` - Disk I/O counters by device name +- `Network` - Network I/O counters for primary interface +- `Sensors` - Hardware temperature sensor readings + +#### Sensors Type + +```go +type Sensors []sensors.TemperatureStat +``` + +Slice of temperature sensor readings. + +#### Aggregated Type + +```go +type Aggregated []map[string]any +``` + +Aggregated data suitable for charting libraries like Recharts. Each entry is a map with timestamp and values. 
+ +#### SystemInfoAggregateMode Type + +```go +type SystemInfoAggregateMode string +``` + +Aggregation mode constants: + +```go +const ( + SystemInfoAggregateModeCPUAverage SystemInfoAggregateMode = "cpu_average" + SystemInfoAggregateModeMemoryUsage SystemInfoAggregateMode = "memory_usage" + SystemInfoAggregateModeMemoryUsagePercent SystemInfoAggregateMode = "memory_usage_percent" + SystemInfoAggregateModeDisksReadSpeed SystemInfoAggregateMode = "disks_read_speed" + SystemInfoAggregateModeDisksWriteSpeed SystemInfoAggregateMode = "disks_write_speed" + SystemInfoAggregateModeDisksIOPS SystemInfoAggregateMode = "disks_iops" + SystemInfoAggregateModeDiskUsage SystemInfoAggregateMode = "disk_usage" + SystemInfoAggregateModeNetworkSpeed SystemInfoAggregateMode = "network_speed" + SystemInfoAggregateModeNetworkTransfer SystemInfoAggregateMode = "network_transfer" + SystemInfoAggregateModeSensorTemperature SystemInfoAggregateMode = "sensor_temperature" +) +``` + +### Exported Variables + +#### Poller + +```go +var Poller = period.NewPoller("system_info", getSystemInfo, aggregate) +``` + +Pre-configured poller for system info metrics. Start with `Poller.Start()`. + +### Exported Functions + +#### getSystemInfo + +```go +func getSystemInfo(ctx context.Context, lastResult *SystemInfo) (*SystemInfo, error) +``` + +Collects current system metrics. This is the poll function passed to the period poller. 
+ +**Features:** + +- Concurrent collection of all metric categories +- Handles partial failures gracefully +- Calculates rates based on previous result (for speed metrics) +- Logs warnings for non-critical errors + +**Rate Calculations:** + +- Disk read/write speed: `(currentBytes - lastBytes) / interval` +- Disk IOPS: `(currentCount - lastCount) / interval` +- Network speed: `(currentBytes - lastBytes) / interval` + +#### aggregate + +```go +func aggregate(entries []*SystemInfo, query url.Values) (total int, result Aggregated) +``` + +Aggregates system info entries for a specific mode. Called by the period poller. + +**Query Parameters:** + +- `aggregate` - The aggregation mode (see constants above) + +**Returns:** + +- `total` - Number of aggregated entries +- `result` - Slice of maps suitable for charting + +## Architecture + +### Core Components + +```mermaid +flowchart TD + subgraph Collection + G[gopsutil] -->|CPU| CPU[CPU Percent] + G -->|Memory| Mem[Virtual Memory] + G -->|Disks| Disk[Partitions & IO] + G -->|Network| Net[Network Counters] + G -->|Sensors| Sens[Temperature] + end + + subgraph Poller + Collect[getSystemInfo] -->|Aggregates| Info[SystemInfo] + Info -->|Stores in| Period[Period SystemInfo] + end + + subgraph Aggregation Modes + CPUAvg[cpu_average] + MemUsage[memory_usage] + MemPercent[memory_usage_percent] + DiskRead[disks_read_speed] + DiskWrite[disks_write_speed] + DiskIOPS[disks_iops] + DiskUsage[disk_usage] + NetSpeed[network_speed] + NetTransfer[network_transfer] + SensorTemp[sensor_temperature] + end + + Period -->|Query with| Aggregate[aggregate function] + Aggregate --> CPUAvg + Aggregate --> MemUsage + Aggregate --> DiskRead +``` + +### Data Flow + +```mermaid +sequenceDiagram + participant gopsutil + participant Poller + participant Period + participant API + + Poller->>Poller: Start background goroutine + + loop Every 1 second + Poller->>gopsutil: Collect CPU (500ms timeout) + Poller->>gopsutil: Collect Memory + Poller->>gopsutil: 
Collect Disks (partition + IO) + Poller->>gopsutil: Collect Network + Poller->>gopsutil: Collect Sensors + + gopsutil-->>Poller: SystemInfo + Poller->>Period: Add(SystemInfo) + end + + API->>Period: Get(filter) + Period-->>API: Entries + API->>API: aggregate(entries, mode) + API-->>Client: Chart data +``` + +### Collection Categories + +| Category | Data Source | Optional | Rate Metrics | +| -------- | ------------------------------------------------------ | -------- | --------------------- | +| CPU | `cpu.PercentWithContext` | Yes | No | +| Memory | `mem.VirtualMemoryWithContext` | Yes | No | +| Disks | `disk.PartitionsWithContext` + `disk.UsageWithContext` | Yes | Yes (read/write/IOPS) | +| Network | `net.IOCountersWithContext` | Yes | Yes (upload/download) | +| Sensors | `sensors.TemperaturesWithContext` | Yes | No | + +### Aggregation Modes + +Each mode produces chart-friendly output: + +**CPU Average:** + +```json +[ + { "timestamp": 1704892800, "cpu_average": 45.5 }, + { "timestamp": 1704892810, "cpu_average": 52.3 } +] +``` + +**Memory Usage:** + +```json +[ + { "timestamp": 1704892800, "memory_usage": 8388608000 }, + { "timestamp": 1704892810, "memory_usage": 8453440000 } +] +``` + +**Disk Read/Write Speed:** + +```json +[ + { "timestamp": 1704892800, "sda": 10485760, "sdb": 5242880 }, + { "timestamp": 1704892810, "sda": 15728640, "sdb": 4194304 } +] +``` + +## Configuration Surface + +### Disabling Metrics Categories + +Metrics categories can be disabled via environment variables: + +| Variable | Purpose | +| ------------------------- | ------------------------------------------- | +| `METRICS_DISABLE_CPU` | Set to "true" to disable CPU collection | +| `METRICS_DISABLE_MEMORY` | Set to "true" to disable memory collection | +| `METRICS_DISABLE_DISK` | Set to "true" to disable disk collection | +| `METRICS_DISABLE_NETWORK` | Set to "true" to disable network collection | +| `METRICS_DISABLE_SENSORS` | Set to "true" to disable sensor collection | + +## 
Dependency and Integration Map + +### Internal Dependencies + +| Package | Purpose | +| -------------------------------- | --------------------- | +| `internal/metrics/period` | Time-bucketed storage | +| `internal/common` | Configuration flags | +| `github.com/yusing/goutils/errs` | Error handling | + +### External Dependencies + +| Dependency | Purpose | +| ------------------------------- | ------------------------- | +| `github.com/shirou/gopsutil/v4` | System metrics collection | +| `github.com/rs/zerolog` | Logging | + +### Integration Points + +- gopsutil provides raw system metrics +- period package handles storage and persistence +- HTTP API provides query interface + +## Observability + +### Logs + +| Level | When | +| ----- | ------------------------------------------ | +| Warn | Non-critical errors (e.g., no sensor data) | +| Error | Other errors | + +### Metrics + +No metrics exposed directly. Collection errors are logged. + +## Failure Modes and Recovery + +| Failure | Detection | Recovery | +| --------------- | ------------------------------------ | -------------------------------- | +| No CPU data | `cpu.Percent` returns error | Skip and log later with warning | +| No memory data | `mem.VirtualMemory` returns error | Skip and log later with warning | +| No disk data | `disk.Usage` returns error for all | Skip and log later with warning | +| No network data | `net.IOCounters` returns error | Skip and log later with warning | +| No sensor data | `sensors.Temperatures` returns error | Skip and log later with warning | +| Context timeout | Context deadline exceeded | Return partial data with warning | + +### Partial Collection + +The package uses `gperr.NewGroup` to collect errors from concurrent operations: + +```go +errs := gperr.NewGroup("failed to get system info") +errs.Go(func() error { return s.collectCPUInfo(ctx) }) +errs.Go(func() error { return s.collectMemoryInfo(ctx) }) +// ... 
+result := errs.Wait() +``` + +Warnings (like `ENODATA`) are logged but don't fail the collection. +Critical errors cause the function to return an error. + +## Usage Examples + +### Starting the Poller + +```go +import "github.com/yusing/godoxy/internal/metrics/systeminfo" + +func init() { + systeminfo.Poller.Start() +} +``` + +### HTTP Endpoint + +```go +import "github.com/gin-gonic/gin" + +func setupMetricsAPI(r *gin.Engine) { + r.GET("/api/metrics/system", systeminfo.Poller.ServeHTTP) +} +``` + +**API Examples:** + +```bash +# Get latest metrics +curl http://localhost:8080/api/metrics/system + +# Get 1-hour history with CPU aggregation +curl "http://localhost:8080/api/metrics/system?period=1h&aggregate=cpu_average" + +# Get 24-hour memory usage history +curl "http://localhost:8080/api/metrics/system?period=1d&aggregate=memory_usage_percent" + +# Get disk I/O for the last hour +curl "http://localhost:8080/api/metrics/system?period=1h&aggregate=disks_read_speed" +``` + +### WebSocket Streaming + +```javascript +const ws = new WebSocket( + "ws://localhost:8080/api/metrics/system?period=1m&interval=5s&aggregate=cpu_average" +); + +ws.onmessage = (event) => { + const data = JSON.parse(event.data); + console.log("CPU:", data.data); +}; +``` + +### Direct Data Access + +```go +// Get entries for the last hour +entries, ok := systeminfo.Poller.Get(period.MetricsPeriod1h) +for _, entry := range entries { + if entry.CPUAverage != nil { + fmt.Printf("CPU: %.1f%% at %d\n", *entry.CPUAverage, entry.Timestamp) + } +} + +// Get the most recent metrics +latest := systeminfo.Poller.GetLastResult() +``` + +### Disabling Metrics at Runtime + +```go +import ( + "github.com/yusing/godoxy/internal/common" + "github.com/yusing/godoxy/internal/metrics/systeminfo" +) + +func init() { + // Disable expensive sensor collection + common.MetricsDisableSensors = true + systeminfo.Poller.Start() +} +``` + +## Performance Characteristics + +- O(1) per metric collection (gopsutil handles 
complexity) +- Concurrent collection of all categories +- Rate calculations O(n) where n = number of disks/interfaces +- Memory: O(5 * 100 * sizeof(SystemInfo)) +- JSON serialization O(n) for API responses + +### Collection Latency + +| Category | Typical Latency | +| -------- | -------------------------------------- | +| CPU | ~10-50ms | +| Memory | ~5-10ms | +| Disks | ~10-100ms (depends on partition count) | +| Network | ~5-10ms | +| Sensors | ~10-50ms | + +## Testing Notes + +- Mock gopsutil calls for unit tests +- Test with real metrics to verify rate calculations +- Test aggregation modes with various data sets +- Verify disable flags work correctly +- Test partial failure scenarios + +## Related Packages + +- `internal/metrics/period` - Time-bucketed storage +- `internal/api/v1/metrics` - HTTP API endpoints +- `github.com/shirou/gopsutil/v4` - System metrics library diff --git a/internal/metrics/uptime/README.md b/internal/metrics/uptime/README.md new file mode 100644 index 00000000..4c5b985c --- /dev/null +++ b/internal/metrics/uptime/README.md @@ -0,0 +1,402 @@ +# Uptime + +Tracks and aggregates route health status over time, providing uptime/downtime statistics and latency metrics. + +## Overview + +The uptime package monitors route health status and calculates uptime percentages over configurable time periods. It integrates with the `period` package for historical storage and provides aggregated statistics for visualization. + +### Primary Consumers + +- `internal/api/v1/metrics` - HTTP endpoint for uptime data +- `internal/homepage` - Dashboard uptime widgets +- Monitoring and alerting systems + +### Non-goals + +- Does not perform health checks (handled by `internal/route/routes`) +- Does not provide alerting on downtime +- Does not persist data beyond the period package retention +- Does not aggregate across multiple GoDoxy instances + +### Stability + +Internal package. Data format and API are stable.
+ +## Public API + +### Exported Types + +#### StatusByAlias + +```go +type StatusByAlias struct { + Map map[string]routes.HealthInfoWithoutDetail `json:"statuses"` + Timestamp int64 `json:"timestamp"` +} +``` + +Container for health status of all routes at a specific time. + +#### Status + +```go +type Status struct { + Status types.HealthStatus `json:"status" swaggertype:"string" enums:"healthy,unhealthy,unknown,napping,starting"` + Latency int32 `json:"latency"` + Timestamp int64 `json:"timestamp"` +} +``` + +Individual route status at a point in time. + +#### RouteAggregate + +```go +type RouteAggregate struct { + Alias string `json:"alias"` + DisplayName string `json:"display_name"` + Uptime float32 `json:"uptime"` + Downtime float32 `json:"downtime"` + Idle float32 `json:"idle"` + AvgLatency float32 `json:"avg_latency"` + IsDocker bool `json:"is_docker"` + IsExcluded bool `json:"is_excluded"` + CurrentStatus types.HealthStatus `json:"current_status" swaggertype:"string" enums:"healthy,unhealthy,unknown,napping,starting"` + Statuses []Status `json:"statuses"` +} +``` + +Aggregated statistics for a single route. + +#### Aggregated + +```go +type Aggregated []RouteAggregate +``` + +Slice of route aggregates, sorted alphabetically by alias. + +### Exported Variables + +#### Poller + +```go +var Poller = period.NewPoller("uptime", getStatuses, aggregateStatuses) +``` + +Pre-configured poller for uptime metrics. Start with `Poller.Start()`. + +### Unexported Functions + +#### getStatuses + +```go +func getStatuses(ctx context.Context, _ StatusByAlias) (StatusByAlias, error) +``` + +Collects current status of all routes. Called by the period poller every second. 
+ +**Returns:** + +- `StatusByAlias` - Map of all route statuses with current timestamp +- `error` - Always nil (errors are logged internally) + +#### aggregateStatuses + +```go +func aggregateStatuses(entries []StatusByAlias, query url.Values) (int, Aggregated) +``` + +Aggregates status entries into route statistics. + +**Query Parameters:** + +- `period` - Time filter (5m, 15m, 1h, 1d, 1mo) +- `limit` - Maximum number of routes to return (0 = all) +- `offset` - Offset for pagination +- `keyword` - Fuzzy search keyword for filtering routes + +**Returns:** + +- `int` - Total number of routes matching the query +- `Aggregated` - Slice of route aggregates + +## Architecture + +### Core Components + +```mermaid +flowchart TD + subgraph Health Monitoring + Routes[Routes] -->|GetHealthInfoWithoutDetail| Status[Status Map] + Status -->|Polls every| Second[1 Second] + end + + subgraph Poller + Poll[getStatuses] -->|Collects| StatusByAlias + StatusByAlias -->|Stores in| Period[Period StatusByAlias] + end + + subgraph Aggregation + Query[Query Params] -->|Filters| Aggregate[aggregateStatuses] + Aggregate -->|Calculates| RouteAggregate + RouteAggregate -->|Uptime| UP[Uptime %] + RouteAggregate -->|Downtime| DOWN[Downtime %] + RouteAggregate -->|Idle| IDLE[Idle %] + RouteAggregate -->|Latency| LAT[Avg Latency] + end + + subgraph Response + RouteAggregate -->|JSON| Client[API Client] + end +``` + +### Data Flow + +```mermaid +sequenceDiagram + participant Routes as Route Registry + participant Poller as Uptime Poller + participant Period as Period Storage + participant API as HTTP API + + Routes->>Poller: GetHealthInfoWithoutDetail() + Poller->>Period: Add(StatusByAlias) + + loop Every second + Poller->>Routes: Collect status + Poller->>Period: Store status + end + + API->>Period: Get(filter) + Period-->>API: Entries + API->>API: aggregateStatuses() + API-->>Client: Aggregated JSON +``` + +### Status Types + +| Status | Description | Counted as Uptime? 
| +| ----------- | ------------------------------ | ------------------ | +| `healthy` | Route is responding normally | Yes | +| `unhealthy` | Route is not responding | No | +| `unknown` | Status could not be determined | Excluded | +| `napping` | Route is in idle/sleep state | Idle (separate) | +| `starting` | Route is starting up | Idle (separate) | + +### Calculation Formula + +For a set of status entries: + +``` +Uptime = healthy_count / total_count +Downtime = unhealthy_count / total_count +Idle = (napping_count + starting_count) / total_count +AvgLatency = sum(latency) / count +``` + +Note: `unknown` statuses are excluded from all calculations. + +## Configuration Surface + +No explicit configuration. The poller uses period package defaults: + +| Parameter | Value | +| ------------- | ---------------------------- | +| Poll Interval | 1 second | +| Retention | 5m, 15m, 1h, 1d, 1mo periods | + +## Dependency and Integration Map + +### Internal Dependencies + +| Package | Purpose | +| ------------------------- | --------------------- | +| `internal/route/routes` | Health info retrieval | +| `internal/metrics/period` | Time-bucketed storage | +| `internal/types` | HealthStatus enum | +| `internal/metrics/utils` | Query utilities | + +### External Dependencies + +| Dependency | Purpose | +| ---------------------------------------- | ---------------- | +| `github.com/lithammer/fuzzysearch/fuzzy` | Keyword matching | +| `github.com/bytedance/sonic` | JSON marshaling | + +### Integration Points + +- Route health monitors provide status via `routes.GetHealthInfoWithoutDetail()` +- Period poller handles data collection and storage +- HTTP API provides query interface via `Poller.ServeHTTP` + +## Observability + +### Logs + +Poller lifecycle and errors are logged via zerolog. + +### Metrics + +No metrics exposed directly. Status data available via API. 
+ +## Failure Modes and Recovery + +| Failure | Detection | Recovery | +| -------------------------------- | --------------------------------- | ------------------------------ | +| Route health monitor unavailable | Empty map returned | Log warning, continue | +| Invalid query parameters | `aggregateStatuses` returns empty | Return empty result | +| Poller panic | Goroutine crash | Process terminates | +| Persistence failure | Load/save error | Log, continue with empty state | + +### Fuzzy Search + +The package uses `fuzzy.MatchFold` for keyword matching: + +- Case-insensitive matching +- Substring matching +- Fuzzy ranking + +## Usage Examples + +### Starting the Poller + +```go +import "github.com/yusing/godoxy/internal/metrics/uptime" + +func init() { + uptime.Poller.Start() +} +``` + +### HTTP Endpoint + +```go +import ( + "github.com/gin-gonic/gin" + "github.com/yusing/godoxy/internal/metrics/uptime" +) + +func setupUptimeAPI(r *gin.Engine) { + r.GET("/api/uptime", uptime.Poller.ServeHTTP) +} +``` + +**API Examples:** + +```bash +# Get latest status +curl http://localhost:8080/api/uptime + +# Get 1-hour history +curl "http://localhost:8080/api/uptime?period=1h" + +# Get with limit and offset (pagination) +curl "http://localhost:8080/api/uptime?limit=10&offset=0" + +# Search for routes containing "api" +curl "http://localhost:8080/api/uptime?keyword=api" + +# Combined query +curl "http://localhost:8080/api/uptime?period=1d&limit=20&offset=0&keyword=docker" +``` + +### WebSocket Streaming + +```javascript +const ws = new WebSocket( + "ws://localhost:8080/api/uptime?period=1m&interval=5s" +); + +ws.onmessage = (event) => { + const data = JSON.parse(event.data); + data.data.forEach((route) => { + console.log(`${route.display_name}: ${route.uptime * 100}% uptime`); + }); +}; +``` + +### Direct Data Access + +```go +// Get entries for the last hour +entries, ok := uptime.Poller.Get(period.MetricsPeriod1h) +for _, entry := range entries { + for alias, status := range 
entry.Map { + fmt.Printf("Route %s: %s (latency: %dms)\n", + alias, status.Status, status.Latency.Milliseconds()) + } +} + +// Get aggregated statistics +_, agg := uptime.aggregateStatuses(entries, url.Values{ + "period": []string{"1h"}, +}) + +for _, route := range agg { + fmt.Printf("%s: %.1f%% uptime, %.1fms avg latency\n", + route.DisplayName, route.Uptime*100, route.AvgLatency) +} +``` + +### Response Format + +**Latest Status Response:** + +```json +{ + "alias1": { + "status": "healthy", + "latency": 45 + }, + "alias2": { + "status": "unhealthy", + "latency": 0 + } +} +``` + +**Aggregated Response:** + +```json +{ + "total": 5, + "data": [ + { + "alias": "api-server", + "display_name": "API Server", + "uptime": 0.98, + "downtime": 0.02, + "idle": 0.0, + "avg_latency": 45.5, + "is_docker": true, + "is_excluded": false, + "current_status": "healthy", + "statuses": [ + { "status": "healthy", "latency": 45, "timestamp": 1704892800 } + ] + } + ] +} +``` + +## Performance Characteristics + +- O(n) status collection per poll where n = number of routes +- O(m \* k) aggregation where m = entries, k = routes +- Memory: O(p _ r _ s) where p = periods, r = routes, s = status size +- Fuzzy search is O(routes \* keyword_length) + +## Testing Notes + +- Mock `routes.GetHealthInfoWithoutDetail()` for testing +- Test aggregation with known status sequences +- Verify pagination and filtering logic +- Test fuzzy search matching + +## Related Packages + +- `internal/route/routes` - Route health monitoring +- `internal/metrics/period` - Time-bucketed metrics storage +- `internal/types` - Health status types diff --git a/internal/net/README.md b/internal/net/README.md new file mode 100644 index 00000000..f2fa745b --- /dev/null +++ b/internal/net/README.md @@ -0,0 +1,144 @@ +# Network Utilities + +The net package provides network utility functions for GoDoxy, including TCP connection testing and network-related helpers. 
+ +## Overview + +The net package implements network utility functions that are used throughout GoDoxy for connectivity testing, TCP operations, and network-related utilities. + +### Key Features + +- TCP connection testing (ping) +- Connection utilities + +## Core Functions + +### TCP Ping + +```go +// PingTCP pings a TCP endpoint by attempting a connection. +func PingTCP(ctx context.Context, ip net.IP, port int) error +``` + +## Usage + +### Basic Usage + +```go +import ( + "net" + + gdnet "github.com/yusing/godoxy/internal/net" +) + +func checkService(ctx context.Context, ip string, port int) error { + addr := net.ParseIP(ip) + if addr == nil { + return fmt.Errorf("invalid IP: %s", ip) + } + + err := gdnet.PingTCP(ctx, addr, port) + if err != nil { + return fmt.Errorf("service %s:%d unreachable: %w", ip, port, err) + } + + fmt.Printf("Service %s:%d is reachable\n", ip, port) + return nil +} +``` + +### Timeout Usage + +```go +ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) +defer cancel() + +ip := net.ParseIP("192.168.1.100") +err := gdnet.PingTCP(ctx, ip, 8080) + +if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + log.Println("Connection timed out") + } else { + log.Printf("Connection failed: %v", err) + } +} +``` + +## Implementation + +```go +func PingTCP(ctx context.Context, ip net.IP, port int) error { + var dialer net.Dialer + conn, err := dialer.DialContext(ctx, "tcp", fmt.Sprintf("%s:%d", ip, port)) + if err != nil { + return err + } + conn.Close() + return nil +} +``` + +## Data Flow + +```mermaid +sequenceDiagram + participant Caller + participant Dialer + participant TCPEndpoint + participant Connection + + Caller->>Dialer: DialContext("tcp", "ip:port") + Dialer->>TCPEndpoint: SYN + TCPEndpoint-->>Dialer: SYN-ACK + Dialer->>Connection: Create connection + Connection-->>Dialer: Connection + Dialer-->>Caller: nil error + + Note over Caller,Connection: Connection immediately closed + Connection->>TCPEndpoint: FIN + TCPEndpoint-->>Connection:
FIN-ACK +``` + +## Use Cases + +### Service Health Check + +```go +func checkServices(ctx context.Context, services []Service) error { + for _, svc := range services { + ip := net.ParseIP(svc.IP) + if ip == nil { + return fmt.Errorf("invalid IP for %s: %s", svc.Name, svc.IP) + } + + if err := net.PingTCP(ctx, ip, svc.Port); err != nil { + return fmt.Errorf("service %s (%s:%d) unreachable: %w", + svc.Name, svc.IP, svc.Port, err) + } + } + return nil +} +``` + +### Proxmox Container Reachability + +```go +// Check if a Proxmox container is reachable on its proxy port +func checkContainerReachability(ctx context.Context, node *proxmox.Node, vmid int, port int) error { + ips, err := node.LXCGetIPs(ctx, vmid) + if err != nil { + return err + } + + for _, ip := range ips { + if err := net.PingTCP(ctx, ip, port); err == nil { + return nil // Found reachable IP + } + } + + return fmt.Errorf("no reachable IP found for container %d", vmid) +} +``` + +## Related Packages + +- **Route**: Uses TCP ping for load balancing health checks +- **Proxmox**: Uses TCP ping to verify container reachability +- **Idlewatcher**: Uses TCP ping to check idle status diff --git a/internal/net/gphttp/README.md b/internal/net/gphttp/README.md new file mode 100644 index 00000000..0115d176 --- /dev/null +++ b/internal/net/gphttp/README.md @@ -0,0 +1,146 @@ +# gphttp + +HTTP utilities package providing transport configuration, default HTTP client, and a wrapper around `http.ServeMux` with panic recovery. 
+ +## Overview + +This package provides shared HTTP utilities used throughout GoDoxy: + +- **Default HTTP Client**: Pre-configured `http.Client` with secure settings +- **Transport Factory**: Functions to create optimized `http.Transport` configurations +- **ServeMux Wrapper**: Extended `http.ServeMux` with panic recovery for handler registration + +## Architecture + +```mermaid +graph TD + A[HTTP Request] --> B[gphttp.Client] + B --> C[Transport] + C --> D[Network Connection] + + E[Server Setup] --> F[gphttp.ServeMux] + F --> G[http.ServeMux] + G --> H[HTTP Handlers] +``` + +## Core Components + +### HTTP Client + +The package exports a pre-configured `http.Client` with secure defaults: + +```go +var ( + httpClient = &http.Client{ + Timeout: 5 * time.Second, + Transport: &http.Transport{ + DisableKeepAlives: true, + ForceAttemptHTTP2: false, + DialContext: (&net.Dialer{ + Timeout: 3 * time.Second, + KeepAlive: 60 * time.Second, + }).DialContext, + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + } + + Get = httpClient.Get + Post = httpClient.Post + Head = httpClient.Head + Do = httpClient.Do +) +``` + +### Transport Factory + +Functions for creating optimized HTTP transports: + +```go +// NewTransport creates an http.Transport with proxy support and optimized settings +func NewTransport() *http.Transport + +// NewTransportWithTLSConfig creates an http.Transport with custom TLS configuration +func NewTransportWithTLSConfig(tlsConfig *tls.Config) *http.Transport +``` + +Default transport settings: + +- `MaxIdleConnsPerHost`: 1000 +- `IdleConnTimeout`: 90 seconds +- `TLSHandshakeTimeout`: 10 seconds +- `ResponseHeaderTimeout`: 60 seconds +- `WriteBufferSize` / `ReadBufferSize`: 16KB + +### ServeMux Wrapper + +Extended `http.ServeMux` with panic recovery: + +```go +type ServeMux struct { + *http.ServeMux +} + +func NewServeMux() ServeMux +func (mux ServeMux) Handle(pattern string, handler http.Handler) (err error) +func (mux ServeMux) HandleFunc(pattern 
string, handler http.HandlerFunc) (err error)
+```
+
+The `Handle` and `HandleFunc` methods recover from panics and return them as errors, preventing one bad handler from crashing the entire server.
+
+## Usage Examples
+
+### Using the Default Client
+
+```go
+import "github.com/yusing/godoxy/internal/net/gphttp"
+
+// Simple GET request
+resp, err := gphttp.Get("https://example.com")
+if err != nil {
+    log.Fatal(err)
+}
+defer resp.Body.Close()
+
+// POST request (reuses resp/err declared above)
+resp, err = gphttp.Post("https://example.com", "application/json", body)
+```
+
+### Creating Custom Transports
+
+```go
+import (
+    "crypto/tls"
+    "net/http"
+    "github.com/yusing/godoxy/internal/net/gphttp"
+)
+
+// Default transport with environment proxy
+transport := gphttp.NewTransport()
+
+// Custom TLS configuration
+tlsConfig := &tls.Config{
+    ServerName: "example.com",
+}
+transport = gphttp.NewTransportWithTLSConfig(tlsConfig)
+```
+
+### Using ServeMux with Panic Recovery
+
+```go
+mux := gphttp.NewServeMux()
+
+// Register handlers - panics are converted to errors
+if err := mux.HandleFunc("/api", apiHandler); err != nil {
+    log.Printf("handler registration failed: %v", err)
+}
+```
+
+## Integration Points
+
+- Used by `internal/net/gphttp/middleware` for HTTP request/response processing
+- Used by `internal/net/gphttp/loadbalancer` for backend connections
+- Used throughout the route handling system
+
+## Configuration
+
+The default client disables HTTP/2 (`ForceAttemptHTTP2: false`) and keep-alives (`DisableKeepAlives: true`) for security and compatibility reasons. The transport uses environment proxy settings via `http.ProxyFromEnvironment`.
diff --git a/internal/net/gphttp/loadbalancer/README.md b/internal/net/gphttp/loadbalancer/README.md new file mode 100644 index 00000000..97432657 --- /dev/null +++ b/internal/net/gphttp/loadbalancer/README.md @@ -0,0 +1,304 @@ +# Load Balancer + +Load balancing package providing multiple distribution algorithms, sticky sessions, and server health management. + +## Overview + +This package implements a flexible load balancer for distributing HTTP requests across multiple backend servers. It supports multiple balancing algorithms and integrates with GoDoxy's task management and health monitoring systems. + +## Architecture + +```mermaid +graph TD + A[HTTP Request] --> B[LoadBalancer] + B --> C{Algorithm} + C -->|Round Robin| D[RoundRobin] + C -->|Least Connections| E[LeastConn] + C -->|IP Hash| F[IPHash] + + D --> G[Available Servers] + E --> G + F --> G + + G --> H[Server Selection] + H --> I{Sticky Session?} + I -->|Yes| J[Set Cookie] + I -->|No| K[Continue] + + J --> L[ServeHTTP] + K --> L +``` + +## Algorithms + +### Round Robin + +Distributes requests evenly across all available servers in sequence. + +```mermaid +sequenceDiagram + participant C as Client + participant LB as LoadBalancer + participant S1 as Server 1 + participant S2 as Server 2 + participant S3 as Server 3 + + C->>LB: Request 1 + LB->>S1: Route to Server 1 + C->>LB: Request 2 + LB->>S2: Route to Server 2 + C->>LB: Request 3 + LB->>S3: Route to Server 3 + C->>LB: Request 4 + LB->>S1: Route to Server 1 +``` + +### Least Connections + +Routes requests to the server with the fewest active connections. + +```mermaid +flowchart LR + subgraph LB["Load Balancer"] + direction TB + A["Server A
3 connections"] + B["Server B
1 connection"] + C["Server C
5 connections"] + end + + New["New Request"] --> B +``` + +### IP Hash + +Consistently routes requests from the same client IP to the same server using hash-based distribution. + +```mermaid +graph LR + Client1["Client IP: 192.168.1.10"] -->|Hash| ServerA + Client2["Client IP: 192.168.1.20"] -->|Hash| ServerB + Client3["Client IP: 192.168.1.30"] -->|Hash| ServerA +``` + +## Core Components + +### LoadBalancer + +```go +type LoadBalancer struct { + *types.LoadBalancerConfig + task *task.Task + pool pool.Pool[types.LoadBalancerServer] + poolMu sync.Mutex + sumWeight int + startTime time.Time +} +``` + +**Key Methods:** + +```go +// Create a new load balancer from configuration +func New(cfg *types.LoadBalancerConfig) *LoadBalancer + +// Start the load balancer as a background task +func (lb *LoadBalancer) Start(parent task.Parent) gperr.Error + +// Update configuration dynamically +func (lb *LoadBalancer) UpdateConfigIfNeeded(cfg *types.LoadBalancerConfig) + +// Add a backend server +func (lb *LoadBalancer) AddServer(srv types.LoadBalancerServer) + +// Remove a backend server +func (lb *LoadBalancer) RemoveServer(srv types.LoadBalancerServer) + +// ServeHTTP implements http.Handler +func (lb *LoadBalancer) ServeHTTP(rw http.ResponseWriter, r *http.Request) +``` + +### Server + +```go +type server struct { + name string + url *nettypes.URL + weight int + http.Handler + types.HealthMonitor +} + +// Create a new backend server +func NewServer(name string, url *nettypes.URL, weight int, handler http.Handler, healthMon types.HealthMonitor) types.LoadBalancerServer +``` + +**Server Interface:** + +```go +type LoadBalancerServer interface { + Name() string + URL() *nettypes.URL + Key() string + Weight() int + SetWeight(weight int) + Status() types.HealthStatus + Latency() time.Duration + ServeHTTP(rw http.ResponseWriter, r *http.Request) + TryWake() error +} +``` + +### Sticky Sessions + +The load balancer supports sticky sessions via cookies: + +```mermaid +flowchart TD + 
A[Client Request] --> B{Cookie exists?} + B -->|No| C[Select Server] + B -->|Yes| D[Extract Server Hash] + D --> E[Find Matching Server] + C --> F[Set Cookie
godoxy_lb_sticky] + E --> G[Route to Server] + F --> G +``` + +```go +// Cookie settings +Name: "godoxy_lb_sticky" +MaxAge: Configurable (default: 24 hours) +HttpOnly: true +SameSite: Lax +Secure: Based on TLS/Forwarded-Proto +``` + +## Balancing Modes + +```go +const ( + LoadbalanceModeUnset = "" + LoadbalanceModeRoundRobin = "round_robin" + LoadbalanceModeLeastConn = "least_conn" + LoadbalanceModeIPHash = "ip_hash" +) +``` + +## Configuration + +```go +type LoadBalancerConfig struct { + Link string // Link name + Mode LoadbalanceMode // Balancing algorithm + Sticky bool // Enable sticky sessions + StickyMaxAge time.Duration // Cookie max age + Options map[string]any // Algorithm-specific options +} +``` + +## Usage Examples + +### Basic Round Robin Load Balancer + +```go +config := &types.LoadBalancerConfig{ + Link: "my-service", + Mode: types.LoadbalanceModeRoundRobin, +} + +lb := loadbalancer.New(config) +lb.Start(parentTask) + +// Add backend servers +lb.AddServer(loadbalancer.NewServer("backend-1", url1, 10, handler1, health1)) +lb.AddServer(loadbalancer.NewServer("backend-2", url2, 10, handler2, health2)) + +// Use as HTTP handler +http.Handle("/", lb) +``` + +### Least Connections with Sticky Sessions + +```go +config := &types.LoadBalancerConfig{ + Link: "api-service", + Mode: types.LoadbalanceModeLeastConn, + Sticky: true, + StickyMaxAge: 1 * time.Hour, +} + +lb := loadbalancer.New(config) +lb.Start(parentTask) + +for _, srv := range backends { + lb.AddServer(srv) +} +``` + +### IP Hash Load Balancer with Real IP + +```go +config := &types.LoadBalancerConfig{ + Link: "user-service", + Mode: types.LoadbalanceModeIPHash, + Options: map[string]any{ + "header": "X-Real-IP", + "from": []string{"10.0.0.0/8", "172.16.0.0/12"}, + "recursive": true, + }, +} + +lb := loadbalancer.New(config) +``` + +### Server Weight Management + +```go +// Servers are balanced based on weight (max total: 100) +lb.AddServer(NewServer("server1", url1, 30, handler, health)) 
+lb.AddServer(NewServer("server2", url2, 50, handler, health)) +lb.AddServer(NewServer("server3", url3, 20, handler, health)) + +// Weights are auto-rebalanced if total != 100 +``` + +## Idlewatcher Integration + +The load balancer integrates with the idlewatcher system: + +- Wake events path (`/api/wake`): Wakes all idle servers +- Favicon and loading page paths: Bypassed for sticky session handling +- Server wake support via `TryWake()` interface + +## Health Monitoring + +The load balancer implements `types.HealthMonitor`: + +```go +func (lb *LoadBalancer) Status() types.HealthStatus +func (lb *LoadBalancer) Detail() string +func (lb *LoadBalancer) Uptime() time.Duration +func (lb *LoadBalancer) Latency() time.Duration +``` + +Health JSON representation: + +```json +{ + "name": "my-service", + "status": "healthy", + "detail": "3/3 servers are healthy", + "started": "2024-01-01T00:00:00Z", + "uptime": "1h2m3s", + "latency": "10ms", + "extra": { + "config": {...}, + "pool": {...} + } +} +``` + +## Thread Safety + +- Server pool operations are protected by `poolMu` mutex +- Algorithm-specific state uses atomic operations or dedicated synchronization +- Least connections uses `xsync.Map` for thread-safe connection counting diff --git a/internal/net/gphttp/middleware/README.md b/internal/net/gphttp/middleware/README.md new file mode 100644 index 00000000..b649c4fc --- /dev/null +++ b/internal/net/gphttp/middleware/README.md @@ -0,0 +1,336 @@ +# Middleware + +HTTP middleware framework providing request/response processing, middleware chaining, and composition from YAML files. + +## Overview + +This package implements a flexible HTTP middleware system for GoDoxy. Middleware can modify requests before they reach the backend and modify responses before they return to the client. 
The system supports: + +- **Request Modifiers**: Process requests before forwarding +- **Response Modifiers**: Modify responses before returning to client +- **Middleware Chaining**: Compose multiple middleware in priority order +- **YAML Composition**: Define middleware chains in configuration files +- **Bypass Rules**: Skip middleware based on request properties +- **Dynamic Loading**: Load middleware definitions from files at runtime + +## Architecture + +```mermaid +graph TD + A[HTTP Request] --> B[Middleware Chain] + + subgraph Chain [Middleware Pipeline] + direction LR + B1[RedirectHTTP] --> B2[RealIP] + B2 --> B3[RateLimit] + B3 --> B4[OIDC] + B4 --> B5[CustomErrorPage] + end + + Chain --> C[Backend Handler] + C --> D[Response Modifier] + + subgraph ResponseChain [Response Pipeline] + direction LR + D1[CustomErrorPage] --> D2[ModifyResponse] + D2 --> D3[ModifyHTML] + end + + ResponseChain --> E[HTTP Response] +``` + +## Middleware Flow + +```mermaid +sequenceDiagram + participant C as Client + participant M as Middleware Chain + participant B as Backend + participant R as Response Chain + participant C2 as Client + + C->>M: HTTP Request + M->>M: before() - RequestModifier + M->>M: Check Bypass Rules + M->>M: Sort by Priority + + par Request Modifiers + M->>M: Middleware 1 (before) + M->>M: Middleware 2 (before) + end + + M->>B: Forward Request + + B-->>M: HTTP Response + + par Response Modifiers + M->>R: ResponseModifier 1 + M->>R: ResponseModifier 2 + end + + R-->>C2: Modified Response +``` + +## Core Components + +### Middleware + +```go +type Middleware struct { + name string + construct ImplNewFunc + impl any + commonOptions +} + +type commonOptions struct { + Priority int `json:"priority"` // Default: 10, 0 is highest + Bypass Bypass `json:"bypass"` +} +``` + +**Interfaces:** + +```go +// RequestModifier - modify or filter requests +type RequestModifier interface { + before(w http.ResponseWriter, r *http.Request) (proceed bool) +} + +// ResponseModifier 
- modify responses +type ResponseModifier interface { + modifyResponse(r *http.Response) error +} + +// MiddlewareWithSetup - one-time setup after construction +type MiddlewareWithSetup interface { + setup() +} + +// MiddlewareFinalizer - finalize after options applied +type MiddlewareFinalizer interface { + finalize() +} + +// MiddlewareFinalizerWithError - finalize with error handling +type MiddlewareFinalizerWithError interface { + finalize() error +} +``` + +### Middleware Chain + +```go +type middlewareChain struct { + beforess []RequestModifier + modResps []ResponseModifier +} + +func NewMiddlewareChain(name string, chain []*Middleware) *Middleware +``` + +### Bypass Rules + +```go +type Bypass []rules.RuleOn + +// ShouldBypass checks if request should skip middleware +func (b Bypass) ShouldBypass(w http.ResponseWriter, r *http.Request) bool +``` + +## Available Middleware + +| Name | Type | Description | +| ------------------------------- | -------- | ------------------------------------------ | +| `redirecthttp` | Request | Redirect HTTP to HTTPS | +| `oidc` | Request | OIDC authentication | +| `forwardauth` | Request | Forward authentication to external service | +| `modifyrequest` / `request` | Request | Modify request headers and path | +| `modifyresponse` / `response` | Response | Modify response headers | +| `setxforwarded` | Request | Set X-Forwarded headers | +| `hidexforwarded` | Request | Remove X-Forwarded headers | +| `modifyhtml` | Response | Inject HTML into responses | +| `themed` | Response | Apply theming to HTML | +| `errorpage` / `customerrorpage` | Response | Serve custom error pages | +| `realip` | Request | Extract real client IP from headers | +| `cloudflarerealip` | Request | Cloudflare-specific real IP extraction | +| `cidrwhitelist` | Request | Allow only specific IP ranges | +| `ratelimit` | Request | Rate limiting by IP | +| `hcaptcha` | Request | hCAPTCHA verification | + +## Usage Examples + +### Creating a Middleware + +```go 
+import "github.com/yusing/godoxy/internal/net/gphttp/middleware" + +type myMiddleware struct { + SomeOption string `json:"some_option"` +} + +func (m *myMiddleware) before(w http.ResponseWriter, r *http.Request) bool { + // Process request + r.Header.Set("X-Custom", m.SomeOption) + return true // false would block the request +} + +var MyMiddleware = middleware.NewMiddleware[myMiddleware]() +``` + +### Building Middleware from Map + +```go +middlewaresMap := map[string]middleware.OptionsRaw{ + "realip": { + "priority": 5, + "header": "X-Real-IP", + "from": []string{"10.0.0.0/8"}, + }, + "ratelimit": { + "priority": 10, + "average": 10, + "burst": 20, + }, +} + +mid, err := middleware.BuildMiddlewareFromMap("my-chain", middlewaresMap) +if err != nil { + log.Fatal(err) +} +``` + +### YAML Composition + +```yaml +# config/middlewares/my-chain.yml +- use: realip + header: X-Real-IP + from: + - 10.0.0.0/8 + - 172.16.0.0/12 + bypass: + - path glob("/public/*") + +- use: ratelimit + average: 100 + burst: 200 + +- use: oidc + allowed_users: + - user@example.com +``` + +```go +// Load from file +eb := &gperr.Builder{} +middlewares := middleware.BuildMiddlewaresFromComposeFile( + "config/middlewares/my-chain.yml", + eb, +) +``` + +### Applying Middleware to Reverse Proxy + +```go +import "github.com/yusing/goutils/http/reverseproxy" + +rp := &reverseproxy.ReverseProxy{ + Target: backendURL, +} + +err := middleware.PatchReverseProxy(rp, middlewaresMap) +if err != nil { + log.Fatal(err) +} +``` + +### Bypass Rules + +```go +bypassRules := middleware.Bypass{ + { + Type: rules.RuleOnTypePathPrefix, + Value: "/public", + }, + { + Type: rules.RuleOnTypePath, + Value: "/health", + }, +} + +mid, _ := middleware.RateLimiter.New(middleware.OptionsRaw{ + "bypass": bypassRules, + "average": 10, + "burst": 20, +}) +``` + +## Priority + +Middleware are executed in priority order (lower number = higher priority): + +```mermaid +graph LR + A[Priority 0] --> B[Priority 5] + B --> C[Priority 
10] + C --> D[Priority 20] + + style A fill:#14532d,stroke:#fff,color:#fff + style B fill:#14532d,stroke:#fff,color:#fff + style C fill:#44403c,stroke:#fff,color:#fff + style D fill:#44403c,stroke:#fff,color:#fff +``` + +## Request Processing + +```mermaid +flowchart TD + A[Request] --> B{Has Bypass Rules?} + B -->|Yes| C{Match Bypass?} + B -->|No| D[Execute before#40;#41;] + + C -->|Match| E[Skip Middleware
Proceed to Next] + C -->|No Match| D + + D --> F{before#40;#41; Returns?} + F -->|true| G[Continue to Next] + F -->|false| H[Stop Pipeline] + + G --> I[Backend Handler] + I --> J[Response] + J --> K{Has Response Modifier?} + K -->|Yes| L[Execute modifyResponse] + K -->|No| M[Return Response] + L --> M +``` + +## Integration Points + +- **Error Pages**: Uses `errorpage` package for custom error responses +- **Authentication**: Integrates with `internal/auth` for OIDC +- **Rate Limiting**: Uses `golang.org/x/time/rate` +- **IP Processing**: Uses `internal/net/types` for CIDR handling + +## Error Handling + +Errors during middleware construction are collected and reported: + +```go +var errs gperr.Builder +for name, opts := range middlewaresMap { + m, err := middleware.Get(name) + if err != nil { + errs.Add(err) + continue + } + mid, err := m.New(opts) + if err != nil { + errs.AddSubjectf(err, "middlewares.%s", name) + continue + } +} +if errs.HasError() { + log.Error().Err(errs.Error()).Msg("middleware compilation failed") +} +``` diff --git a/internal/net/gphttp/middleware/captcha/README.md b/internal/net/gphttp/middleware/captcha/README.md new file mode 100644 index 00000000..eeb0d57a --- /dev/null +++ b/internal/net/gphttp/middleware/captcha/README.md @@ -0,0 +1,264 @@ +# Captcha Middleware + +CAPTCHA verification middleware package providing session-based captcha challenge and verification. + +## Overview + +This package implements CAPTCHA verification middleware that protects routes by requiring users to complete a CAPTCHA challenge before accessing the protected resource. It supports pluggable providers (currently hCAPTCHA) and uses encrypted sessions for verification state. + +## Architecture + +```mermaid +graph TD + A[Client Request] --> B{Captcha Session?} + B -->|Valid| C[Proceed to Backend] + B -->|Invalid| D[Show CAPTCHA Page] + + D --> E{POST with Token?} + E -->|Valid| F[Create Session
Set Cookie] + E -->|Invalid| G[Show Error] + F --> C + + subgraph Captcha Provider + H[hCAPTCHA API] + D -->|Script/Form HTML| H + F -->|Verify Token| H + end + + subgraph Session Store + I[CaptchaSessions
jsonstore] + end + + F --> I + I -.->|Session Check| B +``` + +## Captcha Flow + +```mermaid +sequenceDiagram + participant C as Client + participant M as Middleware + participant P as Provider + participant S as Session Store + participant B as Backend + + C->>M: Request (no session) + M->>M: Check cookie + M->>M: Session not found/expired + M->>C: Send CAPTCHA Page + + C->>M: POST with captcha response + M->>P: Verify token + P-->>M: Verification result + + alt Verification successful + M->>S: Store session + M->>C: Set session cookie
Redirect to protected path + C->>M: Request (with session cookie) + M->>S: Validate session + M->>B: Forward request + else Verification failed + M->>C: Error: verification failed + end +``` + +## Core Components + +### Provider Interface + +```go +type Provider interface { + // CSP directives for the captcha provider + CSPDirectives() []string + // CSP sources for the captcha provider + CSPSources() []string + // Verify the captcha response from the request + Verify(r *http.Request) error + // Session expiry duration after successful verification + SessionExpiry() time.Duration + // Script HTML to include in the page + ScriptHTML() string + // Form HTML to render the captcha widget + FormHTML() string +} +``` + +### ProviderBase + +```go +type ProviderBase struct { + Expiry time.Duration `json:"session_expiry"` // Default: 24 hours +} + +func (p *ProviderBase) SessionExpiry() time.Duration +``` + +### hCAPTCHA Provider + +```go +type HcaptchaProvider struct { + ProviderBase + SiteKey string `json:"site_key" validate:"required"` + Secret string `json:"secret" validate:"required"` +} + +// CSP Directives: script-src, frame-src, style-src, connect-src +// CSP Sources: https://hcaptcha.com, https://*.hcaptcha.com +``` + +### Captcha Session + +```go +type CaptchaSession struct { + ID string `json:"id"` + Expiry time.Time `json:"expiry"` +} + +var CaptchaSessions = jsonstore.Store[*CaptchaSession]("captcha_sessions") + +func newCaptchaSession(p Provider) *CaptchaSession +func (s *CaptchaSession) expired() bool +``` + +## Middleware Integration + +```go +type hCaptcha struct { + captcha.HcaptchaProvider +} + +func (h *hCaptcha) before(w http.ResponseWriter, r *http.Request) bool { + return captcha.PreRequest(h, w, r) +} + +var HCaptcha = NewMiddleware[hCaptcha]() +``` + +### PreRequest Handler + +```go +func PreRequest(p Provider, w http.ResponseWriter, r *http.Request) (proceed bool) +``` + +This function: + +1. Checks for valid session cookie +1. 
Validates session expiry
+1. Returns true if session is valid
+1. For non-HTML requests, returns 403 Forbidden
+1. For POST requests, verifies the captcha token
+1. For GET requests, renders the CAPTCHA challenge page
+
+## Configuration
+
+### hCAPTCHA Configuration
+
+```yaml
+middleware:
+  my-captcha:
+    use: hcaptcha
+    site_key: "YOUR_SITE_KEY"
+    secret: "YOUR_SECRET"
+    session_expiry: 24h # optional, default 24h
+```
+
+### Route Configuration
+
+```yaml
+routes:
+  - host: example.com
+    path: /admin
+    middlewares:
+      - my-captcha
+```
+
+## Usage Examples
+
+### Basic Setup
+
+```go
+import "github.com/yusing/godoxy/internal/net/gphttp/middleware"
+
+hcaptchaMiddleware := middleware.HCaptcha.New(middleware.OptionsRaw{
+    "site_key": "your-site-key",
+    "secret": "your-secret",
+})
+```
+
+### Using in Middleware Chain
+
+```yaml
+# config/middlewares/admin-protection.yml
+- use: hcaptcha
+  site_key: "${HCAPTCHA_SITE_KEY}"
+  secret: "${HCAPTCHA_SECRET}"
+  bypass:
+    - type: CIDR
+      value: 10.0.0.0/8
+```
+
+## Session Management
+
+Sessions are stored in a JSON-based store with the following properties:
+
+- **Session ID**: 32-byte CRNG (`crypto/rand.Read`) random hex string
+- **Expiry**: Configurable duration (default 24 hours)
+- **Cookie**: `godoxy_captcha_session` with HttpOnly flag
+
+```mermaid
+flowchart TD
+    A[Session Created] --> B[Cookie Set]
+    B --> C[Client Sends Cookie]
+    C --> D{Session Valid?}
+    D -->|Yes| E[Proceed]
+    D -->|No| F{HTML Request?}
+    F -->|Yes| G[Show CAPTCHA]
+    F -->|No| H[403 Forbidden]
+```
+
+## CSP Integration
+
+The CAPTCHA provider supplies CSP directives that should be added to the response:
+
+```go
+// hCAPTCHA CSP Directives
+CSPDirectives() []string
+// Returns: ["script-src", "frame-src", "style-src", "connect-src"]
+
+CSPSources() []string
+// Returns: ["https://hcaptcha.com", "https://*.hcaptcha.com"]
+```
+
+## HTML Template
+
+The package includes an embedded HTML template (`captcha.html`) that renders the CAPTCHA
challenge page with: + +- Provider script (`