Compare commits


17 Commits

Author SHA1 Message Date
yusing 8da63daf02 refactor: simplify and remove duplicated code for icon caching 2025-04-28 11:22:49 +08:00
yusing c5fd21552e fix(oidc): token not being refreshed when receiving simultaneous requests from the same session 2025-04-28 11:19:57 +08:00
yusing 27409abc24 fix: missing proxmox initialization 2025-04-28 05:08:14 +08:00
yusing 21c9e46274 fix: remove redundant event logging 2025-04-28 05:03:17 +08:00
yusing 22a12d3116 chore: remove redundant loadbalancer debug message 2025-04-28 04:57:26 +08:00
yusing 89d93dd878 chore: better error message 2025-04-28 00:48:20 +08:00
yusing 66853dfc52 fix: cloudflare realIP should default to recursive 2025-04-27 23:53:04 +08:00
yusing c72f66d64b feat(acl): add FORCE_RESOLVE_COUNTRY option to resolve country 2025-04-26 09:48:43 +08:00
yusing 59bc342a40 fix: notifications not being sent 2025-04-26 09:20:03 +08:00
yusing e11579df10 chore(maxm): improved database update mechanism, fixed db being downloaded twice on first run 2025-04-26 09:08:03 +08:00
yusing 6a8f6fb4b5 chore(accesslog): reduce buffering for stdout 2025-04-26 08:29:55 +08:00
yusing 8f20bd3840 fix(acl): caching logic 2025-04-26 08:05:26 +08:00
yusing f1abb745fe fix(tcp): return a dummy connection instead of nil 2025-04-26 07:57:20 +08:00
yusing cb2990f6e8 chore: enrich example config 2025-04-26 07:40:55 +08:00
yusing fb2f850311 fix(oidc): incorrect redirect url 2025-04-26 06:57:02 +08:00
yusing 2b9c0f09ee fix: version checking 2025-04-26 06:50:43 +08:00
yusing efe3eb4ce7 fix: autocert panic 2025-04-26 06:41:15 +08:00
33 changed files with 381 additions and 559 deletions

View File

@@ -1,3 +1,6 @@
# docker image tag (latest, nightly)
TAG=latest
# set timezone to get correct log timestamp
TZ=Etc/UTC
@@ -47,8 +50,12 @@ GODOXY_API_ADDR=127.0.0.1:8888
# Frontend listening port
GODOXY_FRONTEND_PORT=3000
# Prometheus Metrics
GODOXY_PROMETHEUS_ENABLED=true
# Frontend aliases (subdomains / FQDNs, e.g. godoxy, godoxy.domain.com)
GODOXY_FRONTEND_ALIASES=godoxy
# Docker socket
# /var/run/podman/podman.sock for podman
DOCKER_SOCKET=/var/run/docker.sock
# Debug mode
GODOXY_DEBUG=false

View File

@@ -6,13 +6,13 @@ import (
"os"
"sync"
"github.com/yusing/go-proxy/internal"
"github.com/yusing/go-proxy/internal/api/v1/query"
"github.com/yusing/go-proxy/internal/auth"
"github.com/yusing/go-proxy/internal/common"
"github.com/yusing/go-proxy/internal/config"
"github.com/yusing/go-proxy/internal/dnsproviders"
"github.com/yusing/go-proxy/internal/gperr"
"github.com/yusing/go-proxy/internal/homepage"
"github.com/yusing/go-proxy/internal/logging"
"github.com/yusing/go-proxy/internal/logging/memlogger"
"github.com/yusing/go-proxy/internal/metrics/systeminfo"
@@ -50,7 +50,7 @@ func main() {
rawLogger.Println("ok")
return
case common.CommandListIcons:
icons, err := internal.ListAvailableIcons()
icons, err := homepage.ListAvailableIcons()
if err != nil {
rawLogger.Fatal(err)
}
@@ -79,7 +79,7 @@ func main() {
logging.Info().Msgf("GoDoxy version %s", pkg.GetVersion())
logging.Trace().Msg("trace enabled")
parallel(
internal.InitIconListCache,
homepage.InitIconListCache,
systeminfo.Poller.Start,
)

View File

@@ -1,7 +1,7 @@
---
services:
frontend:
image: ghcr.io/yusing/godoxy-frontend:latest
image: ghcr.io/yusing/godoxy-frontend:${TAG:-latest}
container_name: godoxy-frontend
restart: unless-stopped
network_mode: host # do not change this
@@ -13,7 +13,7 @@ services:
# modify below to fit your needs
labels:
proxy.aliases: godoxy
proxy.aliases: ${GODOXY_FRONTEND_ALIASES:-godoxy}
proxy.godoxy.port: ${GODOXY_FRONTEND_PORT:-3000}
# proxy.godoxy.middlewares.cidr_whitelist: |
# status: 403
@@ -24,13 +24,13 @@ services:
# - 192.168.0.0/16
# - 172.16.0.0/12
app:
image: ghcr.io/yusing/godoxy:latest
image: ghcr.io/yusing/godoxy:${TAG:-latest}
container_name: godoxy
restart: always
network_mode: host # do not change this
env_file: .env
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ${DOCKER_SOCKET:-/var/run/docker.sock}:/var/run/docker.sock
- ./config:/app/config
- ./logs:/app/logs
- ./error_pages:/app/error_pages

View File

@@ -3,11 +3,9 @@ package acl
import (
"github.com/puzpuzpuz/xsync/v3"
acl "github.com/yusing/go-proxy/internal/acl/types"
"go.uber.org/atomic"
)
var cityCache = xsync.NewMapOf[string, *acl.City]()
var numCachedLookup atomic.Uint64
func (cfg *MaxMindConfig) lookupCity(ip *acl.IPInfo) (*acl.City, bool) {
if ip.City != nil {
@@ -20,7 +18,7 @@ func (cfg *MaxMindConfig) lookupCity(ip *acl.IPInfo) (*acl.City, bool) {
city, ok := cityCache.Load(ip.Str)
if ok {
numCachedLookup.Inc()
ip.City = city
return city, true
}
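
For context, lookupCity is a read-through cache keyed by the IP string: check the in-memory map, fall back to the MaxMind reader on a miss, then store the result. A minimal stdlib sketch of the same pattern (the resolve callback and names are illustrative, not the project's API):

package aclsketch

import "sync"

type City struct{ Name string }

var (
	cityCache   = make(map[string]*City)
	cityCacheMu sync.RWMutex
)

// lookupCity returns the cached city for ip, resolving (e.g. via a MaxMind
// query) and caching the result on a miss.
func lookupCity(ip string, resolve func(string) (*City, bool)) (*City, bool) {
	cityCacheMu.RLock()
	city, ok := cityCache[ip]
	cityCacheMu.RUnlock()
	if ok {
		return city, true
	}
	city, ok = resolve(ip)
	if !ok {
		return nil, false
	}
	cityCacheMu.Lock()
	cityCache[ip] = city
	cityCacheMu.Unlock()
	return city, true
}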

View File

@@ -9,6 +9,7 @@ import (
"github.com/puzpuzpuz/xsync/v3"
"github.com/rs/zerolog"
acl "github.com/yusing/go-proxy/internal/acl/types"
"github.com/yusing/go-proxy/internal/common"
"github.com/yusing/go-proxy/internal/gperr"
"github.com/yusing/go-proxy/internal/logging"
"github.com/yusing/go-proxy/internal/logging/accesslog"
@@ -63,7 +64,7 @@ type checkCache struct {
const cacheTTL = 1 * time.Minute
func (c *checkCache) Expired() bool {
return c.created.Add(cacheTTL).After(utils.TimeNow())
return c.created.Add(cacheTTL).Before(utils.TimeNow())
}
//TODO: add stats
@@ -153,7 +154,10 @@ func (c *Config) Start(parent *task.Task) gperr.Error {
return nil
}
func (c *config) cacheRecord(info *acl.IPInfo, allow bool) {
func (c *Config) cacheRecord(info *acl.IPInfo, allow bool) {
if common.ForceResolveCountry && info.City == nil {
c.MaxMind.lookupCity(info)
}
c.ipCache.Store(info.Str, &checkCache{
IPInfo: info,
allow: allow,
@@ -175,7 +179,7 @@ func (c *Config) IPAllowed(ip net.IP) bool {
return false
}
// always allow private and loopback
// always allow loopback
// loopback is not logged
if ip.IsLoopback() {
return true
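
The one-character change above is the actual caching fix: an entry is expired once created+TTL lies before now. The old After check inverted the result, making fresh entries look expired and stale ones look valid. A tiny runnable sketch of the corrected check with the same one-minute TTL:

package main

import (
	"fmt"
	"time"
)

const cacheTTL = 1 * time.Minute

// expired reports whether an entry created at `created` has outlived cacheTTL.
func expired(created, now time.Time) bool {
	return created.Add(cacheTTL).Before(now)
}

func main() {
	now := time.Now()
	fmt.Println(expired(now.Add(-30*time.Second), now)) // false: still fresh
	fmt.Println(expired(now.Add(-2*time.Minute), now))  // true: past the TTL
}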

View File

@@ -54,7 +54,7 @@ func (cfg *MaxMindConfig) LoadMaxMindDB(parent task.Parent) gperr.Error {
path := dbPath(cfg.Database)
reader, err := maxmindDBOpen(path)
exists := true
valid := true
if err != nil {
switch {
case errors.Is(err, os.ErrNotExist):
@@ -65,20 +65,19 @@ func (cfg *MaxMindConfig) LoadMaxMindDB(parent task.Parent) gperr.Error {
return gperr.Wrap(err)
}
}
exists = false
valid = false
}
if !exists {
if !valid {
cfg.logger.Info().Msg("MaxMind DB not found/invalid, downloading...")
reader, err = cfg.download()
if err != nil {
if err = cfg.download(); err != nil {
return ErrDownloadFailure.With(err)
}
} else {
cfg.logger.Info().Msg("MaxMind DB loaded")
cfg.db.Reader = reader
go cfg.scheduleUpdate(parent)
}
cfg.logger.Info().Msg("MaxMind DB loaded")
cfg.db.Reader = reader
go cfg.scheduleUpdate(parent)
return nil
}
@@ -137,17 +136,10 @@ func (cfg *MaxMindConfig) update() {
Time("latest", remoteLastModified.Local()).
Time("current", cfg.lastUpdate).
Msg("MaxMind DB update available")
reader, err := cfg.download()
if err != nil {
if err = cfg.download(); err != nil {
cfg.logger.Err(err).Msg("failed to update MaxMind DB")
return
}
cfg.db.Lock()
cfg.db.Close()
cfg.db.Reader = reader
cfg.setLastUpdate(*remoteLastModified)
cfg.db.Unlock()
cfg.logger.Info().Msg("MaxMind DB updated")
}
@@ -190,57 +182,87 @@ func (cfg *MaxMindConfig) checkLastest() (lastModifiedT *time.Time, err error) {
return &lastModifiedTime, nil
}
func (cfg *MaxMindConfig) download() (*maxminddb.Reader, error) {
func (cfg *MaxMindConfig) download() error {
resp, err := newReq(cfg, http.MethodGet)
if err != nil {
return nil, err
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("%w: %d", ErrResponseNotOK, resp.StatusCode)
return fmt.Errorf("%w: %d", ErrResponseNotOK, resp.StatusCode)
}
path := dbPath(cfg.Database)
tmpPath := path + "-tmp.tar.gz"
file, err := os.OpenFile(tmpPath, os.O_CREATE|os.O_WRONLY, 0o644)
if err != nil {
return nil, err
}
dbFile := dbPath(cfg.Database)
tmpGZPath := dbFile + "-tmp.tar.gz"
tmpDBPath := dbFile + "-tmp"
cfg.logger.Info().Msg("MaxMind DB downloading...")
_, err = io.Copy(file, resp.Body)
if err != nil {
file.Close()
return nil, err
}
file.Close()
// extract .tar.gz and move only the dbFilename to path
err = extractFileFromTarGz(tmpPath, dbFilename(cfg.Database), path)
if err != nil {
return nil, gperr.New("failed to extract database from archive").With(err)
}
// cleanup the tar.gz file
_ = os.Remove(tmpPath)
db, err := maxmindDBOpen(path)
if err != nil {
return nil, err
}
return db, nil
}
func extractFileFromTarGz(tarGzPath, targetFilename, destPath string) error {
f, err := os.Open(tarGzPath)
tmpGZFile, err := os.OpenFile(tmpGZPath, os.O_CREATE|os.O_RDWR, 0o644)
if err != nil {
return err
}
defer f.Close()
gzr, err := gzip.NewReader(f)
// cleanup the tar.gz file
defer func() {
_ = tmpGZFile.Close()
_ = os.Remove(tmpGZPath)
}()
cfg.logger.Info().Msg("MaxMind DB downloading...")
_, err = io.Copy(tmpGZFile, resp.Body)
if err != nil {
return err
}
if _, err := tmpGZFile.Seek(0, io.SeekStart); err != nil {
return err
}
// extract the database file from the .tar.gz to the temp path
err = extractFileFromTarGz(tmpGZFile, dbFilename(cfg.Database), tmpDBPath)
if err != nil {
return gperr.New("failed to extract database from archive").With(err)
}
// test if the downloaded database is valid
db, err := maxmindDBOpen(tmpDBPath)
if err != nil {
_ = os.Remove(tmpDBPath)
return err
}
db.Close()
err = os.Rename(tmpDBPath, dbFile)
if err != nil {
return err
}
cfg.db.Lock()
defer cfg.db.Unlock()
if cfg.db.Reader != nil {
cfg.db.Reader.Close()
}
cfg.db.Reader, err = maxmindDBOpen(dbFile)
if err != nil {
return err
}
lastModifiedStr := resp.Header.Get("Last-Modified")
lastModifiedTime, err := time.Parse(http.TimeFormat, lastModifiedStr)
if err == nil {
cfg.setLastUpdate(lastModifiedTime)
}
cfg.logger.Info().Msg("MaxMind DB downloaded")
return nil
}
func extractFileFromTarGz(tarGzFile *os.File, targetFilename, destPath string) error {
defer tarGzFile.Close()
gzr, err := gzip.NewReader(tarGzFile)
if err != nil {
return err
}
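
The rewritten download keeps the database swap safe: fetch into a temporary .tar.gz, extract the .mmdb to a temporary path, open it once to validate, rename it over the live file, and only then close and replace the shared reader under the lock. A condensed stdlib-only sketch of that download-validate-swap sequence (extraction and validation are stubbed out; names are illustrative):

package maxmindsketch

import (
	"io"
	"net/http"
	"os"
	"sync"
)

var dbMu sync.Mutex // stands in for cfg.db.Lock()/Unlock() guarding the live reader

// downloadAndSwap fetches url into dbPath+"-tmp", validates the temp file, then
// renames it over dbPath so readers never see a partially written database.
func downloadAndSwap(url, dbPath string, validate, reopen func(path string) error) error {
	tmp := dbPath + "-tmp"
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	f, err := os.Create(tmp)
	if err != nil {
		return err
	}
	if _, err := io.Copy(f, resp.Body); err != nil {
		f.Close()
		return err
	}
	f.Close()

	// open the candidate once before replacing the old database
	if err := validate(tmp); err != nil {
		os.Remove(tmp)
		return err
	}
	if err := os.Rename(tmp, dbPath); err != nil {
		return err
	}

	// swap the live reader while holding the lock, as the diff does
	dbMu.Lock()
	defer dbMu.Unlock()
	return reopen(dbPath)
}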

View File

@@ -1,6 +1,8 @@
package acl
import (
"archive/tar"
"compress/gzip"
"io"
"net/http"
"net/http/httptest"
@@ -144,9 +146,17 @@ func Test_MaxMindConfig_download(t *testing.T) {
logger: zerolog.Nop(),
}
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
io.Copy(w, strings.NewReader("FAKEMMDB"))
gz := gzip.NewWriter(w)
t := tar.NewWriter(gz)
t.WriteHeader(&tar.Header{
Name: dbFilename(MaxMindGeoLite),
})
t.Write([]byte("1234"))
t.Close()
gz.Close()
}))
defer server.Close()
oldURL := dbURL
dbURL = func(MaxMindDatabaseType) string { return server.URL }
defer func() { dbURL = oldURL }()
@@ -163,26 +173,26 @@ func Test_MaxMindConfig_download(t *testing.T) {
}
defer func() { maxmindDBOpen = origOpen }()
rw := &fakeReadCloser{}
req, err := http.NewRequest(http.MethodGet, server.URL, nil)
if err != nil {
t.Fatalf("newReq() error = %v", err)
}
rw := httptest.NewRecorder()
oldNewReq := newReq
newReq = func(cfg *MaxMindConfig, method string) (*http.Response, error) {
return &http.Response{
StatusCode: http.StatusOK,
Body: rw,
}, nil
server.Config.Handler.ServeHTTP(rw, req)
return rw.Result(), nil
}
defer func() { newReq = oldNewReq }()
db, err := cfg.download()
err = cfg.download()
if err != nil {
t.Fatalf("download() error = %v", err)
}
if db == nil {
if cfg.db.Reader == nil {
t.Error("expected db instance")
}
if !rw.closed {
t.Error("expected rw to be closed")
}
}
func Test_MaxMindConfig_loadMaxMindDB(t *testing.T) {

View File

@@ -1,7 +1,9 @@
package acl
import (
"io"
"net"
"time"
)
type TCPListener struct {
@@ -9,6 +11,17 @@ type TCPListener struct {
lis net.Listener
}
type noConn struct{}
func (noConn) Read(b []byte) (int, error) { return 0, io.EOF }
func (noConn) Write(b []byte) (int, error) { return 0, io.EOF }
func (noConn) Close() error { return nil }
func (noConn) LocalAddr() net.Addr { return nil }
func (noConn) RemoteAddr() net.Addr { return nil }
func (noConn) SetDeadline(t time.Time) error { return nil }
func (noConn) SetReadDeadline(t time.Time) error { return nil }
func (noConn) SetWriteDeadline(t time.Time) error { return nil }
func (cfg *Config) WrapTCP(lis net.Listener) net.Listener {
if cfg == nil {
return lis
@@ -32,11 +45,11 @@ func (s *TCPListener) Accept() (net.Conn, error) {
if !ok {
// Not a TCPAddr, drop
c.Close()
return nil, nil
return noConn{}, nil
}
if !s.acl.IPAllowed(addr.IP) {
c.Close()
return nil, nil
return noConn{}, nil
}
return c, nil
}
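
Returning noConn{} instead of nil matters because Accept's callers treat err == nil as "the conn is usable"; net/http's serve loop, for example, never nil-checks the returned conn, so (nil, nil) from a dropped client would panic. A small sketch of the consumer contract the dummy connection preserves (the handler body is illustrative):

package tcpsketch

import (
	"io"
	"net"
)

// serveLoop shows the contract callers of Accept rely on: when err is nil the
// conn is used without a nil check. A filtered client therefore still has to
// yield a usable connection; noConn satisfies the loop by returning io.EOF on
// the first read, so the goroutine exits immediately.
func serveLoop(lis net.Listener) error {
	for {
		conn, err := lis.Accept()
		if err != nil {
			return err
		}
		go func(c net.Conn) {
			defer c.Close()
			_, _ = io.Copy(io.Discard, c)
		}(conn)
	}
}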

View File

@@ -6,9 +6,9 @@ import (
"strconv"
"strings"
"github.com/yusing/go-proxy/internal"
"github.com/yusing/go-proxy/internal/common"
config "github.com/yusing/go-proxy/internal/config/types"
"github.com/yusing/go-proxy/internal/homepage"
"github.com/yusing/go-proxy/internal/net/gphttp"
"github.com/yusing/go-proxy/internal/net/gphttp/middleware"
"github.com/yusing/go-proxy/internal/route/routes"
@@ -67,7 +67,7 @@ func List(cfg config.ConfigInstance, w http.ResponseWriter, r *http.Request) {
if err != nil {
limit = 0
}
icons, err := internal.SearchIcons(r.FormValue("keyword"), limit)
icons, err := homepage.SearchIcons(r.FormValue("keyword"), limit)
if err != nil {
gphttp.ClientError(w, err)
return

View File

@@ -1,6 +1,7 @@
package auth
import (
"context"
"net/http"
"github.com/yusing/go-proxy/internal/common"
@@ -38,22 +39,40 @@ func IsOIDCEnabled() bool {
return common.OIDCIssuerURL != ""
}
type nextHandler struct{}
var nextHandlerContextKey = nextHandler{}
func RequireAuth(next http.HandlerFunc) http.HandlerFunc {
if IsEnabled() {
return func(w http.ResponseWriter, r *http.Request) {
if err := defaultAuth.CheckToken(r); err != nil {
gphttp.ClientError(w, err, http.StatusUnauthorized)
} else {
next(w, r)
}
}
if !IsEnabled() {
return next
}
return func(w http.ResponseWriter, r *http.Request) {
if err := defaultAuth.CheckToken(r); err != nil {
if IsFrontend(r) {
r = r.WithContext(context.WithValue(r.Context(), nextHandlerContextKey, next))
defaultAuth.LoginHandler(w, r)
} else {
gphttp.ClientError(w, err, http.StatusUnauthorized)
}
return
}
next(w, r)
}
return next
}
func AuthCheckHandler(w http.ResponseWriter, r *http.Request) {
if err := defaultAuth.CheckToken(r); err != nil {
http.Redirect(w, r, "/v1/auth/login", http.StatusFound)
func ProceedNext(w http.ResponseWriter, r *http.Request) {
next, ok := r.Context().Value(nextHandlerContextKey).(http.HandlerFunc)
if ok {
next(w, r)
} else {
w.WriteHeader(http.StatusOK)
}
}
func AuthCheckHandler(w http.ResponseWriter, r *http.Request) {
if err := defaultAuth.CheckToken(r); err != nil {
defaultAuth.LoginHandler(w, r)
} else {
w.WriteHeader(http.StatusOK)
}

View File

@@ -1,11 +1,13 @@
package auth
import (
"context"
"crypto/rand"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"net/http"
"sync"
"time"
"github.com/golang-jwt/jwt/v5"
@@ -19,6 +21,10 @@ type oauthRefreshToken struct {
Username string `json:"username"`
RefreshToken string `json:"refresh_token"`
Expiry time.Time `json:"expiry"`
result *refreshResult
err error
mu sync.Mutex
}
type Session struct {
@@ -27,6 +33,12 @@ type Session struct {
Groups []string `json:"groups"`
}
type refreshResult struct {
newSession Session
jwt string
jwtExpiry time.Time
}
type sessionClaims struct {
Session
jwt.RegisteredClaims
@@ -34,11 +46,12 @@ type sessionClaims struct {
type sessionID string
var oauthRefreshTokens jsonstore.MapStore[oauthRefreshToken]
var oauthRefreshTokens jsonstore.MapStore[*oauthRefreshToken]
var (
defaultRefreshTokenExpiry = 30 * 24 * time.Hour // 1 month
refreshBefore = 30 * time.Second
sessionInvalidateDelay = 3 * time.Second
)
var (
@@ -50,7 +63,7 @@ const sessionTokenIssuer = "GoDoxy"
func init() {
if IsOIDCEnabled() {
oauthRefreshTokens = jsonstore.Store[oauthRefreshToken]("oauth_refresh_tokens")
oauthRefreshTokens = jsonstore.Store[*oauthRefreshToken]("oauth_refresh_tokens")
}
}
@@ -61,7 +74,7 @@ func (token *oauthRefreshToken) expired() bool {
func newSessionID() sessionID {
b := make([]byte, 32)
_, _ = rand.Read(b)
return sessionID(base64.StdEncoding.EncodeToString(b))
return sessionID(hex.EncodeToString(b))
}
func newSession(username string, groups []string) Session {
@@ -72,26 +85,28 @@ func newSession(username string, groups []string) Session {
}
}
// getOnceOAuthRefreshToken returns the refresh token for the given session.
// getOAuthRefreshToken returns the refresh token for the given session.
//
// The token is removed from the store after retrieval.
func getOnceOAuthRefreshToken(claims *Session) (*oauthRefreshToken, bool) {
func getOAuthRefreshToken(claims *Session) (*oauthRefreshToken, bool) {
token, ok := oauthRefreshTokens.Load(string(claims.SessionID))
if !ok {
return nil, false
}
invalidateOAuthRefreshToken(claims.SessionID)
if token.expired() {
invalidateOAuthRefreshToken(claims.SessionID)
return nil, false
}
if claims.Username != token.Username {
return nil, false
}
return &token, true
return token, true
}
func storeOAuthRefreshToken(sessionID sessionID, username, token string) {
oauthRefreshTokens.Store(string(sessionID), oauthRefreshToken{
oauthRefreshTokens.Store(string(sessionID), &oauthRefreshToken{
Username: username,
RefreshToken: token,
Expiry: time.Now().Add(defaultRefreshTokenExpiry),
@@ -135,51 +150,75 @@ func (auth *OIDCProvider) parseSessionJWT(sessionJWT string) (claims *sessionCla
return claims, sessionToken.Valid && claims.Issuer == sessionTokenIssuer, nil
}
func (auth *OIDCProvider) TryRefreshToken(w http.ResponseWriter, r *http.Request, sessionJWT string) error {
func (auth *OIDCProvider) TryRefreshToken(ctx context.Context, sessionJWT string) (*refreshResult, error) {
// verify the session cookie
claims, valid, err := auth.parseSessionJWT(sessionJWT)
if err != nil {
return fmt.Errorf("%w: %w", ErrInvalidSessionToken, err)
return nil, fmt.Errorf("session: %s - %w: %w", claims.SessionID, ErrInvalidSessionToken, err)
}
if !valid {
return ErrInvalidSessionToken
return nil, ErrInvalidSessionToken
}
// check if refresh is possible
refreshToken, ok := getOnceOAuthRefreshToken(&claims.Session)
refreshToken, ok := getOAuthRefreshToken(&claims.Session)
if !ok {
return errNoRefreshToken
return nil, errNoRefreshToken
}
if !auth.checkAllowed(claims.Username, claims.Groups) {
return ErrUserNotAllowed
return nil, ErrUserNotAllowed
}
return auth.doRefreshToken(ctx, refreshToken, &claims.Session)
}
func (auth *OIDCProvider) doRefreshToken(ctx context.Context, refreshToken *oauthRefreshToken, claims *Session) (*refreshResult, error) {
refreshToken.mu.Lock()
defer refreshToken.mu.Unlock()
// already refreshed
// this must be called after refresh but before invalidate
if refreshToken.result != nil || refreshToken.err != nil {
return refreshToken.result, refreshToken.err
}
// this step refreshes the token
// see https://cs.opensource.google/go/x/oauth2/+/refs/tags/v0.29.0:oauth2.go;l=313
newToken, err := auth.oauthConfig.TokenSource(r.Context(), &oauth2.Token{
newToken, err := auth.oauthConfig.TokenSource(ctx, &oauth2.Token{
RefreshToken: refreshToken.RefreshToken,
}).Token()
if err != nil {
return fmt.Errorf("%w: %w", ErrRefreshTokenFailure, err)
refreshToken.err = fmt.Errorf("session: %s - %w: %w", claims.SessionID, ErrRefreshTokenFailure, err)
return nil, refreshToken.err
}
idTokenJWT, idToken, err := auth.getIdToken(r.Context(), newToken)
idTokenJWT, idToken, err := auth.getIdToken(ctx, newToken)
if err != nil {
return err
refreshToken.err = fmt.Errorf("session: %s - %w: %w", claims.SessionID, ErrRefreshTokenFailure, err)
return nil, refreshToken.err
}
// in case there're multiple requests for the same session to refresh
// invalidate the token after a short delay
go func() {
<-time.After(sessionInvalidateDelay)
invalidateOAuthRefreshToken(claims.SessionID)
}()
sessionID := newSessionID()
logging.Debug().Str("username", claims.Username).Time("expiry", newToken.Expiry).Msg("refreshed token")
storeOAuthRefreshToken(sessionID, claims.Username, newToken.RefreshToken)
// set new idToken and new sessionToken
auth.setIDTokenCookie(w, r, idTokenJWT, time.Until(idToken.Expiry))
auth.setSessionTokenCookie(w, r, Session{
SessionID: sessionID,
Username: claims.Username,
Groups: claims.Groups,
})
return nil
refreshToken.result = &refreshResult{
newSession: Session{
SessionID: sessionID,
Username: claims.Username,
Groups: claims.Groups,
},
jwt: idTokenJWT,
jwtExpiry: idToken.Expiry,
}
return refreshToken.result, nil
}
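
This is the heart of the simultaneous-refresh fix: each stored refresh token now carries its own mutex plus a memoized result, so concurrent requests from the same session serialize on the mutex and every caller after the first reuses the cached outcome instead of redeeming the single-use refresh token again; invalidation of the old session entry is delayed a few seconds so stragglers can still read the result. A compact stdlib sketch of that per-entry memoized refresh (the callback and names are illustrative):

package oidcsketch

import (
	"sync"
	"time"
)

type refreshResult struct{ jwt string }

type refreshEntry struct {
	mu     sync.Mutex
	result *refreshResult
	err    error
}

// refreshOnce performs the refresh at most once per entry. Concurrent callers
// block on the mutex; later callers see the memoized result or error instead
// of redeeming the (single-use) refresh token again.
func (e *refreshEntry) refreshOnce(do func() (*refreshResult, error), invalidate func()) (*refreshResult, error) {
	e.mu.Lock()
	defer e.mu.Unlock()
	if e.result != nil || e.err != nil {
		return e.result, e.err
	}
	e.result, e.err = do()
	if e.err == nil {
		// delay invalidation so near-simultaneous requests for the same
		// session can still pick up the memoized result
		go func() {
			time.Sleep(3 * time.Second)
			invalidate()
		}()
	}
	return e.result, e.err
}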

View File

@@ -13,6 +13,7 @@ import (
"github.com/coreos/go-oidc/v3/oidc"
"github.com/yusing/go-proxy/internal/common"
"github.com/yusing/go-proxy/internal/gperr"
"github.com/yusing/go-proxy/internal/logging"
"github.com/yusing/go-proxy/internal/net/gphttp"
"github.com/yusing/go-proxy/internal/utils"
@@ -47,7 +48,12 @@ const (
OIDCLogoutPath = "/auth/logout"
)
var errMissingIDToken = errors.New("missing id_token field from oauth token")
var (
errMissingIDToken = errors.New("missing id_token field from oauth token")
ErrMissingOAuthToken = gperr.New("missing oauth token")
ErrInvalidOAuthToken = gperr.New("invalid oauth token")
)
// generateState generates a random string for OIDC state.
const oidcStateLength = 32
@@ -148,12 +154,19 @@ func (auth *OIDCProvider) HandleAuth(w http.ResponseWriter, r *http.Request) {
func (auth *OIDCProvider) LoginHandler(w http.ResponseWriter, r *http.Request) {
// check for session token
sessionToken, err := r.Cookie(CookieOauthSessionToken)
if err == nil {
err = auth.TryRefreshToken(w, r, sessionToken.Value)
if err != nil {
logging.Debug().Err(err).Msg("failed to refresh token")
auth.clearCookie(w, r)
if err == nil { // session token exists
result, err := auth.TryRefreshToken(r.Context(), sessionToken.Value)
// redirect back to where they requested
// when token refresh is ok
if err == nil {
auth.setIDTokenCookie(w, r, result.jwt, time.Until(result.jwtExpiry))
auth.setSessionTokenCookie(w, r, result.newSession)
ProceedNext(w, r)
return
}
// clear cookies then redirect to home
logging.Err(err).Msg("failed to refresh token")
auth.clearCookie(w, r)
http.Redirect(w, r, "/", http.StatusFound)
return
}

View File

@@ -10,22 +10,21 @@ import (
)
var (
ErrMissingOAuthToken = gperr.New("missing oauth token")
ErrMissingSessionToken = gperr.New("missing session token")
ErrInvalidOAuthToken = gperr.New("invalid oauth token")
ErrInvalidSessionToken = gperr.New("invalid session token")
ErrUserNotAllowed = gperr.New("user not allowed")
)
func IsFrontend(r *http.Request) bool {
return r.Host == common.APIHTTPAddr
}
func requestHost(r *http.Request) string {
// check if it's from backend
switch r.Host {
case common.APIHTTPAddr:
// use XFH
if IsFrontend(r) {
return r.Header.Get("X-Forwarded-Host")
default:
return r.Host
}
return r.Host
}
// cookieDomain returns the fully qualified domain name of the request host

View File

@@ -82,10 +82,6 @@ func (cfg *Config) Validate() gperr.Error {
}
func (cfg *Config) GetLegoConfig() (*User, *lego.Config, gperr.Error) {
if cfg == nil {
cfg = new(Config)
}
if err := cfg.Validate(); err != nil {
return nil, nil, err
}

View File

@@ -76,7 +76,7 @@ func (p *Provider) ObtainCert() error {
return nil
}
if p.cfg.Provider == ProviderLocal {
if p.cfg.Provider == ProviderPseudo {
t := time.NewTicker(1000 * time.Millisecond)
defer t.Stop()
logging.Info().Msg("init client for pseudo provider")

View File

@@ -16,7 +16,6 @@ const (
ConfigPath = ConfigBasePath + "/" + ConfigFileName
IconListCachePath = ConfigBasePath + "/.icon_list_cache.json"
IconCachePath = ConfigBasePath + "/.icon_cache.json"
NamespaceHomepageOverrides = ".homepage"
NamespaceIconCache = ".icon_cache"

View File

@@ -58,6 +58,8 @@ var (
MetricsDisableDisk = GetEnvBool("METRICS_DISABLE_DISK", false)
MetricsDisableNetwork = GetEnvBool("METRICS_DISABLE_NETWORK", false)
MetricsDisableSensors = GetEnvBool("METRICS_DISABLE_SENSORS", false)
ForceResolveCountry = GetEnvBool("FORCE_RESOLVE_COUNTRY", false)
)
func GetEnv[T any](key string, defaultValue T, parser func(string) (T, error)) T {

View File

@@ -18,6 +18,7 @@ import (
"github.com/yusing/go-proxy/internal/logging"
"github.com/yusing/go-proxy/internal/net/gphttp/server"
"github.com/yusing/go-proxy/internal/notif"
"github.com/yusing/go-proxy/internal/proxmox"
proxy "github.com/yusing/go-proxy/internal/route/provider"
"github.com/yusing/go-proxy/internal/task"
"github.com/yusing/go-proxy/internal/utils"
@@ -229,6 +230,7 @@ func (cfg *Config) load() gperr.Error {
errs.Add(cfg.entrypoint.SetAccessLogger(cfg.task, model.Entrypoint.AccessLog))
cfg.initNotification(model.Providers.Notification)
errs.Add(cfg.initAutoCert(model.AutoCert))
errs.Add(cfg.initProxmox(model.Providers.Proxmox))
errs.Add(cfg.loadRouteProviders(&model.Providers))
cfg.value = model
@@ -265,6 +267,10 @@ func (cfg *Config) initAutoCert(autocertCfg *autocert.Config) gperr.Error {
return nil
}
if autocertCfg == nil {
autocertCfg = new(autocert.Config)
}
user, legoCfg, err := autocertCfg.GetLegoConfig()
if err != nil {
return err
@@ -274,6 +280,17 @@ func (cfg *Config) initAutoCert(autocertCfg *autocert.Config) gperr.Error {
return nil
}
func (cfg *Config) initProxmox(proxmoxCfg []proxmox.Config) gperr.Error {
proxmox.Clients.Clear()
var errs = gperr.NewBuilder()
for _, cfg := range proxmoxCfg {
if err := cfg.Init(); err != nil {
errs.Add(err.Subject(cfg.URL))
}
}
return errs.Error()
}
func (cfg *Config) errIfExists(p *proxy.Provider) gperr.Error {
if _, ok := cfg.providers.Load(p.String()); ok {
return gperr.Errorf("provider %s already exists", p.String())
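
initProxmox follows the project's error-builder pattern: initialize every configured node, collect per-node failures, and return one combined error (nil when everything succeeded), so a single bad Proxmox entry does not abort the rest of the config load. A rough stdlib-only sketch of the same aggregate-and-continue idea, assuming nothing about gperr beyond what the diff shows:

package configsketch

import (
	"errors"
	"fmt"
)

type proxmoxConfig struct{ URL string }

func (c proxmoxConfig) Init() error { return nil } // placeholder for the real client setup

// initProxmox initializes every configured node, collecting failures instead
// of stopping at the first one; the caller gets nil when all nodes came up.
func initProxmox(cfgs []proxmoxConfig) error {
	var errs []error
	for _, c := range cfgs {
		if err := c.Init(); err != nil {
			errs = append(errs, fmt.Errorf("%s: %w", c.URL, err))
		}
	}
	return errors.Join(errs...) // nil when errs is empty
}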

View File

@@ -12,6 +12,7 @@ import (
"github.com/yusing/go-proxy/internal/gperr"
"github.com/yusing/go-proxy/internal/logging/accesslog"
"github.com/yusing/go-proxy/internal/notif"
"github.com/yusing/go-proxy/internal/proxmox"
"github.com/yusing/go-proxy/internal/utils"
)
@@ -30,6 +31,7 @@ type (
Docker map[string]string `json:"docker" yaml:"docker,omitempty" validate:"non_empty_docker_keys,dive,unix_addr|url"`
Agents []*agent.AgentConfig `json:"agents" yaml:"agents,omitempty"`
Notification []notif.NotificationConfig `json:"notification" yaml:"notification,omitempty"`
Proxmox []proxmox.Config `json:"proxmox" yaml:"proxmox,omitempty"`
}
Entrypoint struct {
Middlewares []map[string]any `json:"middlewares"`

View File

@@ -7,6 +7,7 @@ import (
"time"
"github.com/yusing/go-proxy/internal/common"
"github.com/yusing/go-proxy/internal/jsonstore"
"github.com/yusing/go-proxy/internal/logging"
"github.com/yusing/go-proxy/internal/task"
"github.com/yusing/go-proxy/internal/utils"
@@ -15,34 +16,24 @@ import (
type cacheEntry struct {
Icon []byte `json:"icon"`
ContentType string `json:"content_type"`
ContentType string `json:"content_type,omitempty"`
LastAccess atomic.Value[time.Time] `json:"last_access"`
}
// cache key can be absolute url or route name.
var (
iconCache = make(map[string]*cacheEntry)
iconCacheMu sync.RWMutex
iconCache = jsonstore.Store[*cacheEntry](common.NamespaceIconCache)
iconMu sync.RWMutex
)
const (
iconCacheTTL = 3 * 24 * time.Hour
cleanUpInterval = time.Minute
maxCacheSize = 1024 * 1024 // 1MB
maxIconSize = 1024 * 1024 // 1MB
maxCacheEntries = 100
)
func InitIconCache() {
iconCacheMu.Lock()
defer iconCacheMu.Unlock()
err := utils.LoadJSONIfExist(common.IconCachePath, &iconCache)
if err != nil {
logging.Error().Err(err).Msg("failed to load icon cache")
} else if len(iconCache) > 0 {
logging.Info().Int("count", len(iconCache)).Msg("icon cache loaded")
}
func init() {
go func() {
cleanupTicker := time.NewTicker(cleanUpInterval)
defer cleanupTicker.Stop()
@@ -55,36 +46,21 @@ func InitIconCache() {
}
}
}()
task.OnProgramExit("save_favicon_cache", func() {
iconCacheMu.Lock()
defer iconCacheMu.Unlock()
if len(iconCache) == 0 {
return
}
if err := utils.SaveJSON(common.IconCachePath, &iconCache, 0o644); err != nil {
logging.Error().Err(err).Msg("failed to save icon cache")
}
})
}
func pruneExpiredIconCache() {
iconCacheMu.Lock()
defer iconCacheMu.Unlock()
nPruned := 0
for key, icon := range iconCache {
for key, icon := range iconCache.Range {
if icon.IsExpired() {
delete(iconCache, key)
iconCache.Delete(key)
nPruned++
}
}
if len(iconCache) > maxCacheEntries {
if iconCache.Size() > maxCacheEntries {
iconCache.Clear()
newIconCache := make(map[string]*cacheEntry, maxCacheEntries)
i := 0
for key, icon := range iconCache {
for key, icon := range iconCache.Range {
if i == maxCacheEntries {
break
}
@@ -93,7 +69,9 @@ func pruneExpiredIconCache() {
i++
}
}
iconCache = newIconCache
for key, icon := range newIconCache {
iconCache.Store(key, icon)
}
}
if nPruned > 0 {
logging.Info().Int("pruned", nPruned).Msg("pruned expired icon cache")
@@ -101,21 +79,18 @@ func pruneExpiredIconCache() {
}
func PruneRouteIconCache(route route) {
iconCacheMu.Lock()
defer iconCacheMu.Unlock()
delete(iconCache, route.Key())
iconCache.Delete(route.Key())
}
func loadIconCache(key string) *FetchResult {
iconCacheMu.RLock()
defer iconCacheMu.RUnlock()
icon, ok := iconCache[key]
iconMu.RLock()
defer iconMu.RUnlock()
icon, ok := iconCache.Load(key)
if ok && len(icon.Icon) > 0 {
logging.Debug().
Str("key", key).
Msg("icon found in cache")
icon.LastAccess.Store(time.Now())
icon.LastAccess.Store(utils.TimeNow())
return &FetchResult{Icon: icon.Icon, contentType: icon.ContentType}
}
return nil
@@ -123,15 +98,17 @@ func loadIconCache(key string) *FetchResult {
func storeIconCache(key string, result *FetchResult) {
icon := result.Icon
if len(icon) > maxCacheSize {
if len(icon) > maxIconSize {
logging.Debug().Int("size", len(icon)).Msg("icon cache size exceeds max cache size")
return
}
iconCacheMu.Lock()
defer iconCacheMu.Unlock()
iconMu.Lock()
defer iconMu.Unlock()
entry := &cacheEntry{Icon: icon, ContentType: result.contentType}
entry.LastAccess.Store(time.Now())
iconCache[key] = entry
iconCache.Store(key, entry)
logging.Debug().Str("key", key).Int("size", len(icon)).Msg("stored icon cache")
}
@@ -140,12 +117,20 @@ func (e *cacheEntry) IsExpired() bool {
}
func (e *cacheEntry) UnmarshalJSON(data []byte) error {
var tmp struct {
Icon []byte `json:"icon"`
ContentType string `json:"content_type,omitempty"`
LastAccess time.Time `json:"last_access"`
}
// check if data is json
if json.Valid(data) {
err := json.Unmarshal(data, &e)
err := json.Unmarshal(data, &tmp)
// return only if unmarshal is successful
// otherwise fallback to base64
if err == nil {
e.Icon = tmp.Icon
e.ContentType = tmp.ContentType
e.LastAccess.Store(tmp.LastAccess)
return nil
}
}
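
The new UnmarshalJSON keeps old on-disk caches readable: it first tries the structured JSON form added here and only then falls back to the previous base64 payload, as the comments above indicate. A self-contained sketch of that try-new-then-fall-back decoder; the exact legacy layout (raw base64 icon bytes) is an assumption for illustration, and LastAccess is simplified to a plain time.Time:

package homepagesketch

import (
	"encoding/base64"
	"encoding/json"
	"time"
)

type cacheEntry struct {
	Icon        []byte    `json:"icon"`
	ContentType string    `json:"content_type,omitempty"`
	LastAccess  time.Time `json:"last_access"`
}

// UnmarshalJSON accepts both the new structured format and the legacy payload,
// so existing cache files survive the upgrade.
func (e *cacheEntry) UnmarshalJSON(data []byte) error {
	if json.Valid(data) {
		type alias cacheEntry // alias avoids recursing back into this method
		var tmp alias
		if err := json.Unmarshal(data, &tmp); err == nil {
			*e = cacheEntry(tmp)
			return nil
		}
	}
	// fallback: treat the payload as the raw base64-encoded icon (assumed legacy format)
	icon, err := base64.StdEncoding.DecodeString(string(data))
	if err != nil {
		return err
	}
	e.Icon = icon
	return nil
}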

View File

@@ -10,6 +10,7 @@ import (
"github.com/lithammer/fuzzysearch/fuzzy"
"github.com/yusing/go-proxy/internal/common"
"github.com/yusing/go-proxy/internal/logging"
"github.com/yusing/go-proxy/internal/task"
"github.com/yusing/go-proxy/internal/utils"
)
@@ -68,6 +69,10 @@ func InitIconListCache() {
Int("display_names", len(iconsCache.DisplayNames)).
Msg("icon list cache loaded")
}
task.OnProgramExit("save_icon_list_cache", func() {
utils.SaveJSON(common.IconListCachePath, iconsCache, 0o644)
})
}
func ListAvailableIcons() (*Cache, error) {

View File

@@ -131,7 +131,7 @@ func NewWatcher(parent task.Parent, r routes.Route) (*Watcher, error) {
case routes.StreamRoute:
w.stream = r
default:
return nil, gperr.New("unexpected route type")
return nil, gperr.Errorf("unexpected route type: %T", r)
}
ctx, cancel := context.WithTimeout(parent.Context(), reqTimeout)

View File

@@ -1,297 +0,0 @@
package internal
import (
"encoding/json"
"io"
"net/http"
"sync"
"time"
"github.com/lithammer/fuzzysearch/fuzzy"
"github.com/yusing/go-proxy/internal/common"
"github.com/yusing/go-proxy/internal/logging"
"github.com/yusing/go-proxy/internal/utils"
)
type GitHubContents struct { //! keep this, may reuse in future
Type string `json:"type"`
Path string `json:"path"`
Name string `json:"name"`
Sha string `json:"sha"`
Size int `json:"size"`
}
type (
IconsMap map[string]map[string]struct{}
IconList []string
Cache struct {
WalkxCode, Selfhst IconsMap
DisplayNames ReferenceDisplayNameMap
IconList IconList // combined into a single list
}
ReferenceDisplayNameMap map[string]string
)
func (icons *Cache) needUpdate() bool {
return len(icons.WalkxCode) == 0 || len(icons.Selfhst) == 0 || len(icons.IconList) == 0 || len(icons.DisplayNames) == 0
}
const updateInterval = 2 * time.Hour
var (
iconsCache *Cache
iconsCahceMu sync.RWMutex
lastUpdate time.Time
)
const (
walkxcodeIcons = "https://cdn.jsdelivr.net/gh/walkxcode/dashboard-icons@master/tree.json"
selfhstIcons = "https://cdn.selfh.st/directory/icons.json"
)
func InitIconListCache() {
iconsCahceMu.Lock()
defer iconsCahceMu.Unlock()
iconsCache = &Cache{
WalkxCode: make(IconsMap),
Selfhst: make(IconsMap),
DisplayNames: make(ReferenceDisplayNameMap),
IconList: []string{},
}
err := utils.LoadJSONIfExist(common.IconListCachePath, iconsCache)
if err != nil {
logging.Error().Err(err).Msg("failed to load icon list cache config")
} else if len(iconsCache.IconList) > 0 {
logging.Info().
Int("icons", len(iconsCache.IconList)).
Int("display_names", len(iconsCache.DisplayNames)).
Msg("icon list cache loaded")
}
}
func ListAvailableIcons() (*Cache, error) {
iconsCahceMu.RLock()
if time.Since(lastUpdate) < updateInterval {
if !iconsCache.needUpdate() {
iconsCahceMu.RUnlock()
return iconsCache, nil
}
}
iconsCahceMu.RUnlock()
iconsCahceMu.Lock()
defer iconsCahceMu.Unlock()
logging.Info().Msg("updating icon data")
icons, err := fetchIconData()
if err != nil {
return nil, err
}
logging.Info().
Int("icons", len(icons.IconList)).
Int("display_names", len(icons.DisplayNames)).
Msg("icons list updated")
iconsCache = icons
lastUpdate = time.Now()
err = utils.SaveJSON(common.IconListCachePath, iconsCache, 0o644)
if err != nil {
logging.Warn().Err(err).Msg("failed to save icon list cache")
}
return icons, nil
}
func SearchIcons(keyword string, limit int) ([]string, error) {
icons, err := ListAvailableIcons()
if err != nil {
return nil, err
}
if keyword == "" {
return utils.Slice(icons.IconList, limit), nil
}
return utils.Slice(fuzzy.Find(keyword, icons.IconList), limit), nil
}
func HasWalkxCodeIcon(name string, filetype string) bool {
icons, err := ListAvailableIcons()
if err != nil {
logging.Error().Err(err).Msg("failed to list icons")
return false
}
if _, ok := icons.WalkxCode[filetype]; !ok {
return false
}
_, ok := icons.WalkxCode[filetype][name+"."+filetype]
return ok
}
func HasSelfhstIcon(name string, filetype string) bool {
icons, err := ListAvailableIcons()
if err != nil {
logging.Error().Err(err).Msg("failed to list icons")
return false
}
if _, ok := icons.Selfhst[filetype]; !ok {
return false
}
_, ok := icons.Selfhst[filetype][name+"."+filetype]
return ok
}
func GetDisplayName(reference string) (string, bool) {
icons, err := ListAvailableIcons()
if err != nil {
logging.Error().Err(err).Msg("failed to list icons")
return "", false
}
displayName, ok := icons.DisplayNames[reference]
return displayName, ok
}
func fetchIconData() (*Cache, error) {
walkxCodeIconMap, walkxCodeIconList, err := fetchWalkxCodeIcons()
if err != nil {
return nil, err
}
n := 0
for _, items := range walkxCodeIconMap {
n += len(items)
}
selfhstIconMap, selfhstIconList, referenceToNames, err := fetchSelfhstIcons()
if err != nil {
return nil, err
}
return &Cache{
WalkxCode: walkxCodeIconMap,
Selfhst: selfhstIconMap,
DisplayNames: referenceToNames,
IconList: append(walkxCodeIconList, selfhstIconList...),
}, nil
}
/*
format:
{
"png": [
"*.png",
],
"svg": [
"*.svg",
],
"webp": [
"*.webp",
]
}
*/
func fetchWalkxCodeIcons() (IconsMap, IconList, error) {
req, err := http.NewRequest(http.MethodGet, walkxcodeIcons, nil)
if err != nil {
return nil, nil, err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, nil, err
}
data := make(map[string][]string)
err = json.Unmarshal(body, &data)
if err != nil {
return nil, nil, err
}
icons := make(IconsMap, len(data))
iconList := make(IconList, 0, 2000)
for fileType, files := range data {
icons[fileType] = make(map[string]struct{}, len(files))
for _, icon := range files {
icons[fileType][icon] = struct{}{}
iconList = append(iconList, "@walkxcode/"+icon)
}
}
return icons, iconList, nil
}
/*
format:
{
"Name": "2FAuth",
"Reference": "2fauth",
"SVG": "Yes",
"PNG": "Yes",
"WebP": "Yes",
"Light": "Yes",
"Category": "Self-Hosted",
"CreatedAt": "2024-08-16 00:27:23+00:00"
}
*/
func fetchSelfhstIcons() (IconsMap, IconList, ReferenceDisplayNameMap, error) {
type SelfhStIcon struct {
Name string `json:"Name"`
Reference string `json:"Reference"`
SVG string `json:"SVG"`
PNG string `json:"PNG"`
WebP string `json:"WebP"`
// Light string
// Category string
// CreatedAt string
}
req, err := http.NewRequest(http.MethodGet, selfhstIcons, nil)
if err != nil {
return nil, nil, nil, err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, nil, nil, err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, nil, nil, err
}
data := make([]SelfhStIcon, 0, 2000)
err = json.Unmarshal(body, &data)
if err != nil {
return nil, nil, nil, err
}
iconList := make(IconList, 0, len(data)*3)
icons := make(IconsMap)
icons["svg"] = make(map[string]struct{}, len(data))
icons["png"] = make(map[string]struct{}, len(data))
icons["webp"] = make(map[string]struct{}, len(data))
referenceToNames := make(ReferenceDisplayNameMap, len(data))
for _, item := range data {
if item.SVG == "Yes" {
icons["svg"][item.Reference+".svg"] = struct{}{}
iconList = append(iconList, "@selfhst/"+item.Reference+".svg")
}
if item.PNG == "Yes" {
icons["png"][item.Reference+".png"] = struct{}{}
iconList = append(iconList, "@selfhst/"+item.Reference+".png")
}
if item.WebP == "Yes" {
icons["webp"][item.Reference+".webp"] = struct{}{}
iconList = append(iconList, "@selfhst/"+item.Reference+".webp")
}
referenceToNames[item.Reference] = item.Name
}
return icons, iconList, referenceToNames, nil
}

View File

@@ -4,6 +4,7 @@ import (
"bufio"
"io"
"net/http"
"os"
"sync"
"time"
@@ -53,6 +54,7 @@ type (
)
const (
StdoutbufSize = 64
MinBufferSize = 4 * kilobyte
MaxBufferSize = 1 * megabyte
)
@@ -79,6 +81,22 @@ func NewMockAccessLogger(parent task.Parent, cfg *RequestLoggerConfig) *AccessLo
return NewAccessLoggerWithIO(parent, NewMockFile(), cfg)
}
func unwrap[Writer any](w io.Writer) []Writer {
var result []Writer
if unwrapped, ok := w.(MultiWriterInterface); ok {
for _, w := range unwrapped.Unwrap() {
if unwrapped, ok := w.(Writer); ok {
result = append(result, unwrapped)
}
}
return result
}
if unwrapped, ok := w.(Writer); ok {
return []Writer{unwrapped}
}
return nil
}
func NewAccessLoggerWithIO(parent task.Parent, writer WriterWithName, anyCfg AnyConfig) *AccessLogger {
cfg := anyCfg.ToConfig()
if cfg.BufferSize == 0 {
@@ -90,6 +108,10 @@ func NewAccessLoggerWithIO(parent task.Parent, writer WriterWithName, anyCfg Any
if cfg.BufferSize > MaxBufferSize {
cfg.BufferSize = MaxBufferSize
}
if _, ok := writer.(*os.File); ok {
cfg.BufferSize = StdoutbufSize
}
l := &AccessLogger{
task: parent.Subtask("accesslog."+writer.Name(), true),
cfg: cfg,
@@ -99,23 +121,8 @@ func NewAccessLoggerWithIO(parent task.Parent, writer WriterWithName, anyCfg Any
logger: logging.With().Str("file", writer.Name()).Logger(),
}
if unwrapped, ok := writer.(MultiWriterInterface); ok {
for _, w := range unwrapped.Unwrap() {
if sr, ok := w.(supportRotate); ok {
l.supportRotate = append(l.supportRotate, sr)
}
if closer, ok := w.(io.Closer); ok {
l.closer = append(l.closer, closer)
}
}
} else {
if sr, ok := writer.(supportRotate); ok {
l.supportRotate = append(l.supportRotate, sr)
}
if closer, ok := writer.(io.Closer); ok {
l.closer = append(l.closer, closer)
}
}
l.supportRotate = unwrap[supportRotate](writer)
l.closer = unwrap[io.Closer](writer)
if cfg.req != nil {
fmt := CommonFormatter{cfg: &cfg.req.Fields}
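
The generic unwrap helper collapses the duplicated single-writer and multi-writer branches: it type-asserts against the type parameter, so one function collects every nested writer that happens to implement supportRotate or io.Closer. A self-contained sketch of that generic filter, assuming only that multi-writers expose an Unwrap() []io.Writer method as the old code did:

package accesslogsketch

import "io"

// unwrap collects every wrapped writer (or the writer itself) that implements T.
// Callers pick the capability they care about, e.g. unwrap[io.Closer](w).
func unwrap[T any](w io.Writer) []T {
	if mw, ok := w.(interface{ Unwrap() []io.Writer }); ok {
		var result []T
		for _, inner := range mw.Unwrap() {
			if t, ok := inner.(T); ok {
				result = append(result, t)
			}
		}
		return result
	}
	if t, ok := w.(T); ok {
		return []T{t}
	}
	return nil
}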

View File

@@ -133,11 +133,6 @@ func (lb *LoadBalancer) AddServer(srv Server) {
lb.rebalance()
lb.impl.OnAddServer(srv)
lb.l.Debug().
Str("action", "add").
Str("server", srv.Name()).
Msgf("%d servers available", lb.pool.Size())
}
func (lb *LoadBalancer) RemoveServer(srv Server) {

View File

@@ -17,8 +17,7 @@ import (
)
type cloudflareRealIP struct {
realIP realIP
Recursive bool
realIP realIP
}
const (
@@ -47,7 +46,7 @@ var CloudflareRealIP = NewMiddleware[cloudflareRealIP]()
func (cri *cloudflareRealIP) setup() {
cri.realIP.RealIPOpts = RealIPOpts{
Header: "CF-Connecting-IP",
Recursive: cri.Recursive,
Recursive: true,
}
}

View File

@@ -35,8 +35,8 @@ var allMiddlewares = map[string]*Middleware{
}
var (
ErrUnknownMiddleware = gperr.New("unknown middleware")
ErrDuplicatedMiddleware = gperr.New("duplicated middleware")
ErrUnknownMiddleware = gperr.New("unknown middleware")
ErrMiddlewareAlreadyExists = gperr.New("middleware with the same name already exists")
)
func Get(name string) (*Middleware, Error) {
@@ -69,7 +69,7 @@ func LoadComposeFiles() {
for name, m := range mws {
name = strutils.ToLowerNoSnake(name)
if _, ok := allMiddlewares[name]; ok {
errs.Add(ErrDuplicatedMiddleware.Subject(name))
errs.Add(ErrMiddlewareAlreadyExists.Subject(name))
continue
}
allMiddlewares[name] = m

View File

@@ -83,9 +83,6 @@ func (disp *Dispatcher) start() {
}
func (disp *Dispatcher) dispatch(msg *LogMessage) {
if true {
return
}
task := disp.task.Subtask("dispatcher")
defer task.Finish("notif dispatched")

View File

@@ -80,8 +80,11 @@ func (p *DockerProvider) loadRoutesImpl() (route.Routes, gperr.Error) {
errs.Add(err.Subject(container.ContainerName))
}
for k, v := range newEntries {
if routes.Contains(k) {
errs.Addf("duplicated alias %s", k)
if conflict, ok := routes[k]; ok {
errs.Add(gperr.Multiline().
Addf("route with alias %s already exists", k).
Addf("container %s", container.ContainerName).
Addf("conflicting container %s", conflict.Container.ContainerName))
} else {
routes[k] = v
}

View File

@@ -12,19 +12,13 @@ import (
type EventHandler struct {
provider *Provider
errs *gperr.Builder
added *gperr.Builder
removed *gperr.Builder
updated *gperr.Builder
errs *gperr.Builder
}
func (p *Provider) newEventHandler() *EventHandler {
return &EventHandler{
provider: p,
errs: gperr.NewBuilder("event errors"),
added: gperr.NewBuilder("added"),
removed: gperr.NewBuilder("removed"),
updated: gperr.NewBuilder("updated"),
}
}
@@ -88,15 +82,12 @@ func (handler *EventHandler) Add(parent task.Parent, route *route.Route) {
err := handler.provider.startRoute(parent, route)
if err != nil {
handler.errs.Add(err.Subject("add"))
} else {
handler.added.Adds(route.Alias)
}
}
func (handler *EventHandler) Remove(route *route.Route) {
route.Finish("route removed")
delete(handler.provider.routes, route.Alias)
handler.removed.Adds(route.Alias)
}
func (handler *EventHandler) Update(parent task.Parent, oldRoute *route.Route, newRoute *route.Route) {
@@ -104,18 +95,11 @@ func (handler *EventHandler) Update(parent task.Parent, oldRoute *route.Route, n
err := handler.provider.startRoute(parent, newRoute)
if err != nil {
handler.errs.Add(err.Subject("update"))
} else {
handler.updated.Adds(newRoute.Alias)
}
}
func (handler *EventHandler) Log() {
results := gperr.NewBuilder("event occurred")
results.AddFrom(handler.added, false)
results.AddFrom(handler.removed, false)
results.AddFrom(handler.updated, false)
results.AddFrom(handler.errs, false)
if result := results.String(); result != "" {
handler.provider.Logger().Info().Msg(result)
if err := handler.errs.Error(); err != nil {
handler.provider.Logger().Info().Msg(err.Error())
}
}

View File

@@ -22,19 +22,19 @@ import (
"github.com/yusing/go-proxy/internal/watcher/health/monitor"
)
type (
ReveseProxyRoute struct {
*Route
type ReveseProxyRoute struct {
*Route
HealthMon health.HealthMonitor `json:"health,omitempty"`
HealthMon health.HealthMonitor `json:"health,omitempty"`
loadBalancer *loadbalancer.LoadBalancer
handler http.Handler
rp *reverseproxy.ReverseProxy
loadBalancer *loadbalancer.LoadBalancer
handler http.Handler
rp *reverseproxy.ReverseProxy
task *task.Task
}
)
task *task.Task
}
var _ routes.ReverseProxyRoute = (*ReveseProxyRoute)(nil)
// var globalMux = http.NewServeMux() // TODO: support regex subdomain matching.
@@ -88,6 +88,11 @@ func NewReverseProxyRoute(base *Route) (*ReveseProxyRoute, gperr.Error) {
return r, nil
}
// ReverseProxy implements routes.ReverseProxyRoute.
func (r *ReveseProxyRoute) ReverseProxy() *reverseproxy.ReverseProxy {
return r.rp
}
// Start implements task.TaskStarter.
func (r *ReveseProxyRoute) Start(parent task.Parent) gperr.Error {
if existing, ok := routes.HTTP.Get(r.Key()); ok && !r.UseLoadBalance() {

View File

@@ -8,7 +8,6 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/yusing/go-proxy/agent/pkg/agent"
"github.com/yusing/go-proxy/internal"
"github.com/yusing/go-proxy/internal/docker"
"github.com/yusing/go-proxy/internal/gperr"
"github.com/yusing/go-proxy/internal/homepage"
@@ -484,7 +483,7 @@ func (r *Route) FinalizeHomepageConfig() {
} else {
key = r.Alias
}
displayName, ok := internal.GetDisplayName(key)
displayName, ok := homepage.GetDisplayName(key)
if ok {
hp.Name = displayName
} else {

View File

@@ -60,7 +60,7 @@ func Ver(major, minor, patch int) Version {
}
func (v Version) String() string {
return fmt.Sprintf("%d.%d.%d", v.Generation, v.Major, v.Minor)
return fmt.Sprintf("v%d.%d.%d", v.Generation, v.Major, v.Minor)
}
func (v Version) MarshalText() ([]byte, error) {
@@ -101,17 +101,17 @@ func ParseVersion(v string) (ver Version) {
if len(parts) != 3 {
return
}
major, err := strconv.Atoi(parts[0])
gen, err := strconv.Atoi(parts[0])
if err != nil {
return
}
minor, err := strconv.Atoi(parts[1])
major, err := strconv.Atoi(parts[1])
if err != nil {
return
}
patch, err := strconv.Atoi(parts[2])
minor, err := strconv.Atoi(parts[2])
if err != nil {
return
}
return Ver(major, minor, patch)
return Ver(gen, major, minor)
}