Compare commits

..

27 Commits
v0.24.3 ... dev

Author SHA1 Message Date
yusing
d61bd5ce51 chore: update go to 1.25.6 and dependencies 2026-01-16 18:34:39 +08:00
yusing
bad3e9a989 chore(README): remove zeabur badge 2026-01-16 14:21:37 +08:00
yusing
9adfd73121 fix(health): correct docker fallback url 2026-01-16 14:09:07 +08:00
yusing
4a652aaf55 fix(swagger): explicit set type names for IconFetchResult and IconMetaSearch 2026-01-16 12:26:14 +08:00
yusing
16c986978d chore(idlewatcher): remove junk comment 2026-01-16 11:35:40 +08:00
yusing
107b7c5f64 Merge branch 'main' into dev 2026-01-16 10:10:21 +08:00
yusing
818d75c8b7 Merge branch 'main' into dev 2026-01-04 12:43:18 +08:00
yusing
f1bc5de3ea Merge branch 'main' into dev 2026-01-04 12:28:32 +08:00
yusing
425ff0b25c Merge branch 'main' into dev 2026-01-02 22:12:11 +08:00
yusing
1f6614e337 refactor(config): correct logic in InitFromFile 2026-01-02 21:57:31 +08:00
yusing
9ba102a33d chore: update goutils 2026-01-02 21:56:55 +08:00
yusing
31c616246b Merge branch 'main' into dev 2026-01-02 15:49:20 +08:00
yusing
390859bd1f Merge branch 'main' into dev 2026-01-02 15:43:04 +08:00
yusing
243662c13b Merge branch 'main' into dev 2026-01-01 18:25:56 +08:00
yusing
588e9f5b18 Merge branch 'main' into dev 2025-12-30 22:01:48 +08:00
yusing
a3bf88cc9c chore(goutils): update subproject commit reference to 51a75d68 2025-12-30 22:00:28 +08:00
yusing
9b1af57859 Merge branch 'main' into dev 2025-12-30 21:52:24 +08:00
yusing
bb7471cc9c fix(tests/metrics): correct syntax error 2025-12-30 21:52:22 +08:00
yusing
a403b2b629 Merge branch 'main' into dev 2025-12-23 12:30:26 +08:00
yusing
54b9e7f236 Merge branch 'main' into dev 2025-12-22 17:15:02 +08:00
yusing
45b89cd452 fix(oidc): add trailing slash to OIDCAuthBasePath to work with paths like /authorize 2025-12-22 17:13:42 +08:00
yusing
72fea96c7b Merge branch 'main' into dev 2025-12-22 12:10:31 +08:00
yusing
aef646be6f Merge branch 'main' into dev 2025-12-22 10:45:44 +08:00
yusing
135a4ff6c7 Merge branch 'main' into dev 2025-12-20 19:31:12 +08:00
yusing
5f418b62c7 chore: upgrade dependencies 2025-12-17 17:37:58 +08:00
yusing
bd92c46375 refactor(http): enhance health check error logic by treating all 5xx as unhealthy 2025-12-17 12:24:04 +08:00
yusing
21a23dd147 fix(idlewatcher): directly serve the request on ready instead of redirecting 2025-12-17 11:48:22 +08:00
38 changed files with 533 additions and 1277 deletions

Submodule goutils updated: 92af8e0866...326c1f1eb3

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"math"
"net"
"sync/atomic"
"time"
"github.com/puzpuzpuz/xsync/v4"
@@ -74,7 +75,8 @@ type ipLog struct {
allowed bool
}
type ContextKey struct{}
// could be nil
var ActiveConfig atomic.Pointer[Config]
const cacheTTL = 1 * time.Minute
@@ -290,16 +292,16 @@ func (c *Config) IPAllowed(ip net.IP) bool {
}
ipAndStr := &maxmind.IPInfo{IP: ip, Str: ipStr}
if c.Deny.Match(ipAndStr) {
c.logAndNotify(ipAndStr, false)
c.cacheRecord(ipAndStr, false)
return false
}
if c.Allow.Match(ipAndStr) {
c.logAndNotify(ipAndStr, true)
c.cacheRecord(ipAndStr, true)
return true
}
if c.Deny.Match(ipAndStr) {
c.logAndNotify(ipAndStr, false)
c.cacheRecord(ipAndStr, false)
return false
}
c.logAndNotify(ipAndStr, c.defaultAllow)
c.cacheRecord(ipAndStr, c.defaultAllow)

View File

@@ -4191,6 +4191,12 @@
"RequestLoggerConfig": {
"type": "object",
"properties": {
"buffer_size": {
"description": "Deprecated: buffer size is adjusted dynamically",
"type": "integer",
"x-nullable": false,
"x-omitempty": false
},
"fields": {
"$ref": "#/definitions/accesslog.Fields",
"x-nullable": false,

View File

@@ -879,6 +879,9 @@ definitions:
type: object
RequestLoggerConfig:
properties:
buffer_size:
description: 'Deprecated: buffer size is adjusted dynamically'
type: integer
fields:
$ref: '#/definitions/accesslog.Fields'
filters:

View File

@@ -10,7 +10,6 @@ import (
"github.com/yusing/godoxy/internal/common"
config "github.com/yusing/godoxy/internal/config/types"
"github.com/yusing/godoxy/internal/notif"
"github.com/yusing/godoxy/internal/route/routes"
"github.com/yusing/godoxy/internal/watcher"
"github.com/yusing/godoxy/internal/watcher/events"
gperr "github.com/yusing/goutils/errs"
@@ -60,15 +59,6 @@ func Load() error {
cfgWatcher = watcher.NewConfigFileWatcher(common.ConfigFileName)
// disable pool logging temporarily since we already have pretty logging
routes.HTTP.DisableLog(true)
routes.Stream.DisableLog(true)
defer func() {
routes.HTTP.DisableLog(false)
routes.Stream.DisableLog(false)
}()
initErr := state.InitFromFile(common.ConfigPath)
err := errors.Join(initErr, state.StartProviders())
if err != nil {

View File

@@ -28,6 +28,7 @@ import (
"github.com/yusing/godoxy/internal/maxmind"
"github.com/yusing/godoxy/internal/notif"
route "github.com/yusing/godoxy/internal/route/provider"
"github.com/yusing/godoxy/internal/route/routes"
"github.com/yusing/godoxy/internal/serialization"
"github.com/yusing/godoxy/internal/types"
gperr "github.com/yusing/goutils/errs"
@@ -73,6 +74,7 @@ func SetState(state config.State) {
cfg := state.Value()
config.ActiveState.Store(state)
acl.ActiveConfig.Store(cfg.ACL)
entrypoint.ActiveConfig.Store(&cfg.Entrypoint)
homepage.ActiveConfig.Store(&cfg.Homepage)
if autocertProvider := state.AutoCertProvider(); autocertProvider != nil {
@@ -117,7 +119,7 @@ func (state *state) Init(data []byte) error {
errs := g.Wait()
// these won't benefit from running on goroutines
errs.Add(state.initNotification())
errs.Add(state.initACL())
errs.Add(state.initAccessLogger())
errs.Add(state.initEntrypoint())
return errs.Error()
}
@@ -190,17 +192,12 @@ func (state *state) FlushTmpLog() {
state.tmpLogBuf.Reset()
}
// initACL initializes the ACL.
func (state *state) initACL() error {
// this one is connection level access logger, different from entrypoint access logger
func (state *state) initAccessLogger() error {
if !state.ACL.Valid() {
return nil
}
err := state.ACL.Start(state.task)
if err != nil {
return err
}
state.task.SetValue(acl.ContextKey{}, state.ACL)
return nil
return state.ACL.Start(state.task)
}
func (state *state) initEntrypoint() error {
@@ -322,6 +319,15 @@ func (state *state) storeProvider(p types.RouteProvider) {
}
func (state *state) loadRouteProviders() error {
// disable pool logging temporarily since we will have pretty logging below
routes.HTTP.ToggleLog(false)
routes.Stream.ToggleLog(false)
defer func() {
routes.HTTP.ToggleLog(true)
routes.Stream.ToggleLog(true)
}()
providers := &state.Providers
errs := gperr.NewGroup("route provider errors")
results := gperr.NewGroup("loaded route providers")

View File

@@ -210,25 +210,23 @@ func setPrivateHostname(c *types.Container, helper containerHelper) {
return
}
if c.Network != "" {
v, hasNetwork := helper.NetworkSettings.Networks[c.Network]
if hasNetwork && v.IPAddress.IsValid() {
v, ok := helper.NetworkSettings.Networks[c.Network]
if ok && v.IPAddress.IsValid() {
c.PrivateHostname = v.IPAddress.String()
return
}
var hasComposeNetwork bool
// try {project_name}_{network_name}
if proj := DockerComposeProject(c); proj != "" {
newNetwork := fmt.Sprintf("%s_%s", proj, c.Network)
v, hasComposeNetwork = helper.NetworkSettings.Networks[newNetwork]
if hasComposeNetwork && v.IPAddress.IsValid() {
c.Network = newNetwork // update network to the new one
c.PrivateHostname = v.IPAddress.String()
return
oldNetwork, newNetwork := c.Network, fmt.Sprintf("%s_%s", proj, c.Network)
if newNetwork != oldNetwork {
v, ok = helper.NetworkSettings.Networks[newNetwork]
if ok && v.IPAddress.IsValid() {
c.Network = newNetwork // update network to the new one
c.PrivateHostname = v.IPAddress.String()
return
}
}
}
if hasNetwork || hasComposeNetwork { // network is found, but no IP assigned yet
return
}
nearest := gperr.DoYouMeanField(c.Network, helper.NetworkSettings.Networks)
addError(c, fmt.Errorf("network %q not found, %w", c.Network, nearest))
return

View File

@@ -100,7 +100,7 @@ func (ep *Entrypoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {
rec := accesslog.GetResponseRecorder(w)
w = rec
defer func() {
ep.accessLogger.LogRequest(r, rec.Response())
ep.accessLogger.Log(r, rec.Response())
accesslog.PutResponseRecorder(rec)
}()
}

View File

@@ -1,6 +1,6 @@
# Homepage
The homepage package provides the GoDoxy WebUI dashboard with support for categories, favorites, widgets, dynamic item configuration, and icon management.
The homepage package provides the GoDoxy WebUI dashboard with support for categories, favorites, widgets, and dynamic item configuration.
## Overview
@@ -194,6 +194,18 @@ Widgets can display various types of information:
- **Links**: Quick access links
- **Custom**: Provider-specific data
## Icon Handling
Icons are handled via `IconURL` type:
```go
type IconURL struct {
// Icon URL with various sources
}
// Automatic favicon fetching from item URL
```
## Categories
### Default Categories

View File

@@ -1,491 +0,0 @@
# Icons Package
Icon URL parsing, fetching, and listing for the homepage dashboard.
## Overview
The icons package manages icon resources from multiple sources with support for light/dark variants and multiple image formats. It provides a unified API for parsing icon URLs, checking icon availability, fetching icon data, and searching available icons from CDN repositories.
### Purpose
- Parse and validate icon URLs from various sources
- Fetch icon data with caching and fallback strategies
- Maintain a searchable index of available icons from walkxcode and selfh.st CDNs
- Support light/dark theme variants and multiple image formats (SVG, PNG, WebP)
### Primary Consumers
- `internal/homepage/` - Homepage route management and icon assignment
- `internal/api/` - Icon search and listing API endpoints
- `internal/route/` - Route icon resolution for proxy targets
### Non-goals
- Icon generation or modification (only fetching)
- Authentication for remote icon sources (public CDNs only)
- Icon validation beyond format checking
### Stability
This package exposes a stable public API. Internal implementations (caching strategies, fetch logic) may change without notice.
## Concepts and Terminology
| Term | Definition |
| ------------ | ------------------------------------------------------------------------------------- |
| **Source** | The origin type of an icon (absolute URL, relative path, walkxcode CDN, selfh.st CDN) |
| **Variant** | Theme variant: none, light, or dark |
| **Key** | Unique identifier combining source and reference (e.g., `@walkxcode/nginx`) |
| **Meta** | Metadata describing available formats and variants for an icon |
| **Provider** | Interface for checking icon existence without fetching data |
## Public API
### Exported Types
#### Source
Source identifies the origin of an icon. Use the constants defined below.
```go
type Source string
const (
// SourceAbsolute is a full URL (http:// or https://)
SourceAbsolute Source = "https://"
// SourceRelative is a path relative to the target service (@target or leading /)
SourceRelative Source = "@target"
// SourceWalkXCode is the walkxcode dashboard-icons CDN
SourceWalkXCode Source = "@walkxcode"
// SourceSelfhSt is the selfh.st icons CDN
SourceSelfhSt Source = "@selfhst"
)
```
#### Variant
Variant indicates the theme preference for icons that support light/dark modes.
```go
type Variant string
const (
VariantNone Variant = "" // Default, no variant suffix
VariantLight Variant = "light" // Light theme variant (-light suffix)
VariantDark Variant = "dark" // Dark theme variant (-dark suffix)
)
```
#### URL
URL represents a parsed icon URL with its source and metadata.
```go
type URL struct {
// Source identifies the icon origin
Source `json:"source"`
// FullURL contains the resolved URL for absolute/relative sources
FullURL *string `json:"value,omitempty"`
// Extra contains metadata for CDN sources (walkxcode/selfhst)
Extra *Extra `json:"extra,omitempty"`
}
```
**URL Methods:**
- `Parse(v string) error` - Parses an icon URL string (implements `strutils.Parser`)
- `URL() string` - Returns the absolute URL for fetching
- `HasIcon() bool` - Checks if the icon exists (requires Provider to be set)
- `WithVariant(variant Variant) *URL` - Returns a new URL with the specified variant
- `String() string` - Returns the original URL representation
- `MarshalText() ([]byte, error)` - Serializes to text (implements `encoding.TextMarshaler`)
- `UnmarshalText(data []byte) error` - Deserializes from text (implements `encoding.TextUnmarshaler`)
#### Extra
Extra contains metadata for icons from CDN sources.
```go
type Extra struct {
// Key is the unique icon key
Key Key `json:"key"`
// Ref is the icon reference name (without variant suffix)
Ref string `json:"ref"`
// FileType is the image format: "svg", "png", or "webp"
FileType string `json:"file_type"`
// IsLight indicates if this is a light variant
IsLight bool `json:"is_light"`
// IsDark indicates if this is a dark variant
IsDark bool `json:"is_dark"`
}
```
#### Key
Key is a unique identifier for an icon from a specific source.
```go
type Key string
// NewKey creates a key from source and reference
func NewKey(source Source, reference string) Key
// SourceRef extracts the source and reference from a key
func (k Key) SourceRef() (Source, string)
```
#### Meta
Meta stores availability metadata for an icon.
```go
type Meta struct {
// Available formats
SVG bool `json:"SVG"` // SVG format available
PNG bool `json:"PNG"` // PNG format available
WebP bool `json:"WebP"` // WebP format available
// Available variants
Light bool `json:"Light"` // Light variant available
Dark bool `json:"Dark"` // Dark variant available
// DisplayName is the human-readable name (selfh.st only)
DisplayName string `json:"-"`
// Tag is the category tag (selfh.st only)
Tag string `json:"-"`
}
// Filenames returns all available filename variants for this icon
func (icon *Meta) Filenames(ref string) []string
```
### Exported Functions
```go
// NewURL creates a URL for a CDN source with the given reference and format
func NewURL(source Source, refOrName, format string) *URL
// ErrInvalidIconURL is returned when icon URL parsing fails
var ErrInvalidIconURL = gperr.New("invalid icon url")
```
### Provider Interface
```go
type Provider interface {
// HasIcon returns true if the icon exists in the provider's catalog
HasIcon(u *URL) bool
}
// SetProvider sets the global icon provider for existence checks
func SetProvider(p Provider)
```
The provider pattern allows the icons package to check icon existence without fetching data. The `list` subpackage registers a provider that checks against the cached icon list.
## Architecture
### Core Components
```mermaid
graph TD
subgraph icons/
URL["URL"] --> Parser[URL Parser]
URL --> VariantHandler[Variant Handler]
Key --> Provider
end
subgraph fetch/
FetchFavIconFromURL --> FetchIconAbsolute
FetchFavIconFromURL --> FindIcon
FindIcon --> fetchKnownIcon
FindIcon --> findIconSlow
fetchKnownIcon --> FetchIconAbsolute
end
subgraph list/
InitCache --> updateIcons
updateIcons --> UpdateWalkxCodeIcons
updateIcons --> UpdateSelfhstIcons
SearchIcons --> fuzzyRank[Fuzzy Rank Match]
HasIcon --> ListAvailableIcons
end
style URL fill:#22553F,color:#fff
style list fill:#22553F,color:#fff
style fetch fill:#22553F,color:#fff
```
### Component Interactions
1. **URL Parsing** (`url.go`): Parses icon URL strings and validates format
2. **Icon Existence** (`provider.go`): Delegates to registered Provider
3. **Icon Fetching** (`fetch/fetch.go`): Fetches icon data with caching
4. **Icon Listing** (`list/list_icons.go`): Maintains cached index of available icons
### Data Flow
```mermaid
sequenceDiagram
participant Client
participant URLParser
participant Provider
participant FetchCache
participant ExternalCDN
Client->>URLParser: Parse("@walkxcode/nginx.svg")
URLParser->>Provider: HasIcon(icon)
Provider->>FetchCache: Check cached list
FetchCache-->>Provider: exists
Provider-->>URLParser: true
URLParser-->>Client: URL object
Client->>FetchCache: FetchFavIconFromURL(url)
FetchCache->>ExternalCDN: GET https://...
ExternalCDN-->>FetchCache: icon data
FetchCache-->>Client: Result{Icon: [...], StatusCode: 200}
```
## Subpackages
### fetch/
Icon fetching implementation with caching and fallback strategies.
```go
type Result struct {
Icon []byte // Raw icon image data
StatusCode int // HTTP status code from fetch
}
// FetchFavIconFromURL fetches an icon from a parsed URL
func FetchFavIconFromURL(ctx context.Context, iconURL *URL) (Result, error)
// FindIcon finds an icon for a route with variant support
func FindIcon(ctx context.Context, r route, uri string, variant Variant) (Result, error)
```
**Key behaviors:**
- `FetchIconAbsolute` is cached with 200 entries and 4-hour TTL
- `findIconSlow` has infinite retries with 15-second backoff
- HTML parsing fallback extracts `<link rel=icon>` from target pages
### list/
Icon catalog management with search and caching.
```go
type IconMap map[Key]*Meta
type IconMetaSearch struct {
*Meta
Source Source `json:"Source"`
Ref string `json:"Ref"`
rank int
}
// InitCache loads icon metadata from cache or remote sources
func InitCache()
// ListAvailableIcons returns the current icon catalog
func ListAvailableIcons() IconMap
// SearchIcons performs fuzzy search on icon names
func SearchIcons(keyword string, limit int) []*IconMetaSearch
// HasIcon checks if an icon exists in the catalog
func HasIcon(icon *URL) bool
```
**Key behaviors:**
- Updates from walkxcode and selfh.st CDNs every 2 hours
- Persists cache to disk for fast startup
- Fuzzy search uses Levenshtein distance ranking
## Configuration
### Cache Location
Icons cache is stored at the path specified by `common.IconListCachePath`.
### Environment Variables
No direct environment variable configuration. Cache is managed internally.
### Reloading
Icon cache updates automatically every 2 hours in the background. Manual refresh requires program restart.
## Observability
### Logs
- `failed to load icons` - Cache load failure at startup
- `icons loaded` - Successful cache load with entry count
- `updating icon data` - Background update started
- `icons list updated` - Successful cache refresh with entry count
- `failed to save icons` - Cache persistence failure
### Metrics
No metrics exposed directly. Status codes in `Result` can be monitored via HTTP handlers.
### Tracing
Standard `context.Context` propagation is used throughout. Fetch operations respect context cancellation and deadlines.
## Security Considerations
- **Input Validation**: Icon URLs are strictly validated for format and source
- **SSRF Protection**: Only absolute URLs are passed through directly; no arbitrary URL construction
- **Content-Type**: Detected from response headers or inferred from SVG magic bytes
- **Size Limits**: Cache limited to 200 entries; no explicit size limit on icon data
- **Timeouts**: 3-second timeout on favicon fetches, 5-second timeout on list updates
## Performance Characteristics
- **Parsing**: O(1) string parsing with early validation
- **Caching**: LRU-style cache with TTL for fetched icons
- **Background Updates**: Non-blocking updates every 2 hours
- **Search**: O(n) fuzzy match with early exit at rank > 3
- **Memory**: Icon list typically contains ~2000 entries
## Failure Modes and Recovery
| Failure | Behavior | Recovery |
| ---------------------- | ---------------------------------------- | -------------------------------- |
| CDN fetch timeout | Return cached data or fail | Automatic retry with backoff |
| Cache load failure | Attempt legacy format, then remote fetch | Manual cache reset if persistent |
| Icon not found in list | Return error from Parse | User must select valid icon |
| HTML parse failure | Return "icon element not found" | Manual icon selection |
## Usage Examples
### Basic: Parse and Generate URL
```go
package main
import (
"fmt"
"github.com/yusing/godoxy/internal/homepage/icons"
)
func main() {
// Parse a CDN icon URL
url := &icons.URL{}
err := url.Parse("@walkxcode/nginx.svg")
if err != nil {
panic(err)
}
// Get the actual fetchable URL
fmt.Println(url.URL())
// Output: https://cdn.jsdelivr.net/gh/walkxcode/dashboard-icons/svg/nginx.svg
// Get string representation
fmt.Println(url.String())
// Output: @walkxcode/nginx.svg
// Create with dark variant
darkUrl := url.WithVariant(icons.VariantDark)
fmt.Println(darkUrl.URL())
// Output: https://cdn.jsdelivr.net/gh/walkxcode/dashboard-icons/svg/nginx-dark.svg
}
```
### Advanced: Fetch Icon Data
```go
package main
import (
"context"
"fmt"
"net/http"
"github.com/yusing/godoxy/internal/homepage/icons"
"github.com/yusing/godoxy/internal/homepage/icons/fetch"
"github.com/yusing/godoxy/internal/homepage/icons/list"
)
func main() {
// Initialize the icon list cache first
list.InitCache()
// Parse icon URL
url := &icons.URL{}
if err := url.Parse("@walkxcode/nginx.svg"); err != nil {
panic(err)
}
// Fetch icon data
ctx := context.Background()
result, err := fetch.FetchFavIconFromURL(ctx, url)
if err != nil {
fmt.Printf("Fetch failed: %v\n", err)
return
}
if result.StatusCode != http.StatusOK {
fmt.Printf("HTTP %d\n", result.StatusCode)
return
}
fmt.Printf("Fetched %d bytes (HTTP %d)\n",
len(result.Icon), result.StatusCode)
}
```
### Integration: Search Available Icons
```go
package main
import (
"fmt"
"github.com/yusing/godoxy/internal/homepage/icons/list"
)
func main() {
// Initialize cache
list.InitCache()
// Search for icons matching a keyword
results := list.SearchIcons("nginx", 5)
for _, icon := range results {
source, ref := icon.Key.SourceRef()
fmt.Printf("[%s] %s - SVG:%v PNG:%v WebP:%v\n",
source, ref, icon.SVG, icon.PNG, icon.WebP)
}
}
```
## Testing Notes
- Unit tests in `url_test.go` validate parsing and serialization
- Test mode (`common.IsTest`) bypasses existence checks
- Mock HTTP in list tests via `MockHTTPGet()`
- Golden tests not used; test fixtures embedded in test cases
## Icon URL Formats
| Format | Example | Output URL |
| ------------- | ------------------------------ | --------------------------------------------------------------------- |
| Absolute | `https://example.com/icon.png` | `https://example.com/icon.png` |
| Relative | `@target/favicon.ico` | `/favicon.ico` |
| WalkXCode | `@walkxcode/nginx.svg` | `https://cdn.jsdelivr.net/gh/walkxcode/dashboard-icons/svg/nginx.svg` |
| Selfh.st | `@selfhst/adguard-home.webp` | `https://cdn.jsdelivr.net/gh/selfhst/icons/webp/adguard-home.webp` |
| Light variant | `@walkxcode/nginx-light.png` | `.../nginx-light.png` |
| Dark variant | `@walkxcode/nginx-dark.svg` | `.../nginx-dark.svg` |

View File

@@ -1,13 +0,0 @@
# Types Package
Configuration types for the homepage package.
## Config
```go
type Config struct {
UseDefaultCategories bool `json:"use_default_categories"`
}
var ActiveConfig atomic.Pointer[Config]
```

View File

@@ -612,6 +612,10 @@ func (w *Watcher) watchUntilDestroy() (returnCause error) {
if ready {
// Container is now ready, notify waiting handlers
w.healthTicker.Stop()
select {
case w.readyNotifyCh <- struct{}{}:
default: // channel full, notification already pending
}
w.resetIdleTimer()
}
// If not ready yet, keep checking on next tick

View File

@@ -20,20 +20,25 @@ import (
)
type (
File interface {
io.WriteCloser
supportRotate
Name() string
AccessLogger interface {
Log(req *http.Request, res *http.Response)
LogError(req *http.Request, err error)
LogACL(info *maxmind.IPInfo, blocked bool)
Config() *Config
Flush()
Close() error
}
fileAccessLogger struct {
accessLogger struct {
task *task.Task
cfg *Config
writer BufferedWriter
file File
writeLock *sync.Mutex
closed bool
writer BufferedWriter
supportRotate SupportRotate
writeLock *sync.Mutex
closed bool
writeCount int64
bufSize int
@@ -43,7 +48,32 @@ type (
logger zerolog.Logger
RequestFormatter
ACLLogFormatter
ACLFormatter
}
Writer interface {
io.WriteCloser
ShouldBeBuffered() bool
Name() string // file name or path
}
SupportRotate interface {
io.Writer
supportRotate
Name() string
}
AccessLogRotater interface {
Rotate(result *RotateResult) (rotated bool, err error)
}
RequestFormatter interface {
// AppendRequestLog appends a log line to line with or without a trailing newline
AppendRequestLog(line []byte, req *http.Request, res *http.Response) []byte
}
ACLFormatter interface {
// AppendACLLog appends a log line to line with or without a trailing newline
AppendACLLog(line []byte, info *maxmind.IPInfo, blocked bool) []byte
}
)
@@ -66,87 +96,112 @@ const (
var bytesPool = synk.GetUnsizedBytesPool()
var sizedPool = synk.GetSizedBytesPool()
func NewFileAccessLogger(parent task.Parent, file File, anyCfg AnyConfig) AccessLogger {
func NewAccessLogger(parent task.Parent, cfg AnyConfig) (AccessLogger, error) {
writers, err := cfg.Writers()
if err != nil {
return nil, err
}
return NewMultiAccessLogger(parent, cfg, writers), nil
}
func NewMockAccessLogger(parent task.Parent, cfg *RequestLoggerConfig) AccessLogger {
return NewAccessLoggerWithIO(parent, NewMockFile(true), cfg)
}
func NewAccessLoggerWithIO(parent task.Parent, writer Writer, anyCfg AnyConfig) AccessLogger {
cfg := anyCfg.ToConfig()
if cfg.RotateInterval == 0 {
cfg.RotateInterval = defaultRotateInterval
}
name := file.Name()
l := &fileAccessLogger{
task: parent.Subtask("accesslog."+name, true),
l := &accessLogger{
task: parent.Subtask("accesslog."+writer.Name(), true),
cfg: cfg,
bufSize: InitialBufferSize,
errRateLimiter: rate.NewLimiter(rate.Every(errRateLimit), errBurst),
logger: log.With().Str("file", name).Logger(),
logger: log.With().Str("file", writer.Name()).Logger(),
}
l.writeLock, _ = writerLocks.LoadOrStore(name, &sync.Mutex{})
l.writeLock, _ = writerLocks.LoadOrStore(writer.Name(), &sync.Mutex{})
l.writer = ioutils.NewBufferedWriter(file, InitialBufferSize)
l.file = file
if writer.ShouldBeBuffered() {
l.writer = ioutils.NewBufferedWriter(writer, InitialBufferSize)
} else {
l.writer = NewUnbufferedWriter(writer)
}
if supportRotate, ok := writer.(SupportRotate); ok {
l.supportRotate = supportRotate
}
if cfg.req != nil {
fmt := CommonFormatter{cfg: &cfg.req.Fields}
switch cfg.req.Format {
case FormatCommon:
l.RequestFormatter = CommonFormatter{cfg: &cfg.req.Fields}
l.RequestFormatter = &fmt
case FormatCombined:
l.RequestFormatter = CombinedFormatter{CommonFormatter{cfg: &cfg.req.Fields}}
l.RequestFormatter = &CombinedFormatter{fmt}
case FormatJSON:
l.RequestFormatter = JSONFormatter{cfg: &cfg.req.Fields}
l.RequestFormatter = &JSONFormatter{fmt}
default: // should not happen, validation has done by validate tags
panic("invalid access log format")
}
} else {
l.ACLFormatter = ACLLogFormatter{}
}
go l.start()
return l
}
func (l *fileAccessLogger) Config() *Config {
func (l *accessLogger) Config() *Config {
return l.cfg
}
func (l *fileAccessLogger) LogRequest(req *http.Request, res *http.Response) {
if !l.cfg.ShouldLogRequest(req, res) {
func (l *accessLogger) shouldLog(req *http.Request, res *http.Response) bool {
if !l.cfg.req.Filters.StatusCodes.CheckKeep(req, res) ||
!l.cfg.req.Filters.Method.CheckKeep(req, res) ||
!l.cfg.req.Filters.Headers.CheckKeep(req, res) ||
!l.cfg.req.Filters.CIDR.CheckKeep(req, res) {
return false
}
return true
}
func (l *accessLogger) Log(req *http.Request, res *http.Response) {
if !l.shouldLog(req, res) {
return
}
line := bytesPool.GetBuffer()
defer bytesPool.PutBuffer(line)
l.AppendRequestLog(line, req, res)
// line is never empty
if line.Bytes()[line.Len()-1] != '\n' {
line.WriteByte('\n')
line := bytesPool.Get()
line = l.AppendRequestLog(line, req, res)
if line[len(line)-1] != '\n' {
line = append(line, '\n')
}
l.write(line.Bytes())
l.write(line)
bytesPool.Put(line)
}
var internalErrorResponse = &http.Response{
StatusCode: http.StatusInternalServerError,
Status: http.StatusText(http.StatusInternalServerError),
func (l *accessLogger) LogError(req *http.Request, err error) {
l.Log(req, &http.Response{StatusCode: http.StatusInternalServerError, Status: err.Error()})
}
func (l *fileAccessLogger) LogError(req *http.Request, err error) {
l.LogRequest(req, internalErrorResponse)
}
func (l *fileAccessLogger) LogACL(info *maxmind.IPInfo, blocked bool) {
line := bytesPool.GetBuffer()
defer bytesPool.PutBuffer(line)
l.AppendACLLog(line, info, blocked)
// line is never empty
if line.Bytes()[line.Len()-1] != '\n' {
line.WriteByte('\n')
func (l *accessLogger) LogACL(info *maxmind.IPInfo, blocked bool) {
line := bytesPool.Get()
line = l.AppendACLLog(line, info, blocked)
if line[len(line)-1] != '\n' {
line = append(line, '\n')
}
l.write(line.Bytes())
l.write(line)
bytesPool.Put(line)
}
func (l *fileAccessLogger) ShouldRotate() bool {
return l.cfg.Retention.IsValid()
func (l *accessLogger) ShouldRotate() bool {
return l.supportRotate != nil && l.cfg.Retention.IsValid()
}
func (l *fileAccessLogger) Rotate(result *RotateResult) (rotated bool, err error) {
func (l *accessLogger) Rotate(result *RotateResult) (rotated bool, err error) {
if !l.ShouldRotate() {
return false, nil
}
@@ -155,11 +210,11 @@ func (l *fileAccessLogger) Rotate(result *RotateResult) (rotated bool, err error
l.writeLock.Lock()
defer l.writeLock.Unlock()
rotated, err = rotateLogFile(l.file, l.cfg.Retention, result)
rotated, err = rotateLogFile(l.supportRotate, l.cfg.Retention, result)
return
}
func (l *fileAccessLogger) handleErr(err error) {
func (l *accessLogger) handleErr(err error) {
if l.errRateLimiter.Allow() {
gperr.LogError("failed to write access log", err, &l.logger)
} else {
@@ -168,7 +223,7 @@ func (l *fileAccessLogger) handleErr(err error) {
}
}
func (l *fileAccessLogger) start() {
func (l *accessLogger) start() {
defer func() {
l.Flush()
l.Close()
@@ -204,7 +259,7 @@ func (l *fileAccessLogger) start() {
}
}
func (l *fileAccessLogger) Close() error {
func (l *accessLogger) Close() error {
l.writeLock.Lock()
defer l.writeLock.Unlock()
if l.closed {
@@ -215,7 +270,7 @@ func (l *fileAccessLogger) Close() error {
return l.writer.Close()
}
func (l *fileAccessLogger) Flush() {
func (l *accessLogger) Flush() {
l.writeLock.Lock()
defer l.writeLock.Unlock()
if l.closed {
@@ -224,7 +279,7 @@ func (l *fileAccessLogger) Flush() {
l.writer.Flush()
}
func (l *fileAccessLogger) write(data []byte) {
func (l *accessLogger) write(data []byte) {
l.writeLock.Lock()
defer l.writeLock.Unlock()
if l.closed {
@@ -239,7 +294,7 @@ func (l *fileAccessLogger) write(data []byte) {
atomic.AddInt64(&l.writeCount, int64(n))
}
func (l *fileAccessLogger) adjustBuffer() {
func (l *accessLogger) adjustBuffer() {
wps := int(atomic.SwapInt64(&l.writeCount, 0)) / int(bufferAdjustInterval.Seconds())
origBufSize := l.bufSize
newBufSize := origBufSize

View File

@@ -1,7 +1,6 @@
package accesslog_test
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
@@ -54,13 +53,13 @@ var (
)
func fmtLog(cfg *RequestLoggerConfig) (ts string, line string) {
buf := bytes.NewBuffer(make([]byte, 0, 1024))
buf := make([]byte, 0, 1024)
t := time.Now()
logger := NewMockAccessLogger(testTask, cfg)
mockable.MockTimeNow(t)
logger.(RequestFormatter).AppendRequestLog(buf, req, resp)
return t.Format(LogTimeFormat), buf.String()
buf = logger.(RequestFormatter).AppendRequestLog(buf, req, resp)
return t.Format(LogTimeFormat), string(buf)
}
func TestAccessLoggerCommon(t *testing.T) {
@@ -142,6 +141,9 @@ func TestAccessLoggerJSON(t *testing.T) {
expect.Equal(t, entry.UserAgent, ua)
expect.Equal(t, len(entry.Headers), 0)
expect.Equal(t, len(entry.Cookies), 0)
if status >= 400 {
expect.Equal(t, entry.Error, http.StatusText(status))
}
}
func BenchmarkAccessLoggerJSON(b *testing.B) {
@@ -150,7 +152,7 @@ func BenchmarkAccessLoggerJSON(b *testing.B) {
logger := NewMockAccessLogger(testTask, config)
b.ResetTimer()
for b.Loop() {
logger.LogRequest(req, resp)
logger.Log(req, resp)
}
}
@@ -160,6 +162,6 @@ func BenchmarkAccessLoggerCombined(b *testing.B) {
logger := NewMockAccessLogger(testTask, config)
b.ResetTimer()
for b.Loop() {
logger.LogRequest(req, resp)
logger.Log(req, resp)
}
}

View File

@@ -13,9 +13,10 @@ type ReaderAtSeeker interface {
// BackScanner provides an interface to read a file backward line by line.
type BackScanner struct {
file ReaderAtSeeker
size int64
chunkBuf []byte
file ReaderAtSeeker
size int64
chunkSize int
chunkBuf []byte
offset int64
chunk []byte
@@ -26,25 +27,16 @@ type BackScanner struct {
// NewBackScanner creates a new Scanner to read the file backward.
// chunkSize determines the size of each read chunk from the end of the file.
func NewBackScanner(file ReaderAtSeeker, fileSize int64, chunkSize int) *BackScanner {
return newBackScanner(file, fileSize, sizedPool.GetSized(chunkSize))
return newBackScanner(file, fileSize, make([]byte, chunkSize))
}
func newBackScanner(file ReaderAtSeeker, fileSize int64, buf []byte) *BackScanner {
return &BackScanner{
file: file,
size: fileSize,
offset: fileSize,
chunkBuf: buf,
}
}
// Release releases the buffer back to the pool.
func (s *BackScanner) Release() {
sizedPool.Put(s.chunkBuf)
s.chunkBuf = nil
if s.chunk != nil {
sizedPool.Put(s.chunk)
s.chunk = nil
file: file,
size: fileSize,
offset: fileSize,
chunkSize: len(buf),
chunkBuf: buf,
}
}
@@ -72,14 +64,13 @@ func (s *BackScanner) Scan() bool {
// No more data to read; check remaining buffer
if len(s.chunk) > 0 {
s.line = s.chunk
sizedPool.Put(s.chunk)
s.chunk = nil
return true
}
return false
}
newOffset := max(0, s.offset-int64(len(s.chunkBuf)))
newOffset := max(0, s.offset-int64(s.chunkSize))
chunkSize := s.offset - newOffset
chunk := s.chunkBuf[:chunkSize]
@@ -94,19 +85,8 @@ func (s *BackScanner) Scan() bool {
}
// Prepend the chunk to the buffer
if s.chunk == nil { // first chunk
s.chunk = sizedPool.GetSized(2 * len(s.chunkBuf))
copy(s.chunk, chunk[:n])
s.chunk = s.chunk[:n]
} else {
neededSize := n + len(s.chunk)
newChunk := sizedPool.GetSized(max(neededSize, 2*len(s.chunkBuf)))
copy(newChunk, chunk[:n])
copy(newChunk[n:], s.chunk)
sizedPool.Put(s.chunk)
s.chunk = newChunk[:neededSize]
}
clone := append([]byte{}, chunk[:n]...)
s.chunk = append(clone, s.chunk...)
s.offset = newOffset
// Check for newline in the updated buffer
@@ -131,3 +111,12 @@ func (s *BackScanner) Bytes() []byte {
func (s *BackScanner) Err() error {
return s.err
}
func (s *BackScanner) Reset() error {
_, err := s.file.Seek(0, io.SeekStart)
if err != nil {
return err
}
*s = *newBackScanner(s.file, s.size, s.chunkBuf)
return nil
}

View File

@@ -1,17 +1,15 @@
package accesslog
import (
"bytes"
"fmt"
"math/rand/v2"
"net/http"
"net/http/httptest"
"os"
"strconv"
"strings"
"testing"
"github.com/spf13/afero"
expect "github.com/yusing/goutils/testing"
strutils "github.com/yusing/goutils/strings"
"github.com/yusing/goutils/task"
@@ -137,40 +135,88 @@ func TestBackScannerWithVaryingChunkSizes(t *testing.T) {
}
}
var logEntry = func() func() []byte {
func logEntry() []byte {
accesslog := NewMockAccessLogger(task.RootTask("test", false), &RequestLoggerConfig{
Format: FormatJSON,
})
contentTypes := []string{"application/json", "text/html", "text/plain", "application/xml", "application/x-www-form-urlencoded"}
userAgents := []string{"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/120.0", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/120.0", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/120.0"}
methods := []string{"GET", "POST", "PUT", "DELETE", "PATCH", "OPTIONS", "HEAD"}
paths := []string{"/", "/about", "/contact", "/login", "/logout", "/register", "/profile"}
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
allocSize := rand.IntN(8192)
w.Header().Set("Content-Type", contentTypes[rand.IntN(len(contentTypes))])
w.Header().Set("Content-Length", strconv.Itoa(allocSize))
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte("hello"))
}))
srv.URL = "http://localhost:8080"
return func() []byte {
// make a request to the server
req, _ := http.NewRequest(http.MethodGet, srv.URL, nil)
res := httptest.NewRecorder()
req.Header.Set("User-Agent", userAgents[rand.IntN(len(userAgents))])
req.Method = methods[rand.IntN(len(methods))]
req.URL.Path = paths[rand.IntN(len(paths))]
// server the request
srv.Config.Handler.ServeHTTP(res, req)
b := bytes.NewBuffer(make([]byte, 0, 1024))
accesslog.(RequestFormatter).AppendRequestLog(b, req, res.Result())
return b.Bytes()
defer srv.Close()
// make a request to the server
req, _ := http.NewRequest(http.MethodGet, srv.URL, nil)
res := httptest.NewRecorder()
// server the request
srv.Config.Handler.ServeHTTP(res, req)
b := accesslog.(RequestFormatter).AppendRequestLog(nil, req, res.Result())
if b[len(b)-1] != '\n' {
b = append(b, '\n')
}
}()
return b
}
func TestReset(t *testing.T) {
file, err := afero.TempFile(afero.NewOsFs(), "", "accesslog")
if err != nil {
t.Fatalf("failed to create temp file: %v", err)
}
defer os.Remove(file.Name())
line := logEntry()
nLines := 1000
for range nLines {
_, err := file.Write(line)
if err != nil {
t.Fatalf("failed to write to temp file: %v", err)
}
}
linesRead := 0
stat, _ := file.Stat()
s := NewBackScanner(file, stat.Size(), defaultChunkSize)
for s.Scan() {
linesRead++
}
if err := s.Err(); err != nil {
t.Errorf("scanner error: %v", err)
}
expect.Equal(t, linesRead, nLines)
err = s.Reset()
if err != nil {
t.Errorf("failed to reset scanner: %v", err)
}
linesRead = 0
for s.Scan() {
linesRead++
}
if err := s.Err(); err != nil {
t.Errorf("scanner error: %v", err)
}
expect.Equal(t, linesRead, nLines)
}
// 100000 log entries.
func BenchmarkBackScanner(b *testing.B) {
mockFile := NewMockFile(false)
line := logEntry()
for range 100000 {
_, _ = mockFile.Write(line)
}
for i := range 14 {
chunkSize := (2 << i) * kilobyte
scanner := NewBackScanner(mockFile, mockFile.MustSize(), chunkSize)
name := strutils.FormatByteSize(chunkSize)
b.ResetTimer()
b.Run(name, func(b *testing.B) {
for b.Loop() {
_ = scanner.Reset()
for scanner.Scan() {
}
}
})
}
}
func BenchmarkBackScannerRealFile(b *testing.B) {
file, err := afero.TempFile(afero.NewOsFs(), "", "accesslog")
if err != nil {
@@ -178,58 +224,51 @@ func BenchmarkBackScannerRealFile(b *testing.B) {
}
defer os.Remove(file.Name())
buf := bytes.NewBuffer(nil)
for range 100000 {
buf.Write(logEntry())
for range 10000 {
_, err = file.Write(logEntry())
if err != nil {
b.Fatalf("failed to write to temp file: %v", err)
}
}
fSize := int64(buf.Len())
_, err = file.Write(buf.Bytes())
if err != nil {
b.Fatalf("failed to write to file: %v", err)
stat, _ := file.Stat()
scanner := NewBackScanner(file, stat.Size(), 256*kilobyte)
b.ResetTimer()
for scanner.Scan() {
}
// file position does not matter, Seek not needed
for i := range 12 {
chunkSize := (2 << i) * kilobyte
name := strutils.FormatByteSize(chunkSize)
b.ResetTimer()
b.Run(name, func(b *testing.B) {
for b.Loop() {
scanner := NewBackScanner(file, fSize, chunkSize)
for scanner.Scan() {
}
scanner.Release()
}
})
if err := scanner.Err(); err != nil {
b.Errorf("scanner error: %v", err)
}
}
/*
BenchmarkBackScannerRealFile
BenchmarkBackScannerRealFile/2_KiB
BenchmarkBackScannerRealFile/2_KiB-10 21 51796773 ns/op 619 B/op 1 allocs/op
BenchmarkBackScannerRealFile/4_KiB
BenchmarkBackScannerRealFile/4_KiB-10 36 32081281 ns/op 699 B/op 1 allocs/op
BenchmarkBackScannerRealFile/8_KiB
BenchmarkBackScannerRealFile/8_KiB-10 57 22155619 ns/op 847 B/op 1 allocs/op
BenchmarkBackScannerRealFile/16_KiB
BenchmarkBackScannerRealFile/16_KiB-10 62 21323125 ns/op 1449 B/op 1 allocs/op
BenchmarkBackScannerRealFile/32_KiB
BenchmarkBackScannerRealFile/32_KiB-10 63 17534883 ns/op 2729 B/op 1 allocs/op
BenchmarkBackScannerRealFile/64_KiB
BenchmarkBackScannerRealFile/64_KiB-10 73 17877029 ns/op 4617 B/op 1 allocs/op
BenchmarkBackScannerRealFile/128_KiB
BenchmarkBackScannerRealFile/128_KiB-10 75 17797267 ns/op 8866 B/op 1 allocs/op
BenchmarkBackScannerRealFile/256_KiB
BenchmarkBackScannerRealFile/256_KiB-10 67 16732108 ns/op 19691 B/op 1 allocs/op
BenchmarkBackScannerRealFile/512_KiB
BenchmarkBackScannerRealFile/512_KiB-10 70 17121683 ns/op 37577 B/op 1 allocs/op
BenchmarkBackScannerRealFile/1_MiB
BenchmarkBackScannerRealFile/1_MiB-10 51 19615791 ns/op 102930 B/op 1 allocs/op
BenchmarkBackScannerRealFile/2_MiB
BenchmarkBackScannerRealFile/2_MiB-10 26 41744928 ns/op 77595287 B/op 57 allocs/op
BenchmarkBackScannerRealFile/4_MiB
BenchmarkBackScannerRealFile/4_MiB-10 22 48081521 ns/op 79692224 B/op 49 allocs/op
BenchmarkBackScanner
BenchmarkBackScanner/2_KiB
BenchmarkBackScanner/2_KiB-20 52 23254071 ns/op 67596663 B/op 26420 allocs/op
BenchmarkBackScanner/4_KiB
BenchmarkBackScanner/4_KiB-20 55 20961059 ns/op 62529378 B/op 13211 allocs/op
BenchmarkBackScanner/8_KiB
BenchmarkBackScanner/8_KiB-20 64 18242460 ns/op 62951141 B/op 6608 allocs/op
BenchmarkBackScanner/16_KiB
BenchmarkBackScanner/16_KiB-20 52 20162076 ns/op 62940256 B/op 3306 allocs/op
BenchmarkBackScanner/32_KiB
BenchmarkBackScanner/32_KiB-20 54 19247968 ns/op 67553645 B/op 1656 allocs/op
BenchmarkBackScanner/64_KiB
BenchmarkBackScanner/64_KiB-20 60 20909046 ns/op 64053342 B/op 827 allocs/op
BenchmarkBackScanner/128_KiB
BenchmarkBackScanner/128_KiB-20 68 17759890 ns/op 62201945 B/op 414 allocs/op
BenchmarkBackScanner/256_KiB
BenchmarkBackScanner/256_KiB-20 52 19531877 ns/op 61030487 B/op 208 allocs/op
BenchmarkBackScanner/512_KiB
BenchmarkBackScanner/512_KiB-20 54 19124656 ns/op 61030485 B/op 208 allocs/op
BenchmarkBackScanner/1_MiB
BenchmarkBackScanner/1_MiB-20 67 17078936 ns/op 61030495 B/op 208 allocs/op
BenchmarkBackScanner/2_MiB
BenchmarkBackScanner/2_MiB-20 66 18467421 ns/op 61030492 B/op 208 allocs/op
BenchmarkBackScanner/4_MiB
BenchmarkBackScanner/4_MiB-20 68 17214573 ns/op 61030486 B/op 208 allocs/op
BenchmarkBackScanner/8_MiB
BenchmarkBackScanner/8_MiB-20 57 18235229 ns/op 61030492 B/op 208 allocs/op
BenchmarkBackScanner/16_MiB
BenchmarkBackScanner/16_MiB-20 57 19343441 ns/op 61030499 B/op 208 allocs/op
*/

View File

@@ -1,7 +1,6 @@
package accesslog
import (
"net/http"
"time"
"github.com/yusing/godoxy/internal/serialization"
@@ -10,15 +9,16 @@ import (
type (
ConfigBase struct {
B int `json:"buffer_size"` // Deprecated: buffer size is adjusted dynamically
Path string `json:"path"`
Stdout bool `json:"stdout"`
Retention *Retention `json:"retention" aliases:"keep"`
RotateInterval time.Duration `json:"rotate_interval,omitempty" swaggertype:"primitive,integer"`
} // @name AccessLoggerConfigBase
}
ACLLoggerConfig struct {
ConfigBase
LogAllowed bool `json:"log_allowed"`
} // @name ACLLoggerConfig
}
RequestLoggerConfig struct {
ConfigBase
Format Format `json:"format" validate:"oneof=common combined json"`
@@ -32,7 +32,7 @@ type (
}
AnyConfig interface {
ToConfig() *Config
Writers() ([]File, error)
Writers() ([]Writer, error)
}
Format string
@@ -66,17 +66,17 @@ func (cfg *ConfigBase) Validate() gperr.Error {
}
// Writers returns a list of writers for the config.
func (cfg *ConfigBase) Writers() ([]File, error) {
writers := make([]File, 0, 2)
func (cfg *ConfigBase) Writers() ([]Writer, error) {
writers := make([]Writer, 0, 2)
if cfg.Path != "" {
f, err := OpenFile(cfg.Path)
io, err := NewFileIO(cfg.Path)
if err != nil {
return nil, err
}
writers = append(writers, f)
writers = append(writers, io)
}
if cfg.Stdout {
writers = append(writers, stdout)
writers = append(writers, NewStdout())
}
return writers, nil
}
@@ -95,16 +95,6 @@ func (cfg *RequestLoggerConfig) ToConfig() *Config {
}
}
func (cfg *Config) ShouldLogRequest(req *http.Request, res *http.Response) bool {
if cfg.req == nil {
return true
}
return cfg.req.Filters.StatusCodes.CheckKeep(req, res) &&
cfg.req.Filters.Method.CheckKeep(req, res) &&
cfg.req.Filters.Headers.CheckKeep(req, res) &&
cfg.req.Filters.CIDR.CheckKeep(req, res)
}
func DefaultRequestLoggerConfig() *RequestLoggerConfig {
return &RequestLoggerConfig{
ConfigBase: ConfigBase{

View File

@@ -1,73 +0,0 @@
package accesslog
import (
"net/http"
"os"
"github.com/rs/zerolog"
maxmind "github.com/yusing/godoxy/internal/maxmind/types"
)
type ConsoleLogger struct {
cfg *Config
formatter ConsoleFormatter
}
var stdoutLogger = func() *zerolog.Logger {
l := zerolog.New(zerolog.NewConsoleWriter(func(w *zerolog.ConsoleWriter) {
w.Out = os.Stdout
w.TimeFormat = zerolog.TimeFieldFormat
w.FieldsOrder = []string{
"uri", "protocol", "type", "size",
"useragent", "query", "headers", "cookies",
"error", "iso_code", "time_zone"}
})).With().Str("level", zerolog.InfoLevel.String()).Timestamp().Logger()
return &l
}()
// placeholder for console logger
var stdout File = &sharedFileHandle{}
func NewConsoleLogger(cfg *Config) AccessLogger {
if cfg == nil {
panic("accesslog: NewConsoleLogger called with nil config")
}
l := &ConsoleLogger{
cfg: cfg,
}
if cfg.req != nil {
l.formatter = ConsoleFormatter{cfg: &cfg.req.Fields}
}
return l
}
func (l *ConsoleLogger) Config() *Config {
return l.cfg
}
func (l *ConsoleLogger) LogRequest(req *http.Request, res *http.Response) {
if !l.cfg.ShouldLogRequest(req, res) {
return
}
l.formatter.LogRequestZeroLog(stdoutLogger, req, res)
}
func (l *ConsoleLogger) LogError(req *http.Request, err error) {
log := stdoutLogger.With().Err(err).Logger()
l.formatter.LogRequestZeroLog(&log, req, internalErrorResponse)
}
func (l *ConsoleLogger) LogACL(info *maxmind.IPInfo, blocked bool) {
ConsoleACLFormatter{}.LogACLZeroLog(stdoutLogger, info, blocked)
}
func (l *ConsoleLogger) Flush() {
// No-op for console logger
}
func (l *ConsoleLogger) Close() error {
// No-op for console logger
return nil
}

View File

@@ -11,8 +11,8 @@ import (
"github.com/yusing/goutils/synk"
)
type sharedFileHandle struct {
*os.File
type File struct {
f *os.File
// os.File.Name() may not equal to key of `openedFiles`.
// Store it for later delete from `openedFiles`.
@@ -22,18 +22,18 @@ type sharedFileHandle struct {
}
var (
openedFiles = make(map[string]*sharedFileHandle)
openedFiles = make(map[string]*File)
openedFilesMu sync.Mutex
)
// OpenFile creates a new file writer with cleaned path.
// NewFileIO creates a new file writer with cleaned path.
//
// If the file is already opened, it will be returned.
func OpenFile(path string) (File, error) {
func NewFileIO(path string) (Writer, error) {
openedFilesMu.Lock()
defer openedFilesMu.Unlock()
var file *sharedFileHandle
var file *File
var err error
// make it absolute path, so that we can use it as key of `openedFiles` and shared lock
@@ -53,38 +53,65 @@ func OpenFile(path string) (File, error) {
return nil, fmt.Errorf("access log open error: %w", err)
}
if _, err := f.Seek(0, io.SeekEnd); err != nil {
f.Close()
return nil, fmt.Errorf("access log seek error: %w", err)
}
file = &sharedFileHandle{File: f, path: path, refCount: synk.NewRefCounter()}
file = &File{f: f, path: path, refCount: synk.NewRefCounter()}
openedFiles[path] = file
log.Debug().Str("path", path).Msg("file opened")
go file.closeOnZero()
return file, nil
}
func (f *sharedFileHandle) Name() string {
// Name returns the absolute path of the file.
func (f *File) Name() string {
return f.path
}
func (f *sharedFileHandle) Close() error {
func (f *File) ShouldBeBuffered() bool {
return true
}
func (f *File) Write(p []byte) (n int, err error) {
return f.f.Write(p)
}
func (f *File) ReadAt(p []byte, off int64) (n int, err error) {
return f.f.ReadAt(p, off)
}
func (f *File) WriteAt(p []byte, off int64) (n int, err error) {
return f.f.WriteAt(p, off)
}
func (f *File) Seek(offset int64, whence int) (int64, error) {
return f.f.Seek(offset, whence)
}
func (f *File) Size() (int64, error) {
stat, err := f.f.Stat()
if err != nil {
return 0, err
}
return stat.Size(), nil
}
func (f *File) Truncate(size int64) error {
return f.f.Truncate(size)
}
func (f *File) Close() error {
f.refCount.Sub()
return nil
}
func (f *sharedFileHandle) closeOnZero() {
defer log.Debug().Str("path", f.path).Msg("file closed")
func (f *File) closeOnZero() {
defer log.Debug().
Str("path", f.path).
Msg("access log closed")
<-f.refCount.Zero()
openedFilesMu.Lock()
delete(openedFiles, f.path)
openedFilesMu.Unlock()
err := f.File.Close()
if err != nil {
log.Error().Str("path", f.path).Err(err).Msg("failed to close file")
}
f.f.Close()
}

View File

@@ -11,7 +11,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/yusing/goutils/task"
"golang.org/x/sync/errgroup"
)
func TestConcurrentFileLoggersShareSameAccessLogIO(t *testing.T) {
@@ -19,7 +18,7 @@ func TestConcurrentFileLoggersShareSameAccessLogIO(t *testing.T) {
cfg.Path = "test.log"
loggerCount := runtime.GOMAXPROCS(0)
accessLogIOs := make([]File, loggerCount)
accessLogIOs := make([]Writer, loggerCount)
// make test log file
file, err := os.Create(cfg.Path)
@@ -29,20 +28,16 @@ func TestConcurrentFileLoggersShareSameAccessLogIO(t *testing.T) {
assert.NoError(t, os.Remove(cfg.Path))
})
var errs errgroup.Group
var wg sync.WaitGroup
for i := range loggerCount {
errs.Go(func() error {
file, err := OpenFile(cfg.Path)
if err != nil {
return err
}
wg.Go(func() {
file, err := NewFileIO(cfg.Path)
assert.NoError(t, err)
accessLogIOs[i] = file
return nil
})
}
err = errs.Wait()
assert.NoError(t, err)
wg.Wait()
firstIO := accessLogIOs[0]
for _, io := range accessLogIOs {
@@ -63,7 +58,7 @@ func TestConcurrentAccessLoggerLogAndFlush(t *testing.T) {
loggers := make([]AccessLogger, loggerCount)
for i := range loggerCount {
loggers[i] = NewFileAccessLogger(parent, file, cfg)
loggers[i] = NewAccessLoggerWithIO(parent, file, cfg)
}
req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil)
@@ -92,7 +87,7 @@ func concurrentLog(logger AccessLogger, req *http.Request, resp *http.Response,
var wg sync.WaitGroup
for range n {
wg.Go(func() {
logger.LogRequest(req, resp)
logger.Log(req, resp)
if rand.IntN(2) == 0 {
logger.Flush()
}

View File

@@ -16,11 +16,9 @@ type (
CommonFormatter struct {
cfg *Fields
}
CombinedFormatter struct{ CommonFormatter }
JSONFormatter struct{ cfg *Fields }
ConsoleFormatter struct{ cfg *Fields }
ACLLogFormatter struct{}
ConsoleACLFormatter struct{}
CombinedFormatter struct{ CommonFormatter }
JSONFormatter struct{ CommonFormatter }
ACLLogFormatter struct{}
)
const LogTimeFormat = "02/Jan/2006:15:04:05 -0700"
@@ -32,26 +30,24 @@ func scheme(req *http.Request) string {
return "http"
}
func appendRequestURI(line *bytes.Buffer, req *http.Request, query iter.Seq2[string, []string]) {
func appendRequestURI(line []byte, req *http.Request, query iter.Seq2[string, []string]) []byte {
uri := req.URL.EscapedPath()
line.WriteString(uri)
line = append(line, uri...)
isFirst := true
for k, v := range query {
if isFirst {
line.WriteByte('?')
line = append(line, '?')
isFirst = false
} else {
line.WriteByte('&')
line = append(line, '&')
}
for i, val := range v {
if i > 0 {
line.WriteByte('&')
}
line.WriteString(k)
line.WriteByte('=')
line.WriteString(val)
line = append(line, k...)
line = append(line, '=')
for _, v := range v {
line = append(line, v...)
}
}
return line
}
func clientIP(req *http.Request) string {
@@ -62,51 +58,50 @@ func clientIP(req *http.Request) string {
return req.RemoteAddr
}
func (f CommonFormatter) AppendRequestLog(line *bytes.Buffer, req *http.Request, res *http.Response) {
func (f *CommonFormatter) AppendRequestLog(line []byte, req *http.Request, res *http.Response) []byte {
query := f.cfg.Query.IterQuery(req.URL.Query())
line.WriteString(req.Host)
line.WriteByte(' ')
line = append(line, req.Host...)
line = append(line, ' ')
line.WriteString(clientIP(req))
line.WriteString(" - - [")
line = append(line, clientIP(req)...)
line = append(line, " - - ["...)
line.WriteString(mockable.TimeNow().Format(LogTimeFormat))
line.WriteString("] \"")
line = mockable.TimeNow().AppendFormat(line, LogTimeFormat)
line = append(line, `] "`...)
line.WriteString(req.Method)
line.WriteByte(' ')
appendRequestURI(line, req, query)
line.WriteByte(' ')
line.WriteString(req.Proto)
line.WriteByte('"')
line.WriteByte(' ')
line = append(line, req.Method...)
line = append(line, ' ')
line = appendRequestURI(line, req, query)
line = append(line, ' ')
line = append(line, req.Proto...)
line = append(line, '"')
line = append(line, ' ')
line.WriteString(strconv.FormatInt(int64(res.StatusCode), 10))
line.WriteByte(' ')
line.WriteString(strconv.FormatInt(res.ContentLength, 10))
line = strconv.AppendInt(line, int64(res.StatusCode), 10)
line = append(line, ' ')
line = strconv.AppendInt(line, res.ContentLength, 10)
return line
}
func (f CombinedFormatter) AppendRequestLog(line *bytes.Buffer, req *http.Request, res *http.Response) {
f.CommonFormatter.AppendRequestLog(line, req, res)
line.WriteString(" \"")
line.WriteString(req.Referer())
line.WriteString("\" \"")
line.WriteString(req.UserAgent())
line.WriteByte('"')
func (f *CombinedFormatter) AppendRequestLog(line []byte, req *http.Request, res *http.Response) []byte {
line = f.CommonFormatter.AppendRequestLog(line, req, res)
line = append(line, " \""...)
line = append(line, req.Referer()...)
line = append(line, "\" \""...)
line = append(line, req.UserAgent()...)
line = append(line, '"')
return line
}
func (f JSONFormatter) AppendRequestLog(line *bytes.Buffer, req *http.Request, res *http.Response) {
logger := zerolog.New(line)
f.LogRequestZeroLog(&logger, req, res)
}
func (f JSONFormatter) LogRequestZeroLog(logger *zerolog.Logger, req *http.Request, res *http.Response) {
func (f *JSONFormatter) AppendRequestLog(line []byte, req *http.Request, res *http.Response) []byte {
query := f.cfg.Query.ZerologQuery(req.URL.Query())
headers := f.cfg.Headers.ZerologHeaders(req.Header)
cookies := f.cfg.Cookies.ZerologCookies(req.Cookies())
contentType := res.Header.Get("Content-Type")
writer := bytes.NewBuffer(line)
logger := zerolog.New(writer)
event := logger.Info().
Str("time", mockable.TimeNow().Format(LogTimeFormat)).
Str("ip", clientIP(req)).
@@ -124,33 +119,22 @@ func (f JSONFormatter) LogRequestZeroLog(logger *zerolog.Logger, req *http.Reque
Object("headers", headers).
Object("cookies", cookies)
if res.StatusCode >= 400 {
if res.Status != "" {
event.Str("error", res.Status)
} else {
event.Str("error", http.StatusText(res.StatusCode))
}
}
// NOTE: zerolog will append a newline to the buffer
event.Send()
return writer.Bytes()
}
func (f ConsoleFormatter) LogRequestZeroLog(logger *zerolog.Logger, req *http.Request, res *http.Response) {
contentType := res.Header.Get("Content-Type")
var reqURI bytes.Buffer
appendRequestURI(&reqURI, req, f.cfg.Query.IterQuery(req.URL.Query()))
event := logger.Info().
Bytes("uri", reqURI.Bytes()).
Str("protocol", req.Proto).
Str("type", contentType).
Int64("size", res.ContentLength).
Str("useragent", req.UserAgent())
// NOTE: zerolog will append a newline to the buffer
event.Msgf("[%d] %s %s://%s from %s", res.StatusCode, req.Method, scheme(req), req.Host, clientIP(req))
}
func (f ACLLogFormatter) AppendACLLog(line *bytes.Buffer, info *maxmind.IPInfo, blocked bool) {
logger := zerolog.New(line)
f.LogACLZeroLog(&logger, info, blocked)
}
func (f ACLLogFormatter) LogACLZeroLog(logger *zerolog.Logger, info *maxmind.IPInfo, blocked bool) {
func (f ACLLogFormatter) AppendACLLog(line []byte, info *maxmind.IPInfo, blocked bool) []byte {
writer := bytes.NewBuffer(line)
logger := zerolog.New(writer)
event := logger.Info().
Str("time", mockable.TimeNow().Format(LogTimeFormat)).
Str("ip", info.Str)
@@ -160,32 +144,10 @@ func (f ACLLogFormatter) LogACLZeroLog(logger *zerolog.Logger, info *maxmind.IPI
event.Str("action", "allow")
}
if info.City != nil {
if isoCode := info.City.Country.IsoCode; isoCode != "" {
event.Str("iso_code", isoCode)
}
if timeZone := info.City.Location.TimeZone; timeZone != "" {
event.Str("time_zone", timeZone)
}
event.Str("iso_code", info.City.Country.IsoCode)
event.Str("time_zone", info.City.Location.TimeZone)
}
// NOTE: zerolog will append a newline to the buffer
event.Send()
}
func (f ConsoleACLFormatter) LogACLZeroLog(logger *zerolog.Logger, info *maxmind.IPInfo, blocked bool) {
event := logger.Info()
if info.City != nil {
if isoCode := info.City.Country.IsoCode; isoCode != "" {
event.Str("iso_code", isoCode)
}
if timeZone := info.City.Location.TimeZone; timeZone != "" {
event.Str("time_zone", timeZone)
}
}
action := "accepted"
if blocked {
action = "denied"
}
// NOTE: zerolog will append a newline to the buffer
event.Msgf("request %s from %s", action, info.Str)
return writer.Bytes()
}

View File

@@ -13,7 +13,7 @@ type MockFile struct {
buffered bool
}
var _ File = (*MockFile)(nil)
var _ SupportRotate = (*MockFile)(nil)
func NewMockFile(buffered bool) *MockFile {
f, _ := afero.TempFile(afero.NewMemMapFs(), "", "")
@@ -52,9 +52,14 @@ func (m *MockFile) NumLines() int {
return count
}
func (m *MockFile) MustSize() int64 {
func (m *MockFile) Size() (int64, error) {
stat, _ := m.Stat()
return stat.Size()
return stat.Size(), nil
}
func (m *MockFile) MustSize() int64 {
size, _ := m.Size()
return size
}
func (m *MockFile) Close() error {

View File

@@ -15,21 +15,14 @@ type MultiAccessLogger struct {
//
// If there is only one writer, it will return a single AccessLogger.
// Otherwise, it will return a MultiAccessLogger that writes to all the writers.
func NewMultiAccessLogger(parent task.Parent, cfg AnyConfig, writers []File) AccessLogger {
func NewMultiAccessLogger(parent task.Parent, cfg AnyConfig, writers []Writer) AccessLogger {
if len(writers) == 1 {
if writers[0] == stdout {
return NewConsoleLogger(cfg.ToConfig())
}
return NewFileAccessLogger(parent, writers[0], cfg)
return NewAccessLoggerWithIO(parent, writers[0], cfg)
}
accessLoggers := make([]AccessLogger, len(writers))
for i, writer := range writers {
if writer == stdout {
accessLoggers[i] = NewConsoleLogger(cfg.ToConfig())
} else {
accessLoggers[i] = NewFileAccessLogger(parent, writer, cfg)
}
accessLoggers[i] = NewAccessLoggerWithIO(parent, writer, cfg)
}
return &MultiAccessLogger{accessLoggers}
}
@@ -38,9 +31,9 @@ func (m *MultiAccessLogger) Config() *Config {
return m.accessLoggers[0].Config()
}
func (m *MultiAccessLogger) LogRequest(req *http.Request, res *http.Response) {
func (m *MultiAccessLogger) Log(req *http.Request, res *http.Response) {
for _, accessLogger := range m.accessLoggers {
accessLogger.LogRequest(req, res)
accessLogger.Log(req, res)
}
}

View File

@@ -16,7 +16,7 @@ func TestNewMultiAccessLogger(t *testing.T) {
testTask := task.RootTask("test", false)
cfg := DefaultRequestLoggerConfig()
writers := []File{
writers := []Writer{
NewMockFile(true),
NewMockFile(true),
}
@@ -30,7 +30,7 @@ func TestMultiAccessLoggerConfig(t *testing.T) {
cfg := DefaultRequestLoggerConfig()
cfg.Format = FormatCommon
writers := []File{
writers := []Writer{
NewMockFile(true),
NewMockFile(true),
}
@@ -48,7 +48,7 @@ func TestMultiAccessLoggerLog(t *testing.T) {
writer1 := NewMockFile(true)
writer2 := NewMockFile(true)
writers := []File{writer1, writer2}
writers := []Writer{writer1, writer2}
logger := NewMultiAccessLogger(testTask, cfg, writers)
@@ -68,7 +68,7 @@ func TestMultiAccessLoggerLog(t *testing.T) {
ContentLength: 100,
}
logger.LogRequest(req, resp)
logger.Log(req, resp)
logger.Flush()
expect.Equal(t, writer1.NumLines(), 1)
@@ -81,7 +81,7 @@ func TestMultiAccessLoggerLogError(t *testing.T) {
writer1 := NewMockFile(true)
writer2 := NewMockFile(true)
writers := []File{writer1, writer2}
writers := []Writer{writer1, writer2}
logger := NewMultiAccessLogger(testTask, cfg, writers)
@@ -107,7 +107,7 @@ func TestMultiAccessLoggerLogACL(t *testing.T) {
writer1 := NewMockFile(true)
writer2 := NewMockFile(true)
writers := []File{writer1, writer2}
writers := []Writer{writer1, writer2}
logger := NewMultiAccessLogger(testTask, cfg, writers)
@@ -129,7 +129,7 @@ func TestMultiAccessLoggerFlush(t *testing.T) {
writer1 := NewMockFile(true)
writer2 := NewMockFile(true)
writers := []File{writer1, writer2}
writers := []Writer{writer1, writer2}
logger := NewMultiAccessLogger(testTask, cfg, writers)
@@ -143,7 +143,7 @@ func TestMultiAccessLoggerFlush(t *testing.T) {
StatusCode: http.StatusOK,
}
logger.LogRequest(req, resp)
logger.Log(req, resp)
logger.Flush()
expect.Equal(t, writer1.NumLines(), 1)
@@ -156,7 +156,7 @@ func TestMultiAccessLoggerClose(t *testing.T) {
writer1 := NewMockFile(true)
writer2 := NewMockFile(true)
writers := []File{writer1, writer2}
writers := []Writer{writer1, writer2}
logger := NewMultiAccessLogger(testTask, cfg, writers)
@@ -170,7 +170,7 @@ func TestMultiAccessLoggerMultipleLogs(t *testing.T) {
writer1 := NewMockFile(true)
writer2 := NewMockFile(true)
writers := []File{writer1, writer2}
writers := []Writer{writer1, writer2}
logger := NewMultiAccessLogger(testTask, cfg, writers)
@@ -185,7 +185,7 @@ func TestMultiAccessLoggerMultipleLogs(t *testing.T) {
resp := &http.Response{
StatusCode: http.StatusOK,
}
logger.LogRequest(req, resp)
logger.Log(req, resp)
}
logger.Flush()
@@ -199,7 +199,7 @@ func TestMultiAccessLoggerSingleWriter(t *testing.T) {
cfg := DefaultRequestLoggerConfig()
writer := NewMockFile(true)
writers := []File{writer}
writers := []Writer{writer}
logger := NewMultiAccessLogger(testTask, cfg, writers)
expect.NotNil(t, logger)
@@ -214,7 +214,7 @@ func TestMultiAccessLoggerSingleWriter(t *testing.T) {
StatusCode: http.StatusOK,
}
logger.LogRequest(req, resp)
logger.Log(req, resp)
logger.Flush()
expect.Equal(t, writer.NumLines(), 1)
@@ -226,7 +226,7 @@ func TestMultiAccessLoggerMixedOperations(t *testing.T) {
writer1 := NewMockFile(true)
writer2 := NewMockFile(true)
writers := []File{writer1, writer2}
writers := []Writer{writer1, writer2}
logger := NewMultiAccessLogger(testTask, cfg, writers)
@@ -241,7 +241,7 @@ func TestMultiAccessLoggerMixedOperations(t *testing.T) {
StatusCode: http.StatusOK,
}
logger.LogRequest(req, resp)
logger.Log(req, resp)
logger.Flush()
info := &maxmind.IPInfo{

View File

@@ -20,7 +20,7 @@ var (
)
// see back_scanner_test.go#L210 for benchmarks
var defaultChunkSize = 32 * kilobyte
var defaultChunkSize = 256 * kilobyte
// Syntax:
//

View File

@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
"io/fs"
"time"
"github.com/rs/zerolog"
@@ -18,7 +17,7 @@ type supportRotate interface {
io.ReaderAt
io.WriterAt
Truncate(size int64) error
Stat() (fs.FileInfo, error)
Size() (int64, error)
}
type RotateResult struct {
@@ -94,11 +93,10 @@ func rotateLogFileByPolicy(file supportRotate, config *Retention, result *Rotate
return false, nil // should not happen
}
stat, err := file.Stat()
fileSize, err := file.Size()
if err != nil {
return false, err
}
fileSize := stat.Size()
// nothing to rotate, return the nothing
if fileSize == 0 {
@@ -106,7 +104,6 @@ func rotateLogFileByPolicy(file supportRotate, config *Retention, result *Rotate
}
s := NewBackScanner(file, fileSize, defaultChunkSize)
defer s.Release()
result.OriginalSize = fileSize
// Store the line positions and sizes we want to keep
@@ -219,17 +216,16 @@ func fileContentMove(file supportRotate, srcPos, dstPos int64, size int) error {
//
// Invalid lines will not be detected and included in the result.
func rotateLogFileBySize(file supportRotate, config *Retention, result *RotateResult) (rotated bool, err error) {
stat, err := file.Stat()
filesize, err := file.Size()
if err != nil {
return false, err
}
fileSize := stat.Size()
result.OriginalSize = fileSize
result.OriginalSize = filesize
keepSize := int64(config.KeepSize)
if keepSize >= fileSize {
result.NumBytesKeep = fileSize
if keepSize >= filesize {
result.NumBytesKeep = filesize
return false, nil
}
result.NumBytesKeep = keepSize

View File

@@ -57,13 +57,13 @@ func TestRotateKeepLast(t *testing.T) {
t.Run(string(format)+" keep last", func(t *testing.T) {
file := NewMockFile(true)
mockable.MockTimeNow(testTime)
logger := NewFileAccessLogger(task.RootTask("test", false), file, &RequestLoggerConfig{
logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &RequestLoggerConfig{
Format: format,
})
expect.Nil(t, logger.Config().Retention)
for range 10 {
logger.LogRequest(req, resp)
logger.Log(req, resp)
}
logger.Flush()
@@ -87,14 +87,14 @@ func TestRotateKeepLast(t *testing.T) {
t.Run(string(format)+" keep days", func(t *testing.T) {
file := NewMockFile(true)
logger := NewFileAccessLogger(task.RootTask("test", false), file, &RequestLoggerConfig{
logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &RequestLoggerConfig{
Format: format,
})
expect.Nil(t, logger.Config().Retention)
nLines := 10
for i := range nLines {
mockable.MockTimeNow(testTime.AddDate(0, 0, -nLines+i+1))
logger.LogRequest(req, resp)
logger.Log(req, resp)
}
logger.Flush()
expect.Equal(t, file.NumLines(), nLines)
@@ -133,14 +133,14 @@ func TestRotateKeepFileSize(t *testing.T) {
for _, format := range ReqLoggerFormats {
t.Run(string(format)+" keep size no rotation", func(t *testing.T) {
file := NewMockFile(true)
logger := NewFileAccessLogger(task.RootTask("test", false), file, &RequestLoggerConfig{
logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &RequestLoggerConfig{
Format: format,
})
expect.Nil(t, logger.Config().Retention)
nLines := 10
for i := range nLines {
mockable.MockTimeNow(testTime.AddDate(0, 0, -nLines+i+1))
logger.LogRequest(req, resp)
logger.Log(req, resp)
}
logger.Flush()
expect.Equal(t, file.NumLines(), nLines)
@@ -165,14 +165,14 @@ func TestRotateKeepFileSize(t *testing.T) {
t.Run("keep size with rotation", func(t *testing.T) {
file := NewMockFile(true)
logger := NewFileAccessLogger(task.RootTask("test", false), file, &RequestLoggerConfig{
logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &RequestLoggerConfig{
Format: FormatJSON,
})
expect.Nil(t, logger.Config().Retention)
nLines := 100
for i := range nLines {
mockable.MockTimeNow(testTime.AddDate(0, 0, -nLines+i+1))
logger.LogRequest(req, resp)
logger.Log(req, resp)
}
logger.Flush()
expect.Equal(t, file.NumLines(), nLines)
@@ -199,14 +199,14 @@ func TestRotateSkipInvalidTime(t *testing.T) {
for _, format := range ReqLoggerFormats {
t.Run(string(format), func(t *testing.T) {
file := NewMockFile(true)
logger := NewFileAccessLogger(task.RootTask("test", false), file, &RequestLoggerConfig{
logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &RequestLoggerConfig{
Format: format,
})
expect.Nil(t, logger.Config().Retention)
nLines := 10
for i := range nLines {
mockable.MockTimeNow(testTime.AddDate(0, 0, -nLines+i+1))
logger.LogRequest(req, resp)
logger.Log(req, resp)
logger.Flush()
n, err := file.Write([]byte("invalid time\n"))
@@ -241,7 +241,7 @@ func BenchmarkRotate(b *testing.B) {
for _, retention := range tests {
b.Run(fmt.Sprintf("retention_%s", retention.String()), func(b *testing.B) {
file := NewMockFile(true)
logger := NewFileAccessLogger(task.RootTask("test", false), file, &RequestLoggerConfig{
logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &RequestLoggerConfig{
ConfigBase: ConfigBase{
Retention: retention,
},
@@ -249,7 +249,7 @@ func BenchmarkRotate(b *testing.B) {
})
for i := range 100 {
mockable.MockTimeNow(testTime.AddDate(0, 0, -100+i+1))
logger.LogRequest(req, resp)
logger.Log(req, resp)
}
logger.Flush()
content := file.Content()
@@ -275,7 +275,7 @@ func BenchmarkRotateWithInvalidTime(b *testing.B) {
for _, retention := range tests {
b.Run(fmt.Sprintf("retention_%s", retention.String()), func(b *testing.B) {
file := NewMockFile(true)
logger := NewFileAccessLogger(task.RootTask("test", false), file, &RequestLoggerConfig{
logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &RequestLoggerConfig{
ConfigBase: ConfigBase{
Retention: retention,
},
@@ -283,7 +283,7 @@ func BenchmarkRotateWithInvalidTime(b *testing.B) {
})
for i := range 10000 {
mockable.MockTimeNow(testTime.AddDate(0, 0, -10000+i+1))
logger.LogRequest(req, resp)
logger.Log(req, resp)
if i%10 == 0 {
_, _ = file.Write([]byte("invalid time\n"))
}

View File

@@ -0,0 +1,32 @@
package accesslog
import (
"os"
"github.com/rs/zerolog"
"github.com/yusing/godoxy/internal/logging"
)
// Stdout is a Writer that sends access log lines to standard output
// through a zerolog logger fixed at Info level.
type Stdout struct {
	logger zerolog.Logger
}

// NewStdout returns a Writer backed by os.Stdout.
func NewStdout() Writer {
	return &Stdout{logger: logging.NewLoggerWithFixedLevel(zerolog.InfoLevel, os.Stdout)}
}

// Name identifies this writer as "stdout".
func (s Stdout) Name() string {
	return "stdout"
}

// ShouldBeBuffered reports whether writes should go through a buffer;
// always false for this writer.
func (s Stdout) ShouldBeBuffered() bool {
	return false
}

// Write forwards p to the underlying zerolog logger.
func (s Stdout) Write(p []byte) (n int, err error) {
	return s.logger.Write(p)
}

// Close is a no-op; os.Stdout is not owned by this writer.
func (s Stdout) Close() error {
	return nil
}

View File

@@ -1,55 +0,0 @@
package accesslog
import (
"bytes"
"net/http"
"github.com/rs/zerolog"
maxmind "github.com/yusing/godoxy/internal/maxmind/types"
"github.com/yusing/goutils/task"
)
type (
	// AccessLogger records request, error, and ACL events and manages the
	// lifecycle of the underlying log destination.
	AccessLogger interface {
		// LogRequest records one request/response pair.
		LogRequest(req *http.Request, res *http.Response)
		// LogError records an error that occurred while handling req.
		LogError(req *http.Request, err error)
		// LogACL records an ACL decision for the given IP info.
		LogACL(info *maxmind.IPInfo, blocked bool)
		// Config returns the logger's configuration.
		Config() *Config
		// Flush writes any buffered log lines to the destination.
		Flush()
		// Close releases the logger and its destination.
		Close() error
	}
	// AccessLogRotater is implemented by loggers whose destination
	// supports log rotation.
	AccessLogRotater interface {
		// Rotate applies the retention policy, reporting whether anything
		// was rotated and recording details in result.
		Rotate(result *RotateResult) (rotated bool, err error)
	}
	// RequestFormatter renders request logs into a byte buffer.
	RequestFormatter interface {
		// AppendRequestLog appends a formatted log line to line; it may or
		// may not include a trailing newline.
		AppendRequestLog(line *bytes.Buffer, req *http.Request, res *http.Response)
	}
	// RequestFormatterZeroLog renders request logs via a zerolog logger.
	RequestFormatterZeroLog interface {
		// LogRequestZeroLog logs a request log to the logger.
		LogRequestZeroLog(logger *zerolog.Logger, req *http.Request, res *http.Response)
	}
	// ACLFormatter renders ACL decision logs.
	ACLFormatter interface {
		// AppendACLLog appends a formatted log line to line; it may or may
		// not include a trailing newline.
		AppendACLLog(line *bytes.Buffer, info *maxmind.IPInfo, blocked bool)
		// LogACLZeroLog logs an ACL log to the logger.
		LogACLZeroLog(logger *zerolog.Logger, info *maxmind.IPInfo, blocked bool)
	}
)
// NewAccessLogger builds an AccessLogger from cfg by resolving the
// configured writers and wrapping them in a multi-writer logger.
// It returns an error when the writers cannot be created.
func NewAccessLogger(parent task.Parent, cfg AnyConfig) (AccessLogger, error) {
	ws, err := cfg.Writers()
	if err != nil {
		return nil, err
	}
	logger := NewMultiAccessLogger(parent, cfg, ws)
	return logger, nil
}
// NewMockAccessLogger returns an AccessLogger that writes to an
// in-memory mock file — presumably intended for tests; confirm with callers.
func NewMockAccessLogger(parent task.Parent, cfg *RequestLoggerConfig) AccessLogger {
	return NewFileAccessLogger(parent, NewMockFile(true), cfg)
}

View File

@@ -1,203 +0,0 @@
package middleware
import (
"bytes"
"context"
"fmt"
"io"
"net"
"net/http"
"strconv"
"strings"
"time"
"github.com/yusing/godoxy/internal/route/routes"
httputils "github.com/yusing/goutils/http"
ioutils "github.com/yusing/goutils/io"
)
type (
	// crowdsecMiddleware forwards each incoming request to a CrowdSec
	// AppSec component and blocks it when AppSec rejects it.
	crowdsecMiddleware struct {
		CrowdsecMiddlewareOpts
	}
	// CrowdsecMiddlewareOpts configures how the CrowdSec AppSec endpoint
	// is reached and how blocked requests are handled.
	CrowdsecMiddlewareOpts struct {
		Route      string        `json:"route" validate:"required"`   // route name (alias) or IP address
		Port       int           `json:"port"`                        // port number (optional if using route name)
		APIKey     string        `json:"api_key" validate:"required"` // API key for CrowdSec AppSec (mandatory)
		Endpoint   string        `json:"endpoint"`                    // default: "/"
		LogBlocked bool          `json:"log_blocked"`                 // default: false
		Timeout    time.Duration `json:"timeout"`                     // default: 5 seconds

		httpClient *http.Client // built in finalize; configured to never follow redirects
	}
)
var Crowdsec = NewMiddleware[crowdsecMiddleware]()
// setup resets the middleware options to their documented defaults:
// AppSec port 7422, endpoint "/", a 5-second timeout, and logging of
// blocked requests disabled. Fields left out of the literal take their
// zero values, matching the previous explicit "" / false assignments.
func (m *crowdsecMiddleware) setup() {
	defaults := CrowdsecMiddlewareOpts{
		Port:     7422, // default port for CrowdSec AppSec
		Endpoint: "/",
		Timeout:  5 * time.Second,
	}
	m.CrowdsecMiddlewareOpts = defaults
}
// finalize validates the options and prepares the HTTP client used to
// query the CrowdSec AppSec endpoint. The endpoint must be an absolute
// path; a zero timeout falls back to 5 seconds.
func (m *crowdsecMiddleware) finalize() error {
	if len(m.Endpoint) == 0 || m.Endpoint[0] != '/' {
		return fmt.Errorf("endpoint must start with /")
	}
	timeout := m.Timeout
	if timeout == 0 {
		timeout = 5 * time.Second
		m.Timeout = timeout
	}
	client := http.Client{
		Timeout: timeout,
		// never follow redirects; surface the AppSec response as-is
		CheckRedirect: func(*http.Request, []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	m.httpClient = &client
	return nil
}
// before implements RequestModifier.
//
// It mirrors the incoming request's metadata (and body, when one is
// present) to the configured CrowdSec AppSec endpoint and lets the
// request proceed only on a 200 answer. A 403 from AppSec is relayed to
// the client as 403; any transport failure or other status yields 500.
// Returns true only when the request may continue down the chain.
func (m *crowdsecMiddleware) before(w http.ResponseWriter, r *http.Request) (proceed bool) {
	// Build CrowdSec URL
	crowdsecURL, err := m.buildCrowdSecURL()
	if err != nil {
		Crowdsec.LogError(r).Err(err).Msg("failed to build CrowdSec URL")
		w.WriteHeader(http.StatusInternalServerError)
		return false
	}
	// Determine HTTP method: GET for requests without body, POST for requests with body
	method := http.MethodGet
	var body io.Reader
	if r.Body != nil && r.Body != http.NoBody {
		method = http.MethodPost
		// Read the body once, then restore r.Body from the same bytes so
		// the proxied request still carries the payload.
		bodyBytes, release, err := httputils.ReadAllRequestBody(r)
		if err != nil {
			Crowdsec.LogError(r).Err(err).Msg("failed to read request body")
			w.WriteHeader(http.StatusInternalServerError)
			return false
		}
		// release the pooled buffer only once the restored body is closed
		r.Body = ioutils.NewHookReadCloser(io.NopCloser(bytes.NewReader(bodyBytes)), func() {
			release(bodyBytes)
		})
		body = bytes.NewReader(bodyBytes)
	}
	ctx, cancel := context.WithTimeout(r.Context(), m.Timeout)
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, method, crowdsecURL, body)
	if err != nil {
		Crowdsec.LogError(r).Err(err).Msg("failed to create CrowdSec request")
		w.WriteHeader(http.StatusInternalServerError)
		return false
	}
	// Get remote IP; if RemoteAddr carries no port, use it verbatim
	remoteIP, _, err := net.SplitHostPort(r.RemoteAddr)
	if err != nil {
		remoteIP = r.RemoteAddr
	}
	// Get HTTP version in integer form (10, 11, 20, etc.)
	httpVersion := m.getHTTPVersion(r)
	// Copy original headers
	req.Header = r.Header.Clone()
	// Overwrite CrowdSec required headers to prevent spoofing
	req.Header.Set("X-Crowdsec-Appsec-Ip", remoteIP)
	req.Header.Set("X-Crowdsec-Appsec-Uri", r.URL.RequestURI())
	req.Header.Set("X-Crowdsec-Appsec-Host", r.Host)
	req.Header.Set("X-Crowdsec-Appsec-Verb", r.Method)
	req.Header.Set("X-Crowdsec-Appsec-Api-Key", m.APIKey)
	req.Header.Set("X-Crowdsec-Appsec-User-Agent", r.UserAgent())
	req.Header.Set("X-Crowdsec-Appsec-Http-Version", httpVersion)
	// Make request to CrowdSec
	resp, err := m.httpClient.Do(req)
	if err != nil {
		Crowdsec.LogError(r).Err(err).Msg("failed to connect to CrowdSec server")
		w.WriteHeader(http.StatusInternalServerError)
		return false
	}
	defer resp.Body.Close()
	// Handle response codes
	switch resp.StatusCode {
	case http.StatusOK:
		// Request is allowed
		return true
	case http.StatusForbidden:
		// Request is blocked by CrowdSec
		if m.LogBlocked {
			Crowdsec.LogWarn(r).
				Str("ip", remoteIP).
				Msg("request blocked by CrowdSec")
		}
		w.WriteHeader(http.StatusForbidden)
		return false
	case http.StatusInternalServerError:
		// CrowdSec server error; include its response body in the log
		// when it can be read
		bodyBytes, release, err := httputils.ReadAllBody(resp)
		if err == nil {
			defer release(bodyBytes)
			Crowdsec.LogError(r).
				Str("crowdsec_response", string(bodyBytes)).
				Msg("CrowdSec server error")
		}
		w.WriteHeader(http.StatusInternalServerError)
		return false
	default:
		// Unexpected response code
		Crowdsec.LogWarn(r).
			Int("status_code", resp.StatusCode).
			Msg("unexpected response from CrowdSec server")
		w.WriteHeader(http.StatusInternalServerError)
		return false
	}
}
// buildCrowdSecURL constructs the CrowdSec server URL. Route is tried as
// a known route name first; otherwise it is treated as an IP address, in
// which case Port must be set. Returns an error when neither applies.
func (m *crowdsecMiddleware) buildCrowdSecURL() (string, error) {
	if m.Route == "" {
		return "", fmt.Errorf("route or IP address must be specified")
	}
	// Known route name: point at its target URL with our endpoint path.
	if route, ok := routes.HTTP.Get(m.Route); ok {
		u := *route.TargetURL()
		u.Path = m.Endpoint
		return u.String(), nil
	}
	// Fall back to interpreting Route as an IP address.
	if m.Port == 0 {
		return "", fmt.Errorf("port must be specified when using IP address")
	}
	hostPort := net.JoinHostPort(m.Route, strconv.Itoa(m.Port))
	return "http://" + hostPort + m.Endpoint, nil
}
// getHTTPVersion returns the request's protocol version as a two-digit
// string: "10", "11", "20", "30", or major*10+minor for anything else.
func (m *crowdsecMiddleware) getHTTPVersion(r *http.Request) string {
	major, minor := r.ProtoMajor, r.ProtoMinor
	if major == 1 {
		if minor == 0 {
			return "10"
		}
		if minor == 1 {
			return "11"
		}
	} else if major == 2 {
		return "20"
	} else if major == 3 {
		return "30"
	}
	return strconv.Itoa(major*10 + minor)
}

View File

@@ -19,7 +19,6 @@ var allMiddlewares = map[string]*Middleware{
"oidc": OIDC,
"forwardauth": ForwardAuth,
"crowdsec": Crowdsec,
"request": ModifyRequest,
"modifyrequest": ModifyRequest,

View File

@@ -143,13 +143,8 @@ func (s *FileServer) RootPath() string {
// ServeHTTP implements http.Handler.
func (s *FileServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if s.accessLogger != nil {
rec := accesslog.GetResponseRecorder(w)
w = rec
defer func() {
s.accessLogger.LogRequest(req, rec.Response())
accesslog.PutResponseRecorder(rec)
}()
}
s.handler.ServeHTTP(w, req)
if s.accessLogger != nil {
s.accessLogger.Log(req, req.Response)
}
}

View File

@@ -74,7 +74,7 @@ func (p *DockerProvider) loadRoutesImpl() (route.Routes, gperr.Error) {
container := docker.FromDocker(&c, p.dockerCfg)
if container.Errors != nil {
errs.Add(gperr.PrependSubject(container.ContainerName, container.Errors))
errs.Add(container.Errors)
continue
}

View File

@@ -277,11 +277,11 @@ func (r *Route) validate() gperr.Error {
if r.Port.Listening != 0 {
errs.Addf("unexpected listening port for %s scheme", r.Scheme)
}
r.ProxyURL = gperr.Collect(&errs, nettypes.ParseURL, fmt.Sprintf("%s://%s", r.Scheme, net.JoinHostPort(r.Host, strconv.Itoa(r.Port.Proxy))))
r.ProxyURL = gperr.Collect(&errs, nettypes.ParseURL, fmt.Sprintf("%s://%s:%d", r.Scheme, r.Host, r.Port.Proxy))
case route.SchemeTCP, route.SchemeUDP:
if r.ShouldExclude() {
// should exclude, we don't care the scheme here.
r.ProxyURL = gperr.Collect(&errs, nettypes.ParseURL, fmt.Sprintf("%s://%s", r.Scheme, net.JoinHostPort(r.Host, strconv.Itoa(r.Port.Proxy))))
r.ProxyURL = gperr.Collect(&errs, nettypes.ParseURL, fmt.Sprintf("%s://%s:%d", r.Scheme, r.Host, r.Port.Proxy))
} else {
if r.Bind == "" {
r.Bind = "0.0.0.0"
@@ -306,8 +306,8 @@ func (r *Route) validate() gperr.Error {
lScheme := toNetwork(bindIP, r.Scheme)
rScheme := toNetwork(remoteIP, r.Scheme)
r.LisURL = gperr.Collect(&errs, nettypes.ParseURL, fmt.Sprintf("%s://%s", lScheme, net.JoinHostPort(r.Bind, strconv.Itoa(r.Port.Listening))))
r.ProxyURL = gperr.Collect(&errs, nettypes.ParseURL, fmt.Sprintf("%s://%s", rScheme, net.JoinHostPort(r.Host, strconv.Itoa(r.Port.Proxy))))
r.LisURL = gperr.Collect(&errs, nettypes.ParseURL, fmt.Sprintf("%s://%s:%d", lScheme, r.Bind, r.Port.Listening))
r.ProxyURL = gperr.Collect(&errs, nettypes.ParseURL, fmt.Sprintf("%s://%s:%d", rScheme, r.Host, r.Port.Proxy))
}
}

View File

@@ -50,7 +50,7 @@ func openFile(path string) (io.WriteCloser, gperr.Error) {
return noopWriteCloser{buf}, nil
}
f, err := accesslog.OpenFile(path)
f, err := accesslog.NewFileIO(path)
if err != nil {
return nil, ErrInvalidArguments.With(err)
}

View File

@@ -6,7 +6,6 @@ import (
"github.com/pires/go-proxyproto"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/yusing/godoxy/internal/acl"
"github.com/yusing/godoxy/internal/agentpool"
"github.com/yusing/godoxy/internal/entrypoint"
@@ -51,14 +50,12 @@ func (s *TCPTCPStream) ListenAndServe(ctx context.Context, preDial, onRead netty
return
}
if acl, ok := ctx.Value(acl.ContextKey{}).(*acl.Config); ok {
log.Debug().Str("listener", s.listener.Addr().String()).Msg("wrapping listener with ACL")
s.listener = acl.WrapTCP(s.listener)
}
if proxyProto := entrypoint.ActiveConfig.Load().SupportProxyProtocol; proxyProto {
s.listener = &proxyproto.Listener{Listener: s.listener}
}
if acl := acl.ActiveConfig.Load(); acl != nil {
s.listener = acl.WrapTCP(s.listener)
}
s.preDial = preDial
s.onRead = onRead

View File

@@ -10,7 +10,6 @@ import (
"time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/yusing/godoxy/internal/acl"
"github.com/yusing/godoxy/internal/agentpool"
nettypes "github.com/yusing/godoxy/internal/net/types"
@@ -82,8 +81,7 @@ func (s *UDPUDPStream) ListenAndServe(ctx context.Context, preDial, onRead netty
return
}
s.listener = l
if acl, ok := ctx.Value(acl.ContextKey{}).(*acl.Config); ok {
log.Debug().Str("listener", s.listener.LocalAddr().String()).Msg("wrapping listener with ACL")
if acl := acl.ActiveConfig.Load(); acl != nil {
s.listener = acl.WrapUDP(s.listener)
}
s.preDial = preDial

View File

@@ -168,13 +168,9 @@ get_timezone() {
setenv() {
local key="$1"
local value="$2"
if [[ $(uname -s) == "Darwin" ]]; then
sed -i '' "/^# *${key}=/s/^# *//" "$DOT_ENV_PATH"
sed -i '' "s|${key}=.*|${key}=\"${value}\"|" "$DOT_ENV_PATH"
else
sed -i "/^# *${key}=/s/^# *//" "$DOT_ENV_PATH"
sed -i "s|${key}=.*|${key}=\"${value}\"|" "$DOT_ENV_PATH"
fi
# uncomment line if it is commented
sed -i "/^# *${key}=/s/^# *//" "$DOT_ENV_PATH"
sed -i "s|${key}=.*|${key}=\"${value}\"|" "$DOT_ENV_PATH"
echo "${key}=${value}"
}