mirror of
https://github.com/yusing/godoxy.git
synced 2026-04-17 22:19:42 +02:00
refactor: move accesslog to logging/accesslog
This commit is contained in:
236
internal/logging/accesslog/access_logger.go
Normal file
236
internal/logging/accesslog/access_logger.go
Normal file
@@ -0,0 +1,236 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/logging"
|
||||
"github.com/yusing/go-proxy/internal/task"
|
||||
"github.com/yusing/go-proxy/internal/utils/synk"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
type (
	// AccessLogger writes formatted HTTP access-log lines to an
	// AccessLogIO. Writes go through an internal bufio.Writer and a
	// background task flushes and (when supported) rotates periodically.
	AccessLogger struct {
		task *task.Task
		cfg  *Config
		io   AccessLogIO

		buffered      *bufio.Writer
		supportRotate bool // set when io also implements supportRotate

		lineBufPool *synk.BytesPool // buffer pool for formatting a single log line

		// limits error reporting; exceeding the limit finishes the task
		errRateLimiter *rate.Limiter

		logger zerolog.Logger

		// embedded formatter selected from cfg.Format
		Formatter
	}

	// AccessLogIO is the destination of an AccessLogger: a writer that can
	// be locked (to serialize log writes with rotation) and identified by name.
	AccessLogIO interface {
		io.Writer
		sync.Locker
		Name() string // file name or path
	}

	// Formatter renders one request/response pair into a log line.
	Formatter interface {
		// AppendLog appends a log line to line with or without a trailing newline
		AppendLog(line []byte, req *http.Request, res *http.Response) []byte
	}
)

// MinBufferSize is the smallest write-buffer size accepted by
// NewAccessLoggerWithIO; smaller configured values are clamped up to it.
const MinBufferSize = 4 * kilobyte

const (
	flushInterval  = 30 * time.Second // how often the buffered writer is flushed
	rotateInterval = time.Hour        // how often rotation is attempted
)
|
||||
|
||||
func NewAccessLogger(parent task.Parent, cfg *Config) (*AccessLogger, error) {
|
||||
var ios []AccessLogIO
|
||||
|
||||
if cfg.Stdout {
|
||||
ios = append(ios, stdoutIO)
|
||||
}
|
||||
|
||||
if cfg.Path != "" {
|
||||
io, err := newFileIO(cfg.Path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ios = append(ios, io)
|
||||
}
|
||||
|
||||
if len(ios) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return NewAccessLoggerWithIO(parent, NewMultiWriter(ios...), cfg), nil
|
||||
}
|
||||
|
||||
func NewMockAccessLogger(parent task.Parent, cfg *Config) *AccessLogger {
|
||||
return NewAccessLoggerWithIO(parent, NewMockFile(), cfg)
|
||||
}
|
||||
|
||||
func NewAccessLoggerWithIO(parent task.Parent, io AccessLogIO, cfg *Config) *AccessLogger {
|
||||
if cfg.BufferSize == 0 {
|
||||
cfg.BufferSize = DefaultBufferSize
|
||||
}
|
||||
if cfg.BufferSize < MinBufferSize {
|
||||
cfg.BufferSize = MinBufferSize
|
||||
}
|
||||
l := &AccessLogger{
|
||||
task: parent.Subtask("accesslog."+io.Name(), true),
|
||||
cfg: cfg,
|
||||
io: io,
|
||||
buffered: bufio.NewWriterSize(io, cfg.BufferSize),
|
||||
lineBufPool: synk.NewBytesPool(1024, synk.DefaultMaxBytes),
|
||||
errRateLimiter: rate.NewLimiter(rate.Every(time.Second), 1),
|
||||
logger: logging.With().Str("file", io.Name()).Logger(),
|
||||
}
|
||||
|
||||
fmt := CommonFormatter{cfg: &l.cfg.Fields}
|
||||
switch l.cfg.Format {
|
||||
case FormatCommon:
|
||||
l.Formatter = &fmt
|
||||
case FormatCombined:
|
||||
l.Formatter = &CombinedFormatter{fmt}
|
||||
case FormatJSON:
|
||||
l.Formatter = &JSONFormatter{fmt}
|
||||
default: // should not happen, validation has done by validate tags
|
||||
panic("invalid access log format")
|
||||
}
|
||||
|
||||
if _, ok := l.io.(supportRotate); ok {
|
||||
l.supportRotate = true
|
||||
}
|
||||
|
||||
go l.start()
|
||||
return l
|
||||
}
|
||||
|
||||
func (l *AccessLogger) Config() *Config {
|
||||
return l.cfg
|
||||
}
|
||||
|
||||
func (l *AccessLogger) shouldLog(req *http.Request, res *http.Response) bool {
|
||||
if !l.cfg.Filters.StatusCodes.CheckKeep(req, res) ||
|
||||
!l.cfg.Filters.Method.CheckKeep(req, res) ||
|
||||
!l.cfg.Filters.Headers.CheckKeep(req, res) ||
|
||||
!l.cfg.Filters.CIDR.CheckKeep(req, res) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (l *AccessLogger) Log(req *http.Request, res *http.Response) {
|
||||
if !l.shouldLog(req, res) {
|
||||
return
|
||||
}
|
||||
|
||||
line := l.lineBufPool.Get()
|
||||
defer l.lineBufPool.Put(line)
|
||||
line = l.Formatter.AppendLog(line, req, res)
|
||||
if line[len(line)-1] != '\n' {
|
||||
line = append(line, '\n')
|
||||
}
|
||||
l.lockWrite(line)
|
||||
}
|
||||
|
||||
func (l *AccessLogger) LogError(req *http.Request, err error) {
|
||||
l.Log(req, &http.Response{StatusCode: http.StatusInternalServerError, Status: err.Error()})
|
||||
}
|
||||
|
||||
func (l *AccessLogger) ShouldRotate() bool {
|
||||
return l.cfg.Retention.IsValid() && l.supportRotate
|
||||
}
|
||||
|
||||
func (l *AccessLogger) Rotate() (result *RotateResult, err error) {
|
||||
if !l.ShouldRotate() {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
l.io.Lock()
|
||||
defer l.io.Unlock()
|
||||
|
||||
return rotateLogFile(l.io.(supportRotate), l.cfg.Retention)
|
||||
}
|
||||
|
||||
func (l *AccessLogger) handleErr(err error) {
|
||||
if l.errRateLimiter.Allow() {
|
||||
gperr.LogError("failed to write access log", err)
|
||||
} else {
|
||||
gperr.LogError("too many errors, stopping access log", err)
|
||||
l.task.Finish(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *AccessLogger) start() {
|
||||
defer func() {
|
||||
defer l.task.Finish(nil)
|
||||
defer l.close()
|
||||
if err := l.Flush(); err != nil {
|
||||
l.handleErr(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// flushes the buffer every 30 seconds
|
||||
flushTicker := time.NewTicker(30 * time.Second)
|
||||
defer flushTicker.Stop()
|
||||
|
||||
rotateTicker := time.NewTicker(rotateInterval)
|
||||
defer rotateTicker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-l.task.Context().Done():
|
||||
return
|
||||
case <-flushTicker.C:
|
||||
if err := l.Flush(); err != nil {
|
||||
l.handleErr(err)
|
||||
}
|
||||
case <-rotateTicker.C:
|
||||
if !l.ShouldRotate() {
|
||||
continue
|
||||
}
|
||||
l.logger.Info().Msg("rotating access log file")
|
||||
if res, err := l.Rotate(); err != nil {
|
||||
l.handleErr(err)
|
||||
} else if res != nil {
|
||||
res.Print(&l.logger)
|
||||
} else {
|
||||
l.logger.Info().Msg("no rotation needed")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *AccessLogger) Flush() error {
|
||||
l.io.Lock()
|
||||
defer l.io.Unlock()
|
||||
return l.buffered.Flush()
|
||||
}
|
||||
|
||||
func (l *AccessLogger) close() {
|
||||
if r, ok := l.io.(io.Closer); ok {
|
||||
l.io.Lock()
|
||||
defer l.io.Unlock()
|
||||
r.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (l *AccessLogger) lockWrite(data []byte) {
|
||||
l.io.Lock() // prevent concurrent write, i.e. log rotation, other access loggers
|
||||
_, err := l.buffered.Write(data)
|
||||
l.io.Unlock()
|
||||
if err != nil {
|
||||
l.handleErr(err)
|
||||
} else {
|
||||
logging.Trace().Msg("access log flushed to " + l.io.Name())
|
||||
}
|
||||
}
|
||||
167
internal/logging/accesslog/access_logger_test.go
Normal file
167
internal/logging/accesslog/access_logger_test.go
Normal file
@@ -0,0 +1,167 @@
|
||||
package accesslog_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/yusing/go-proxy/internal/logging/accesslog"
|
||||
"github.com/yusing/go-proxy/internal/task"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
expect "github.com/yusing/go-proxy/internal/utils/testing"
|
||||
)
|
||||
|
||||
// Shared fixtures: a fixed request/response pair whose fields match the
// constants below, used by every formatter test in this file.
const (
	remote        = "192.168.1.1"
	host          = "example.com"
	uri           = "/?bar=baz&foo=bar"
	uriRedacted   = "/?bar=" + RedactedValue + "&foo=" + RedactedValue
	referer       = "https://www.google.com/"
	proto         = "HTTP/1.1"
	ua            = "Go-http-client/1.1"
	status        = http.StatusNotFound
	contentLength = 100
	method        = http.MethodGet
)

var (
	testTask = task.RootTask("test", false)
	testURL  = expect.Must(url.Parse("http://" + host + uri))
	req      = &http.Request{
		RemoteAddr: remote,
		Method:     method,
		Proto:      proto,
		Host:       testURL.Host,
		URL:        testURL,
		Header: http.Header{
			"User-Agent": []string{ua},
			"Referer":    []string{referer},
			// two cookie pairs so redact/keep/drop field modes are observable
			"Cookie": []string{
				"foo=bar",
				"bar=baz",
			},
		},
	}
	resp = &http.Response{
		StatusCode:    status,
		ContentLength: contentLength,
		Header:        http.Header{"Content-Type": []string{"text/plain"}},
	}
)
|
||||
|
||||
func fmtLog(cfg *Config) (ts string, line string) {
|
||||
buf := make([]byte, 0, 1024)
|
||||
|
||||
t := time.Now()
|
||||
logger := NewMockAccessLogger(testTask, cfg)
|
||||
utils.MockTimeNow(t)
|
||||
buf = logger.AppendLog(buf, req, resp)
|
||||
return t.Format(LogTimeFormat), string(buf)
|
||||
}
|
||||
|
||||
func TestAccessLoggerCommon(t *testing.T) {
|
||||
config := DefaultConfig()
|
||||
config.Format = FormatCommon
|
||||
ts, log := fmtLog(config)
|
||||
expect.Equal(t, log,
|
||||
fmt.Sprintf("%s %s - - [%s] \"%s %s %s\" %d %d",
|
||||
host, remote, ts, method, uri, proto, status, contentLength,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
func TestAccessLoggerCombined(t *testing.T) {
|
||||
config := DefaultConfig()
|
||||
config.Format = FormatCombined
|
||||
ts, log := fmtLog(config)
|
||||
expect.Equal(t, log,
|
||||
fmt.Sprintf("%s %s - - [%s] \"%s %s %s\" %d %d \"%s\" \"%s\"",
|
||||
host, remote, ts, method, uri, proto, status, contentLength, referer, ua,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
func TestAccessLoggerRedactQuery(t *testing.T) {
|
||||
config := DefaultConfig()
|
||||
config.Format = FormatCommon
|
||||
config.Fields.Query.Default = FieldModeRedact
|
||||
ts, log := fmtLog(config)
|
||||
expect.Equal(t, log,
|
||||
fmt.Sprintf("%s %s - - [%s] \"%s %s %s\" %d %d",
|
||||
host, remote, ts, method, uriRedacted, proto, status, contentLength,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
// JSONLogEntry mirrors the JSON formatter's output schema so tests can
// round-trip a formatted line back into structured fields.
type JSONLogEntry struct {
	Time        string              `json:"time"`
	IP          string              `json:"ip"`
	Method      string              `json:"method"`
	Scheme      string              `json:"scheme"`
	Host        string              `json:"host"`
	Path        string              `json:"path"`
	Protocol    string              `json:"protocol"`
	Status      int                 `json:"status"`
	Error       string              `json:"error,omitempty"`
	ContentType string              `json:"type"`
	Size        int64               `json:"size"`
	Referer     string              `json:"referer"`
	UserAgent   string              `json:"useragent"`
	Query       map[string][]string `json:"query,omitempty"`
	Headers     map[string][]string `json:"headers,omitempty"`
	Cookies     map[string]string   `json:"cookies,omitempty"`
}

// getJSONEntry formats the shared fixtures with the JSON formatter and
// decodes the line into a JSONLogEntry, failing the test on decode errors.
func getJSONEntry(t *testing.T, config *Config) JSONLogEntry {
	t.Helper()
	config.Format = FormatJSON
	var entry JSONLogEntry
	_, log := fmtLog(config)
	err := json.Unmarshal([]byte(log), &entry)
	expect.NoError(t, err)
	return entry
}

// TestAccessLoggerJSON checks every field of the JSON output against the
// fixtures, including that headers/cookies are dropped by default and that
// an error field is emitted for 4xx/5xx statuses.
func TestAccessLoggerJSON(t *testing.T) {
	config := DefaultConfig()
	entry := getJSONEntry(t, config)
	expect.Equal(t, entry.IP, remote)
	expect.Equal(t, entry.Method, method)
	expect.Equal(t, entry.Scheme, "http")
	expect.Equal(t, entry.Host, testURL.Host)
	expect.Equal(t, entry.Path, testURL.Path)
	expect.Equal(t, entry.Protocol, proto)
	expect.Equal(t, entry.Status, status)
	expect.Equal(t, entry.ContentType, "text/plain")
	expect.Equal(t, entry.Size, contentLength)
	expect.Equal(t, entry.Referer, referer)
	expect.Equal(t, entry.UserAgent, ua)
	// default field modes drop headers and cookies entirely
	expect.Equal(t, len(entry.Headers), 0)
	expect.Equal(t, len(entry.Cookies), 0)
	if status >= 400 {
		expect.Equal(t, entry.Error, http.StatusText(status))
	}
}
|
||||
|
||||
// BenchmarkAccessLoggerJSON measures the cost of one Log call (filtering,
// JSON formatting, buffered write) against an in-memory mock file.
func BenchmarkAccessLoggerJSON(b *testing.B) {
	config := DefaultConfig()
	config.Format = FormatJSON
	logger := NewMockAccessLogger(testTask, config)
	b.ResetTimer()
	for b.Loop() {
		logger.Log(req, resp)
	}
}

// BenchmarkAccessLoggerCombined is the same measurement with the Combined
// text formatter for comparison against JSON.
func BenchmarkAccessLoggerCombined(b *testing.B) {
	config := DefaultConfig()
	config.Format = FormatCombined
	logger := NewMockAccessLogger(testTask, config)
	b.ResetTimer()
	for b.Loop() {
		logger.Log(req, resp)
	}
}
|
||||
126
internal/logging/accesslog/back_scanner.go
Normal file
126
internal/logging/accesslog/back_scanner.go
Normal file
@@ -0,0 +1,126 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// BackScanner provides an interface to read a file backward line by line.
type BackScanner struct {
	file      supportRotate // source file; read via ReadAt, never advanced
	size      int64         // total file size, captured at construction
	chunkSize int           // bytes read per backward step
	chunkBuf  []byte        // reusable read buffer of chunkSize bytes

	offset int64  // file offset of the start of un-consumed data
	chunk  []byte // buffered data not yet split into lines
	line   []byte // most recent line produced by Scan
	err    error  // first non-EOF error; sticky
}

// NewBackScanner creates a new Scanner to read the file backward.
// chunkSize determines the size of each read chunk from the end of the file.
func NewBackScanner(file supportRotate, chunkSize int) *BackScanner {
	// Seek to the end solely to learn the file size; reads use ReadAt.
	size, err := file.Seek(0, io.SeekEnd)
	if err != nil {
		return &BackScanner{err: err}
	}
	return newBackScanner(file, size, make([]byte, chunkSize))
}

// newBackScanner builds a scanner for a file of known size, reusing buf as
// the chunk buffer (also used by Reset to avoid reallocating).
func newBackScanner(file supportRotate, fileSize int64, buf []byte) *BackScanner {
	return &BackScanner{
		file:      file,
		size:      fileSize,
		offset:    fileSize, // start scanning from the end
		chunkSize: len(buf),
		chunkBuf:  buf,
	}
}
||||
|
||||
// Scan advances the scanner to the previous line, which will then be available
// via the Bytes method. It returns false when there are no more lines.
// Empty lines are skipped. The state machine is order-sensitive: the buffered
// chunk is split from the rightmost newline first, and more data is prepended
// from the file only when no newline remains in the buffer.
func (s *BackScanner) Scan() bool {
	if s.err != nil {
		return false
	}

	// Read chunks until a newline is found or the file is fully read
	for {
		// Check if there's a line in the buffer
		if idx := bytes.LastIndexByte(s.chunk, '\n'); idx >= 0 {
			s.line = s.chunk[idx+1:]
			s.chunk = s.chunk[:idx]
			if len(s.line) > 0 {
				return true
			}
			// empty line: keep splitting the remaining buffer
			continue
		}

		for {
			if s.offset <= 0 {
				// No more data to read; check remaining buffer
				if len(s.chunk) > 0 {
					s.line = s.chunk
					s.chunk = nil
					return true
				}
				return false
			}

			// read the chunk ending at the current offset (may be short
			// at the start of the file)
			newOffset := max(0, s.offset-int64(s.chunkSize))
			chunkSize := s.offset - newOffset
			chunk := s.chunkBuf[:chunkSize]

			n, err := s.file.ReadAt(chunk, newOffset)
			if err != nil {
				if !errors.Is(err, io.EOF) {
					s.err = err
				}
				return false
			} else if n == 0 {
				return false
			}

			// Prepend the chunk to the buffer
			// (clone because chunkBuf is overwritten on the next read)
			clone := append([]byte{}, chunk[:n]...)
			s.chunk = append(clone, s.chunk...)
			s.offset = newOffset

			// Check for newline in the updated buffer
			if idx := bytes.LastIndexByte(s.chunk, '\n'); idx >= 0 {
				s.line = s.chunk[idx+1:]
				s.chunk = s.chunk[:idx]
				if len(s.line) > 0 {
					return true
				}
				// empty line: fall back to the outer loop to keep splitting
				break
			}
		}
	}
}
|
||||
|
||||
// Bytes returns the most recent line generated by a call to Scan.
|
||||
func (s *BackScanner) Bytes() []byte {
|
||||
return s.line
|
||||
}
|
||||
|
||||
// FileSize returns the size of the file.
|
||||
func (s *BackScanner) FileSize() int64 {
|
||||
return s.size
|
||||
}
|
||||
|
||||
// Err returns the first non-EOF error encountered by the scanner.
|
||||
func (s *BackScanner) Err() error {
|
||||
return s.err
|
||||
}
|
||||
|
||||
func (s *BackScanner) Reset() error {
|
||||
_, err := s.file.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*s = *newBackScanner(s.file, s.size, s.chunkBuf)
|
||||
return nil
|
||||
}
|
||||
268
internal/logging/accesslog/back_scanner_test.go
Normal file
268
internal/logging/accesslog/back_scanner_test.go
Normal file
@@ -0,0 +1,268 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/afero"
|
||||
"github.com/yusing/go-proxy/internal/task"
|
||||
"github.com/yusing/go-proxy/internal/utils/strutils"
|
||||
expect "github.com/yusing/go-proxy/internal/utils/testing"
|
||||
)
|
||||
|
||||
// TestBackScanner checks backward line iteration against a table of inputs,
// including empty input, missing trailing newlines, a line longer than the
// chunk size, and blank lines (which the scanner skips). Expected lines are
// listed last-to-first, matching scan order.
func TestBackScanner(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected []string
	}{
		{
			name:     "empty file",
			input:    "",
			expected: []string{},
		},
		{
			name:     "single line without newline",
			input:    "single line",
			expected: []string{"single line"},
		},
		{
			name:     "single line with newline",
			input:    "single line\n",
			expected: []string{"single line"},
		},
		{
			name:     "multiple lines",
			input:    "first\nsecond\nthird\n",
			expected: []string{"third", "second", "first"},
		},
		{
			name:     "multiple lines without final newline",
			input:    "first\nsecond\nthird",
			expected: []string{"third", "second", "first"},
		},
		{
			name:     "lines longer than chunk size",
			input:    "short\n" + strings.Repeat("a", 20) + "\nshort\n",
			expected: []string{"short", strings.Repeat("a", 20), "short"},
		},
		{
			// blank lines are skipped, not returned
			name:     "empty lines",
			input:    "first\n\n\nlast\n",
			expected: []string{"last", "first"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Setup mock file
			mockFile := NewMockFile()
			_, err := mockFile.Write([]byte(tt.input))
			if err != nil {
				t.Fatalf("failed to write to mock file: %v", err)
			}

			// Create scanner with small chunk size to test chunking
			scanner := NewBackScanner(mockFile, 10)

			// Collect all lines
			var lines [][]byte
			for scanner.Scan() {
				lines = append(lines, scanner.Bytes())
			}

			// Check for scanning errors
			if err := scanner.Err(); err != nil {
				t.Errorf("scanner error: %v", err)
			}

			// Compare results
			if len(lines) != len(tt.expected) {
				t.Errorf("got %d lines, want %d lines", len(lines), len(tt.expected))
				return
			}

			for i, line := range lines {
				if string(line) != tt.expected[i] {
					t.Errorf("line %d: got %q, want %q", i, line, tt.expected[i])
				}
			}
		})
	}
}

// TestBackScannerWithVaryingChunkSizes verifies that the result is
// independent of chunk size, including degenerate 1-byte chunks and chunks
// larger than the whole input.
func TestBackScannerWithVaryingChunkSizes(t *testing.T) {
	input := "first\nsecond\nthird\nfourth\nfifth\n"
	expected := []string{"fifth", "fourth", "third", "second", "first"}
	chunkSizes := []int{1, 2, 3, 5, 10, 20, 100}

	for _, chunkSize := range chunkSizes {
		t.Run(fmt.Sprintf("chunk_size_%d", chunkSize), func(t *testing.T) {
			mockFile := NewMockFile()
			_, err := mockFile.Write([]byte(input))
			if err != nil {
				t.Fatalf("failed to write to mock file: %v", err)
			}

			scanner := NewBackScanner(mockFile, chunkSize)

			var lines [][]byte
			for scanner.Scan() {
				lines = append(lines, scanner.Bytes())
			}

			if err := scanner.Err(); err != nil {
				t.Errorf("scanner error: %v", err)
			}

			if len(lines) != len(expected) {
				t.Errorf("got %d lines, want %d lines", len(lines), len(expected))
				return
			}

			for i, line := range lines {
				if string(line) != expected[i] {
					t.Errorf("chunk size %d, line %d: got %q, want %q",
						chunkSize, i, line, expected[i])
				}
			}
		})
	}
}
|
||||
|
||||
// logEntry produces one JSON-formatted, newline-terminated access-log line
// from a synthetic request/response pair, used as test and benchmark input.
func logEntry() []byte {
	accesslog := NewMockAccessLogger(task.RootTask("test", false), &Config{
		Format: FormatJSON,
	})
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	}))
	// overwrite the random test-server URL so the logged host is deterministic
	srv.URL = "http://localhost:8080"
	defer srv.Close()
	// make a request to the server
	req, _ := http.NewRequest("GET", srv.URL, nil)
	res := httptest.NewRecorder()
	// serve the request directly through the handler (no network round-trip)
	srv.Config.Handler.ServeHTTP(res, req)
	b := accesslog.AppendLog(nil, req, res.Result())
	if b[len(b)-1] != '\n' {
		b = append(b, '\n')
	}
	return b
}
|
||||
|
||||
func TestReset(t *testing.T) {
|
||||
file, err := afero.TempFile(afero.NewOsFs(), "", "accesslog")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp file: %v", err)
|
||||
}
|
||||
defer os.Remove(file.Name())
|
||||
line := logEntry()
|
||||
nLines := 1000
|
||||
for range nLines {
|
||||
_, err := file.Write(line)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to write to temp file: %v", err)
|
||||
}
|
||||
}
|
||||
linesRead := 0
|
||||
s := NewBackScanner(file, defaultChunkSize)
|
||||
for s.Scan() {
|
||||
linesRead++
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
t.Errorf("scanner error: %v", err)
|
||||
}
|
||||
expect.Equal(t, linesRead, nLines)
|
||||
s.Reset()
|
||||
|
||||
linesRead = 0
|
||||
for s.Scan() {
|
||||
linesRead++
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
t.Errorf("scanner error: %v", err)
|
||||
}
|
||||
expect.Equal(t, linesRead, nLines)
|
||||
}
|
||||
|
||||
// 100000 log entries
|
||||
// BenchmarkBackScanner scans a 100000-entry in-memory log at chunk sizes
// from 2 KiB to 16 MiB, one sub-benchmark per size, to find the sweet spot
// (results recorded in the comment block at the end of this file).
func BenchmarkBackScanner(b *testing.B) {
	mockFile := NewMockFile()
	line := logEntry()
	for range 100000 {
		_, _ = mockFile.Write(line)
	}
	for i := range 14 {
		chunkSize := (2 << i) * kilobyte
		scanner := NewBackScanner(mockFile, chunkSize)
		name := strutils.FormatByteSize(chunkSize)
		b.ResetTimer()
		b.Run(name, func(b *testing.B) {
			for b.Loop() {
				// Reset error deliberately discarded: a mock file's Seek
				// cannot fail, and Err() would surface a real problem.
				_ = scanner.Reset()
				for scanner.Scan() {
				}
			}
		})
	}
}
|
||||
|
||||
func BenchmarkBackScannerRealFile(b *testing.B) {
|
||||
file, err := afero.TempFile(afero.NewOsFs(), "", "accesslog")
|
||||
if err != nil {
|
||||
b.Fatalf("failed to create temp file: %v", err)
|
||||
}
|
||||
defer os.Remove(file.Name())
|
||||
|
||||
for range 10000 {
|
||||
_, err = file.Write(logEntry())
|
||||
if err != nil {
|
||||
b.Fatalf("failed to write to temp file: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
scanner := NewBackScanner(file, 256*kilobyte)
|
||||
b.ResetTimer()
|
||||
for scanner.Scan() {
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
b.Errorf("scanner error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
BenchmarkBackScanner
|
||||
BenchmarkBackScanner/2_KiB
|
||||
BenchmarkBackScanner/2_KiB-20 52 23254071 ns/op 67596663 B/op 26420 allocs/op
|
||||
BenchmarkBackScanner/4_KiB
|
||||
BenchmarkBackScanner/4_KiB-20 55 20961059 ns/op 62529378 B/op 13211 allocs/op
|
||||
BenchmarkBackScanner/8_KiB
|
||||
BenchmarkBackScanner/8_KiB-20 64 18242460 ns/op 62951141 B/op 6608 allocs/op
|
||||
BenchmarkBackScanner/16_KiB
|
||||
BenchmarkBackScanner/16_KiB-20 52 20162076 ns/op 62940256 B/op 3306 allocs/op
|
||||
BenchmarkBackScanner/32_KiB
|
||||
BenchmarkBackScanner/32_KiB-20 54 19247968 ns/op 67553645 B/op 1656 allocs/op
|
||||
BenchmarkBackScanner/64_KiB
|
||||
BenchmarkBackScanner/64_KiB-20 60 20909046 ns/op 64053342 B/op 827 allocs/op
|
||||
BenchmarkBackScanner/128_KiB
|
||||
BenchmarkBackScanner/128_KiB-20 68 17759890 ns/op 62201945 B/op 414 allocs/op
|
||||
BenchmarkBackScanner/256_KiB
|
||||
BenchmarkBackScanner/256_KiB-20 52 19531877 ns/op 61030487 B/op 208 allocs/op
|
||||
BenchmarkBackScanner/512_KiB
|
||||
BenchmarkBackScanner/512_KiB-20 54 19124656 ns/op 61030485 B/op 208 allocs/op
|
||||
BenchmarkBackScanner/1_MiB
|
||||
BenchmarkBackScanner/1_MiB-20 67 17078936 ns/op 61030495 B/op 208 allocs/op
|
||||
BenchmarkBackScanner/2_MiB
|
||||
BenchmarkBackScanner/2_MiB-20 66 18467421 ns/op 61030492 B/op 208 allocs/op
|
||||
BenchmarkBackScanner/4_MiB
|
||||
BenchmarkBackScanner/4_MiB-20 68 17214573 ns/op 61030486 B/op 208 allocs/op
|
||||
BenchmarkBackScanner/8_MiB
|
||||
BenchmarkBackScanner/8_MiB-20 57 18235229 ns/op 61030492 B/op 208 allocs/op
|
||||
BenchmarkBackScanner/16_MiB
|
||||
BenchmarkBackScanner/16_MiB-20 57 19343441 ns/op 61030499 B/op 208 allocs/op
|
||||
*/
|
||||
71
internal/logging/accesslog/config.go
Normal file
71
internal/logging/accesslog/config.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
)
|
||||
|
||||
type (
	// Format selects the access-log line format (common/combined/json).
	Format string
	// Filters decide which request/response pairs are logged at all.
	Filters struct {
		StatusCodes LogFilter[*StatusCodeRange] `json:"status_codes"`
		Method      LogFilter[HTTPMethod]       `json:"method"`
		Host        LogFilter[Host]             `json:"host"`
		Headers     LogFilter[*HTTPHeader]      `json:"headers"` // header exists or header == value
		CIDR        LogFilter[*CIDR]            `json:"cidr"`
	}
	// Fields control per-field keep/drop/redact behavior inside each line.
	Fields struct {
		Headers FieldConfig `json:"headers"`
		Query   FieldConfig `json:"query"`
		Cookies FieldConfig `json:"cookies"`
	}
	// Config is the full access-log configuration for one route/logger.
	Config struct {
		BufferSize int        `json:"buffer_size"`
		Format     Format     `json:"format" validate:"oneof=common combined json"`
		Path       string     `json:"path"`
		Stdout     bool       `json:"stdout"`
		Filters    Filters    `json:"filters"`
		Fields     Fields     `json:"fields"`
		Retention  *Retention `json:"retention"`
	}
)

var (
	FormatCommon   Format = "common"
	FormatCombined Format = "combined"
	FormatJSON     Format = "json"

	// AvailableFormats lists every supported Format value.
	AvailableFormats = []Format{FormatCommon, FormatCombined, FormatJSON}
)

// DefaultBufferSize is the write-buffer size used when none is configured.
const DefaultBufferSize = 64 * kilobyte // 64KB
|
||||
|
||||
func (cfg *Config) Validate() gperr.Error {
|
||||
if cfg.Path == "" && !cfg.Stdout {
|
||||
return gperr.New("path or stdout is required")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func DefaultConfig() *Config {
|
||||
return &Config{
|
||||
BufferSize: DefaultBufferSize,
|
||||
Format: FormatCombined,
|
||||
Retention: &Retention{Days: 30},
|
||||
Fields: Fields{
|
||||
Headers: FieldConfig{
|
||||
Default: FieldModeDrop,
|
||||
},
|
||||
Query: FieldConfig{
|
||||
Default: FieldModeKeep,
|
||||
},
|
||||
Cookies: FieldConfig{
|
||||
Default: FieldModeDrop,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
utils.RegisterDefaultValueFactory(DefaultConfig)
|
||||
}
|
||||
53
internal/logging/accesslog/config_test.go
Normal file
53
internal/logging/accesslog/config_test.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package accesslog_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/yusing/go-proxy/internal/docker"
|
||||
. "github.com/yusing/go-proxy/internal/logging/accesslog"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
expect "github.com/yusing/go-proxy/internal/utils/testing"
|
||||
)
|
||||
|
||||
// TestNewConfig deserializes a full Config from docker proxy.* labels and
// checks every filter and field-mode option round-trips correctly.
func TestNewConfig(t *testing.T) {
	labels := map[string]string{
		"proxy.buffer_size":                 "10",
		"proxy.format":                      "combined",
		"proxy.path":                        "/tmp/access.log",
		"proxy.filters.status_codes.values": "200-299",
		"proxy.filters.method.values":       "GET, POST",
		"proxy.filters.headers.values":      "foo=bar, baz",
		"proxy.filters.headers.negative":    "true",
		"proxy.filters.cidr.values":         "192.168.10.0/24",
		"proxy.fields.headers.default":      "keep",
		"proxy.fields.headers.config.foo":   "redact",
		"proxy.fields.query.default":        "drop",
		"proxy.fields.query.config.foo":     "keep",
		"proxy.fields.cookies.default":      "redact",
		"proxy.fields.cookies.config.foo":   "keep",
	}
	parsed, err := docker.ParseLabels(labels)
	expect.NoError(t, err)

	var config Config
	err = utils.Deserialize(parsed, &config)
	expect.NoError(t, err)

	expect.Equal(t, config.BufferSize, 10)
	expect.Equal(t, config.Format, FormatCombined)
	expect.Equal(t, config.Path, "/tmp/access.log")
	expect.Equal(t, config.Filters.StatusCodes.Values, []*StatusCodeRange{{Start: 200, End: 299}})
	expect.Equal(t, len(config.Filters.Method.Values), 2)
	expect.Equal(t, config.Filters.Method.Values, []HTTPMethod{"GET", "POST"})
	// "foo=bar" parses as key+value; bare "baz" as key-only (existence check)
	expect.Equal(t, len(config.Filters.Headers.Values), 2)
	expect.Equal(t, config.Filters.Headers.Values, []*HTTPHeader{{Key: "foo", Value: "bar"}, {Key: "baz", Value: ""}})
	expect.True(t, config.Filters.Headers.Negative)
	expect.Equal(t, len(config.Filters.CIDR.Values), 1)
	expect.Equal(t, config.Filters.CIDR.Values[0].String(), "192.168.10.0/24")
	expect.Equal(t, config.Fields.Headers.Default, FieldModeKeep)
	expect.Equal(t, config.Fields.Headers.Config["foo"], FieldModeRedact)
	expect.Equal(t, config.Fields.Query.Default, FieldModeDrop)
	expect.Equal(t, config.Fields.Query.Config["foo"], FieldModeKeep)
	expect.Equal(t, config.Fields.Cookies.Default, FieldModeRedact)
	expect.Equal(t, config.Fields.Cookies.Config["foo"], FieldModeKeep)
}
|
||||
204
internal/logging/accesslog/fields.go
Normal file
204
internal/logging/accesslog/fields.go
Normal file
@@ -0,0 +1,204 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"iter"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
type (
	// FieldConfig controls how one category of fields (headers, query
	// parameters, cookies) is logged: a default mode plus optional
	// per-key overrides.
	FieldConfig struct {
		Default FieldMode            `json:"default" validate:"oneof=keep drop redact"`
		Config  map[string]FieldMode `json:"config" validate:"dive,oneof=keep drop redact"`
	}
	// FieldMode is one of keep, drop or redact.
	FieldMode string
)

const (
	FieldModeKeep   FieldMode = "keep"   // log the value as-is
	FieldModeDrop   FieldMode = "drop"   // omit the field entirely
	FieldModeRedact FieldMode = "redact" // keep the key, replace the value

	// RedactedValue is what redacted values are replaced with.
	RedactedValue = "REDACTED"
)
|
||||
|
||||
// mapStringStringIter abstracts iteration and zerolog marshalling over a
// map[string][]string-like source (headers or query values), with the
// field policy (keep/drop/redact) baked into the concrete implementation.
type mapStringStringIter interface {
	Iter(yield func(k string, v []string) bool)
	MarshalZerologObject(e *zerolog.Event)
}

// mapStringStringSlice yields every key/value pair unmodified (keep-all).
type mapStringStringSlice struct {
	m map[string][]string
}

func (m mapStringStringSlice) Iter(yield func(k string, v []string) bool) {
	for k, v := range m.m {
		if !yield(k, v) {
			return
		}
	}
}

func (m mapStringStringSlice) MarshalZerologObject(e *zerolog.Event) {
	for k, v := range m.m {
		e.Strs(k, v)
	}
}

// mapStringStringRedacted yields every key with its value replaced by
// RedactedValue (redact-all).
type mapStringStringRedacted struct {
	m map[string][]string
}

func (m mapStringStringRedacted) Iter(yield func(k string, v []string) bool) {
	for k := range m.m {
		if !yield(k, []string{RedactedValue}) {
			return
		}
	}
}

func (m mapStringStringRedacted) MarshalZerologObject(e *zerolog.Event) {
	// range-over-func: reuses Iter so redaction logic lives in one place
	for k, v := range m.Iter {
		e.Strs(k, v)
	}
}

// mapStringStringSliceWithConfig applies a per-key mode from cfg.Config,
// falling back to cfg.Default; dropped keys are simply not yielded.
type mapStringStringSliceWithConfig struct {
	m   map[string][]string
	cfg *FieldConfig
}

func (m mapStringStringSliceWithConfig) Iter(yield func(k string, v []string) bool) {
	var mode FieldMode
	var ok bool
	for k, v := range m.m {
		if mode, ok = m.cfg.Config[k]; !ok {
			mode = m.cfg.Default
		}
		switch mode {
		case FieldModeKeep:
			if !yield(k, v) {
				return
			}
		case FieldModeRedact:
			if !yield(k, []string{RedactedValue}) {
				return
			}
		// FieldModeDrop: skip the key entirely
		}
	}
}

func (m mapStringStringSliceWithConfig) MarshalZerologObject(e *zerolog.Event) {
	for k, v := range m.Iter {
		e.Strs(k, v)
	}
}

// mapStringStringDrop yields nothing (drop-all); shared via a single
// package-level instance since it is stateless.
type mapStringStringDrop struct{}

func (m mapStringStringDrop) Iter(yield func(k string, v []string) bool) {}
func (m mapStringStringDrop) MarshalZerologObject(e *zerolog.Event)     {}

var mapStringStringDropIter mapStringStringIter = mapStringStringDrop{}

// mapIter picks the cheapest iterator for cfg: a specialized keep/drop/redact
// implementation when there are no per-key overrides, otherwise the
// config-aware one.
func mapIter[Map http.Header | url.Values](cfg *FieldConfig, m Map) mapStringStringIter {
	if len(cfg.Config) == 0 {
		switch cfg.Default {
		case FieldModeKeep:
			return mapStringStringSlice{m: m}
		case FieldModeDrop:
			return mapStringStringDropIter
		case FieldModeRedact:
			return mapStringStringRedacted{m: m}
		}
	}
	return mapStringStringSliceWithConfig{m: m, cfg: cfg}
}
|
||||
|
||||
type slice[V any] struct {
|
||||
s []V
|
||||
getKey func(V) string
|
||||
getVal func(V) string
|
||||
cfg *FieldConfig
|
||||
}
|
||||
|
||||
type sliceIter interface {
|
||||
Iter(yield func(k string, v string) bool)
|
||||
MarshalZerologObject(e *zerolog.Event)
|
||||
}
|
||||
|
||||
func (s *slice[V]) Iter(yield func(k string, v string) bool) {
|
||||
for _, v := range s.s {
|
||||
k := s.getKey(v)
|
||||
var mode FieldMode
|
||||
var ok bool
|
||||
if mode, ok = s.cfg.Config[k]; !ok {
|
||||
mode = s.cfg.Default
|
||||
}
|
||||
switch mode {
|
||||
case FieldModeKeep:
|
||||
if !yield(k, s.getVal(v)) {
|
||||
return
|
||||
}
|
||||
case FieldModeRedact:
|
||||
if !yield(k, RedactedValue) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type sliceDrop struct{}
|
||||
|
||||
func (s sliceDrop) Iter(yield func(k string, v string) bool) {}
|
||||
func (s sliceDrop) MarshalZerologObject(e *zerolog.Event) {}
|
||||
|
||||
var sliceDropIter sliceIter = sliceDrop{}
|
||||
|
||||
func (s *slice[V]) MarshalZerologObject(e *zerolog.Event) {
|
||||
for k, v := range s.Iter {
|
||||
e.Str(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
func iterSlice[V any](cfg *FieldConfig, s []V, getKey func(V) string, getVal func(V) string) sliceIter {
|
||||
if len(s) == 0 ||
|
||||
len(cfg.Config) == 0 && cfg.Default == FieldModeDrop {
|
||||
return sliceDropIter
|
||||
}
|
||||
return &slice[V]{s: s, getKey: getKey, getVal: getVal, cfg: cfg}
|
||||
}
|
||||
|
||||
func (cfg *FieldConfig) IterHeaders(headers http.Header) iter.Seq2[string, []string] {
|
||||
return mapIter(cfg, headers).Iter
|
||||
}
|
||||
|
||||
func (cfg *FieldConfig) ZerologHeaders(headers http.Header) zerolog.LogObjectMarshaler {
|
||||
return mapIter(cfg, headers)
|
||||
}
|
||||
|
||||
func (cfg *FieldConfig) IterQuery(q url.Values) iter.Seq2[string, []string] {
|
||||
return mapIter(cfg, q).Iter
|
||||
}
|
||||
|
||||
func (cfg *FieldConfig) ZerologQuery(q url.Values) zerolog.LogObjectMarshaler {
|
||||
return mapIter(cfg, q)
|
||||
}
|
||||
|
||||
func cookieGetKey(c *http.Cookie) string {
|
||||
return c.Name
|
||||
}
|
||||
|
||||
func cookieGetValue(c *http.Cookie) string {
|
||||
return c.Value
|
||||
}
|
||||
|
||||
func (cfg *FieldConfig) IterCookies(cookies []*http.Cookie) iter.Seq2[string, string] {
|
||||
return iterSlice(cfg, cookies, cookieGetKey, cookieGetValue).Iter
|
||||
}
|
||||
|
||||
func (cfg *FieldConfig) ZerologCookies(cookies []*http.Cookie) zerolog.LogObjectMarshaler {
|
||||
return iterSlice(cfg, cookies, cookieGetKey, cookieGetValue)
|
||||
}
|
||||
93
internal/logging/accesslog/fields_test.go
Normal file
93
internal/logging/accesslog/fields_test.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package accesslog_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/yusing/go-proxy/internal/logging/accesslog"
|
||||
expect "github.com/yusing/go-proxy/internal/utils/testing"
|
||||
)
|
||||
|
||||
// Cookie header should be removed,
|
||||
// stored in JSONLogEntry.Cookies instead.
|
||||
func TestAccessLoggerJSONKeepHeaders(t *testing.T) {
|
||||
config := DefaultConfig()
|
||||
config.Fields.Headers.Default = FieldModeKeep
|
||||
entry := getJSONEntry(t, config)
|
||||
for k, v := range req.Header {
|
||||
if k != "Cookie" {
|
||||
expect.Equal(t, entry.Headers[k], v)
|
||||
}
|
||||
}
|
||||
|
||||
config.Fields.Headers.Config = map[string]FieldMode{
|
||||
"Referer": FieldModeRedact,
|
||||
"User-Agent": FieldModeDrop,
|
||||
}
|
||||
entry = getJSONEntry(t, config)
|
||||
expect.Equal(t, entry.Headers["Referer"], []string{RedactedValue})
|
||||
expect.Equal(t, entry.Headers["User-Agent"], nil)
|
||||
}
|
||||
|
||||
func TestAccessLoggerJSONDropHeaders(t *testing.T) {
|
||||
config := DefaultConfig()
|
||||
config.Fields.Headers.Default = FieldModeDrop
|
||||
entry := getJSONEntry(t, config)
|
||||
for k := range req.Header {
|
||||
expect.Equal(t, entry.Headers[k], nil)
|
||||
}
|
||||
|
||||
config.Fields.Headers.Config = map[string]FieldMode{
|
||||
"Referer": FieldModeKeep,
|
||||
"User-Agent": FieldModeRedact,
|
||||
}
|
||||
entry = getJSONEntry(t, config)
|
||||
expect.Equal(t, entry.Headers["Referer"], []string{req.Header.Get("Referer")})
|
||||
expect.Equal(t, entry.Headers["User-Agent"], []string{RedactedValue})
|
||||
}
|
||||
|
||||
func TestAccessLoggerJSONRedactHeaders(t *testing.T) {
|
||||
config := DefaultConfig()
|
||||
config.Fields.Headers.Default = FieldModeRedact
|
||||
entry := getJSONEntry(t, config)
|
||||
for k := range req.Header {
|
||||
if k != "Cookie" {
|
||||
expect.Equal(t, entry.Headers[k], []string{RedactedValue})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAccessLoggerJSONKeepCookies(t *testing.T) {
|
||||
config := DefaultConfig()
|
||||
config.Fields.Headers.Default = FieldModeKeep
|
||||
config.Fields.Cookies.Default = FieldModeKeep
|
||||
entry := getJSONEntry(t, config)
|
||||
for _, cookie := range req.Cookies() {
|
||||
expect.Equal(t, entry.Cookies[cookie.Name], cookie.Value)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAccessLoggerJSONRedactCookies(t *testing.T) {
|
||||
config := DefaultConfig()
|
||||
config.Fields.Headers.Default = FieldModeKeep
|
||||
config.Fields.Cookies.Default = FieldModeRedact
|
||||
entry := getJSONEntry(t, config)
|
||||
for _, cookie := range req.Cookies() {
|
||||
expect.Equal(t, entry.Cookies[cookie.Name], RedactedValue)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAccessLoggerJSONDropQuery(t *testing.T) {
|
||||
config := DefaultConfig()
|
||||
config.Fields.Query.Default = FieldModeDrop
|
||||
entry := getJSONEntry(t, config)
|
||||
expect.Equal(t, entry.Query["foo"], nil)
|
||||
expect.Equal(t, entry.Query["bar"], nil)
|
||||
}
|
||||
|
||||
func TestAccessLoggerJSONRedactQuery(t *testing.T) {
|
||||
config := DefaultConfig()
|
||||
config.Fields.Query.Default = FieldModeRedact
|
||||
entry := getJSONEntry(t, config)
|
||||
expect.Equal(t, entry.Query["foo"], []string{RedactedValue})
|
||||
expect.Equal(t, entry.Query["bar"], []string{RedactedValue})
|
||||
}
|
||||
68
internal/logging/accesslog/file_logger.go
Normal file
68
internal/logging/accesslog/file_logger.go
Normal file
@@ -0,0 +1,68 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
pathPkg "path"
|
||||
"sync"
|
||||
|
||||
"github.com/yusing/go-proxy/internal/logging"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
)
|
||||
|
||||
type File struct {
|
||||
*os.File
|
||||
sync.Mutex
|
||||
|
||||
// os.File.Name() may not equal to key of `openedFiles`.
|
||||
// Store it for later delete from `openedFiles`.
|
||||
path string
|
||||
|
||||
refCount *utils.RefCount
|
||||
}
|
||||
|
||||
var (
|
||||
openedFiles = make(map[string]*File)
|
||||
openedFilesMu sync.Mutex
|
||||
)
|
||||
|
||||
func newFileIO(path string) (AccessLogIO, error) {
|
||||
openedFilesMu.Lock()
|
||||
|
||||
var file *File
|
||||
path = pathPkg.Clean(path)
|
||||
if opened, ok := openedFiles[path]; ok {
|
||||
opened.refCount.Add()
|
||||
file = opened
|
||||
} else {
|
||||
f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0o644)
|
||||
if err != nil {
|
||||
openedFilesMu.Unlock()
|
||||
return nil, fmt.Errorf("access log open error: %w", err)
|
||||
}
|
||||
file = &File{File: f, path: path, refCount: utils.NewRefCounter()}
|
||||
openedFiles[path] = file
|
||||
go file.closeOnZero()
|
||||
}
|
||||
|
||||
openedFilesMu.Unlock()
|
||||
return file, nil
|
||||
}
|
||||
|
||||
func (f *File) Close() error {
|
||||
f.refCount.Sub()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *File) closeOnZero() {
|
||||
defer logging.Debug().
|
||||
Str("path", f.path).
|
||||
Msg("access log closed")
|
||||
|
||||
<-f.refCount.Zero()
|
||||
|
||||
openedFilesMu.Lock()
|
||||
delete(openedFiles, f.path)
|
||||
openedFilesMu.Unlock()
|
||||
f.File.Close()
|
||||
}
|
||||
94
internal/logging/accesslog/file_logger_test.go
Normal file
94
internal/logging/accesslog/file_logger_test.go
Normal file
@@ -0,0 +1,94 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
expect "github.com/yusing/go-proxy/internal/utils/testing"
|
||||
|
||||
"github.com/yusing/go-proxy/internal/task"
|
||||
)
|
||||
|
||||
func TestConcurrentFileLoggersShareSameAccessLogIO(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
cfg := DefaultConfig()
|
||||
cfg.Path = "test.log"
|
||||
|
||||
loggerCount := 10
|
||||
accessLogIOs := make([]AccessLogIO, loggerCount)
|
||||
|
||||
// make test log file
|
||||
file, err := os.Create(cfg.Path)
|
||||
expect.NoError(t, err)
|
||||
file.Close()
|
||||
t.Cleanup(func() {
|
||||
expect.NoError(t, os.Remove(cfg.Path))
|
||||
})
|
||||
|
||||
for i := range loggerCount {
|
||||
wg.Add(1)
|
||||
go func(index int) {
|
||||
defer wg.Done()
|
||||
file, err := newFileIO(cfg.Path)
|
||||
expect.NoError(t, err)
|
||||
accessLogIOs[index] = file
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
firstIO := accessLogIOs[0]
|
||||
for _, io := range accessLogIOs {
|
||||
expect.Equal(t, io, firstIO)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConcurrentAccessLoggerLogAndFlush(t *testing.T) {
|
||||
file := NewMockFile()
|
||||
|
||||
cfg := DefaultConfig()
|
||||
cfg.BufferSize = 1024
|
||||
parent := task.RootTask("test", false)
|
||||
|
||||
loggerCount := 5
|
||||
logCountPerLogger := 10
|
||||
loggers := make([]*AccessLogger, loggerCount)
|
||||
|
||||
for i := range loggerCount {
|
||||
loggers[i] = NewAccessLoggerWithIO(parent, file, cfg)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil)
|
||||
resp := &http.Response{StatusCode: http.StatusOK}
|
||||
|
||||
wg.Add(len(loggers))
|
||||
for _, logger := range loggers {
|
||||
go func(l *AccessLogger) {
|
||||
defer wg.Done()
|
||||
parallelLog(l, req, resp, logCountPerLogger)
|
||||
l.Flush()
|
||||
}(logger)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
expected := loggerCount * logCountPerLogger
|
||||
actual := file.NumLines()
|
||||
expect.Equal(t, actual, expected)
|
||||
}
|
||||
|
||||
func parallelLog(logger *AccessLogger, req *http.Request, resp *http.Response, n int) {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(n)
|
||||
for range n {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
logger.Log(req, resp)
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
101
internal/logging/accesslog/filter.go
Normal file
101
internal/logging/accesslog/filter.go
Normal file
@@ -0,0 +1,101 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
gpnet "github.com/yusing/go-proxy/internal/net/types"
|
||||
"github.com/yusing/go-proxy/internal/utils/strutils"
|
||||
)
|
||||
|
||||
type (
|
||||
LogFilter[T Filterable] struct {
|
||||
Negative bool
|
||||
Values []T
|
||||
}
|
||||
Filterable interface {
|
||||
comparable
|
||||
Fulfill(req *http.Request, res *http.Response) bool
|
||||
}
|
||||
HTTPMethod string
|
||||
HTTPHeader struct {
|
||||
Key, Value string
|
||||
}
|
||||
Host string
|
||||
CIDR struct {
|
||||
gpnet.CIDR
|
||||
}
|
||||
)
|
||||
|
||||
var ErrInvalidHTTPHeaderFilter = gperr.New("invalid http header filter")
|
||||
|
||||
func (f *LogFilter[T]) CheckKeep(req *http.Request, res *http.Response) bool {
|
||||
if len(f.Values) == 0 {
|
||||
return !f.Negative
|
||||
}
|
||||
for _, check := range f.Values {
|
||||
if check.Fulfill(req, res) {
|
||||
return !f.Negative
|
||||
}
|
||||
}
|
||||
return f.Negative
|
||||
}
|
||||
|
||||
func (r *StatusCodeRange) Fulfill(req *http.Request, res *http.Response) bool {
|
||||
return r.Includes(res.StatusCode)
|
||||
}
|
||||
|
||||
func (method HTTPMethod) Fulfill(req *http.Request, res *http.Response) bool {
|
||||
return req.Method == string(method)
|
||||
}
|
||||
|
||||
// Parse implements strutils.Parser.
|
||||
func (k *HTTPHeader) Parse(v string) error {
|
||||
split := strutils.SplitRune(v, '=')
|
||||
switch len(split) {
|
||||
case 1:
|
||||
split = append(split, "")
|
||||
case 2:
|
||||
default:
|
||||
return ErrInvalidHTTPHeaderFilter.Subject(v)
|
||||
}
|
||||
k.Key = split[0]
|
||||
k.Value = split[1]
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *HTTPHeader) Fulfill(req *http.Request, res *http.Response) bool {
|
||||
wanted := k.Value
|
||||
// non canonical key matching
|
||||
got, ok := req.Header[k.Key]
|
||||
if wanted == "" {
|
||||
return ok
|
||||
}
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
for _, v := range got {
|
||||
if strings.EqualFold(v, wanted) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (h Host) Fulfill(req *http.Request, res *http.Response) bool {
|
||||
return req.Host == string(h)
|
||||
}
|
||||
|
||||
func (cidr *CIDR) Fulfill(req *http.Request, res *http.Response) bool {
|
||||
ip, _, err := net.SplitHostPort(req.RemoteAddr)
|
||||
if err != nil {
|
||||
ip = req.RemoteAddr
|
||||
}
|
||||
netIP := net.ParseIP(ip)
|
||||
if netIP == nil {
|
||||
return false
|
||||
}
|
||||
return cidr.Contains(netIP)
|
||||
}
|
||||
191
internal/logging/accesslog/filter_test.go
Normal file
191
internal/logging/accesslog/filter_test.go
Normal file
@@ -0,0 +1,191 @@
|
||||
package accesslog_test
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
. "github.com/yusing/go-proxy/internal/logging/accesslog"
|
||||
gpnet "github.com/yusing/go-proxy/internal/net/types"
|
||||
"github.com/yusing/go-proxy/internal/utils/strutils"
|
||||
expect "github.com/yusing/go-proxy/internal/utils/testing"
|
||||
)
|
||||
|
||||
func TestStatusCodeFilter(t *testing.T) {
|
||||
values := []*StatusCodeRange{
|
||||
strutils.MustParse[*StatusCodeRange]("200-308"),
|
||||
}
|
||||
t.Run("positive", func(t *testing.T) {
|
||||
filter := &LogFilter[*StatusCodeRange]{}
|
||||
expect.True(t, filter.CheckKeep(nil, nil))
|
||||
|
||||
// keep any 2xx 3xx (inclusive)
|
||||
filter.Values = values
|
||||
expect.False(t, filter.CheckKeep(nil, &http.Response{
|
||||
StatusCode: http.StatusForbidden,
|
||||
}))
|
||||
expect.True(t, filter.CheckKeep(nil, &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
}))
|
||||
expect.True(t, filter.CheckKeep(nil, &http.Response{
|
||||
StatusCode: http.StatusMultipleChoices,
|
||||
}))
|
||||
expect.True(t, filter.CheckKeep(nil, &http.Response{
|
||||
StatusCode: http.StatusPermanentRedirect,
|
||||
}))
|
||||
})
|
||||
|
||||
t.Run("negative", func(t *testing.T) {
|
||||
filter := &LogFilter[*StatusCodeRange]{
|
||||
Negative: true,
|
||||
}
|
||||
expect.False(t, filter.CheckKeep(nil, nil))
|
||||
|
||||
// drop any 2xx 3xx (inclusive)
|
||||
filter.Values = values
|
||||
expect.True(t, filter.CheckKeep(nil, &http.Response{
|
||||
StatusCode: http.StatusForbidden,
|
||||
}))
|
||||
expect.False(t, filter.CheckKeep(nil, &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
}))
|
||||
expect.False(t, filter.CheckKeep(nil, &http.Response{
|
||||
StatusCode: http.StatusMultipleChoices,
|
||||
}))
|
||||
expect.False(t, filter.CheckKeep(nil, &http.Response{
|
||||
StatusCode: http.StatusPermanentRedirect,
|
||||
}))
|
||||
})
|
||||
}
|
||||
|
||||
func TestMethodFilter(t *testing.T) {
|
||||
t.Run("positive", func(t *testing.T) {
|
||||
filter := &LogFilter[HTTPMethod]{}
|
||||
expect.True(t, filter.CheckKeep(&http.Request{
|
||||
Method: http.MethodGet,
|
||||
}, nil))
|
||||
expect.True(t, filter.CheckKeep(&http.Request{
|
||||
Method: http.MethodPost,
|
||||
}, nil))
|
||||
|
||||
// keep get only
|
||||
filter.Values = []HTTPMethod{http.MethodGet}
|
||||
expect.True(t, filter.CheckKeep(&http.Request{
|
||||
Method: http.MethodGet,
|
||||
}, nil))
|
||||
expect.False(t, filter.CheckKeep(&http.Request{
|
||||
Method: http.MethodPost,
|
||||
}, nil))
|
||||
})
|
||||
|
||||
t.Run("negative", func(t *testing.T) {
|
||||
filter := &LogFilter[HTTPMethod]{
|
||||
Negative: true,
|
||||
}
|
||||
expect.False(t, filter.CheckKeep(&http.Request{
|
||||
Method: http.MethodGet,
|
||||
}, nil))
|
||||
expect.False(t, filter.CheckKeep(&http.Request{
|
||||
Method: http.MethodPost,
|
||||
}, nil))
|
||||
|
||||
// drop post only
|
||||
filter.Values = []HTTPMethod{http.MethodPost}
|
||||
expect.False(t, filter.CheckKeep(&http.Request{
|
||||
Method: http.MethodPost,
|
||||
}, nil))
|
||||
expect.True(t, filter.CheckKeep(&http.Request{
|
||||
Method: http.MethodGet,
|
||||
}, nil))
|
||||
})
|
||||
}
|
||||
|
||||
func TestHeaderFilter(t *testing.T) {
|
||||
fooBar := &http.Request{
|
||||
Header: http.Header{
|
||||
"Foo": []string{"bar"},
|
||||
},
|
||||
}
|
||||
fooBaz := &http.Request{
|
||||
Header: http.Header{
|
||||
"Foo": []string{"baz"},
|
||||
},
|
||||
}
|
||||
headerFoo := []*HTTPHeader{
|
||||
strutils.MustParse[*HTTPHeader]("Foo"),
|
||||
}
|
||||
expect.Equal(t, headerFoo[0].Key, "Foo")
|
||||
expect.Equal(t, headerFoo[0].Value, "")
|
||||
headerFooBar := []*HTTPHeader{
|
||||
strutils.MustParse[*HTTPHeader]("Foo=bar"),
|
||||
}
|
||||
expect.Equal(t, headerFooBar[0].Key, "Foo")
|
||||
expect.Equal(t, headerFooBar[0].Value, "bar")
|
||||
|
||||
t.Run("positive", func(t *testing.T) {
|
||||
filter := &LogFilter[*HTTPHeader]{}
|
||||
expect.True(t, filter.CheckKeep(fooBar, nil))
|
||||
expect.True(t, filter.CheckKeep(fooBaz, nil))
|
||||
|
||||
// keep any foo
|
||||
filter.Values = headerFoo
|
||||
expect.True(t, filter.CheckKeep(fooBar, nil))
|
||||
expect.True(t, filter.CheckKeep(fooBaz, nil))
|
||||
|
||||
// keep foo == bar
|
||||
filter.Values = headerFooBar
|
||||
expect.True(t, filter.CheckKeep(fooBar, nil))
|
||||
expect.False(t, filter.CheckKeep(fooBaz, nil))
|
||||
})
|
||||
t.Run("negative", func(t *testing.T) {
|
||||
filter := &LogFilter[*HTTPHeader]{
|
||||
Negative: true,
|
||||
}
|
||||
expect.False(t, filter.CheckKeep(fooBar, nil))
|
||||
expect.False(t, filter.CheckKeep(fooBaz, nil))
|
||||
|
||||
// drop any foo
|
||||
filter.Values = headerFoo
|
||||
expect.False(t, filter.CheckKeep(fooBar, nil))
|
||||
expect.False(t, filter.CheckKeep(fooBaz, nil))
|
||||
|
||||
// drop foo == bar
|
||||
filter.Values = headerFooBar
|
||||
expect.False(t, filter.CheckKeep(fooBar, nil))
|
||||
expect.True(t, filter.CheckKeep(fooBaz, nil))
|
||||
})
|
||||
}
|
||||
|
||||
func TestCIDRFilter(t *testing.T) {
|
||||
cidr := []*CIDR{{gpnet.CIDR{
|
||||
IP: net.ParseIP("192.168.10.0"),
|
||||
Mask: net.CIDRMask(24, 32),
|
||||
}}}
|
||||
expect.Equal(t, cidr[0].String(), "192.168.10.0/24")
|
||||
inCIDR := &http.Request{
|
||||
RemoteAddr: "192.168.10.1",
|
||||
}
|
||||
notInCIDR := &http.Request{
|
||||
RemoteAddr: "192.168.11.1",
|
||||
}
|
||||
|
||||
t.Run("positive", func(t *testing.T) {
|
||||
filter := &LogFilter[*CIDR]{}
|
||||
expect.True(t, filter.CheckKeep(inCIDR, nil))
|
||||
expect.True(t, filter.CheckKeep(notInCIDR, nil))
|
||||
|
||||
filter.Values = cidr
|
||||
expect.True(t, filter.CheckKeep(inCIDR, nil))
|
||||
expect.False(t, filter.CheckKeep(notInCIDR, nil))
|
||||
})
|
||||
|
||||
t.Run("negative", func(t *testing.T) {
|
||||
filter := &LogFilter[*CIDR]{Negative: true}
|
||||
expect.False(t, filter.CheckKeep(inCIDR, nil))
|
||||
expect.False(t, filter.CheckKeep(notInCIDR, nil))
|
||||
|
||||
filter.Values = cidr
|
||||
expect.False(t, filter.CheckKeep(inCIDR, nil))
|
||||
expect.True(t, filter.CheckKeep(notInCIDR, nil))
|
||||
})
|
||||
}
|
||||
157
internal/logging/accesslog/formatter.go
Normal file
157
internal/logging/accesslog/formatter.go
Normal file
@@ -0,0 +1,157 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"iter"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
)
|
||||
|
||||
type (
|
||||
CommonFormatter struct {
|
||||
cfg *Fields
|
||||
}
|
||||
CombinedFormatter struct{ CommonFormatter }
|
||||
JSONFormatter struct{ CommonFormatter }
|
||||
)
|
||||
|
||||
const LogTimeFormat = "02/Jan/2006:15:04:05 -0700"
|
||||
|
||||
func scheme(req *http.Request) string {
|
||||
if req.TLS != nil {
|
||||
return "https"
|
||||
}
|
||||
return "http"
|
||||
}
|
||||
|
||||
func appendRequestURI(line []byte, req *http.Request, query iter.Seq2[string, []string]) []byte {
|
||||
uri := req.URL.EscapedPath()
|
||||
line = append(line, uri...)
|
||||
isFirst := true
|
||||
for k, v := range query {
|
||||
if isFirst {
|
||||
line = append(line, '?')
|
||||
isFirst = false
|
||||
} else {
|
||||
line = append(line, '&')
|
||||
}
|
||||
line = append(line, k...)
|
||||
line = append(line, '=')
|
||||
for _, v := range v {
|
||||
line = append(line, v...)
|
||||
}
|
||||
}
|
||||
return line
|
||||
}
|
||||
|
||||
func clientIP(req *http.Request) string {
|
||||
clientIP, _, err := net.SplitHostPort(req.RemoteAddr)
|
||||
if err == nil {
|
||||
return clientIP
|
||||
}
|
||||
return req.RemoteAddr
|
||||
}
|
||||
|
||||
func (f *CommonFormatter) AppendLog(line []byte, req *http.Request, res *http.Response) []byte {
|
||||
query := f.cfg.Query.IterQuery(req.URL.Query())
|
||||
|
||||
line = append(line, req.Host...)
|
||||
line = append(line, ' ')
|
||||
|
||||
line = append(line, clientIP(req)...)
|
||||
line = append(line, " - - ["...)
|
||||
|
||||
line = utils.TimeNow().AppendFormat(line, LogTimeFormat)
|
||||
line = append(line, `] "`...)
|
||||
|
||||
line = append(line, req.Method...)
|
||||
line = append(line, ' ')
|
||||
line = appendRequestURI(line, req, query)
|
||||
line = append(line, ' ')
|
||||
line = append(line, req.Proto...)
|
||||
line = append(line, '"')
|
||||
line = append(line, ' ')
|
||||
|
||||
line = strconv.AppendInt(line, int64(res.StatusCode), 10)
|
||||
line = append(line, ' ')
|
||||
line = strconv.AppendInt(line, res.ContentLength, 10)
|
||||
return line
|
||||
}
|
||||
|
||||
func (f *CombinedFormatter) AppendLog(line []byte, req *http.Request, res *http.Response) []byte {
|
||||
line = f.CommonFormatter.AppendLog(line, req, res)
|
||||
line = append(line, " \""...)
|
||||
line = append(line, req.Referer()...)
|
||||
line = append(line, "\" \""...)
|
||||
line = append(line, req.UserAgent()...)
|
||||
line = append(line, '"')
|
||||
return line
|
||||
}
|
||||
|
||||
type zeroLogStringStringMapMarshaler struct {
|
||||
values map[string]string
|
||||
}
|
||||
|
||||
func (z *zeroLogStringStringMapMarshaler) MarshalZerologObject(e *zerolog.Event) {
|
||||
if len(z.values) == 0 {
|
||||
return
|
||||
}
|
||||
for k, v := range z.values {
|
||||
e.Str(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
type zeroLogStringStringSliceMapMarshaler struct {
|
||||
values map[string][]string
|
||||
}
|
||||
|
||||
func (z *zeroLogStringStringSliceMapMarshaler) MarshalZerologObject(e *zerolog.Event) {
|
||||
if len(z.values) == 0 {
|
||||
return
|
||||
}
|
||||
for k, v := range z.values {
|
||||
e.Strs(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *JSONFormatter) AppendLog(line []byte, req *http.Request, res *http.Response) []byte {
|
||||
query := f.cfg.Query.ZerologQuery(req.URL.Query())
|
||||
headers := f.cfg.Headers.ZerologHeaders(req.Header)
|
||||
cookies := f.cfg.Cookies.ZerologCookies(req.Cookies())
|
||||
contentType := res.Header.Get("Content-Type")
|
||||
|
||||
writer := bytes.NewBuffer(line)
|
||||
logger := zerolog.New(writer).With().Logger()
|
||||
event := logger.Info().
|
||||
Str("time", utils.TimeNow().Format(LogTimeFormat)).
|
||||
Str("ip", clientIP(req)).
|
||||
Str("method", req.Method).
|
||||
Str("scheme", scheme(req)).
|
||||
Str("host", req.Host).
|
||||
Str("path", req.URL.Path).
|
||||
Str("protocol", req.Proto).
|
||||
Int("status", res.StatusCode).
|
||||
Str("type", contentType).
|
||||
Int64("size", res.ContentLength).
|
||||
Str("referer", req.Referer()).
|
||||
Str("useragent", req.UserAgent()).
|
||||
Object("query", query).
|
||||
Object("headers", headers).
|
||||
Object("cookies", cookies)
|
||||
|
||||
if res.StatusCode >= 400 {
|
||||
if res.Status != "" {
|
||||
event.Str("error", res.Status)
|
||||
} else {
|
||||
event.Str("error", http.StatusText(res.StatusCode))
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: zerolog will append a newline to the buffer
|
||||
event.Send()
|
||||
return writer.Bytes()
|
||||
}
|
||||
49
internal/logging/accesslog/mock_file.go
Normal file
49
internal/logging/accesslog/mock_file.go
Normal file
@@ -0,0 +1,49 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
type noLock struct{}
|
||||
|
||||
func (noLock) Lock() {}
|
||||
func (noLock) Unlock() {}
|
||||
|
||||
type MockFile struct {
|
||||
afero.File
|
||||
noLock
|
||||
}
|
||||
|
||||
func NewMockFile() *MockFile {
|
||||
f, _ := afero.TempFile(afero.NewMemMapFs(), "", "")
|
||||
return &MockFile{
|
||||
File: f,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MockFile) Len() int64 {
|
||||
filesize, _ := m.Seek(0, io.SeekEnd)
|
||||
_, _ = m.Seek(0, io.SeekStart)
|
||||
return filesize
|
||||
}
|
||||
|
||||
func (m *MockFile) Content() []byte {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
m.Seek(0, io.SeekStart)
|
||||
_, _ = buf.ReadFrom(m.File)
|
||||
m.Seek(0, io.SeekStart)
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func (m *MockFile) NumLines() int {
|
||||
content := m.Content()
|
||||
count := bytes.Count(content, []byte("\n"))
|
||||
// account for last line if it does not end with a newline
|
||||
if len(content) > 0 && content[len(content)-1] != '\n' {
|
||||
count++
|
||||
}
|
||||
return count
|
||||
}
|
||||
46
internal/logging/accesslog/multi_writer.go
Normal file
46
internal/logging/accesslog/multi_writer.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package accesslog
|
||||
|
||||
import "strings"
|
||||
|
||||
type MultiWriter struct {
|
||||
writers []AccessLogIO
|
||||
}
|
||||
|
||||
func NewMultiWriter(writers ...AccessLogIO) AccessLogIO {
|
||||
if len(writers) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(writers) == 1 {
|
||||
return writers[0]
|
||||
}
|
||||
return &MultiWriter{
|
||||
writers: writers,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *MultiWriter) Write(p []byte) (n int, err error) {
|
||||
for _, writer := range w.writers {
|
||||
writer.Write(p)
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (w *MultiWriter) Lock() {
|
||||
for _, writer := range w.writers {
|
||||
writer.Lock()
|
||||
}
|
||||
}
|
||||
|
||||
func (w *MultiWriter) Unlock() {
|
||||
for _, writer := range w.writers {
|
||||
writer.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (w *MultiWriter) Name() string {
|
||||
names := make([]string, len(w.writers))
|
||||
for i, writer := range w.writers {
|
||||
names[i] = writer.Name()
|
||||
}
|
||||
return strings.Join(names, ", ")
|
||||
}
|
||||
94
internal/logging/accesslog/retention.go
Normal file
94
internal/logging/accesslog/retention.go
Normal file
@@ -0,0 +1,94 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/utils/strutils"
|
||||
)
|
||||
|
||||
type Retention struct {
|
||||
Days uint64 `json:"days"`
|
||||
Last uint64 `json:"last"`
|
||||
KeepSize uint64 `json:"keep_size"`
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidSyntax = gperr.New("invalid syntax")
|
||||
ErrZeroValue = gperr.New("zero value")
|
||||
)
|
||||
|
||||
// see back_scanner_test.go#L210 for benchmarks
|
||||
var defaultChunkSize = 256 * kilobyte
|
||||
|
||||
// Syntax:
|
||||
//
|
||||
// <N> days|weeks|months
|
||||
//
|
||||
// last <N>
|
||||
//
|
||||
// <N> KB|MB|GB|kb|mb|gb
|
||||
//
|
||||
// Parse implements strutils.Parser.
|
||||
func (r *Retention) Parse(v string) (err error) {
|
||||
split := strutils.SplitSpace(v)
|
||||
if len(split) != 2 {
|
||||
return ErrInvalidSyntax.Subject(v)
|
||||
}
|
||||
switch split[0] {
|
||||
case "last":
|
||||
r.Last, err = strconv.ParseUint(split[1], 10, 64)
|
||||
default: // <N> days|weeks|months
|
||||
n, err := strconv.ParseUint(split[0], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch split[1] {
|
||||
case "day", "days":
|
||||
r.Days = n
|
||||
case "week", "weeks":
|
||||
r.Days = n * 7
|
||||
case "month", "months":
|
||||
r.Days = n * 30
|
||||
case "kb", "Kb":
|
||||
r.KeepSize = n * kilobits
|
||||
case "KB":
|
||||
r.KeepSize = n * kilobyte
|
||||
case "mb", "Mb":
|
||||
r.KeepSize = n * megabits
|
||||
case "MB":
|
||||
r.KeepSize = n * megabyte
|
||||
case "gb", "Gb":
|
||||
r.KeepSize = n * gigabits
|
||||
case "GB":
|
||||
r.KeepSize = n * gigabyte
|
||||
default:
|
||||
return ErrInvalidSyntax.Subject("unit " + split[1])
|
||||
}
|
||||
}
|
||||
if !r.IsValid() {
|
||||
return ErrZeroValue
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *Retention) String() string {
|
||||
if r.Days > 0 {
|
||||
return fmt.Sprintf("%d days", r.Days)
|
||||
}
|
||||
if r.Last > 0 {
|
||||
return fmt.Sprintf("last %d", r.Last)
|
||||
}
|
||||
if r.KeepSize > 0 {
|
||||
return strutils.FormatByteSize(r.KeepSize)
|
||||
}
|
||||
return "<invalid>"
|
||||
}
|
||||
|
||||
func (r *Retention) IsValid() bool {
|
||||
if r == nil {
|
||||
return false
|
||||
}
|
||||
return r.Days > 0 || r.Last > 0 || r.KeepSize > 0
|
||||
}
|
||||
33
internal/logging/accesslog/retention_test.go
Normal file
33
internal/logging/accesslog/retention_test.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package accesslog_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/yusing/go-proxy/internal/logging/accesslog"
|
||||
expect "github.com/yusing/go-proxy/internal/utils/testing"
|
||||
)
|
||||
|
||||
func TestParseRetention(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
expected *Retention
|
||||
shouldErr bool
|
||||
}{
|
||||
{"30 days", &Retention{Days: 30}, false},
|
||||
{"2 weeks", &Retention{Days: 14}, false},
|
||||
{"last 5", &Retention{Last: 5}, false},
|
||||
{"invalid input", &Retention{}, true},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.input, func(t *testing.T) {
|
||||
r := &Retention{}
|
||||
err := r.Parse(test.input)
|
||||
if !test.shouldErr {
|
||||
expect.NoError(t, err)
|
||||
} else {
|
||||
expect.Equal(t, r, test.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
256
internal/logging/accesslog/rotate.go
Normal file
256
internal/logging/accesslog/rotate.go
Normal file
@@ -0,0 +1,256 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
"github.com/yusing/go-proxy/internal/utils/strutils"
|
||||
"github.com/yusing/go-proxy/internal/utils/synk"
|
||||
)
|
||||
|
||||
type supportRotate interface {
|
||||
io.Reader
|
||||
io.Writer
|
||||
io.Seeker
|
||||
io.ReaderAt
|
||||
io.WriterAt
|
||||
Truncate(size int64) error
|
||||
}
|
||||
|
||||
type RotateResult struct {
|
||||
Filename string
|
||||
OriginalSize int64 // original size of the file
|
||||
NumBytesRead int64 // number of bytes read from the file
|
||||
NumBytesKeep int64 // number of bytes to keep
|
||||
NumLinesRead int // number of lines read from the file
|
||||
NumLinesKeep int // number of lines to keep
|
||||
NumLinesInvalid int // number of invalid lines
|
||||
}
|
||||
|
||||
func (r *RotateResult) Print(logger *zerolog.Logger) {
|
||||
logger.Info().
|
||||
Str("original_size", strutils.FormatByteSize(r.OriginalSize)).
|
||||
Str("bytes_read", strutils.FormatByteSize(r.NumBytesRead)).
|
||||
Str("bytes_keep", strutils.FormatByteSize(r.NumBytesKeep)).
|
||||
Int("lines_read", r.NumLinesRead).
|
||||
Int("lines_keep", r.NumLinesKeep).
|
||||
Int("lines_invalid", r.NumLinesInvalid).
|
||||
Msg("log rotate result")
|
||||
}
|
||||
|
||||
// lineInfo records where a kept line — or a merged run of consecutive
// kept lines — lives inside the original file.
type lineInfo struct {
	Pos  int64 // Position from the start of the file
	Size int64 // Size of this line (or merged run of lines)
}
|
||||
|
||||
// rotateBytePool supplies scratch buffers for the copy phase of log
// rotation (see rotateLogFile).
// do not allocate initial size
// NOTE(review): the second argument presumably caps the pooled buffer
// size at 16 MiB — confirm against synk.NewBytesPool.
var rotateBytePool = synk.NewBytesPool(0, 16*1024*1024)
|
||||
|
||||
// rotateLogFile rotates the log file based on the retention policy.
// It returns the result of the rotation and an error if any.
//
// The file is rotated by reading the file backward line-by-line
// and stop once error occurs or found a line that should not be kept.
//
// Any invalid lines will be skipped and not included in the result.
//
// If the file does not need to be rotated, it returns nil, nil.
func rotateLogFile(file supportRotate, config *Retention) (result *RotateResult, err error) {
	// size-based retention has a cheaper dedicated path that never
	// reads the file content
	if config.KeepSize > 0 {
		return rotateLogFileBySize(file, config)
	}

	var shouldStop func() bool
	t := utils.TimeNow()

	if config.Last > 0 {
		// keep the most recent N valid lines
		shouldStop = func() bool { return result.NumLinesKeep-result.NumLinesInvalid == int(config.Last) }
		// not needed to parse time for last N lines
	} else if config.Days > 0 {
		// keep lines dated within the last N days (today counts, hence +1)
		cutoff := utils.TimeNow().AddDate(0, 0, -int(config.Days)+1)
		shouldStop = func() bool { return t.Before(cutoff) }
	} else {
		return nil, nil // should not happen
	}

	s := NewBackScanner(file, defaultChunkSize)
	result = &RotateResult{
		OriginalSize: s.FileSize(),
	}

	// empty file: nothing to rotate
	if result.OriginalSize == 0 {
		return nil, nil
	}

	// Store the line positions and sizes we want to keep
	linesToKeep := make([]lineInfo, 0)
	lastLineValid := false

	for s.Scan() {
		result.NumLinesRead++
		lineSize := int64(len(s.Bytes()) + 1) // +1 for newline
		// scanning backward: position is measured back from EOF
		linePos := result.OriginalSize - result.NumBytesRead - lineSize
		result.NumBytesRead += lineSize

		// Check if line has valid time
		t = ParseLogTime(s.Bytes())
		if t.IsZero() {
			result.NumLinesInvalid++
			lastLineValid = false
			continue
		}

		// Check if we should stop based on retention policy
		if shouldStop() {
			break
		}

		// Add line to those we want to keep; consecutive kept lines are
		// merged into one contiguous region so the rewrite below needs
		// fewer ReadAt/WriteAt calls
		if lastLineValid {
			last := linesToKeep[len(linesToKeep)-1]
			linesToKeep[len(linesToKeep)-1] = lineInfo{
				Pos:  last.Pos - lineSize,
				Size: last.Size + lineSize,
			}
		} else {
			linesToKeep = append(linesToKeep, lineInfo{
				Pos:  linePos,
				Size: lineSize,
			})
		}
		result.NumBytesKeep += lineSize
		result.NumLinesKeep++
		lastLineValid = true
	}

	if s.Err() != nil {
		return nil, s.Err()
	}

	// nothing to keep, truncate to empty
	if len(linesToKeep) == 0 {
		return nil, file.Truncate(0)
	}

	// everything survives: no rewrite needed
	if result.NumBytesKeep == result.OriginalSize {
		return nil, nil
	}

	// Read each kept region and write it back to the beginning of the file
	writePos := int64(0)
	buf := rotateBytePool.Get()
	defer rotateBytePool.Put(buf)

	// in reverse order to keep the order of the lines (from old to new)
	for i := len(linesToKeep) - 1; i >= 0; i-- {
		line := linesToKeep[i]
		n := line.Size
		if cap(buf) < int(n) {
			buf = make([]byte, n)
		}
		buf = buf[:n]

		// Read the line from its original position
		if _, err := file.ReadAt(buf, line.Pos); err != nil {
			return nil, err
		}

		// Write it to the new position
		if _, err := file.WriteAt(buf, writePos); err != nil {
			return nil, err
		}
		writePos += n
	}

	// drop everything past the rewritten region
	if err := file.Truncate(writePos); err != nil {
		return nil, err
	}

	return result, nil
}
|
||||
|
||||
// rotateLogFileBySize rotates the log file by size.
|
||||
// It returns the result of the rotation and an error if any.
|
||||
//
|
||||
// The file is not being read, it just truncate the file to the new size.
|
||||
//
|
||||
// Invalid lines will not be detected and included in the result.
|
||||
func rotateLogFileBySize(file supportRotate, config *Retention) (result *RotateResult, err error) {
|
||||
filesize, err := file.Seek(0, io.SeekEnd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result = &RotateResult{
|
||||
OriginalSize: filesize,
|
||||
}
|
||||
|
||||
keepSize := int64(config.KeepSize)
|
||||
if keepSize >= filesize {
|
||||
result.NumBytesKeep = filesize
|
||||
return result, nil
|
||||
}
|
||||
result.NumBytesKeep = keepSize
|
||||
|
||||
err = file.Truncate(keepSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ParseLogTime parses the time from the log line.
|
||||
// It returns the time if the time is found and valid in the log line,
|
||||
// otherwise it returns zero time.
|
||||
func ParseLogTime(line []byte) (t time.Time) {
|
||||
if len(line) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if timeStr := ExtractTime(line); timeStr != nil {
|
||||
t, _ = time.Parse(LogTimeFormat, string(timeStr)) // ignore error
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var timeJSON = []byte(`"time":"`)
|
||||
|
||||
// ExtractTime extracts the time from the log line.
|
||||
// It returns the time if the time is found,
|
||||
// otherwise it returns nil.
|
||||
//
|
||||
// The returned time is not validated.
|
||||
func ExtractTime(line []byte) []byte {
|
||||
//TODO: optimize this
|
||||
switch line[0] {
|
||||
case '{': // JSON format
|
||||
if i := bytes.Index(line, timeJSON); i != -1 {
|
||||
var jsonStart = i + len(`"time":"`)
|
||||
var jsonEnd = i + len(`"time":"`) + len(LogTimeFormat)
|
||||
if len(line) < jsonEnd {
|
||||
return nil
|
||||
}
|
||||
return line[jsonStart:jsonEnd]
|
||||
}
|
||||
return nil // invalid JSON line
|
||||
default:
|
||||
// Common/Combined format
|
||||
// Format: <virtual host> <host ip> - - [02/Jan/2006:15:04:05 -0700] ...
|
||||
start := bytes.IndexByte(line, '[')
|
||||
if start == -1 {
|
||||
return nil
|
||||
}
|
||||
end := start + 1 + len(LogTimeFormat)
|
||||
if len(line) < end {
|
||||
return nil
|
||||
}
|
||||
return line[start+1 : end]
|
||||
}
|
||||
}
|
||||
289
internal/logging/accesslog/rotate_test.go
Normal file
289
internal/logging/accesslog/rotate_test.go
Normal file
@@ -0,0 +1,289 @@
|
||||
package accesslog_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/yusing/go-proxy/internal/logging/accesslog"
|
||||
"github.com/yusing/go-proxy/internal/task"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
"github.com/yusing/go-proxy/internal/utils/strutils"
|
||||
expect "github.com/yusing/go-proxy/internal/utils/testing"
|
||||
)
|
||||
|
||||
var (
	// testTime is a fixed reference instant so the rotation tests are
	// deterministic regardless of wall clock.
	testTime    = expect.Must(time.Parse(time.RFC3339, "2024-01-31T03:04:05Z"))
	testTimeStr = testTime.Format(LogTimeFormat)
)
|
||||
|
||||
// TestParseLogTime checks that ExtractTime/ParseLogTime recover the
// timestamp from both JSON and Common/Combined format lines, and that
// an unparsable timestamp yields the zero time.
func TestParseLogTime(t *testing.T) {
	t.Run("valid time", func(t *testing.T) {
		tests := []string{
			`{"foo":"bar","time":"%s","bar":"baz"}`,
			`example.com 192.168.1.1 - - [%s] "GET / HTTP/1.1" 200 1234`,
		}

		// substitute the reference timestamp into each template
		for i, test := range tests {
			tests[i] = fmt.Sprintf(test, testTimeStr)
		}

		for _, test := range tests {
			t.Run(test, func(t *testing.T) {
				extracted := ExtractTime([]byte(test))
				expect.Equal(t, string(extracted), testTimeStr)
				got := ParseLogTime([]byte(test))
				expect.True(t, got.Equal(testTime), "expected %s, got %s", testTime, got)
			})
		}
	})

	t.Run("invalid time", func(t *testing.T) {
		tests := []string{
			`{"foo":"bar","time":"invalid","bar":"baz"}`,
			`example.com 192.168.1.1 - - [invalid] "GET / HTTP/1.1" 200 1234`,
		}
		for _, test := range tests {
			t.Run(test, func(t *testing.T) {
				// present-but-unparsable timestamps must come back as zero time
				expect.True(t, ParseLogTime([]byte(test)).IsZero(), "expected zero time, got %s", ParseLogTime([]byte(test)))
			})
		}
	})
}
|
||||
|
||||
// TestRotateKeepLast exercises rotation with the "last N" and "N days"
// retention policies across every supported log format.
func TestRotateKeepLast(t *testing.T) {
	for _, format := range AvailableFormats {
		t.Run(string(format)+" keep last", func(t *testing.T) {
			file := NewMockFile()
			utils.MockTimeNow(testTime)
			logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &Config{
				Format: format,
			})
			expect.Nil(t, logger.Config().Retention)

			for range 10 {
				logger.Log(req, resp)
			}
			expect.NoError(t, logger.Flush())

			expect.Greater(t, file.Len(), int64(0))
			expect.Equal(t, file.NumLines(), 10)

			retention := strutils.MustParse[*Retention]("last 5")
			expect.Equal(t, retention.Days, 0)
			expect.Equal(t, retention.Last, 5)
			expect.Equal(t, retention.KeepSize, 0)
			logger.Config().Retention = retention

			// only the most recent 5 lines should survive
			result, err := logger.Rotate()
			expect.NoError(t, err)
			expect.Equal(t, file.NumLines(), int(retention.Last))
			expect.Equal(t, result.NumLinesKeep, int(retention.Last))
			expect.Equal(t, result.NumLinesInvalid, 0)
		})

		t.Run(string(format)+" keep days", func(t *testing.T) {
			file := NewMockFile()
			logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &Config{
				Format: format,
			})
			expect.Nil(t, logger.Config().Retention)
			nLines := 10
			// one line per day, oldest first
			for i := range nLines {
				utils.MockTimeNow(testTime.AddDate(0, 0, -nLines+i+1))
				logger.Log(req, resp)
			}
			logger.Flush()
			expect.Equal(t, file.NumLines(), nLines)

			retention := strutils.MustParse[*Retention]("3 days")
			expect.Equal(t, retention.Days, 3)
			expect.Equal(t, retention.Last, 0)
			expect.Equal(t, retention.KeepSize, 0)
			logger.Config().Retention = retention

			utils.MockTimeNow(testTime)
			result, err := logger.Rotate()
			expect.NoError(t, err)
			expect.Equal(t, file.NumLines(), int(retention.Days))
			expect.Equal(t, result.NumLinesKeep, int(retention.Days))
			expect.Equal(t, result.NumLinesInvalid, 0)

			// the surviving lines must be the newest ones, kept in
			// oldest-to-newest order
			rotated := file.Content()
			rotatedLines := bytes.Split(rotated, []byte("\n"))
			for i, line := range rotatedLines {
				if i >= int(retention.Days) { // content may end with a trailing newline
					break
				}
				timeBytes := ExtractTime(line)
				got, err := time.Parse(LogTimeFormat, string(timeBytes))
				expect.NoError(t, err)
				want := testTime.AddDate(0, 0, -int(retention.Days)+i+1)
				expect.True(t, got.Equal(want), "expected %s, got %s", want, got)
			}
		})
	}
}
|
||||
|
||||
// TestRotateKeepFileSize exercises the size-based retention policy:
// files under the limit are left untouched, larger files are truncated
// to exactly KeepSize bytes without reading their content.
func TestRotateKeepFileSize(t *testing.T) {
	for _, format := range AvailableFormats {
		t.Run(string(format)+" keep size no rotation", func(t *testing.T) {
			file := NewMockFile()
			logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &Config{
				Format: format,
			})
			expect.Nil(t, logger.Config().Retention)
			nLines := 10
			for i := range nLines {
				utils.MockTimeNow(testTime.AddDate(0, 0, -nLines+i+1))
				logger.Log(req, resp)
			}
			logger.Flush()
			expect.Equal(t, file.NumLines(), nLines)

			retention := strutils.MustParse[*Retention]("100 KB")
			expect.Equal(t, retention.KeepSize, 100*1024)
			expect.Equal(t, retention.Days, 0)
			expect.Equal(t, retention.Last, 0)
			logger.Config().Retention = retention

			utils.MockTimeNow(testTime)
			result, err := logger.Rotate()
			expect.NoError(t, err)

			// file should be untouched as 100KB > 10 lines * bytes per line
			expect.Equal(t, result.NumBytesKeep, file.Len())
			expect.Equal(t, result.NumBytesRead, 0, "should not read any bytes")
		})
	}

	t.Run("keep size with rotation", func(t *testing.T) {
		file := NewMockFile()
		logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &Config{
			Format: FormatJSON,
		})
		expect.Nil(t, logger.Config().Retention)
		nLines := 100
		for i := range nLines {
			utils.MockTimeNow(testTime.AddDate(0, 0, -nLines+i+1))
			logger.Log(req, resp)
		}
		logger.Flush()
		expect.Equal(t, file.NumLines(), nLines)

		retention := strutils.MustParse[*Retention]("10 KB")
		expect.Equal(t, retention.KeepSize, 10*1024)
		expect.Equal(t, retention.Days, 0)
		expect.Equal(t, retention.Last, 0)
		logger.Config().Retention = retention

		utils.MockTimeNow(testTime)
		// file is larger than 10 KB, so it must be truncated to the limit
		result, err := logger.Rotate()
		expect.NoError(t, err)
		expect.Equal(t, result.NumBytesKeep, int64(retention.KeepSize))
		expect.Equal(t, file.Len(), int64(retention.KeepSize))
		expect.Equal(t, result.NumBytesRead, 0, "should not read any bytes")
	})
}
|
||||
|
||||
// skipping invalid lines is not supported for keep file_size
//
// TestRotateSkipInvalidTime interleaves lines without a parsable
// timestamp between valid ones and verifies rotation drops them while
// still honoring the days-based retention count.
func TestRotateSkipInvalidTime(t *testing.T) {
	for _, format := range AvailableFormats {
		t.Run(string(format), func(t *testing.T) {
			file := NewMockFile()
			logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &Config{
				Format: format,
			})
			expect.Nil(t, logger.Config().Retention)
			nLines := 10
			for i := range nLines {
				utils.MockTimeNow(testTime.AddDate(0, 0, -nLines+i+1))
				logger.Log(req, resp)
				logger.Flush()

				// follow every valid line with one that has no parsable time
				n, err := file.Write([]byte("invalid time\n"))
				expect.NoError(t, err)
				expect.Equal(t, n, len("invalid time\n"))
			}
			expect.Equal(t, file.NumLines(), 2*nLines)

			retention := strutils.MustParse[*Retention]("3 days")
			expect.Equal(t, retention.Days, 3)
			expect.Equal(t, retention.Last, 0)
			logger.Config().Retention = retention

			result, err := logger.Rotate()
			expect.NoError(t, err)
			// should read one invalid line after every valid line
			expect.Equal(t, result.NumLinesKeep, int(retention.Days))
			expect.Equal(t, result.NumLinesInvalid, nLines-int(retention.Days)*2)
			expect.Equal(t, file.NumLines(), int(retention.Days))
		})
	}
}
|
||||
|
||||
// BenchmarkRotate measures rotation cost for each retention policy on a
// 100-line JSON log, restoring the file content before every iteration.
func BenchmarkRotate(b *testing.B) {
	tests := []*Retention{
		{Days: 30},
		{Last: 100},
		{KeepSize: 24 * 1024},
	}
	for _, retention := range tests {
		b.Run(fmt.Sprintf("retention_%s", retention), func(b *testing.B) {
			file := NewMockFile()
			logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &Config{
				Format:    FormatJSON,
				Retention: retention,
			})
			for i := range 100 {
				utils.MockTimeNow(testTime.AddDate(0, 0, -100+i+1))
				logger.Log(req, resp)
			}
			logger.Flush()
			content := file.Content()
			b.ResetTimer()
			for b.Loop() {
				// refill the file outside the timed region so each
				// iteration rotates identical input
				b.StopTimer()
				// NOTE(review): reassigning the local `file` does not swap the
				// logger's underlying io — Rotate may still operate on the
				// original mock; verify this refill actually takes effect.
				file = NewMockFile()
				_, _ = file.Write(content)
				b.StartTimer()
				_, _ = logger.Rotate()
			}
		})
	}
}
|
||||
|
||||
// BenchmarkRotateWithInvalidTime measures rotation cost when every
// tenth line lacks a parsable timestamp, over a 10000-line JSON log.
func BenchmarkRotateWithInvalidTime(b *testing.B) {
	tests := []*Retention{
		{Days: 30},
		{Last: 100},
		{KeepSize: 24 * 1024},
	}
	for _, retention := range tests {
		b.Run(fmt.Sprintf("retention_%s", retention), func(b *testing.B) {
			file := NewMockFile()
			logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &Config{
				Format:    FormatJSON,
				Retention: retention,
			})
			for i := range 10000 {
				utils.MockTimeNow(testTime.AddDate(0, 0, -10000+i+1))
				logger.Log(req, resp)
				// inject an unparsable line every 10 valid ones
				if i%10 == 0 {
					_, _ = file.Write([]byte("invalid time\n"))
				}
			}
			logger.Flush()
			content := file.Content()
			b.ResetTimer()
			for b.Loop() {
				b.StopTimer()
				// NOTE(review): reassigning the local `file` does not swap the
				// logger's underlying io — Rotate may still operate on the
				// original mock; verify this refill actually takes effect.
				file = NewMockFile()
				_, _ = file.Write(content)
				b.StartTimer()
				_, _ = logger.Rotate()
			}
		})
	}
}
|
||||
52
internal/logging/accesslog/status_code_range.go
Normal file
52
internal/logging/accesslog/status_code_range.go
Normal file
@@ -0,0 +1,52 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/utils/strutils"
|
||||
)
|
||||
|
||||
type StatusCodeRange struct {
|
||||
Start int
|
||||
End int
|
||||
}
|
||||
|
||||
var ErrInvalidStatusCodeRange = gperr.New("invalid status code range")
|
||||
|
||||
func (r *StatusCodeRange) Includes(code int) bool {
|
||||
return r.Start <= code && code <= r.End
|
||||
}
|
||||
|
||||
// Parse implements strutils.Parser.
|
||||
func (r *StatusCodeRange) Parse(v string) error {
|
||||
split := strutils.SplitRune(v, '-')
|
||||
switch len(split) {
|
||||
case 1:
|
||||
start, err := strconv.Atoi(split[0])
|
||||
if err != nil {
|
||||
return gperr.Wrap(err)
|
||||
}
|
||||
r.Start = start
|
||||
r.End = start
|
||||
return nil
|
||||
case 2:
|
||||
start, errStart := strconv.Atoi(split[0])
|
||||
end, errEnd := strconv.Atoi(split[1])
|
||||
if err := gperr.Join(errStart, errEnd); err != nil {
|
||||
return err
|
||||
}
|
||||
r.Start = start
|
||||
r.End = end
|
||||
return nil
|
||||
default:
|
||||
return ErrInvalidStatusCodeRange.Subject(v)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *StatusCodeRange) String() string {
|
||||
if r.Start == r.End {
|
||||
return strconv.Itoa(r.Start)
|
||||
}
|
||||
return strconv.Itoa(r.Start) + "-" + strconv.Itoa(r.End)
|
||||
}
|
||||
18
internal/logging/accesslog/stdout_logger.go
Normal file
18
internal/logging/accesslog/stdout_logger.go
Normal file
@@ -0,0 +1,18 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// StdoutLogger adapts a writer (normally os.Stdout) to the access log
// sink interface; its Lock/Unlock are no-ops.
type StdoutLogger struct {
	io.Writer
}

// stdoutIO is the shared stdout sink used when Config.Stdout is set.
var stdoutIO = &StdoutLogger{os.Stdout}

// Lock is a no-op.
func (l *StdoutLogger) Lock() {}

// Unlock is a no-op.
func (l *StdoutLogger) Unlock() {}

// Name identifies this sink in logs and error messages.
func (l *StdoutLogger) Name() string { return "stdout" }
|
||||
11
internal/logging/accesslog/units.go
Normal file
11
internal/logging/accesslog/units.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package accesslog
|
||||
|
||||
// Byte (base-1024) and bit (base-1000) size units used for buffer
// sizing and retention limits.
const (
	kilobyte = 1024
	megabyte = 1024 * kilobyte
	gigabyte = 1024 * megabyte

	kilobits = 1000
	megabits = 1000 * kilobits
	gigabits = 1000 * megabits
)
|
||||
Reference in New Issue
Block a user