Mirror of https://github.com/yusing/godoxy.git, synced 2026-01-11 21:10:30 +01:00.
Compare commits
50 Commits
Commits included (SHA1):

56850a9580, 5f780f4902, ccb4639f43, ac1470d81d, efaabfa63a,
9043cf25c5, 98e90d7a0b, 82c829de18, 2fe4fef779, 91302ceed7,
7fa7b55b18, 69ee8495d8, 28d9a72908, 770c698332, cd4c843025,
f0cf89060b, f79a15bac6, 2b4a70a550, f06741428c, 16e6e72454,
100d2c392f, 829eb08e37, 53d54a09b0, 62c551c7fe, 80e59bb481,
7a5afc3612, 2c0349c11c, 8e3c2cc8d4, d35afdb3c9, ae093ebf40,
aa8af4185b, 0029cf69d6, 33e400a17e, 1d22bcfed9, 978d82060e,
7aa1215491, 0b69589586, bca3cd84d1, ce4bf2f646, c49016f22c,
8da63daf02, c5fd21552e, 27409abc24, 21c9e46274, 22a12d3116,
89d93dd878, 66853dfc52, c72f66d64b, 59bc342a40, e11579df10
.env.example (15 changed lines)

@@ -4,6 +4,10 @@ TAG=latest
# set timezone to get correct log timestamp
TZ=ETC/UTC

# container uid and gid (must match the owner of mounted directories)
GODOXY_UID=1000
GODOXY_GID=1000

# API JWT Configuration (common)
# generate secret with `openssl rand -base64 32`
GODOXY_API_JWT_SECRET=

@@ -44,9 +48,19 @@ GODOXY_API_PASSWORD=password
GODOXY_HTTP_ADDR=:80
GODOXY_HTTPS_ADDR=:443

# Enable HTTP3
GODOXY_HTTP3_ENABLED=true

# API listening address
GODOXY_API_ADDR=127.0.0.1:8888

# Metrics
GODOXY_METRICS_DISABLE_CPU=false
GODOXY_METRICS_DISABLE_MEMORY=false
GODOXY_METRICS_DISABLE_DISK=false
GODOXY_METRICS_DISABLE_NETWORK=false
GODOXY_METRICS_DISABLE_SENSORS=false

# Frontend listening port
GODOXY_FRONTEND_PORT=3000

@@ -56,6 +70,7 @@ GODOXY_FRONTEND_ALIASES=godoxy
# Docker socket
# /var/run/podman/podman.sock for podman
DOCKER_SOCKET=/var/run/docker.sock
SOCKET_PROXY_LISTEN_ADDR=127.0.0.1:2375

# Debug mode
GODOXY_DEBUG=false
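The GODOXY_API_JWT_SECRET above is expected to be a base64 value generated with `openssl rand -base64 32`. For reference, the same value can be produced without openssl; a standalone Go sketch (not part of the repository):

```go
// Standalone sketch: generate a value for GODOXY_API_JWT_SECRET,
// equivalent to `openssl rand -base64 32`. Not part of the repository.
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	buf := make([]byte, 32)
	if _, err := rand.Read(buf); err != nil {
		panic(err) // a crypto/rand failure is not recoverable here
	}
	fmt.Println(base64.StdEncoding.EncodeToString(buf))
}
```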
.vscode/settings.example.json (vendored, 4 changed lines)

@@ -1,10 +1,10 @@
{
  "yaml.schemas": {
    "https://github.com/yusing/go-proxy/raw/main/schemas/config.schema.json": [
    "https://github.com/yusing/godoxy-webui/raw/refs/heads/main/src/types/godoxy/config.schema.json": [
      "config.example.yml",
      "config.yml"
    ],
    "https://github.com/yusing/go-proxy/raw/main/schemas/routes.schema.json": [
    "https://github.com/yusing/godoxy-webui/raw/refs/heads/main/src/types/godoxy/routes.schema.json": [
      "providers.example.yml"
    ]
  }
Dockerfile (26 changed lines)

@@ -6,6 +6,16 @@ HEALTHCHECK NONE
# trunk-ignore(hadolint/DL3018)
RUN apk add --no-cache tzdata make libcap-setcap

ENV GOPATH=/root/go

WORKDIR /src

COPY go.mod go.sum ./
COPY agent ./agent
COPY internal/dnsproviders ./internal/dnsproviders

RUN go mod download -x

# Stage 2: builder
FROM deps AS builder

@@ -17,12 +27,6 @@ COPY internal ./internal
COPY pkg ./pkg
COPY agent ./agent

# Only copy go.mod and go.sum initially for better caching
COPY go.mod go.sum /src/

ENV GOPATH=/root/go
RUN go mod download -x

ARG VERSION
ENV VERSION=${VERSION}

@@ -31,9 +35,8 @@ ENV MAKE_ARGS=${MAKE_ARGS}

ENV GOCACHE=/root/.cache/go-build
ENV GOPATH=/root/go
RUN make ${MAKE_ARGS} build link-binary && \
    mv bin /app/ && \
    mkdir -p /app/error_pages /app/certs

RUN make ${MAKE_ARGS} docker=1 build

# Stage 3: Final image
FROM scratch

@@ -45,10 +48,7 @@ LABEL proxy.exclude=1
COPY --from=builder /usr/share/zoneinfo /usr/share/zoneinfo

# copy binary
COPY --from=builder /app /app

# copy example config
COPY config.example.yml /app/config/config.yml
COPY --from=builder /app/run /app/run

# copy certs
COPY --from=builder /etc/ssl/certs /etc/ssl/certs
LICENSE (26 changed lines)

@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2024 [fullname]
Copyright (c) 2024 - present Yusing

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

@@ -19,3 +19,27 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

---

internal/net/gphttp/reverseproxy/reverse_proxy_mod.go is copied from net/http/httputil/reverseproxy.go with modifications to adapt to this project.

Copyright 2011 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.

---

internal/utils/io.go has a modified version of io.Copy with context and HTTP flusher handling.

Copyright 2009 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.

---

internal/utils/strutils/split_join.go is copied from strings.Split and strings.Join with modifications to adapt to this project.

Copyright 2009 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
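The io.Copy note above refers to copying with cancellation and streaming flushes. A minimal illustrative sketch of that idea (assumptions only; the actual code in internal/utils/io.go differs):

```go
// Illustrative sketch only: a context-aware copy that flushes after each write,
// the concept the LICENSE note attributes to internal/utils/io.go.
package sketch

import (
	"context"
	"io"
	"net/http"
)

func copyWithFlush(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
	flusher, _ := dst.(http.Flusher) // present when dst is an http.ResponseWriter
	buf := make([]byte, 32*1024)
	var written int64
	for {
		if err := ctx.Err(); err != nil { // stop as soon as the request is cancelled
			return written, err
		}
		n, rerr := src.Read(buf)
		if n > 0 {
			wn, werr := dst.Write(buf[:n])
			written += int64(wn)
			if werr != nil {
				return written, werr
			}
			if flusher != nil {
				flusher.Flush() // push streamed bytes to the client immediately
			}
		}
		if rerr == io.EOF {
			return written, nil
		}
		if rerr != nil {
			return written, rerr
		}
	}
}
```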
Makefile (23 changed lines)

@@ -1,3 +1,4 @@
shell := /bin/sh
export VERSION ?= $(shell git describe --tags --abbrev=0)
export BUILD_DATE ?= $(shell date -u +'%Y%m%d-%H%M')
export GOOS = linux

@@ -59,20 +60,29 @@ else
SETCAP_CMD = sudo setcap
endif

# CAP_NET_BIND_SERVICE: permission for binding to :80 and :443
POST_BUILD = $(SETCAP_CMD) CAP_NET_BIND_SERVICE=+ep ${BIN_PATH};
ifeq ($(docker), 1)
POST_BUILD += mkdir -p /app && mv ${BIN_PATH} /app/run;
endif

.PHONY: debug

test:
	GODOXY_TEST=1 go test ./internal/...

docker-build-test:
	docker build -t godoxy .
	docker build --build-arg=MAKE_ARGS=agent=1 -t godoxy-agent .

get:
	for dir in ${PWD} ${PWD}/agent; do cd $$dir && go get -u ./... && go mod tidy; done

build:
	mkdir -p bin
	mkdir -p $(shell dirname ${BIN_PATH})
	cd ${PWD} && go build ${BUILD_FLAGS} -o ${BIN_PATH} ${CMD_PATH}

	# CAP_NET_BIND_SERVICE: permission for binding to :80 and :443
	$(SETCAP_CMD) CAP_NET_BIND_SERVICE=+ep ${BIN_PATH}
	${POST_BUILD}

run:
	[ -f .env ] && godotenv -f .env go run ${BUILD_FLAGS} ${CMD_PATH}

@@ -82,7 +92,7 @@ debug:
	sh -c 'HTTP_ADDR=:81 HTTPS_ADDR=:8443 API_ADDR=:8899 DEBUG=1 bin/godoxy-test'

mtrace:
	bin/godoxy debug-ls-mtrace > mtrace.json
	${BIN_PATH} debug-ls-mtrace > mtrace.json

rapid-crash:
	docker run --restart=always --name test_crash -p 80 debian:bookworm-slim /bin/cat &&\

@@ -99,8 +109,5 @@ ci-test:
cloc:
	cloc --not-match-f '_test.go$$' cmd internal pkg

link-binary:
	ln -s /app/${NAME} bin/run

push-github:
	git push origin $(shell git rev-parse --abbrev-ref HEAD)
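The setcap step exists because unprivileged processes cannot bind ports below 1024. A standalone Go check (not part of the repository) that shows the failure the capability avoids:

```go
// Standalone check: binding :80 without CAP_NET_BIND_SERVICE or root fails
// with "permission denied", which is what the Makefile's setcap step works around.
package main

import (
	"fmt"
	"net"
)

func main() {
	ln, err := net.Listen("tcp", ":80")
	if err != nil {
		fmt.Println("bind :80 failed:", err)
		return
	}
	defer ln.Close()
	fmt.Println("bound :80 successfully (capability or root present)")
}
```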
README.md (file header not captured)

@@ -10,9 +10,11 @@
A lightweight, simple, and [performant](https://github.com/yusing/godoxy/wiki/Benchmarks) reverse proxy with WebUI.

For full documentation, check out **[Wiki](https://github.com/yusing/godoxy/wiki)**
<h5>
  <a href="https://docs.godoxy.dev">Website</a> | <a href="https://docs.godoxy.dev/Home.html">Wiki</a> | <a href="https://discord.gg/umReR62nRd">Discord</a>
</h5>

**EN** | <a href="README_CHT.md">中文</a>
<h5>EN | <a href="README_CHT.md">中文</a></h5>

<img src="screenshots/webui.jpg" style="max-width: 650">
README_CHT.md (file header not captured)

@@ -10,9 +10,11 @@
輕量、易用、 [高效能](https://github.com/yusing/godoxy/wiki/Benchmarks),且帶有主頁和配置面板的反向代理

完整文檔請查閱 **[Wiki](https://github.com/yusing/godoxy/wiki)**(暫未有中文翻譯)
<h5>
  <a href="https://docs.godoxy.dev">網站</a> | <a href="https://docs.godoxy.dev/Home.html">文檔</a> | <a href="https://discord.gg/umReR62nRd">Discord</a>
</h5>

<a href="README.md">EN</a> | **中文**
<h5><a href="README.md">EN</a> | 中文</h5>

<img src="https://github.com/user-attachments/assets/4bb371f4-6e4c-425c-89b2-b9e962bdd46f" style="max-width: 650">
agent/go.mod (29 changed lines)

@@ -9,21 +9,23 @@ require (
	github.com/docker/docker v28.1.1+incompatible
	github.com/rs/zerolog v1.34.0
	github.com/stretchr/testify v1.10.0
	github.com/yusing/go-proxy v0.11.1
	github.com/yusing/go-proxy v0.11.9
)

replace github.com/docker/docker => github.com/godoxy-app/docker v0.0.0-20250418000134-7af8fd7b079e
replace github.com/docker/docker => github.com/godoxy-app/docker v0.0.0-20250425105916-b2ad800de7a1

replace github.com/shirou/gopsutil/v4 => github.com/godoxy-app/gopsutil/v4 v4.0.0-20250502022742-408a348f1b97

require (
	github.com/Microsoft/go-winio v0.6.2 // indirect
	github.com/PuerkitoBio/goquery v1.10.3 // indirect
	github.com/andybalholm/cascadia v1.3.3 // indirect
	github.com/bytedance/sonic v1.13.2 // indirect
	github.com/bytedance/sonic/loader v0.2.4 // indirect
	github.com/buger/goterm v1.0.4 // indirect
	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
	github.com/cloudwego/base64x v0.1.5 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/diskfs/go-diskfs v1.6.0 // indirect
	github.com/distribution/reference v0.6.0 // indirect
	github.com/djherbis/times v1.6.0 // indirect
	github.com/docker/cli v28.1.1+incompatible // indirect
	github.com/docker/go-connections v0.5.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect

@@ -39,12 +41,15 @@ require (
	github.com/gobwas/glob v0.2.3 // indirect
	github.com/goccy/go-yaml v1.17.1 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4 // indirect
	github.com/gotify/server/v2 v2.6.1 // indirect
	github.com/klauspost/cpuid/v2 v2.2.10 // indirect
	github.com/google/pprof v0.0.0-20250501235452-c0086092b71a // indirect
	github.com/gorilla/websocket v1.5.3 // indirect
	github.com/gotify/server/v2 v2.6.3 // indirect
	github.com/jinzhu/copier v0.4.0 // indirect
	github.com/leodido/go-urn v1.4.0 // indirect
	github.com/lithammer/fuzzysearch v1.1.8 // indirect
	github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect
	github.com/luthermonson/go-proxmox v0.2.2 // indirect
	github.com/magefile/mage v1.15.0 // indirect
	github.com/mattn/go-colorable v0.1.14 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/miekg/dns v1.1.65 // indirect

@@ -59,23 +64,21 @@ require (
	github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
	github.com/quic-go/qpack v0.5.1 // indirect
	github.com/quic-go/quic-go v0.51.0 // indirect
	github.com/samber/lo v1.49.1 // indirect
	github.com/samber/lo v1.50.0 // indirect
	github.com/samber/slog-common v0.18.1 // indirect
	github.com/samber/slog-zerolog/v2 v2.7.3 // indirect
	github.com/shirou/gopsutil/v4 v4.25.3 // indirect
	github.com/shirou/gopsutil/v4 v4.25.4 // indirect
	github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
	github.com/spf13/afero v1.14.0 // indirect
	github.com/tklauser/go-sysconf v0.3.15 // indirect
	github.com/tklauser/numcpus v0.10.0 // indirect
	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
	github.com/vincent-petithory/dataurl v1.0.0 // indirect
	github.com/yusufpapurcu/wmi v1.2.4 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 // indirect
	go.opentelemetry.io/otel/sdk v1.35.0 // indirect
	go.uber.org/atomic v1.11.0 // indirect
	go.uber.org/automaxprocs v1.6.0 // indirect
	go.uber.org/mock v0.5.1 // indirect
	golang.org/x/arch v0.16.0 // indirect
	go.uber.org/mock v0.5.2 // indirect
	golang.org/x/crypto v0.37.0 // indirect
	golang.org/x/mod v0.24.0 // indirect
	golang.org/x/net v0.39.0 // indirect
agent/go.sum (72 changed lines)

@@ -8,16 +8,8 @@ github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kk
github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
github.com/buger/goterm v1.0.4 h1:Z9YvGmOih81P0FbVtEYTFF6YsSgxSUKEhf/f9bTMXbY=
github.com/buger/goterm v1.0.4/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE=
github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ=
github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY=
github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=

@@ -43,6 +35,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I=
github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab h1:h1UgjJdAAhj+uPL68n7XASS6bU+07ZX1WJvVS2eyoeY=
github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab/go.mod h1:GLo/8fDswSAniFG+BFIaiSPcK610jyzgEhWYPQwuQdw=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=

@@ -68,13 +62,17 @@ github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc
github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/goccy/go-yaml v1.17.1 h1:LI34wktB2xEE3ONG/2Ar54+/HJVBriAGJ55PHls4YuY=
github.com/goccy/go-yaml v1.17.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godoxy-app/docker v0.0.0-20250418000134-7af8fd7b079e h1:LEbMtJ6loEubxetD+Aw8+1x0rShor5iMoy9WuFQ8hN8=
github.com/godoxy-app/docker v0.0.0-20250418000134-7af8fd7b079e/go.mod h1:3tMTnTkH7IN5smn7PX83XdmRnNj4Nw2/Pt8GgReqnKM=
github.com/godoxy-app/docker v0.0.0-20250425105916-b2ad800de7a1 h1:fsSqE28vU0PRkq9FdekirRoDBeYJ+UaJ9dTErdXflWg=
github.com/godoxy-app/docker v0.0.0-20250425105916-b2ad800de7a1/go.mod h1:av6ggKWQz6SEkFyShjDEgVqiIB0RHvEQNIkPeqgJEeE=
github.com/godoxy-app/gopsutil/v4 v4.0.0-20250502022742-408a348f1b97 h1:i52gBYamrKs4DHT1+SiobW2im5UgTMVXK1KIL1djSeA=
github.com/godoxy-app/gopsutil/v4 v4.0.0-20250502022742-408a348f1b97/go.mod h1:XvbfPmmrdpLrsKwj3irYkxt5ygyMcDsTQTJ7cnZ9RNQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=

@@ -82,24 +80,26 @@ github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVI
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4 h1:gD0vax+4I+mAj+jEChEf25Ia07Jq7kYOFO5PPhAxFl4=
github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a h1:rDA3FfmxwXR+BVKKdz55WwMJ1pD2hJQNW31d+l3mPk4=
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gotify/server/v2 v2.6.1 h1:Kf7v5fzBxzELzZa/jonWfwJMkqYqh1LBzBpCmt5QIAI=
github.com/gotify/server/v2 v2.6.1/go.mod h1:Dk8HLyTVDqmXM8YEg6tjROBen6mxyHZFRggJFHTwZLc=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
github.com/gotify/server/v2 v2.6.3 h1:2sLDRsQ/No1+hcFwFDvjNtwKepfCSIR8L3BkXl/Vz1I=
github.com/gotify/server/v2 v2.6.3/go.mod h1:IyeQ/iL3vetcuqUAzkCMVObIMGGJx4zb13/mVatIwE8=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/h2non/gock v1.2.0 h1:K6ol8rfrRkUOefooBC8elXoaNGYkpp7y2qcxGG6BzUE=
github.com/h2non/gock v1.2.0/go.mod h1:tNhoxHYW2W42cYkYb1WqzdbYIieALC99kpYr7rH/BQk=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=

@@ -143,8 +143,12 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/oschwald/maxminddb-golang v1.13.1 h1:G3wwjdN9JmIK2o/ermkHM+98oX5fS+k5MbwsmL4MRQE=
github.com/oschwald/maxminddb-golang v1.13.1/go.mod h1:K4pgV9N/GcK694KSTmVSDTODk4IsCNThNdTmnaBZ/F8=
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=

@@ -161,33 +165,26 @@ github.com/quic-go/quic-go v0.51.0/go.mod h1:MFlGGpcpJqRAfmYi6NC2cptDPSxRWTOGNuP
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
github.com/samber/lo v1.49.1 h1:4BIFyVfuQSEpluc7Fua+j1NolZHiEHEpaSEKdsH0tew=
github.com/samber/lo v1.49.1/go.mod h1:dO6KHFzUKXgP8LDhU0oI8d2hekjXnGOu0DB8Jecxd6o=
github.com/samber/lo v1.50.0 h1:XrG0xOeHs+4FQ8gJR97zDz5uOFMW7OwFWiFVzqopKgY=
github.com/samber/lo v1.50.0/go.mod h1:RjZyNk6WSnUFRKK6EyOhsRJMqft3G+pg7dCWHQCWvsc=
github.com/samber/slog-common v0.18.1 h1:c0EipD/nVY9HG5shgm/XAs67mgpWDMF+MmtptdJNCkQ=
github.com/samber/slog-common v0.18.1/go.mod h1:QNZiNGKakvrfbJ2YglQXLCZauzkI9xZBjOhWFKS3IKk=
github.com/samber/slog-zerolog/v2 v2.7.3 h1:/MkPDl/tJhijN2GvB1MWwBn2FU8RiL3rQ8gpXkQm2EY=
github.com/samber/slog-zerolog/v2 v2.7.3/go.mod h1:oWU7WHof4Xp8VguiNO02r1a4VzkgoOyOZhY5CuRke60=
github.com/shirou/gopsutil/v4 v4.25.3 h1:SeA68lsu8gLggyMbmCn8cmp97V1TI9ld9sVzAUcKcKE=
github.com/shirou/gopsutil/v4 v4.25.3/go.mod h1:xbuxyoZj+UsgnZrENu3lQivsngRR5BdjbJwf2fv4szA=
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0=
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=
github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI=
github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=

@@ -215,10 +212,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/mock v0.5.1 h1:ASgazW/qBmR+A32MYFDB6E2POoTgOwT509VP0CT/fjs=
go.uber.org/mock v0.5.1/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
golang.org/x/arch v0.16.0 h1:foMtLTdyOmIniqWCHjY6+JxuC54XP1fDwx4N0ASyW+U=
golang.org/x/arch v0.16.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=

@@ -271,8 +266,10 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

@@ -325,8 +322,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 h1:Q3nlH8iSQSRUwOskjbcSMcF2jiYMNiQYZ0c2KEJLKKU=
google.golang.org/genproto/googleapis/api v0.0.0-20250422160041-2d3770c4ea7f h1:tjZsroqekhC63+WMqzmWyW5Twj/ZfR5HAlpd5YQ1Vs0=
google.golang.org/genproto/googleapis/api v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:Cd8IzgPo5Akum2c9R6FsXNaZbH3Jpa2gpHlW89FqlyQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197 h1:29cjnHVylHwTzH66WfFZqgSQgnxzvWE+jvBwpZCLRxY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34 h1:h6p3mQqrmT1XkHVTfzLdNz1u7IhINeZkz67/xTbOuWs=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM=
google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=

@@ -339,4 +336,3 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
Go source file (package main, likely cmd/main.go; file header not captured)

@@ -6,13 +6,13 @@ import (
	"os"
	"sync"

	"github.com/yusing/go-proxy/internal"
	"github.com/yusing/go-proxy/internal/api/v1/query"
	"github.com/yusing/go-proxy/internal/auth"
	"github.com/yusing/go-proxy/internal/common"
	"github.com/yusing/go-proxy/internal/config"
	"github.com/yusing/go-proxy/internal/dnsproviders"
	"github.com/yusing/go-proxy/internal/gperr"
	"github.com/yusing/go-proxy/internal/homepage"
	"github.com/yusing/go-proxy/internal/logging"
	"github.com/yusing/go-proxy/internal/logging/memlogger"
	"github.com/yusing/go-proxy/internal/metrics/systeminfo"

@@ -50,7 +50,7 @@ func main() {
		rawLogger.Println("ok")
		return
	case common.CommandListIcons:
		icons, err := internal.ListAvailableIcons()
		icons, err := homepage.ListAvailableIcons()
		if err != nil {
			rawLogger.Fatal(err)
		}

@@ -79,7 +79,7 @@ func main() {
	logging.Info().Msgf("GoDoxy version %s", pkg.GetVersion())
	logging.Trace().Msg("trace enabled")
	parallel(
		internal.InitIconListCache,
		homepage.InitIconListCache,
		systeminfo.Poller.Start,
	)
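The call site above uses a parallel helper whose body is not shown in this diff. One plausible implementation consistent with that usage (an assumption, not the repository's code):

```go
// Assumption: the parallel helper called in main is not shown in these hunks.
// A minimal implementation consistent with the call site above.
package sketch

import "sync"

func parallel(fns ...func()) {
	var wg sync.WaitGroup
	for _, fn := range fns {
		wg.Add(1)
		go func(f func()) {
			defer wg.Done()
			f() // run each init function concurrently
		}(fn)
	}
	wg.Wait() // block until every function has returned
}
```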
Docker Compose file (likely compose.example.yml; file header not captured)

@@ -1,21 +1,48 @@
---
services:
  socket-proxy:
    container_name: socket-proxy
    image: lscr.io/linuxserver/socket-proxy:latest
    environment:
      - ALLOW_START=1
      - ALLOW_STOP=1
      - ALLOW_RESTARTS=1
      - CONTAINERS=1
      - EVENTS=1
      - INFO=1
      - PING=1
      - POST=1
      - VERSION=1
    volumes:
      - ${DOCKER_SOCKET:-/var/run/docker.sock}:/var/run/docker.sock
    restart: unless-stopped
    tmpfs:
      - /run
    ports:
      - ${SOCKET_PROXY_LISTEN_ADDR:-127.0.0.1:2375}:2375
    labels:
      proxy.exclude: true
  frontend:
    image: ghcr.io/yusing/godoxy-frontend:${TAG:-latest}
    container_name: godoxy-frontend
    restart: unless-stopped
    network_mode: host # do not change this
    env_file: .env
    user: ${GODOXY_UID:-1000}:${GODOXY_GID:-1000}
    read_only: true
    security_opt:
      - no-new-privileges:true
    cap_drop:
      - all
    depends_on:
      - app
    environment:
      HOSTNAME: 127.0.0.1
      PORT: ${GODOXY_FRONTEND_PORT:-3000}

    # modify below to fit your needs
    labels:
      proxy.aliases: ${GODOXY_FRONTEND_ALIASES:-godoxy}
      proxy.godoxy.port: ${GODOXY_FRONTEND_PORT:-3000}
      # proxy.godoxy.middlewares.cidr_whitelist: |
      proxy.#1.port: ${GODOXY_FRONTEND_PORT:-3000}
      # proxy.#1.middlewares.cidr_whitelist: |
      # status: 403
      # message: IP not allowed
      # allow:

@@ -29,11 +56,22 @@ services:
    restart: always
    network_mode: host # do not change this
    env_file: .env
    user: ${GODOXY_UID:-1000}:${GODOXY_GID:-1000}
    depends_on:
      socket-proxy:
        condition: service_started
    security_opt:
      - no-new-privileges:true
    cap_drop:
      - all
    cap_add:
      - NET_BIND_SERVICE
    environment:
      - DOCKER_HOST=tcp://${SOCKET_PROXY_LISTEN_ADDR:-127.0.0.1:2375}
    volumes:
      - ${DOCKER_SOCKET:-/var/run/docker.sock}:/var/run/docker.sock
      - ./config:/app/config
      - ./logs:/app/logs
      - ./error_pages:/app/error_pages
      - ./error_pages:/app/error_pages:ro
      - ./data:/app/data

    # To use autocert, certs will be stored in "./certs".
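The app service now reaches Docker through the socket proxy via DOCKER_HOST instead of mounting the socket with full privileges. A standalone Go snippet (not from the repository) that verifies the proxy answers, using the Docker SDK's FromEnv option:

```go
// Standalone connectivity check: run with DOCKER_HOST=tcp://127.0.0.1:2375
// exported, as the compose file sets for the app service. Ping requires
// PING=1 on the socket proxy, which the compose file enables.
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("docker API version:", ping.APIVersion)
}
```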
go.mod (34 changed lines)

@@ -15,11 +15,11 @@ require (
	github.com/go-acme/lego/v4 v4.23.1 // acme client
	github.com/go-playground/validator/v10 v10.26.0 // validator
	github.com/gobwas/glob v0.2.3 // glob matcher for route rules
	github.com/gotify/server/v2 v2.6.1 // reference the Message struct for json response
	github.com/gotify/server/v2 v2.6.3 // reference the Message struct for json response
	github.com/lithammer/fuzzysearch v1.1.8 // fuzzy search for searching icons and filtering metrics
	github.com/puzpuzpuz/xsync/v3 v3.5.1 // lock free map for concurrent operations
	github.com/rs/zerolog v1.34.0 // logging
	github.com/shirou/gopsutil/v4 v4.25.3 // system info metrics
	github.com/shirou/gopsutil/v4 v4.25.4 // system info metrics
	github.com/vincent-petithory/dataurl v1.0.0 // data url for fav icon
	golang.org/x/crypto v0.37.0 // encrypting password with bcrypt
	golang.org/x/net v0.39.0 // HTTP header utilities

@@ -41,19 +41,21 @@ require (
	github.com/samber/slog-zerolog/v2 v2.7.3
	github.com/spf13/afero v1.14.0
	github.com/stretchr/testify v1.10.0
	github.com/yusing/go-proxy/agent v0.0.0-00010101000000-000000000000
	github.com/yusing/go-proxy/internal/dnsproviders v0.0.0-00010101000000-000000000000
	github.com/yusing/go-proxy/agent v0.0.0-20250501215534-7fa7b55b1889
	github.com/yusing/go-proxy/internal/dnsproviders v0.0.0-20250501215534-7fa7b55b1889
	go.uber.org/atomic v1.11.0
)

require (
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect
	go.opentelemetry.io/proto/otlp v1.5.0 // indirect
)

replace github.com/docker/docker => github.com/godoxy-app/docker v0.0.0-20250425105916-b2ad800de7a1

replace github.com/shirou/gopsutil/v4 => github.com/godoxy-app/gopsutil/v4 v4.0.0-20250502022742-408a348f1b97

require (
	cloud.google.com/go/auth v0.16.1 // indirect
	cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect

@@ -86,12 +88,12 @@ require (
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect
	github.com/aws/smithy-go v1.22.3 // indirect
	github.com/baidubce/bce-sdk-go v0.9.224 // indirect
	github.com/baidubce/bce-sdk-go v0.9.225 // indirect
	github.com/benbjohnson/clock v1.3.5 // indirect
	github.com/boombuler/barcode v1.0.2 // indirect
	github.com/buger/goterm v1.0.4 // indirect
	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
	github.com/civo/civogo v0.3.99 // indirect
	github.com/civo/civogo v0.4.1 // indirect
	github.com/cloudflare/cloudflare-go v0.115.0 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/diskfs/go-diskfs v1.6.0 // indirect

@@ -101,7 +103,7 @@ require (
	github.com/docker/go-connections v0.5.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/ebitengine/purego v0.8.2 // indirect
	github.com/exoscale/egoscale/v3 v3.1.15 // indirect
	github.com/exoscale/egoscale/v3 v3.1.16 // indirect
	github.com/fatih/structs v1.1.0 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fxamacker/cbor/v2 v2.8.0 // indirect

@@ -120,7 +122,7 @@ require (
	github.com/gofrs/flock v0.12.1 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/google/go-querystring v1.1.0 // indirect
	github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4 // indirect
	github.com/google/pprof v0.0.0-20250501235452-c0086092b71a // indirect
	github.com/google/s2a-go v0.1.9 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect

@@ -131,7 +133,7 @@ require (
	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
	github.com/hashicorp/go-uuid v1.0.3 // indirect
	github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.146 // indirect
	github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.147 // indirect
	github.com/iij/doapi v0.0.0-20190504054126-0bbf12d6d7df // indirect
	github.com/infobloxopen/infoblox-go-client/v2 v2.10.0 // indirect
	github.com/jinzhu/copier v0.4.0 // indirect

@@ -173,7 +175,7 @@ require (
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/image-spec v1.1.1 // indirect
	github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect
	github.com/oracle/oci-go-sdk/v65 v65.89.2 // indirect
	github.com/oracle/oci-go-sdk/v65 v65.89.3 // indirect
	github.com/ovh/go-ovh v1.7.0 // indirect
	github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
	github.com/pelletier/go-toml/v2 v2.2.4 // indirect

@@ -191,7 +193,7 @@ require (
	github.com/sacloud/iaas-api-go v1.14.0 // indirect
	github.com/sacloud/packages-go v0.0.11 // indirect
	github.com/sagikazarmark/locafero v0.9.0 // indirect
	github.com/samber/lo v1.49.1 // indirect
	github.com/samber/lo v1.50.0 // indirect
	github.com/samber/slog-common v0.18.1 // indirect
	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33 // indirect
	github.com/selectel/domains-go v1.1.0 // indirect

@@ -207,7 +209,7 @@ require (
	github.com/spf13/pflag v1.0.6 // indirect
	github.com/spf13/viper v1.20.1 // indirect
	github.com/subosito/gotenv v1.6.0 // indirect
	github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1151 // indirect
	github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1158 // indirect
	github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dnspod v1.0.1136 // indirect
	github.com/tjfoc/gmsm v1.4.1 // indirect
	github.com/tklauser/go-sysconf v0.3.15 // indirect

@@ -227,16 +229,16 @@ require (
	go.opentelemetry.io/otel/metric v1.35.0 // indirect
	go.opentelemetry.io/otel/trace v1.35.0 // indirect
	go.uber.org/automaxprocs v1.6.0 // indirect
	go.uber.org/mock v0.5.1 // indirect
	go.uber.org/mock v0.5.2 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.uber.org/ratelimit v0.3.1 // indirect
	golang.org/x/mod v0.24.0 // indirect
	golang.org/x/sync v0.13.0 // indirect
	golang.org/x/sys v0.32.0 // indirect
	golang.org/x/tools v0.32.0 // indirect
	google.golang.org/api v0.230.0 // indirect
	google.golang.org/api v0.231.0 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20250422160041-2d3770c4ea7f // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34 // indirect
	google.golang.org/grpc v1.72.0 // indirect
	google.golang.org/protobuf v1.36.6 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
go.sum (56 changed lines)

@@ -710,8 +710,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjK
github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k=
github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/baidubce/bce-sdk-go v0.9.224 h1:z2L8alGw/y3IUHjrLRyrxrgCvMssYTjgCd7OQdb4gt0=
github.com/baidubce/bce-sdk-go v0.9.224/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/baidubce/bce-sdk-go v0.9.225 h1:4zz/cGgrEpAIOM6pkEU3UnlNgEcpO4SV2oVpa0gAZKI=
github.com/baidubce/bce-sdk-go v0.9.225/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=

@@ -749,8 +749,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/civo/civogo v0.3.99 h1:ijv3ecaz9Ju82J72kbQwQvzlSxn9fLMlklu8C8pXGjI=
github.com/civo/civogo v0.3.99/go.mod h1:LaEbkszc+9nXSh4YNG0sYXFGYqdQFmXXzQg0gESs2hc=
github.com/civo/civogo v0.4.1 h1:C+lwZ7hBqKy6eKy6qgviuselF0V5Z/um0x7X/eLEQ64=
github.com/civo/civogo v0.4.1/go.mod h1:LaEbkszc+9nXSh4YNG0sYXFGYqdQFmXXzQg0gESs2hc=
github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/cloudflare-go v0.115.0 h1:84/dxeeXweCc0PN5Cto44iTA8AkG1fyT11yPO5ZB7sM=

@@ -831,8 +831,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
github.com/exoscale/egoscale/v3 v3.1.15 h1:L2p9jWZhOeSBEwXCP12LPAoclBUv4LqzSN4RgNJtMdg=
github.com/exoscale/egoscale/v3 v3.1.15/go.mod h1:t9+MpSEam94na48O/xgvvPFpQPRiwZ3kBN4/UuQtKco=
github.com/exoscale/egoscale/v3 v3.1.16 h1:JaAjY9uHLw9K5jA6kVenbTkJxgds3IU2RkrXXWV+d9s=
github.com/exoscale/egoscale/v3 v3.1.16/go.mod h1:t9+MpSEam94na48O/xgvvPFpQPRiwZ3kBN4/UuQtKco=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=

@@ -933,6 +933,8 @@ github.com/godoxy-app/docker v0.0.0-20250425105916-b2ad800de7a1 h1:fsSqE28vU0PRk
github.com/godoxy-app/docker v0.0.0-20250425105916-b2ad800de7a1/go.mod h1:av6ggKWQz6SEkFyShjDEgVqiIB0RHvEQNIkPeqgJEeE=
github.com/godoxy-app/go-oidc/v3 v3.14.2 h1:y1sosR6N7IpMiREM8I8w68zrUhh5P0Hg+6wERmuhFAc=
github.com/godoxy-app/go-oidc/v3 v3.14.2/go.mod h1:ZRZLrEz7MmMe1kRzRsYqYmWKN2EHlPVGn71GMbrLLt4=
github.com/godoxy-app/gopsutil/v4 v4.0.0-20250502022742-408a348f1b97 h1:i52gBYamrKs4DHT1+SiobW2im5UgTMVXK1KIL1djSeA=
github.com/godoxy-app/gopsutil/v4 v4.0.0-20250502022742-408a348f1b97/go.mod h1:XvbfPmmrdpLrsKwj3irYkxt5ygyMcDsTQTJ7cnZ9RNQ=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=

@@ -1033,8 +1035,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4 h1:gD0vax+4I+mAj+jEChEf25Ia07Jq7kYOFO5PPhAxFl4=
github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a h1:rDA3FfmxwXR+BVKKdz55WwMJ1pD2hJQNW31d+l3mPk4=
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=

@@ -1082,16 +1084,16 @@ github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/z
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gotify/server/v2 v2.6.1 h1:Kf7v5fzBxzELzZa/jonWfwJMkqYqh1LBzBpCmt5QIAI=
github.com/gotify/server/v2 v2.6.1/go.mod h1:Dk8HLyTVDqmXM8YEg6tjROBen6mxyHZFRggJFHTwZLc=
github.com/gotify/server/v2 v2.6.3 h1:2sLDRsQ/No1+hcFwFDvjNtwKepfCSIR8L3BkXl/Vz1I=
github.com/gotify/server/v2 v2.6.3/go.mod h1:IyeQ/iL3vetcuqUAzkCMVObIMGGJx4zb13/mVatIwE8=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/h2non/gock v1.2.0 h1:K6ol8rfrRkUOefooBC8elXoaNGYkpp7y2qcxGG6BzUE=
github.com/h2non/gock v1.2.0/go.mod h1:tNhoxHYW2W42cYkYb1WqzdbYIieALC99kpYr7rH/BQk=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=

@@ -1147,8 +1149,8 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.146 h1:ld5s5UeA9zgyFsZskVD2Tr6k6VnJWkvaLm5nqvfOEf4=
github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.146/go.mod h1:Y/+YLCFCJtS29i2MbYPTUlNNfwXvkzEsZKR0imY/2aY=
github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.147 h1:ip9+1n9+THhYgChlQpgDLVDVTv4LVJ7AoyPBJBaX2MY=
github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.147/go.mod h1:Y/+YLCFCJtS29i2MbYPTUlNNfwXvkzEsZKR0imY/2aY=
github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=

@@ -1395,8 +1397,8 @@ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYr
github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A=
github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU=
github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE=
github.com/oracle/oci-go-sdk/v65 v65.89.2 h1:w0GwID9NlT+eg3InbAwkWsazDtxVLYQ8rJb4E33Yb14=
github.com/oracle/oci-go-sdk/v65 v65.89.2/go.mod h1:u6XRPsw9tPziBh76K7GrrRXPa8P8W3BQeqJ6ZZt9VLA=
github.com/oracle/oci-go-sdk/v65 v65.89.3 h1:KSUykb5Ou54jF4SeJNjBwcDg+umbAwcvT+xhrvNDog0=
github.com/oracle/oci-go-sdk/v65 v65.89.3/go.mod h1:u6XRPsw9tPziBh76K7GrrRXPa8P8W3BQeqJ6ZZt9VLA=
github.com/oschwald/maxminddb-golang v1.13.1 h1:G3wwjdN9JmIK2o/ermkHM+98oX5fS+k5MbwsmL4MRQE=
github.com/oschwald/maxminddb-golang v1.13.1/go.mod h1:K4pgV9N/GcK694KSTmVSDTODk4IsCNThNdTmnaBZ/F8=
github.com/ovh/go-ovh v1.7.0 h1:V14nF7FwDjQrZt9g7jzcvAAQ3HN6DNShRFRMC3jLoPw=

@@ -1517,8 +1519,8 @@ github.com/sacloud/packages-go v0.0.11/go.mod h1:XNF5MCTWcHo9NiqWnYctVbASSSZR3ZO
github.com/sagikazarmark/crypt v0.10.0/go.mod h1:gwTNHQVoOS3xp9Xvz5LLR+1AauC5M6880z5NWzdhOyQ=
github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k=
github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk=
github.com/samber/lo v1.49.1 h1:4BIFyVfuQSEpluc7Fua+j1NolZHiEHEpaSEKdsH0tew=
github.com/samber/lo v1.49.1/go.mod h1:dO6KHFzUKXgP8LDhU0oI8d2hekjXnGOu0DB8Jecxd6o=
github.com/samber/lo v1.50.0 h1:XrG0xOeHs+4FQ8gJR97zDz5uOFMW7OwFWiFVzqopKgY=
github.com/samber/lo v1.50.0/go.mod h1:RjZyNk6WSnUFRKK6EyOhsRJMqft3G+pg7dCWHQCWvsc=
github.com/samber/slog-common v0.18.1 h1:c0EipD/nVY9HG5shgm/XAs67mgpWDMF+MmtptdJNCkQ=
github.com/samber/slog-common v0.18.1/go.mod h1:QNZiNGKakvrfbJ2YglQXLCZauzkI9xZBjOhWFKS3IKk=
github.com/samber/slog-zerolog/v2 v2.7.3 h1:/MkPDl/tJhijN2GvB1MWwBn2FU8RiL3rQ8gpXkQm2EY=

@@ -1530,8 +1532,6 @@ github.com/selectel/domains-go v1.1.0 h1:futG50J43ALLKQAnZk9H9yOtLGnSUh7c5hSvuC5
github.com/selectel/domains-go v1.1.0/go.mod h1:SugRKfq4sTpnOHquslCpzda72wV8u0cMBHx0C0l+bzA=
github.com/selectel/go-selvpcclient/v3 v3.2.1 h1:ny6WIAMiHzKxOgOEnwcWE79wIQij1AHHylzPA41MXCw=
github.com/selectel/go-selvpcclient/v3 v3.2.1/go.mod h1:3EfSf8aEWyhspOGbvZ6mvnFg7JN5uckxNyBFPGWsXNQ=
github.com/shirou/gopsutil/v4 v4.25.3 h1:SeA68lsu8gLggyMbmCn8cmp97V1TI9ld9sVzAUcKcKE=
github.com/shirou/gopsutil/v4 v4.25.3/go.mod h1:xbuxyoZj+UsgnZrENu3lQivsngRR5BdjbJwf2fv4szA=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=

@@ -1615,8 +1615,8 @@ github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNG
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1136/go.mod h1:r5r4xbfxSaeR04b166HGsBa/R4U3SueirEUpXGuw+Q0=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1151 h1:SBbEaeCwhqmyAEEF5ubpg/2vv3RO6SdBsOSYhpnJaL4=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1151/go.mod h1:r5r4xbfxSaeR04b166HGsBa/R4U3SueirEUpXGuw+Q0=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1158 h1:N+C8Tz6JKGwnDFDfd3g5CkTsiKTa6/Uia0uAL0OhimE=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1158/go.mod h1:r5r4xbfxSaeR04b166HGsBa/R4U3SueirEUpXGuw+Q0=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dnspod v1.0.1136 h1:kMIdSU5IvpOROh27ToVQ3hlm6ym3lCRs9tnGCOBoZqk=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dnspod v1.0.1136/go.mod h1:FpyIz3mymKaExVs6Fz27kxDBS42jqZn7vbACtxdeEH4=
github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho=

@@ -1725,8 +1725,8 @@ go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/mock v0.5.1 h1:ASgazW/qBmR+A32MYFDB6E2POoTgOwT509VP0CT/fjs=
go.uber.org/mock v0.5.1/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=

@@ -2293,8 +2293,8 @@ google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60c
google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0=
google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms=
google.golang.org/api v0.230.0 h1:2u1hni3E+UXAXrONrrkfWpi/V6cyKVAbfGVeGtC3OxM=
google.golang.org/api v0.230.0/go.mod h1:aqvtoMk7YkiXx+6U12arQFExiRV9D/ekvMCwCd/TksQ=
google.golang.org/api v0.231.0 h1:LbUD5FUl0C4qwia2bjXhCMH65yz1MLPzA/0OYEsYY7Q=
google.golang.org/api v0.231.0/go.mod h1:H52180fPI/QQlUc0F4xWfGZILdv09GCWKt2bcsn164A=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

@@ -2437,8 +2437,8 @@ google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOl
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
google.golang.org/genproto/googleapis/api v0.0.0-20250422160041-2d3770c4ea7f h1:tjZsroqekhC63+WMqzmWyW5Twj/ZfR5HAlpd5YQ1Vs0=
google.golang.org/genproto/googleapis/api v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:Cd8IzgPo5Akum2c9R6FsXNaZbH3Jpa2gpHlW89FqlyQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197 h1:29cjnHVylHwTzH66WfFZqgSQgnxzvWE+jvBwpZCLRxY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34 h1:h6p3mQqrmT1XkHVTfzLdNz1u7IhINeZkz67/xTbOuWs=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -2,16 +2,13 @@ package acl

import (
    "net"
    "sync"
    "time"

    "github.com/oschwald/maxminddb-golang"
    "github.com/puzpuzpuz/xsync/v3"
    "github.com/rs/zerolog"
    acl "github.com/yusing/go-proxy/internal/acl/types"
    "github.com/yusing/go-proxy/internal/common"
    "github.com/yusing/go-proxy/internal/gperr"
    "github.com/yusing/go-proxy/internal/logging"
    "github.com/yusing/go-proxy/internal/logging/accesslog"
    "github.com/yusing/go-proxy/internal/maxmind"
    "github.com/yusing/go-proxy/internal/task"
    "github.com/yusing/go-proxy/internal/utils"
)
@@ -19,43 +16,23 @@ import (
type Config struct {
    Default    string   `json:"default" validate:"omitempty,oneof=allow deny"` // default: allow
    AllowLocal *bool    `json:"allow_local"` // default: true
    Allow      []string `json:"allow"`
    Deny       []string `json:"deny"`
    Allow      Matchers `json:"allow"`
    Deny       Matchers `json:"deny"`
    Log        *accesslog.ACLLoggerConfig `json:"log"`

    MaxMind *MaxMindConfig `json:"maxmind" validate:"omitempty"`

    config
}

type (
    MaxMindDatabaseType string
    MaxMindConfig struct {
        AccountID  string              `json:"account_id" validate:"required"`
        LicenseKey string              `json:"license_key" validate:"required"`
        Database   MaxMindDatabaseType `json:"database" validate:"required,oneof=geolite geoip2"`

        logger     zerolog.Logger
        lastUpdate time.Time
        db struct {
            *maxminddb.Reader
            sync.RWMutex
        }
    }
)

type config struct {
    defaultAllow bool
    allowLocal   bool
    allow        []matcher
    deny         []matcher
    ipCache      *xsync.MapOf[string, *checkCache]
    logAllowed   bool
    logger       *accesslog.AccessLogger
}

type checkCache struct {
    *acl.IPInfo
    *maxmind.IPInfo
    allow   bool
    created time.Time
}
@@ -73,11 +50,6 @@ const (
    ACLDeny = "deny"
)

const (
    MaxMindGeoLite MaxMindDatabaseType = "geolite"
    MaxMindGeoIP2  MaxMindDatabaseType = "geoip2"
)

func (c *Config) Validate() gperr.Error {
    switch c.Default {
    case "", ACLAllow:
@@ -94,55 +66,19 @@ func (c *Config) Validate() gperr.Error {
        c.allowLocal = true
    }

    if c.MaxMind != nil {
        c.MaxMind.logger = logging.With().Str("type", string(c.MaxMind.Database)).Logger()
    }

    if c.Log != nil {
        c.logAllowed = c.Log.LogAllowed
    }

    errs := gperr.NewBuilder("syntax error")
    c.allow = make([]matcher, 0, len(c.Allow))
    c.deny = make([]matcher, 0, len(c.Deny))

    for _, s := range c.Allow {
        m, err := c.parseMatcher(s)
        if err != nil {
            errs.Add(err.Subject(s))
            continue
        }
        c.allow = append(c.allow, m)
    }
    for _, s := range c.Deny {
        m, err := c.parseMatcher(s)
        if err != nil {
            errs.Add(err.Subject(s))
            continue
        }
        c.deny = append(c.deny, m)
    }

    if errs.HasError() {
        c.allow = nil
        c.deny = nil
        return errMatcherFormat.With(errs.Error())
    }

    c.ipCache = xsync.NewMapOf[string, *checkCache]()
    return nil
}

func (c *Config) Valid() bool {
    return c != nil && (len(c.allow) > 0 || len(c.deny) > 0 || c.allowLocal)
    return c != nil && (len(c.Allow) > 0 || len(c.Deny) > 0 || c.allowLocal)
}

func (c *Config) Start(parent *task.Task) gperr.Error {
    if c.MaxMind != nil {
        if err := c.MaxMind.LoadMaxMindDB(parent); err != nil {
            return err
        }
    }
    if c.Log != nil {
        logger, err := accesslog.NewAccessLogger(parent, c.Log)
        if err != nil {
@@ -153,7 +89,10 @@ func (c *Config) Start(parent *task.Task) gperr.Error {
    return nil
}

func (c *config) cacheRecord(info *acl.IPInfo, allow bool) {
func (c *Config) cacheRecord(info *maxmind.IPInfo, allow bool) {
    if common.ForceResolveCountry && info.City == nil {
        maxmind.LookupCity(info)
    }
    c.ipCache.Store(info.Str, &checkCache{
        IPInfo: info,
        allow:  allow,
@@ -161,7 +100,7 @@ func (c *config) cacheRecord(info *acl.IPInfo, allow bool) {
    })
}

func (c *config) log(info *acl.IPInfo, allowed bool) {
func (c *config) log(info *maxmind.IPInfo, allowed bool) {
    if c.logger == nil {
        return
    }
@@ -175,14 +114,14 @@ func (c *Config) IPAllowed(ip net.IP) bool {
        return false
    }

    // always allow private and loopback
    // always allow loopback
    // loopback is not logged
    if ip.IsLoopback() {
        return true
    }

    if c.allowLocal && ip.IsPrivate() {
        c.log(&acl.IPInfo{IP: ip, Str: ip.String()}, true)
        c.log(&maxmind.IPInfo{IP: ip, Str: ip.String()}, true)
        return true
    }

@@ -193,15 +132,15 @@ func (c *Config) IPAllowed(ip net.IP) bool {
        return record.allow
    }

    ipAndStr := &acl.IPInfo{IP: ip, Str: ipStr}
    for _, m := range c.allow {
    ipAndStr := &maxmind.IPInfo{IP: ip, Str: ipStr}
    for _, m := range c.Allow {
        if m(ipAndStr) {
            c.log(ipAndStr, true)
            c.cacheRecord(ipAndStr, true)
            return true
        }
    }
    for _, m := range c.deny {
    for _, m := range c.Deny {
        if m(ipAndStr) {
            c.log(ipAndStr, false)
            c.cacheRecord(ipAndStr, false)
@@ -4,11 +4,12 @@ import (
    "net"
    "strings"

    acl "github.com/yusing/go-proxy/internal/acl/types"
    "github.com/yusing/go-proxy/internal/gperr"
    "github.com/yusing/go-proxy/internal/maxmind"
)

type matcher func(*acl.IPInfo) bool
type Matcher func(*maxmind.IPInfo) bool
type Matchers []Matcher

const (
    MatcherTypeIP = "ip"
@@ -32,7 +33,7 @@ var (
    errMaxMindNotConfigured = gperr.New("MaxMind not configured")
)

func (cfg *Config) parseMatcher(s string) (matcher, gperr.Error) {
func ParseMatcher(s string) (Matcher, gperr.Error) {
    parts := strings.Split(s, ":")
    if len(parts) != 2 {
        return nil, errSyntax
@@ -52,35 +53,44 @@ func (cfg *Config) parseMatcher(s string) (matcher, gperr.Error) {
        }
        return matchCIDR(net), nil
    case MatcherTypeTimeZone:
        if cfg.MaxMind == nil {
        if !maxmind.HasInstance() {
            return nil, errMaxMindNotConfigured
        }
        return cfg.MaxMind.matchTimeZone(parts[1]), nil
        return matchTimeZone(parts[1]), nil
    case MatcherTypeCountry:
        if cfg.MaxMind == nil {
        if !maxmind.HasInstance() {
            return nil, errMaxMindNotConfigured
        }
        return cfg.MaxMind.matchISOCode(parts[1]), nil
        return matchISOCode(parts[1]), nil
    default:
        return nil, errSyntax
    }
}

func matchIP(ip net.IP) matcher {
    return func(ip2 *acl.IPInfo) bool {
func (matchers Matchers) Match(ip *maxmind.IPInfo) bool {
    for _, m := range matchers {
        if m(ip) {
            return true
        }
    }
    return false
}

func matchIP(ip net.IP) Matcher {
    return func(ip2 *maxmind.IPInfo) bool {
        return ip.Equal(ip2.IP)
    }
}

func matchCIDR(n *net.IPNet) matcher {
    return func(ip *acl.IPInfo) bool {
func matchCIDR(n *net.IPNet) Matcher {
    return func(ip *maxmind.IPInfo) bool {
        return n.Contains(ip.IP)
    }
}

func (cfg *MaxMindConfig) matchTimeZone(tz string) matcher {
    return func(ip *acl.IPInfo) bool {
        city, ok := cfg.lookupCity(ip)
func matchTimeZone(tz string) Matcher {
    return func(ip *maxmind.IPInfo) bool {
        city, ok := maxmind.LookupCity(ip)
        if !ok {
            return false
        }
@@ -88,9 +98,9 @@ func (cfg *MaxMindConfig) matchTimeZone(tz string) matcher {
    }
}

func (cfg *MaxMindConfig) matchISOCode(iso string) matcher {
    return func(ip *acl.IPInfo) bool {
        city, ok := cfg.lookupCity(ip)
func matchISOCode(iso string) Matcher {
    return func(ip *maxmind.IPInfo) bool {
        city, ok := maxmind.LookupCity(ip)
        if !ok {
            return false
        }
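A minimal sketch of how the new exported matcher API above fits together. It assumes only what this hunk shows (the "ip:" prefix, ParseMatcher, Matchers.Match, and the maxmind.IPInfo IP/Str fields) and is illustrative, not repository code.

package acl

import (
    "fmt"
    "net"

    "github.com/yusing/go-proxy/internal/maxmind"
)

// ExampleMatchers parses two "ip:" rules with ParseMatcher, collects them into
// a Matchers list, and checks an address with Matchers.Match.
func ExampleMatchers() {
    var allow Matchers
    for _, s := range []string{"ip:10.1.2.3", "ip:127.0.0.1"} {
        m, err := ParseMatcher(s)
        if err != nil {
            panic(err)
        }
        allow = append(allow, m)
    }

    ip := net.ParseIP("10.1.2.3")
    fmt.Println(allow.Match(&maxmind.IPInfo{IP: ip, Str: ip.String()}))
    // Output: true
}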
@@ -1,281 +0,0 @@
|
||||
package acl
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"compress/gzip"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/oschwald/maxminddb-golang"
|
||||
"github.com/yusing/go-proxy/internal/common"
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/task"
|
||||
)
|
||||
|
||||
var (
|
||||
updateInterval = 24 * time.Hour
|
||||
httpClient = &http.Client{
|
||||
Timeout: 10 * time.Second,
|
||||
}
|
||||
ErrResponseNotOK = gperr.New("response not OK")
|
||||
ErrDownloadFailure = gperr.New("download failure")
|
||||
)
|
||||
|
||||
func dbPathImpl(dbType MaxMindDatabaseType) string {
|
||||
if dbType == MaxMindGeoLite {
|
||||
return filepath.Join(dataDir, "GeoLite2-City.mmdb")
|
||||
}
|
||||
return filepath.Join(dataDir, "GeoIP2-City.mmdb")
|
||||
}
|
||||
|
||||
func dbURLimpl(dbType MaxMindDatabaseType) string {
|
||||
if dbType == MaxMindGeoLite {
|
||||
return "https://download.maxmind.com/geoip/databases/GeoLite2-City/download?suffix=tar.gz"
|
||||
}
|
||||
return "https://download.maxmind.com/geoip/databases/GeoIP2-City/download?suffix=tar.gz"
|
||||
}
|
||||
|
||||
func dbFilename(dbType MaxMindDatabaseType) string {
|
||||
if dbType == MaxMindGeoLite {
|
||||
return "GeoLite2-City.mmdb"
|
||||
}
|
||||
return "GeoIP2-City.mmdb"
|
||||
}
|
||||
|
||||
func (cfg *MaxMindConfig) LoadMaxMindDB(parent task.Parent) gperr.Error {
|
||||
if cfg.Database == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
path := dbPath(cfg.Database)
|
||||
reader, err := maxmindDBOpen(path)
|
||||
exists := true
|
||||
if err != nil {
|
||||
switch {
|
||||
case errors.Is(err, os.ErrNotExist):
|
||||
default:
|
||||
// ignore invalid error, just download it again
|
||||
var invalidErr maxminddb.InvalidDatabaseError
|
||||
if !errors.As(err, &invalidErr) {
|
||||
return gperr.Wrap(err)
|
||||
}
|
||||
}
|
||||
exists = false
|
||||
}
|
||||
|
||||
if !exists {
|
||||
cfg.logger.Info().Msg("MaxMind DB not found/invalid, downloading...")
|
||||
reader, err = cfg.download()
|
||||
if err != nil {
|
||||
return ErrDownloadFailure.With(err)
|
||||
}
|
||||
}
|
||||
cfg.logger.Info().Msg("MaxMind DB loaded")
|
||||
|
||||
cfg.db.Reader = reader
|
||||
go cfg.scheduleUpdate(parent)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cfg *MaxMindConfig) loadLastUpdate() {
|
||||
f, err := os.Stat(dbPath(cfg.Database))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
cfg.lastUpdate = f.ModTime()
|
||||
}
|
||||
|
||||
func (cfg *MaxMindConfig) setLastUpdate(t time.Time) {
|
||||
cfg.lastUpdate = t
|
||||
_ = os.Chtimes(dbPath(cfg.Database), t, t)
|
||||
}
|
||||
|
||||
func (cfg *MaxMindConfig) scheduleUpdate(parent task.Parent) {
|
||||
task := parent.Subtask("schedule_update", true)
|
||||
ticker := time.NewTicker(updateInterval)
|
||||
|
||||
cfg.loadLastUpdate()
|
||||
cfg.update()
|
||||
|
||||
defer func() {
|
||||
ticker.Stop()
|
||||
if cfg.db.Reader != nil {
|
||||
cfg.db.Reader.Close()
|
||||
}
|
||||
task.Finish(nil)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-task.Context().Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
cfg.update()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cfg *MaxMindConfig) update() {
|
||||
// check for update
|
||||
cfg.logger.Info().Msg("checking for MaxMind DB update...")
|
||||
remoteLastModified, err := cfg.checkLastest()
|
||||
if err != nil {
|
||||
cfg.logger.Err(err).Msg("failed to check MaxMind DB update")
|
||||
return
|
||||
}
|
||||
if remoteLastModified.Equal(cfg.lastUpdate) {
|
||||
cfg.logger.Info().Msg("MaxMind DB is up to date")
|
||||
return
|
||||
}
|
||||
|
||||
cfg.logger.Info().
|
||||
Time("latest", remoteLastModified.Local()).
|
||||
Time("current", cfg.lastUpdate).
|
||||
Msg("MaxMind DB update available")
|
||||
reader, err := cfg.download()
|
||||
if err != nil {
|
||||
cfg.logger.Err(err).Msg("failed to update MaxMind DB")
|
||||
return
|
||||
}
|
||||
cfg.db.Lock()
|
||||
cfg.db.Close()
|
||||
cfg.db.Reader = reader
|
||||
cfg.setLastUpdate(*remoteLastModified)
|
||||
cfg.db.Unlock()
|
||||
|
||||
cfg.logger.Info().Msg("MaxMind DB updated")
|
||||
}
|
||||
|
||||
func (cfg *MaxMindConfig) newReq(method string) (*http.Response, error) {
|
||||
req, err := http.NewRequest(method, dbURL(cfg.Database), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.SetBasicAuth(cfg.AccountID, cfg.LicenseKey)
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (cfg *MaxMindConfig) checkLastest() (lastModifiedT *time.Time, err error) {
|
||||
resp, err := newReq(cfg, http.MethodHead)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("%w: %d", ErrResponseNotOK, resp.StatusCode)
|
||||
}
|
||||
|
||||
lastModified := resp.Header.Get("Last-Modified")
|
||||
if lastModified == "" {
|
||||
cfg.logger.Warn().Msg("MaxMind responded no last modified time, update skipped")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
lastModifiedTime, err := time.Parse(http.TimeFormat, lastModified)
|
||||
if err != nil {
|
||||
cfg.logger.Warn().Err(err).Msg("MaxMind responded invalid last modified time, update skipped")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &lastModifiedTime, nil
|
||||
}
|
||||
|
||||
func (cfg *MaxMindConfig) download() (*maxminddb.Reader, error) {
|
||||
resp, err := newReq(cfg, http.MethodGet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("%w: %d", ErrResponseNotOK, resp.StatusCode)
|
||||
}
|
||||
|
||||
path := dbPath(cfg.Database)
|
||||
tmpPath := path + "-tmp.tar.gz"
|
||||
file, err := os.OpenFile(tmpPath, os.O_CREATE|os.O_WRONLY, 0o644)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg.logger.Info().Msg("MaxMind DB downloading...")
|
||||
|
||||
_, err = io.Copy(file, resp.Body)
|
||||
if err != nil {
|
||||
file.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
file.Close()
|
||||
|
||||
// extract .tar.gz and move only the dbFilename to path
|
||||
err = extractFileFromTarGz(tmpPath, dbFilename(cfg.Database), path)
|
||||
if err != nil {
|
||||
return nil, gperr.New("failed to extract database from archive").With(err)
|
||||
}
|
||||
// cleanup the tar.gz file
|
||||
_ = os.Remove(tmpPath)
|
||||
|
||||
db, err := maxmindDBOpen(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return db, nil
|
||||
}
|
||||
|
||||
func extractFileFromTarGz(tarGzPath, targetFilename, destPath string) error {
|
||||
f, err := os.Open(tarGzPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
gzr, err := gzip.NewReader(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer gzr.Close()
|
||||
|
||||
tr := tar.NewReader(gzr)
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break // End of archive
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Only extract the file that matches targetFilename (basename match)
|
||||
if filepath.Base(hdr.Name) == targetFilename {
|
||||
outFile, err := os.OpenFile(destPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, hdr.FileInfo().Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer outFile.Close()
|
||||
_, err = io.Copy(outFile, tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil // Done
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("file %s not found in archive", targetFilename)
|
||||
}
|
||||
|
||||
var (
|
||||
dataDir = common.DataDir
|
||||
dbURL = dbURLimpl
|
||||
dbPath = dbPathImpl
|
||||
maxmindDBOpen = maxminddb.Open
|
||||
newReq = (*MaxMindConfig).newReq
|
||||
)
|
||||
@@ -1,213 +0,0 @@
|
||||
package acl
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/oschwald/maxminddb-golang"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/yusing/go-proxy/internal/task"
|
||||
)
|
||||
|
||||
func Test_dbPath(t *testing.T) {
|
||||
tmpDataDir := "/tmp/testdata"
|
||||
oldDataDir := dataDir
|
||||
dataDir = tmpDataDir
|
||||
defer func() { dataDir = oldDataDir }()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
dbType MaxMindDatabaseType
|
||||
want string
|
||||
}{
|
||||
{"GeoLite", MaxMindGeoLite, filepath.Join(tmpDataDir, "GeoLite2-City.mmdb")},
|
||||
{"GeoIP2", MaxMindGeoIP2, filepath.Join(tmpDataDir, "GeoIP2-City.mmdb")},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := dbPath(tt.dbType); got != tt.want {
|
||||
t.Errorf("dbPath() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_dbURL(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
dbType MaxMindDatabaseType
|
||||
want string
|
||||
}{
|
||||
{"GeoLite", MaxMindGeoLite, "https://download.maxmind.com/geoip/databases/GeoLite2-City/download?suffix=tar.gz"},
|
||||
{"GeoIP2", MaxMindGeoIP2, "https://download.maxmind.com/geoip/databases/GeoIP2-City/download?suffix=tar.gz"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := dbURL(tt.dbType); got != tt.want {
|
||||
t.Errorf("dbURL() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// --- Helper for MaxMindConfig ---
|
||||
type testLogger struct{ zerolog.Logger }
|
||||
|
||||
func (testLogger) Info() *zerolog.Event { return &zerolog.Event{} }
|
||||
func (testLogger) Warn() *zerolog.Event { return &zerolog.Event{} }
|
||||
func (testLogger) Err(_ error) *zerolog.Event { return &zerolog.Event{} }
|
||||
|
||||
func Test_MaxMindConfig_newReq(t *testing.T) {
|
||||
cfg := &MaxMindConfig{
|
||||
AccountID: "testid",
|
||||
LicenseKey: "testkey",
|
||||
Database: MaxMindGeoLite,
|
||||
logger: zerolog.Nop(),
|
||||
}
|
||||
|
||||
// Patch httpClient to use httptest
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if u, p, ok := r.BasicAuth(); !ok || u != "testid" || p != "testkey" {
|
||||
t.Errorf("basic auth not set correctly")
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
defer server.Close()
|
||||
oldURL := dbURL
|
||||
dbURL = func(MaxMindDatabaseType) string { return server.URL }
|
||||
defer func() { dbURL = oldURL }()
|
||||
|
||||
resp, err := cfg.newReq(http.MethodGet)
|
||||
if err != nil {
|
||||
t.Fatalf("newReq() error = %v", err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Errorf("unexpected status: %v", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_MaxMindConfig_checkUpdate(t *testing.T) {
|
||||
cfg := &MaxMindConfig{
|
||||
AccountID: "id",
|
||||
LicenseKey: "key",
|
||||
Database: MaxMindGeoLite,
|
||||
logger: zerolog.Nop(),
|
||||
}
|
||||
lastMod := time.Now().UTC().Format(http.TimeFormat)
|
||||
buildTime := time.Now().Add(-time.Hour)
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Last-Modified", lastMod)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
defer server.Close()
|
||||
oldURL := dbURL
|
||||
dbURL = func(MaxMindDatabaseType) string { return server.URL }
|
||||
defer func() { dbURL = oldURL }()
|
||||
|
||||
latest, err := cfg.checkLastest()
|
||||
if err != nil {
|
||||
t.Fatalf("checkUpdate() error = %v", err)
|
||||
}
|
||||
if latest.Equal(buildTime) {
|
||||
t.Errorf("expected update needed")
|
||||
}
|
||||
}
|
||||
|
||||
type fakeReadCloser struct {
|
||||
firstRead bool
|
||||
closed bool
|
||||
}
|
||||
|
||||
func (c *fakeReadCloser) Read(p []byte) (int, error) {
|
||||
if !c.firstRead {
|
||||
c.firstRead = true
|
||||
return strings.NewReader("FAKEMMDB").Read(p)
|
||||
}
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
func (c *fakeReadCloser) Close() error {
|
||||
c.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func Test_MaxMindConfig_download(t *testing.T) {
|
||||
cfg := &MaxMindConfig{
|
||||
AccountID: "id",
|
||||
LicenseKey: "key",
|
||||
Database: MaxMindGeoLite,
|
||||
logger: zerolog.Nop(),
|
||||
}
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
io.Copy(w, strings.NewReader("FAKEMMDB"))
|
||||
}))
|
||||
defer server.Close()
|
||||
oldURL := dbURL
|
||||
dbURL = func(MaxMindDatabaseType) string { return server.URL }
|
||||
defer func() { dbURL = oldURL }()
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
oldDataDir := dataDir
|
||||
dataDir = tmpDir
|
||||
defer func() { dataDir = oldDataDir }()
|
||||
|
||||
// Patch maxminddb.Open to always succeed
|
||||
origOpen := maxmindDBOpen
|
||||
maxmindDBOpen = func(path string) (*maxminddb.Reader, error) {
|
||||
return &maxminddb.Reader{}, nil
|
||||
}
|
||||
defer func() { maxmindDBOpen = origOpen }()
|
||||
|
||||
rw := &fakeReadCloser{}
|
||||
oldNewReq := newReq
|
||||
newReq = func(cfg *MaxMindConfig, method string) (*http.Response, error) {
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: rw,
|
||||
}, nil
|
||||
}
|
||||
defer func() { newReq = oldNewReq }()
|
||||
|
||||
db, err := cfg.download()
|
||||
if err != nil {
|
||||
t.Fatalf("download() error = %v", err)
|
||||
}
|
||||
if db == nil {
|
||||
t.Error("expected db instance")
|
||||
}
|
||||
if !rw.closed {
|
||||
t.Error("expected rw to be closed")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_MaxMindConfig_loadMaxMindDB(t *testing.T) {
|
||||
// This test should cover both the path where DB exists and where it does not
|
||||
// For brevity, only the non-existing path is tested here
|
||||
cfg := &MaxMindConfig{
|
||||
AccountID: "id",
|
||||
LicenseKey: "key",
|
||||
Database: MaxMindGeoLite,
|
||||
logger: zerolog.Nop(),
|
||||
}
|
||||
oldOpen := maxmindDBOpen
|
||||
maxmindDBOpen = func(path string) (*maxminddb.Reader, error) {
|
||||
return &maxminddb.Reader{}, nil
|
||||
}
|
||||
defer func() { maxmindDBOpen = oldOpen }()
|
||||
|
||||
oldDBPath := dbPath
|
||||
dbPath = func(MaxMindDatabaseType) string { return filepath.Join(t.TempDir(), "maxmind.mmdb") }
|
||||
defer func() { dbPath = oldDBPath }()
|
||||
|
||||
task := task.RootTask("test")
|
||||
defer task.Finish(nil)
|
||||
err := cfg.LoadMaxMindDB(task)
|
||||
if err != nil {
|
||||
t.Errorf("loadMaxMindDB() error = %v", err)
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
@@ -51,12 +52,12 @@ func (t FileType) GetPath(filename string) string {
|
||||
func getArgs(r *http.Request) (fileType FileType, filename string, err error) {
|
||||
fileType = FileType(r.PathValue("type"))
|
||||
if !fileType.IsValid() {
|
||||
err = gphttp.ErrInvalidKey("type")
|
||||
err = fmt.Errorf("invalid file type: %s", fileType)
|
||||
return
|
||||
}
|
||||
filename = r.PathValue("filename")
|
||||
if filename == "" {
|
||||
err = gphttp.ErrMissingKey("filename")
|
||||
err = fmt.Errorf("missing filename")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
package favicon
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/homepage"
|
||||
"github.com/yusing/go-proxy/internal/net/gphttp"
|
||||
"github.com/yusing/go-proxy/internal/route/routes"
|
||||
@@ -21,11 +19,11 @@ import (
|
||||
func GetFavIcon(w http.ResponseWriter, req *http.Request) {
|
||||
url, alias := req.FormValue("url"), req.FormValue("alias")
|
||||
if url == "" && alias == "" {
|
||||
gphttp.ClientError(w, gphttp.ErrMissingKey("url or alias"), http.StatusBadRequest)
|
||||
gphttp.MissingKey(w, "url or alias")
|
||||
return
|
||||
}
|
||||
if url != "" && alias != "" {
|
||||
gphttp.ClientError(w, gperr.New("url and alias are mutually exclusive"), http.StatusBadRequest)
|
||||
gphttp.BadRequest(w, "url and alias are mutually exclusive")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -33,7 +31,7 @@ func GetFavIcon(w http.ResponseWriter, req *http.Request) {
|
||||
if url != "" {
|
||||
var iconURL homepage.IconURL
|
||||
if err := iconURL.Parse(url); err != nil {
|
||||
gphttp.ClientError(w, err, http.StatusBadRequest)
|
||||
gphttp.ClientError(w, req, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
fetchResult := homepage.FetchFavIconFromURL(req.Context(), &iconURL)
|
||||
@@ -49,7 +47,7 @@ func GetFavIcon(w http.ResponseWriter, req *http.Request) {
|
||||
// try with route.Icon
|
||||
r, ok := routes.HTTP.Get(alias)
|
||||
if !ok {
|
||||
gphttp.ClientError(w, errors.New("no such route"), http.StatusNotFound)
|
||||
gphttp.ValueNotFound(w, "route", alias)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -57,7 +55,7 @@ func GetFavIcon(w http.ResponseWriter, req *http.Request) {
|
||||
hp := r.HomepageItem()
|
||||
if hp.Icon != nil {
|
||||
if hp.Icon.IconSource == homepage.IconSourceRelative {
|
||||
result = homepage.FindIcon(req.Context(), r, hp.Icon.Value)
|
||||
result = homepage.FindIcon(req.Context(), r, *hp.Icon.FullURL)
|
||||
} else {
|
||||
result = homepage.FetchFavIconFromURL(req.Context(), hp.Icon)
|
||||
}
|
||||
|
||||
@@ -43,7 +43,7 @@ func SetHomePageOverrides(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
data, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
gphttp.ClientError(w, err, http.StatusBadRequest)
|
||||
gphttp.ClientError(w, r, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
r.Body.Close()
|
||||
@@ -53,21 +53,21 @@ func SetHomePageOverrides(w http.ResponseWriter, r *http.Request) {
|
||||
case HomepageOverrideItem:
|
||||
var params HomepageOverrideItemParams
|
||||
if err := json.Unmarshal(data, ¶ms); err != nil {
|
||||
gphttp.ClientError(w, err, http.StatusBadRequest)
|
||||
gphttp.ClientError(w, r, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
overrides.OverrideItem(params.Which, ¶ms.Value)
|
||||
case HomepageOverrideItemsBatch:
|
||||
var params HomepageOverrideItemsBatchParams
|
||||
if err := json.Unmarshal(data, ¶ms); err != nil {
|
||||
gphttp.ClientError(w, err, http.StatusBadRequest)
|
||||
gphttp.ClientError(w, r, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
overrides.OverrideItems(params.Value)
|
||||
case HomepageOverrideItemVisible: // POST /v1/item_visible [a,b,c], false => hide a, b, c
|
||||
var params HomepageOverrideItemVisibleParams
|
||||
if err := json.Unmarshal(data, ¶ms); err != nil {
|
||||
gphttp.ClientError(w, err, http.StatusBadRequest)
|
||||
gphttp.ClientError(w, r, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if params.Value {
|
||||
@@ -78,7 +78,7 @@ func SetHomePageOverrides(w http.ResponseWriter, r *http.Request) {
|
||||
case HomepageOverrideCategoryOrder:
|
||||
var params HomepageOverrideCategoryOrderParams
|
||||
if err := json.Unmarshal(data, ¶ms); err != nil {
|
||||
gphttp.ClientError(w, err, http.StatusBadRequest)
|
||||
gphttp.ClientError(w, r, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
overrides.SetCategoryOrder(params.Which, params.Value)
|
||||
|
||||
@@ -6,9 +6,9 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/yusing/go-proxy/internal"
|
||||
"github.com/yusing/go-proxy/internal/common"
|
||||
config "github.com/yusing/go-proxy/internal/config/types"
|
||||
"github.com/yusing/go-proxy/internal/homepage"
|
||||
"github.com/yusing/go-proxy/internal/net/gphttp"
|
||||
"github.com/yusing/go-proxy/internal/net/gphttp/middleware"
|
||||
"github.com/yusing/go-proxy/internal/route/routes"
|
||||
@@ -67,14 +67,11 @@ func List(cfg config.ConfigInstance, w http.ResponseWriter, r *http.Request) {
|
||||
if err != nil {
|
||||
limit = 0
|
||||
}
|
||||
icons, err := internal.SearchIcons(r.FormValue("keyword"), limit)
|
||||
icons, err := homepage.SearchIcons(r.FormValue("keyword"), limit)
|
||||
if err != nil {
|
||||
gphttp.ClientError(w, err)
|
||||
gphttp.ClientError(w, r, err)
|
||||
return
|
||||
}
|
||||
if icons == nil {
|
||||
icons = []string{}
|
||||
}
|
||||
gphttp.RespondJSON(w, r, icons)
|
||||
case ListTasks:
|
||||
gphttp.RespondJSON(w, r, task.DebugTaskList())
|
||||
|
||||
@@ -20,27 +20,27 @@ func NewAgent(w http.ResponseWriter, r *http.Request) {
|
||||
q := r.URL.Query()
|
||||
name := q.Get("name")
|
||||
if name == "" {
|
||||
gphttp.ClientError(w, gphttp.ErrMissingKey("name"))
|
||||
gphttp.MissingKey(w, "name")
|
||||
return
|
||||
}
|
||||
host := q.Get("host")
|
||||
if host == "" {
|
||||
gphttp.ClientError(w, gphttp.ErrMissingKey("host"))
|
||||
gphttp.MissingKey(w, "host")
|
||||
return
|
||||
}
|
||||
portStr := q.Get("port")
|
||||
if portStr == "" {
|
||||
gphttp.ClientError(w, gphttp.ErrMissingKey("port"))
|
||||
gphttp.MissingKey(w, "port")
|
||||
return
|
||||
}
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil || port < 1 || port > 65535 {
|
||||
gphttp.ClientError(w, gphttp.ErrInvalidKey("port"))
|
||||
gphttp.InvalidKey(w, "port")
|
||||
return
|
||||
}
|
||||
hostport := fmt.Sprintf("%s:%d", host, port)
|
||||
if _, ok := config.GetInstance().GetAgent(hostport); ok {
|
||||
gphttp.ClientError(w, gphttp.ErrAlreadyExists("agent", hostport), http.StatusConflict)
|
||||
gphttp.KeyAlreadyExists(w, "agent", hostport)
|
||||
return
|
||||
}
|
||||
t := q.Get("type")
|
||||
@@ -48,10 +48,10 @@ func NewAgent(w http.ResponseWriter, r *http.Request) {
|
||||
case "docker", "system":
|
||||
break
|
||||
case "":
|
||||
gphttp.ClientError(w, gphttp.ErrMissingKey("type"))
|
||||
gphttp.MissingKey(w, "type")
|
||||
return
|
||||
default:
|
||||
gphttp.ClientError(w, gphttp.ErrInvalidKey("type"))
|
||||
gphttp.InvalidKey(w, "type")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -109,13 +109,13 @@ func VerifyNewAgent(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(clientPEMData, &data); err != nil {
|
||||
gphttp.ClientError(w, err, http.StatusBadRequest)
|
||||
gphttp.ClientError(w, r, err)
|
||||
return
|
||||
}
|
||||
|
||||
nRoutesAdded, err := config.GetInstance().VerifyNewAgent(data.Host, data.CA, data.Client)
|
||||
if err != nil {
|
||||
gphttp.ClientError(w, err)
|
||||
gphttp.ClientError(w, r, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -127,7 +127,7 @@ func VerifyNewAgent(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
filename, ok := certs.AgentCertsFilepath(data.Host)
|
||||
if !ok {
|
||||
gphttp.ClientError(w, gphttp.ErrInvalidKey("host"))
|
||||
gphttp.InvalidKey(w, "host")
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/yusing/go-proxy/internal/common"
|
||||
@@ -38,17 +39,35 @@ func IsOIDCEnabled() bool {
|
||||
return common.OIDCIssuerURL != ""
|
||||
}
|
||||
|
||||
type nextHandler struct{}
|
||||
|
||||
var nextHandlerContextKey = nextHandler{}
|
||||
|
||||
func RequireAuth(next http.HandlerFunc) http.HandlerFunc {
|
||||
if IsEnabled() {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if err := defaultAuth.CheckToken(r); err != nil {
|
||||
gphttp.ClientError(w, err, http.StatusUnauthorized)
|
||||
} else {
|
||||
next(w, r)
|
||||
}
|
||||
}
|
||||
if !IsEnabled() {
|
||||
return next
|
||||
}
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if err := defaultAuth.CheckToken(r); err != nil {
|
||||
if IsFrontend(r) {
|
||||
r = r.WithContext(context.WithValue(r.Context(), nextHandlerContextKey, next))
|
||||
defaultAuth.LoginHandler(w, r)
|
||||
} else {
|
||||
gphttp.Unauthorized(w, err.Error())
|
||||
}
|
||||
return
|
||||
}
|
||||
next(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
func ProceedNext(w http.ResponseWriter, r *http.Request) {
|
||||
next, ok := r.Context().Value(nextHandlerContextKey).(http.HandlerFunc)
|
||||
if ok {
|
||||
next(w, r)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
return next
|
||||
}
|
||||
|
||||
func AuthCheckHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang-jwt/jwt/v5"
|
||||
@@ -19,6 +21,10 @@ type oauthRefreshToken struct {
|
||||
Username string `json:"username"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
Expiry time.Time `json:"expiry"`
|
||||
|
||||
result *refreshResult
|
||||
err error
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
type Session struct {
|
||||
@@ -27,6 +33,12 @@ type Session struct {
|
||||
Groups []string `json:"groups"`
|
||||
}
|
||||
|
||||
type refreshResult struct {
|
||||
newSession Session
|
||||
jwt string
|
||||
jwtExpiry time.Time
|
||||
}
|
||||
|
||||
type sessionClaims struct {
|
||||
Session
|
||||
jwt.RegisteredClaims
|
||||
@@ -34,11 +46,12 @@ type sessionClaims struct {
|
||||
|
||||
type sessionID string
|
||||
|
||||
var oauthRefreshTokens jsonstore.MapStore[oauthRefreshToken]
|
||||
var oauthRefreshTokens jsonstore.MapStore[*oauthRefreshToken]
|
||||
|
||||
var (
|
||||
defaultRefreshTokenExpiry = 30 * 24 * time.Hour // 1 month
|
||||
refreshBefore = 30 * time.Second
|
||||
sessionInvalidateDelay = 3 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -50,7 +63,7 @@ const sessionTokenIssuer = "GoDoxy"
|
||||
|
||||
func init() {
|
||||
if IsOIDCEnabled() {
|
||||
oauthRefreshTokens = jsonstore.Store[oauthRefreshToken]("oauth_refresh_tokens")
|
||||
oauthRefreshTokens = jsonstore.Store[*oauthRefreshToken]("oauth_refresh_tokens")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -61,7 +74,7 @@ func (token *oauthRefreshToken) expired() bool {
|
||||
func newSessionID() sessionID {
|
||||
b := make([]byte, 32)
|
||||
_, _ = rand.Read(b)
|
||||
return sessionID(base64.StdEncoding.EncodeToString(b))
|
||||
return sessionID(hex.EncodeToString(b))
|
||||
}
|
||||
|
||||
func newSession(username string, groups []string) Session {
|
||||
@@ -72,26 +85,26 @@ func newSession(username string, groups []string) Session {
|
||||
}
|
||||
}
|
||||
|
||||
// getOnceOAuthRefreshToken returns the refresh token for the given session.
|
||||
//
|
||||
// The token is removed from the store after retrieval.
|
||||
func getOnceOAuthRefreshToken(claims *Session) (*oauthRefreshToken, bool) {
|
||||
// getOAuthRefreshToken returns the refresh token for the given session.
|
||||
func getOAuthRefreshToken(claims *Session) (*oauthRefreshToken, bool) {
|
||||
token, ok := oauthRefreshTokens.Load(string(claims.SessionID))
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
invalidateOAuthRefreshToken(claims.SessionID)
|
||||
|
||||
if token.expired() {
|
||||
invalidateOAuthRefreshToken(claims.SessionID)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
if claims.Username != token.Username {
|
||||
return nil, false
|
||||
}
|
||||
return &token, true
|
||||
return token, true
|
||||
}
|
||||
|
||||
func storeOAuthRefreshToken(sessionID sessionID, username, token string) {
|
||||
oauthRefreshTokens.Store(string(sessionID), oauthRefreshToken{
|
||||
oauthRefreshTokens.Store(string(sessionID), &oauthRefreshToken{
|
||||
Username: username,
|
||||
RefreshToken: token,
|
||||
Expiry: time.Now().Add(defaultRefreshTokenExpiry),
|
||||
@@ -135,51 +148,75 @@ func (auth *OIDCProvider) parseSessionJWT(sessionJWT string) (claims *sessionCla
|
||||
return claims, sessionToken.Valid && claims.Issuer == sessionTokenIssuer, nil
|
||||
}
|
||||
|
||||
func (auth *OIDCProvider) TryRefreshToken(w http.ResponseWriter, r *http.Request, sessionJWT string) error {
|
||||
func (auth *OIDCProvider) TryRefreshToken(ctx context.Context, sessionJWT string) (*refreshResult, error) {
|
||||
// verify the session cookie
|
||||
claims, valid, err := auth.parseSessionJWT(sessionJWT)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%w: %w", ErrInvalidSessionToken, err)
|
||||
return nil, fmt.Errorf("session: %s - %w: %w", claims.SessionID, ErrInvalidSessionToken, err)
|
||||
}
|
||||
if !valid {
|
||||
return ErrInvalidSessionToken
|
||||
return nil, ErrInvalidSessionToken
|
||||
}
|
||||
|
||||
// check if refresh is possible
|
||||
refreshToken, ok := getOnceOAuthRefreshToken(&claims.Session)
|
||||
refreshToken, ok := getOAuthRefreshToken(&claims.Session)
|
||||
if !ok {
|
||||
return errNoRefreshToken
|
||||
return nil, errNoRefreshToken
|
||||
}
|
||||
|
||||
if !auth.checkAllowed(claims.Username, claims.Groups) {
|
||||
return ErrUserNotAllowed
|
||||
return nil, ErrUserNotAllowed
|
||||
}
|
||||
|
||||
return auth.doRefreshToken(ctx, refreshToken, &claims.Session)
|
||||
}
|
||||
|
||||
func (auth *OIDCProvider) doRefreshToken(ctx context.Context, refreshToken *oauthRefreshToken, claims *Session) (*refreshResult, error) {
|
||||
refreshToken.mu.Lock()
|
||||
defer refreshToken.mu.Unlock()
|
||||
|
||||
// already refreshed
|
||||
// this must be called after refresh but before invalidate
|
||||
if refreshToken.result != nil || refreshToken.err != nil {
|
||||
return refreshToken.result, refreshToken.err
|
||||
}
|
||||
|
||||
// this step refreshes the token
|
||||
// see https://cs.opensource.google/go/x/oauth2/+/refs/tags/v0.29.0:oauth2.go;l=313
|
||||
newToken, err := auth.oauthConfig.TokenSource(r.Context(), &oauth2.Token{
|
||||
newToken, err := auth.oauthConfig.TokenSource(ctx, &oauth2.Token{
|
||||
RefreshToken: refreshToken.RefreshToken,
|
||||
}).Token()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%w: %w", ErrRefreshTokenFailure, err)
|
||||
refreshToken.err = fmt.Errorf("session: %s - %w: %w", claims.SessionID, ErrRefreshTokenFailure, err)
|
||||
return nil, refreshToken.err
|
||||
}
|
||||
|
||||
idTokenJWT, idToken, err := auth.getIdToken(r.Context(), newToken)
|
||||
idTokenJWT, idToken, err := auth.getIdToken(ctx, newToken)
|
||||
if err != nil {
|
||||
return err
|
||||
refreshToken.err = fmt.Errorf("session: %s - %w: %w", claims.SessionID, ErrRefreshTokenFailure, err)
|
||||
return nil, refreshToken.err
|
||||
}
|
||||
|
||||
// in case there're multiple requests for the same session to refresh
|
||||
// invalidate the token after a short delay
|
||||
go func() {
|
||||
<-time.After(sessionInvalidateDelay)
|
||||
invalidateOAuthRefreshToken(claims.SessionID)
|
||||
}()
|
||||
|
||||
sessionID := newSessionID()
|
||||
|
||||
logging.Debug().Str("username", claims.Username).Time("expiry", newToken.Expiry).Msg("refreshed token")
|
||||
storeOAuthRefreshToken(sessionID, claims.Username, newToken.RefreshToken)
|
||||
|
||||
// set new idToken and new sessionToken
|
||||
auth.setIDTokenCookie(w, r, idTokenJWT, time.Until(idToken.Expiry))
|
||||
auth.setSessionTokenCookie(w, r, Session{
|
||||
SessionID: sessionID,
|
||||
Username: claims.Username,
|
||||
Groups: claims.Groups,
|
||||
})
|
||||
return nil
|
||||
refreshToken.result = &refreshResult{
|
||||
newSession: Session{
|
||||
SessionID: sessionID,
|
||||
Username: claims.Username,
|
||||
Groups: claims.Groups,
|
||||
},
|
||||
jwt: idTokenJWT,
|
||||
jwtExpiry: idToken.Expiry,
|
||||
}
|
||||
return refreshToken.result, nil
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/coreos/go-oidc/v3/oidc"
|
||||
"github.com/yusing/go-proxy/internal/common"
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/logging"
|
||||
"github.com/yusing/go-proxy/internal/net/gphttp"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
@@ -47,7 +48,12 @@ const (
|
||||
OIDCLogoutPath = "/auth/logout"
|
||||
)
|
||||
|
||||
var errMissingIDToken = errors.New("missing id_token field from oauth token")
|
||||
var (
|
||||
errMissingIDToken = errors.New("missing id_token field from oauth token")
|
||||
|
||||
ErrMissingOAuthToken = gperr.New("missing oauth token")
|
||||
ErrInvalidOAuthToken = gperr.New("invalid oauth token")
|
||||
)
|
||||
|
||||
// generateState generates a random string for OIDC state.
|
||||
const oidcStateLength = 32
|
||||
@@ -133,6 +139,10 @@ func (auth *OIDCProvider) getIdToken(ctx context.Context, oauthToken *oauth2.Tok
|
||||
}
|
||||
|
||||
func (auth *OIDCProvider) HandleAuth(w http.ResponseWriter, r *http.Request) {
|
||||
if r.TLS == nil && r.Header.Get("X-Forwarded-Proto") != "https" {
|
||||
http.Redirect(w, r, "https://"+requestHost(r)+OIDCAuthInitPath, http.StatusFound)
|
||||
return
|
||||
}
|
||||
switch r.URL.Path {
|
||||
case OIDCAuthInitPath:
|
||||
auth.LoginHandler(w, r)
|
||||
@@ -148,12 +158,19 @@ func (auth *OIDCProvider) HandleAuth(w http.ResponseWriter, r *http.Request) {
|
||||
func (auth *OIDCProvider) LoginHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// check for session token
|
||||
sessionToken, err := r.Cookie(CookieOauthSessionToken)
|
||||
if err == nil {
|
||||
err = auth.TryRefreshToken(w, r, sessionToken.Value)
|
||||
if err != nil {
|
||||
logging.Debug().Err(err).Msg("failed to refresh token")
|
||||
auth.clearCookie(w, r)
|
||||
if err == nil { // session token exists
|
||||
result, err := auth.TryRefreshToken(r.Context(), sessionToken.Value)
|
||||
// redirect back to where they requested
|
||||
// when token refresh is ok
|
||||
if err == nil {
|
||||
auth.setIDTokenCookie(w, r, result.jwt, time.Until(result.jwtExpiry))
|
||||
auth.setSessionTokenCookie(w, r, result.newSession)
|
||||
ProceedNext(w, r)
|
||||
return
|
||||
}
|
||||
// clear cookies then redirect to home
|
||||
logging.Err(err).Msg("failed to refresh token")
|
||||
auth.clearCookie(w, r)
|
||||
http.Redirect(w, r, "/", http.StatusFound)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -98,7 +98,7 @@ func TestUserPassLoginCallbackHandler(t *testing.T) {
|
||||
Host: "app.example.com",
|
||||
Body: io.NopCloser(bytes.NewReader(Must(json.Marshal(tt.creds)))),
|
||||
}
|
||||
auth.LoginHandler(w, req)
|
||||
auth.PostAuthCallbackHandler(w, req)
|
||||
if tt.wantErr {
|
||||
ExpectEqual(t, w.Code, http.StatusUnauthorized)
|
||||
} else {
|
||||
|
||||
@@ -10,22 +10,21 @@ import (
)

var (
    ErrMissingOAuthToken   = gperr.New("missing oauth token")
    ErrMissingSessionToken = gperr.New("missing session token")
    ErrInvalidOAuthToken   = gperr.New("invalid oauth token")
    ErrInvalidSessionToken = gperr.New("invalid session token")
    ErrUserNotAllowed      = gperr.New("user not allowed")
)

func IsFrontend(r *http.Request) bool {
    return r.Host == common.APIHTTPAddr
}

func requestHost(r *http.Request) string {
    // check if it's from backend
    switch r.Host {
    case common.APIHTTPAddr:
    // use XFH
    if IsFrontend(r) {
        return r.Header.Get("X-Forwarded-Host")
    default:
        return r.Host
    }
    return r.Host
}

// cookieDomain returns the fully qualified domain name of the request host
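Because the +/- markers were stripped from this hunk, the old switch-based requestHost and its replacement are interleaved above. A short sketch of the resulting behaviour as read from the new lines; apiAddr stands in for common.APIHTTPAddr, and this is illustrative, not repository code.

package auth

import "net/http"

// requestHostSketch: requests arriving on the API/frontend address report their
// origin via X-Forwarded-Host, everything else falls back to r.Host.
func requestHostSketch(r *http.Request, apiAddr string) string {
    if r.Host == apiAddr { // what IsFrontend checks against common.APIHTTPAddr
        return r.Header.Get("X-Forwarded-Host")
    }
    return r.Host
}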
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/logging"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
"github.com/yusing/go-proxy/internal/utils/strutils"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
@@ -70,7 +69,7 @@ func (cfg *Config) Validate() gperr.Error {
|
||||
if !ok {
|
||||
b.Add(ErrUnknownProvider.
|
||||
Subject(cfg.Provider).
|
||||
Withf(strutils.DoYouMean(utils.NearestField(cfg.Provider, Providers))))
|
||||
With(gperr.DoYouMean(utils.NearestField(cfg.Provider, Providers))))
|
||||
} else {
|
||||
_, err := providerConstructor(cfg.Options)
|
||||
if err != nil {
|
||||
|
||||
@@ -15,8 +15,10 @@ import (
|
||||
"github.com/go-acme/lego/v4/certificate"
|
||||
"github.com/go-acme/lego/v4/lego"
|
||||
"github.com/go-acme/lego/v4/registration"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/logging"
|
||||
"github.com/yusing/go-proxy/internal/notif"
|
||||
"github.com/yusing/go-proxy/internal/task"
|
||||
"github.com/yusing/go-proxy/internal/utils/strutils"
|
||||
)
|
||||
@@ -193,8 +195,18 @@ func (p *Provider) ScheduleRenewal(parent task.Parent) {
|
||||
if err := p.renewIfNeeded(); err != nil {
|
||||
gperr.LogWarn("cert renew failed", err)
|
||||
lastErrOn = time.Now()
|
||||
notif.Notify(¬if.LogMessage{
|
||||
Level: zerolog.ErrorLevel,
|
||||
Title: "SSL certificate renewal failed",
|
||||
Body: notif.MessageBody(err.Error()),
|
||||
})
|
||||
continue
|
||||
}
|
||||
notif.Notify(¬if.LogMessage{
|
||||
Level: zerolog.InfoLevel,
|
||||
Title: "SSL certificate renewed",
|
||||
Body: notif.ListBody(p.cfg.Domains),
|
||||
})
|
||||
// Reset on success
|
||||
lastErrOn = time.Time{}
|
||||
renewalTime = p.ShouldRenewOn()
|
||||
|
||||
@@ -45,6 +45,6 @@ oauth2_config:
|
||||
testYaml = testYaml[1:] // remove first \n
|
||||
opt := make(map[string]any)
|
||||
require.NoError(t, yaml.Unmarshal([]byte(testYaml), &opt))
|
||||
require.NoError(t, utils.Deserialize(opt, cfg))
|
||||
require.NoError(t, utils.MapUnmarshalValidate(opt, cfg))
|
||||
require.Equal(t, cfg, cfgExpected)
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ func DNSProvider[CT any, PT challenge.Provider](
|
||||
) Generator {
|
||||
return func(opt map[string]any) (challenge.Provider, gperr.Error) {
|
||||
cfg := defaultCfg()
|
||||
err := utils.Deserialize(opt, &cfg)
|
||||
err := utils.MapUnmarshalValidate(opt, &cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -15,8 +15,8 @@ const (
    ConfigExampleFileName = "config.example.yml"
    ConfigPath            = ConfigBasePath + "/" + ConfigFileName

    IconListCachePath = ConfigBasePath + "/.icon_list_cache.json"
    IconCachePath     = ConfigBasePath + "/.icon_cache.json"
    DataDir           = "data"
    IconListCachePath = DataDir + "/.icon_list_cache.json"

    NamespaceHomepageOverrides = ".homepage"
    NamespaceIconCache         = ".icon_cache"
@@ -25,14 +25,12 @@ const (

    ComposeFileName        = "compose.yml"
    ComposeExampleFileName = "compose.example.yml"

    DataDir = "data"

    ErrorPagesBasePath = "error_pages"
    ErrorPagesBasePath = "error_pages"
)

var RequiredDirectories = []string{
    ConfigBasePath,
    DataDir,
    ErrorPagesBasePath,
    MiddlewareComposeBasePath,
}

@@ -58,6 +58,8 @@ var (
    MetricsDisableDisk    = GetEnvBool("METRICS_DISABLE_DISK", false)
    MetricsDisableNetwork = GetEnvBool("METRICS_DISABLE_NETWORK", false)
    MetricsDisableSensors = GetEnvBool("METRICS_DISABLE_SENSORS", false)

    ForceResolveCountry = GetEnvBool("FORCE_RESOLVE_COUNTRY", false)
)

func GetEnv[T any](key string, defaultValue T, parser func(string) (T, error)) T {
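GetEnv is cut off at its signature above. A hedged sketch of the usual shape of such a helper plus a GetEnvBool-style wrapper; only the signature is taken from the hunk, the body and the wrapper are assumptions.

package common

import (
    "os"
    "strconv"
)

// getEnvSketch reads key, falls back to defaultValue when the variable is unset
// or empty, and otherwise runs parser on the raw string.
func getEnvSketch[T any](key string, defaultValue T, parser func(string) (T, error)) T {
    raw, ok := os.LookupEnv(key)
    if !ok || raw == "" {
        return defaultValue
    }
    v, err := parser(raw)
    if err != nil {
        return defaultValue
    }
    return v
}

// getEnvBoolSketch mirrors calls like GetEnvBool("METRICS_DISABLE_CPU", false).
func getEnvBoolSketch(key string, defaultValue bool) bool {
    return getEnvSketch(key, defaultValue, strconv.ParseBool)
}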
@@ -9,6 +9,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/yusing/go-proxy/internal/api"
|
||||
autocert "github.com/yusing/go-proxy/internal/autocert"
|
||||
"github.com/yusing/go-proxy/internal/common"
|
||||
@@ -16,12 +17,15 @@ import (
|
||||
"github.com/yusing/go-proxy/internal/entrypoint"
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/logging"
|
||||
"github.com/yusing/go-proxy/internal/maxmind"
|
||||
"github.com/yusing/go-proxy/internal/net/gphttp/server"
|
||||
"github.com/yusing/go-proxy/internal/notif"
|
||||
"github.com/yusing/go-proxy/internal/proxmox"
|
||||
proxy "github.com/yusing/go-proxy/internal/route/provider"
|
||||
"github.com/yusing/go-proxy/internal/task"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
F "github.com/yusing/go-proxy/internal/utils/functional"
|
||||
"github.com/yusing/go-proxy/internal/utils/strutils/ansi"
|
||||
"github.com/yusing/go-proxy/internal/watcher"
|
||||
"github.com/yusing/go-proxy/internal/watcher/events"
|
||||
)
|
||||
@@ -114,7 +118,7 @@ func Reload() gperr.Error {
|
||||
err := newCfg.load()
|
||||
if err != nil {
|
||||
newCfg.task.Finish(err)
|
||||
return gperr.New("using last config").With(err)
|
||||
return gperr.New(ansi.Warning("using last config")).With(err)
|
||||
}
|
||||
|
||||
// cancel all current subtasks -> wait
|
||||
@@ -219,7 +223,7 @@ func (cfg *Config) load() gperr.Error {
|
||||
}
|
||||
|
||||
model := config.DefaultConfig()
|
||||
if err := utils.DeserializeYAML(data, model); err != nil {
|
||||
if err := utils.UnmarshalValidateYAML(data, model); err != nil {
|
||||
gperr.LogFatal(errMsg, err)
|
||||
}
|
||||
|
||||
@@ -227,8 +231,10 @@ func (cfg *Config) load() gperr.Error {
|
||||
errs := gperr.NewBuilder(errMsg)
|
||||
errs.Add(cfg.entrypoint.SetMiddlewares(model.Entrypoint.Middlewares))
|
||||
errs.Add(cfg.entrypoint.SetAccessLogger(cfg.task, model.Entrypoint.AccessLog))
|
||||
errs.Add(cfg.initMaxMind(model.Providers.MaxMind))
|
||||
cfg.initNotification(model.Providers.Notification)
|
||||
errs.Add(cfg.initAutoCert(model.AutoCert))
|
||||
errs.Add(cfg.initProxmox(model.Providers.Proxmox))
|
||||
errs.Add(cfg.loadRouteProviders(&model.Providers))
|
||||
|
||||
cfg.value = model
|
||||
@@ -247,7 +253,22 @@ func (cfg *Config) load() gperr.Error {
|
||||
}
|
||||
}
|
||||
|
||||
return errs.Error()
|
||||
if errs.HasError() {
|
||||
notif.Notify(¬if.LogMessage{
|
||||
Level: zerolog.ErrorLevel,
|
||||
Title: "Config Reload Error",
|
||||
Body: notif.ErrorBody{Error: errs.Error()},
|
||||
})
|
||||
return errs.Error()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cfg *Config) initMaxMind(maxmindCfg *maxmind.Config) gperr.Error {
|
||||
if maxmindCfg != nil {
|
||||
return maxmind.SetInstance(cfg.task, maxmindCfg)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cfg *Config) initNotification(notifCfg []notif.NotificationConfig) {
|
||||
@@ -278,6 +299,17 @@ func (cfg *Config) initAutoCert(autocertCfg *autocert.Config) gperr.Error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cfg *Config) initProxmox(proxmoxCfg []proxmox.Config) gperr.Error {
|
||||
proxmox.Clients.Clear()
|
||||
var errs = gperr.NewBuilder()
|
||||
for _, cfg := range proxmoxCfg {
|
||||
if err := cfg.Init(); err != nil {
|
||||
errs.Add(err.Subject(cfg.URL))
|
||||
}
|
||||
}
|
||||
return errs.Error()
|
||||
}
|
||||
|
||||
func (cfg *Config) errIfExists(p *proxy.Provider) gperr.Error {
|
||||
if _, ok := cfg.providers.Load(p.String()); ok {
|
||||
return gperr.Errorf("provider %s already exists", p.String())
|
||||
|
||||
@@ -11,7 +11,9 @@ import (
|
||||
"github.com/yusing/go-proxy/internal/autocert"
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/logging/accesslog"
|
||||
maxmind "github.com/yusing/go-proxy/internal/maxmind/types"
|
||||
"github.com/yusing/go-proxy/internal/notif"
|
||||
"github.com/yusing/go-proxy/internal/proxmox"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
)
|
||||
|
||||
@@ -30,6 +32,8 @@ type (
|
||||
Docker map[string]string `json:"docker" yaml:"docker,omitempty" validate:"non_empty_docker_keys,dive,unix_addr|url"`
|
||||
Agents []*agent.AgentConfig `json:"agents" yaml:"agents,omitempty"`
|
||||
Notification []notif.NotificationConfig `json:"notification" yaml:"notification,omitempty"`
|
||||
Proxmox []proxmox.Config `json:"proxmox" yaml:"proxmox,omitempty"`
|
||||
MaxMind *maxmind.Config `json:"maxmind" yaml:"maxmind,omitempty"`
|
||||
}
|
||||
Entrypoint struct {
|
||||
Middlewares []map[string]any `json:"middlewares"`
|
||||
@@ -86,7 +90,7 @@ func HasInstance() bool {
|
||||
|
||||
func Validate(data []byte) gperr.Error {
|
||||
var model Config
|
||||
return utils.DeserializeYAML(data, &model)
|
||||
return utils.UnmarshalValidateYAML(data, &model)
|
||||
}
|
||||
|
||||
var matchDomainsRegex = regexp.MustCompile(`^[^\.]?([\w\d\-_]\.?)+[^\.]?$`)
|
||||
|
||||
@@ -126,11 +126,27 @@ func (c *Container) isDatabase() bool {
    return false
}

func (c *Container) isLocal() bool {
    if strings.HasPrefix(c.DockerHost, "unix://") {
        return true
    }
    url, err := url.Parse(c.DockerHost)
    if err != nil {
        return false
    }
    switch url.Hostname() {
    case "localhost", "127.0.0.1", "::1":
        return true
    default:
        return false
    }
}

func (c *Container) setPublicHostname() {
    if !c.Running {
        return
    }
    if strings.HasPrefix(c.DockerHost, "unix://") {
    if c.isLocal() {
        c.PublicHostname = "127.0.0.1"
        return
    }
@@ -144,18 +160,17 @@ func (c *Container) setPublicHostname() {
}

func (c *Container) setPrivateHostname(helper containerHelper) {
    if !strings.HasPrefix(c.DockerHost, "unix://") && c.Agent == nil {
    if !c.isLocal() && c.Agent == nil {
        return
    }
    if helper.NetworkSettings == nil {
        return
    }
    for _, v := range helper.NetworkSettings.Networks {
        if v.IPAddress == "" {
            continue
        if v.IPAddress != "" {
            c.PrivateHostname = v.IPAddress
            return
        }
        c.PrivateHostname = v.IPAddress
        return
    }
}

@@ -178,7 +193,7 @@ func (c *Container) loadDeleteIdlewatcherLabels(helper containerHelper) {
        ContainerName: c.ContainerName,
    },
}
    err := utils.Deserialize(cfg, idwCfg)
    err := utils.MapUnmarshalValidate(cfg, idwCfg)
    if err != nil {
        gperr.LogWarn("invalid idlewatcher config", gperr.PrependSubject(c.ContainerName, err))
    } else {
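A table-driven sketch of the host values the new isLocal treats as local. The package name and the bare Container literal are assumptions; this is illustrative, not repository code.

package docker

import "testing"

func TestIsLocalSketch(t *testing.T) {
    cases := map[string]bool{
        "unix:///var/run/docker.sock": true,  // unix sockets are always local
        "tcp://127.0.0.1:2375":        true,  // loopback IPv4
        "tcp://[::1]:2375":            true,  // loopback IPv6
        "tcp://10.0.0.5:2375":         false, // remote daemon
    }
    for host, want := range cases {
        c := &Container{DockerHost: host}
        if got := c.isLocal(); got != want {
            t.Errorf("isLocal(%q) = %v, want %v", host, got, want)
        }
    }
}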
@@ -37,11 +37,11 @@ func (err *baseError) Subjectf(format string, args ...any) Error {
}

func (err baseError) With(extra error) Error {
    return &nestedError{&err, []error{extra}}
    return &nestedError{err.Err, []error{extra}}
}

func (err baseError) Withf(format string, args ...any) Error {
    return &nestedError{&err, []error{fmt.Errorf(format, args...)}}
    return &nestedError{err.Err, []error{fmt.Errorf(format, args...)}}
}

func (err *baseError) Error() string {
@@ -62,3 +62,11 @@ func (err *baseError) MarshalJSON() ([]byte, error) {
    return json.Marshal(err.Error())
    }
}

func (err *baseError) Plain() []byte {
    return Plain(err.Err)
}

func (err *baseError) Markdown() []byte {
    return Markdown(err.Err)
}

@@ -50,6 +50,7 @@ func TestBuilderNested(t *testing.T) {
  • Inner: 1
  • Inner: 2
  • Action 2
  • Inner: 3`
  • Inner: 3
`
    ExpectEqual(t, got, expected)
}

@@ -20,6 +20,16 @@ type Error interface {
    Subject(subject string) Error
    // Subjectf is a wrapper for Subject(fmt.Sprintf(format, args...)).
    Subjectf(format string, args ...any) Error
    PlainError
    MarkdownError
}

type PlainError interface {
    Plain() []byte
}

type MarkdownError interface {
    Markdown() []byte
}

// this makes JSON marshaling work,

@@ -153,6 +153,7 @@ func TestErrorStringNested(t *testing.T) {
  • 2
  • action 3 > inner3: generic failure
  • 3
  • 3`
  • 3
`
    expect.Equal(t, ansi.StripANSI(ne.Error()), want)
}
43 internal/gperr/hint.go Normal file
@@ -0,0 +1,43 @@
package gperr

import "github.com/yusing/go-proxy/internal/utils/strutils/ansi"

type Hint struct {
Prefix string
Message string
Suffix string
}

var _ PlainError = (*Hint)(nil)
var _ MarkdownError = (*Hint)(nil)

func (h *Hint) Error() string {
return h.Prefix + ansi.Info(h.Message) + h.Suffix
}

func (h *Hint) Plain() []byte {
return []byte(h.Prefix + h.Message + h.Suffix)
}

func (h *Hint) Markdown() []byte {
return []byte(h.Prefix + "**" + h.Message + "**" + h.Suffix)
}

func (h *Hint) MarshalText() ([]byte, error) {
return h.Plain(), nil
}

func (h *Hint) String() string {
return h.Error()
}

func DoYouMean(s string) *Hint {
if s == "" {
return nil
}
return &Hint{
Prefix: "Do you mean ",
Message: s,
Suffix: "?",
}
}
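The new Hint type is just another error in the gperr chain, so it can be attached with the existing With helper. A minimal sketch of how a caller might use it, assuming the gperr API shown above (the field names are made up for illustration):

    // Hypothetical caller; "idle_timeou" / "idle_timeout" are illustrative values.
    package main

    import (
        "fmt"

        "github.com/yusing/go-proxy/internal/gperr"
    )

    func main() {
        err := gperr.New(`unknown field "idle_timeou"`).
            With(gperr.DoYouMean("idle_timeout"))
        fmt.Println(err.Error())         // ANSI-highlighted form for terminal logs
        fmt.Println(string(err.Plain())) // plain form, e.g. for notification providers
    }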
@@ -15,7 +15,7 @@ func log(msg string, err error, level zerolog.Level, logger ...*zerolog.Logger)
} else {
l = logging.GetLogger()
}
l.WithLevel(level).Msg(New(highlight(msg)).With(err).Error())
l.WithLevel(level).Msg(New(highlightANSI(msg)).With(err).Error())
switch level {
case zerolog.FatalLevel:
os.Exit(1)

@@ -3,8 +3,6 @@ package gperr
import (
"errors"
"fmt"

"github.com/yusing/go-proxy/internal/utils/strutils"
)

//nolint:recvcheck
@@ -67,48 +65,98 @@ func (err *nestedError) Is(other error) bool {
return false
}

var nilError = newError("<nil>")
var bulletPrefix = []byte("• ")
var markdownBulletPrefix = []byte("- ")
var spaces = []byte(" ")

type appendLineFunc func(buf []byte, err error, level int) []byte

func (err *nestedError) Error() string {
if err == nil {
return makeLine("<nil>", 0)
return nilError.Error()
}

if err.Err != nil {
lines := make([]string, 0, 1+len(err.Extras))
lines = append(lines, makeLine(err.Err.Error(), 0))
lines = append(lines, makeLines(err.Extras, 1)...)
return strutils.JoinLines(lines)
buf := appendLineNormal(nil, err.Err, 0)
if len(err.Extras) > 0 {
buf = append(buf, '\n')
buf = appendLines(buf, err.Extras, 1, appendLineNormal)
}
return strutils.JoinLines(makeLines(err.Extras, 0))
return string(buf)
}

//go:inline
func makeLine(err string, level int) string {
const bulletPrefix = "• "
const spaces = " "
func (err *nestedError) Plain() []byte {
if err == nil {
return appendLinePlain(nil, nilError, 0)
}
buf := appendLinePlain(nil, err.Err, 0)
if len(err.Extras) > 0 {
buf = append(buf, '\n')
buf = appendLines(buf, err.Extras, 1, appendLinePlain)
}
return buf
}

func (err *nestedError) Markdown() []byte {
if err == nil {
return appendLineMd(nil, nilError, 0)
}

buf := appendLineMd(nil, err.Err, 0)
if len(err.Extras) > 0 {
buf = append(buf, '\n')
buf = appendLines(buf, err.Extras, 1, appendLineMd)
}
return buf
}

func appendLineNormal(buf []byte, err error, level int) []byte {
if level == 0 {
return err
return append(buf, err.Error()...)
}
return spaces[:2*level] + bulletPrefix + err
buf = append(buf, spaces[:2*level]...)
buf = append(buf, bulletPrefix...)
buf = append(buf, err.Error()...)
return buf
}

func makeLines(errs []error, level int) []string {
if len(errs) == 0 {
return nil
func appendLinePlain(buf []byte, err error, level int) []byte {
if level == 0 {
return append(buf, Plain(err)...)
}
buf = append(buf, spaces[:2*level]...)
buf = append(buf, bulletPrefix...)
buf = append(buf, Plain(err)...)
return buf
}

func appendLineMd(buf []byte, err error, level int) []byte {
if level == 0 {
return append(buf, Markdown(err)...)
}
buf = append(buf, spaces[:2*level]...)
buf = append(buf, markdownBulletPrefix...)
buf = append(buf, Markdown(err)...)
return buf
}

func appendLines(buf []byte, errs []error, level int, appendLine appendLineFunc) []byte {
if len(errs) == 0 {
return buf
}
lines := make([]string, 0, len(errs))
for _, err := range errs {
switch err := wrap(err).(type) {
case *nestedError:
if err.Err != nil {
lines = append(lines, makeLine(err.Err.Error(), level))
lines = append(lines, makeLines(err.Extras, level+1)...)
buf = appendLine(buf, err.Err, level)
buf = append(buf, '\n')
buf = appendLines(buf, err.Extras, level+1, appendLine)
} else {
lines = append(lines, makeLines(err.Extras, level)...)
buf = appendLines(buf, err.Extras, level, appendLine)
}
default:
lines = append(lines, makeLine(err.Error(), level))
buf = appendLine(buf, err, level)
buf = append(buf, '\n')
}
}
return lines
return buf
}
@@ -1,10 +1,10 @@
package gperr

import (
"bytes"
"encoding/json"
"errors"
"slices"
"strings"

"github.com/yusing/go-proxy/internal/utils/strutils/ansi"
)
@@ -19,10 +19,23 @@ type withSubject struct {

const subjectSep = " > "

func highlight(subject string) string {
type highlightFunc func(subject string) string

var _ PlainError = (*withSubject)(nil)
var _ MarkdownError = (*withSubject)(nil)

func highlightANSI(subject string) string {
return ansi.HighlightRed + subject + ansi.Reset
}

func highlightMarkdown(subject string) string {
return "**" + subject + "**"
}

func noHighlight(subject string) string {
return subject
}

func PrependSubject(subject string, err error) error {
if err == nil {
return nil
@@ -69,24 +82,38 @@ func (err *withSubject) Unwrap() error {
}

func (err *withSubject) Error() string {
return string(err.fmtError(highlightANSI))
}

func (err *withSubject) Plain() []byte {
return err.fmtError(noHighlight)
}

func (err *withSubject) Markdown() []byte {
return err.fmtError(highlightMarkdown)
}

func (err *withSubject) fmtError(highlight highlightFunc) []byte {
// subject is in reversed order
n := len(err.Subjects)
size := 0
errStr := err.Err.Error()
var sb strings.Builder
var buf bytes.Buffer
for _, s := range err.Subjects {
size += len(s)
}
sb.Grow(size + 2 + n*len(subjectSep) + len(errStr) + len(highlight("")))
buf.Grow(size + 2 + n*len(subjectSep) + len(errStr) + len(highlight("")))

for i := n - 1; i > 0; i-- {
sb.WriteString(err.Subjects[i])
sb.WriteString(subjectSep)
buf.WriteString(err.Subjects[i])
buf.WriteString(subjectSep)
}
sb.WriteString(highlight(err.Subjects[0]))
sb.WriteString(": ")
sb.WriteString(errStr)
return sb.String()
buf.WriteString(highlight(err.Subjects[0]))
if errStr != "" {
buf.WriteString(": ")
buf.WriteString(errStr)
}
return buf.Bytes()
}

// MarshalJSON implements the json.Marshaler interface.
@@ -1,8 +1,6 @@
package gperr

import (
"encoding/json"
"errors"
"fmt"
)

@@ -29,16 +27,17 @@ func Wrap(err error, message ...string) Error {
if len(message) == 0 || message[0] == "" {
return wrap(err)
}
wrapped := &wrappedError{err, message[0]}
//nolint:errorlint
switch err := err.(type) {
case *baseError:
err.Err = fmt.Errorf("%s: %w", message[0], err.Err)
err.Err = wrapped
return err
case *nestedError:
err.Err = fmt.Errorf("%s: %w", message[0], err.Err)
err.Err = wrapped
return err
}
return &baseError{fmt.Errorf("%s: %w", message[0], err)}
return &baseError{wrapped}
}

func Unwrap(err error) Error {
@@ -65,18 +64,6 @@ func wrap(err error) Error {
return &baseError{err}
}

func IsJSONMarshallable(err error) bool {
switch err := err.(type) {
case *nestedError, *withSubject:
return true
case *baseError:
return IsJSONMarshallable(err.Err)
default:
var v json.Marshaler
return errors.As(err, &v)
}
}

func Join(errors ...error) Error {
n := 0
for _, err := range errors {
@@ -103,3 +90,27 @@ func Collect[T any, Err error, Arg any, Func func(Arg) (T, Err)](eb *Builder, fn
eb.Add(err)
return result
}

func Plain(err error) []byte {
if err == nil {
return nil
}
if p, ok := err.(PlainError); ok {
return p.Plain()
}
return []byte(err.Error())
}

func Markdown(err error) []byte {
if err == nil {
return nil
}
switch err := err.(type) {
case MarkdownError:
return err.Markdown()
case interface{ Unwrap() []error }:
return appendLines(nil, err.Unwrap(), 0, appendLineMd)
default:
return []byte(err.Error())
}
}
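Plain and Markdown give two renderings of any error tree built with this package. A minimal sketch under the API shown above (the joined errors are illustrative, not from the diff):

    package main

    import (
        "errors"
        "fmt"

        "github.com/yusing/go-proxy/internal/gperr"
    )

    func main() {
        // Join, Plain and Markdown come from the changes above; the errors are placeholders.
        joined := gperr.Join(errors.New("first failure"), errors.New("second failure"))
        fmt.Println(string(gperr.Plain(joined)))    // plain-text rendering, no ANSI codes
        fmt.Println(string(gperr.Markdown(joined))) // Markdown rendering for notification sinks
    }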
@@ -1,63 +1,55 @@
package gperr

import (
"errors"
"testing"
)

type testErr struct{}

func (e *testErr) Error() string {
func (e testErr) Error() string {
return "test error"
}

func (e *testErr) MarshalJSON() ([]byte, error) {
return nil, nil
func (e testErr) Plain() []byte {
return []byte("test error")
}

func TestIsJSONMarshallable(t *testing.T) {
tests := []struct {
name string
err error
want bool
}{
{
name: "testErr",
err: &testErr{},
want: true,
},
{
name: "baseError",
err: &baseError{},
want: false,
},
{
name: "baseError with json marshallable error",
err: &baseError{&testErr{}},
want: true,
},
{
name: "nestedError",
err: &nestedError{},
want: true,
},
{
name: "withSubject",
err: &withSubject{},
want: true,
},
{
name: "standard error",
err: errors.New("test error"),
want: false,
},
}
func (e testErr) Markdown() []byte {
return []byte("**test error**")
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if got := IsJSONMarshallable(test.err); got != test.want {
t.Errorf("IsJSONMarshallable(%v) = %v, want %v", test.err, got, test.want)
}
})
type testMultiErr struct {
errors []error
}

func (e testMultiErr) Error() string {
return Join(e.errors...).Error()
}

func (e testMultiErr) Unwrap() []error {
return e.errors
}

func TestFormatting(t *testing.T) {
err := testErr{}
plain := Plain(err)
if string(plain) != "test error" {
t.Errorf("expected test error, got %s", string(plain))
}
md := Markdown(err)
if string(md) != "**test error**" {
t.Errorf("expected test error, got %s", string(md))
}
}

func TestMultiError(t *testing.T) {
err := testMultiErr{[]error{testErr{}, testErr{}}}
plain := Plain(err)
if string(plain) != "test error\ntest error" {
t.Errorf("expected test error, got %s", string(plain))
}
md := Markdown(err)
if string(md) != "**test error**\n**test error**" {
t.Errorf("expected test error, got %s", string(md))
}
}

34 internal/gperr/wrapped.go Normal file
@@ -0,0 +1,34 @@
package gperr

import (
"errors"
"fmt"
)

type wrappedError struct {
Err error
Message string
}

var _ PlainError = (*wrappedError)(nil)
var _ MarkdownError = (*wrappedError)(nil)

func (e *wrappedError) Error() string {
return fmt.Sprintf("%s: %s", e.Message, e.Err.Error())
}

func (e *wrappedError) Plain() []byte {
return fmt.Appendf(nil, "%s: %s", e.Message, e.Err.Error())
}

func (e *wrappedError) Markdown() []byte {
return fmt.Appendf(nil, "**%s**: %s", e.Message, e.Err.Error())
}

func (e *wrappedError) Unwrap() error {
return e.Err
}

func (e *wrappedError) Is(target error) bool {
return errors.Is(e.Err, target)
}
@@ -2,9 +2,8 @@ package homepage

import (
"encoding/json"
"strings"

config "github.com/yusing/go-proxy/internal/config/types"
"github.com/yusing/go-proxy/internal/homepage/widgets"
"github.com/yusing/go-proxy/internal/utils"
)

@@ -13,20 +12,21 @@ type (
Category []*Item

ItemConfig struct {
Show bool `json:"show"`
Name string `json:"name"` // display name
Icon *IconURL `json:"icon"`
Category string `json:"category"`
Description string `json:"description" aliases:"desc"`
SortOrder int `json:"sort_order"`
WidgetConfig map[string]any `json:"widget_config" aliases:"widget"`
Show bool `json:"show"`
Name string `json:"name"` // display name
Icon *IconURL `json:"icon"`
Category string `json:"category"`
Description string `json:"description" aliases:"desc"`
SortOrder int `json:"sort_order"`
}

Item struct {
*ItemConfig
WidgetConfig *widgets.Config `json:"widget_config,omitempty" aliases:"widget"`

Alias string
Provider string
Alias string
Provider string
OriginURL string
}
)

@@ -43,23 +43,10 @@ func (cfg *ItemConfig) GetOverride(alias string) *ItemConfig {
}

func (item *Item) MarshalJSON() ([]byte, error) {
var url *string
if !strings.ContainsRune(item.Alias, '.') {
godoxyCfg := config.GetInstance().Value()
// use first domain as base domain
domains := godoxyCfg.MatchDomains
if len(domains) > 0 {
url = new(string)
*url = item.Alias + domains[0]
}
} else {
url = &item.Alias
}
return json.Marshal(map[string]any{
"show": item.Show,
"alias": item.Alias,
"provider": item.Provider,
"url": url,
"name": item.Name,
"icon": item.Icon,
"category": item.Category,

@@ -1,8 +1,9 @@
package homepage
package homepage_test

import (
"testing"

. "github.com/yusing/go-proxy/internal/homepage"
. "github.com/yusing/go-proxy/internal/utils/testing"
)

@@ -13,7 +14,7 @@ func TestOverrideItem(t *testing.T) {
Show: false,
Name: "Foo",
Icon: &IconURL{
Value: "/favicon.ico",
FullURL: strPtr("/favicon.ico"),
IconSource: IconSourceRelative,
},
Category: "App",
@@ -24,7 +25,7 @@ func TestOverrideItem(t *testing.T) {
Name: "Bar",
Category: "Test",
Icon: &IconURL{
Value: "@walkxcode/example.png",
FullURL: strPtr("@walkxcode/example.png"),
IconSource: IconSourceWalkXCode,
},
}
@@ -7,6 +7,7 @@ import (
"time"

"github.com/yusing/go-proxy/internal/common"
"github.com/yusing/go-proxy/internal/jsonstore"
"github.com/yusing/go-proxy/internal/logging"
"github.com/yusing/go-proxy/internal/task"
"github.com/yusing/go-proxy/internal/utils"
@@ -15,34 +16,24 @@ import (

type cacheEntry struct {
Icon []byte `json:"icon"`
ContentType string `json:"content_type"`
ContentType string `json:"content_type,omitempty"`
LastAccess atomic.Value[time.Time] `json:"last_access"`
}

// cache key can be absolute url or route name.
var (
iconCache = make(map[string]*cacheEntry)
iconCacheMu sync.RWMutex
iconCache = jsonstore.Store[*cacheEntry](common.NamespaceIconCache)
iconMu sync.RWMutex
)

const (
iconCacheTTL = 3 * 24 * time.Hour
cleanUpInterval = time.Minute
maxCacheSize = 1024 * 1024 // 1MB
maxIconSize = 1024 * 1024 // 1MB
maxCacheEntries = 100
)

func InitIconCache() {
iconCacheMu.Lock()
defer iconCacheMu.Unlock()

err := utils.LoadJSONIfExist(common.IconCachePath, &iconCache)
if err != nil {
logging.Error().Err(err).Msg("failed to load icon cache")
} else if len(iconCache) > 0 {
logging.Info().Int("count", len(iconCache)).Msg("icon cache loaded")
}

func init() {
go func() {
cleanupTicker := time.NewTicker(cleanUpInterval)
defer cleanupTicker.Stop()
@@ -55,36 +46,21 @@ func InitIconCache() {
}
}
}()

task.OnProgramExit("save_favicon_cache", func() {
iconCacheMu.Lock()
defer iconCacheMu.Unlock()

if len(iconCache) == 0 {
return
}

if err := utils.SaveJSON(common.IconCachePath, &iconCache, 0o644); err != nil {
logging.Error().Err(err).Msg("failed to save icon cache")
}
})
}

func pruneExpiredIconCache() {
iconCacheMu.Lock()
defer iconCacheMu.Unlock()

nPruned := 0
for key, icon := range iconCache {
for key, icon := range iconCache.Range {
if icon.IsExpired() {
delete(iconCache, key)
iconCache.Delete(key)
nPruned++
}
}
if len(iconCache) > maxCacheEntries {
if iconCache.Size() > maxCacheEntries {
iconCache.Clear()
newIconCache := make(map[string]*cacheEntry, maxCacheEntries)
i := 0
for key, icon := range iconCache {
for key, icon := range iconCache.Range {
if i == maxCacheEntries {
break
}
@@ -93,7 +69,9 @@ func pruneExpiredIconCache() {
i++
}
}
iconCache = newIconCache
for key, icon := range newIconCache {
iconCache.Store(key, icon)
}
}
if nPruned > 0 {
logging.Info().Int("pruned", nPruned).Msg("pruned expired icon cache")
@@ -101,21 +79,18 @@ func pruneExpiredIconCache() {
}

func PruneRouteIconCache(route route) {
iconCacheMu.Lock()
defer iconCacheMu.Unlock()
delete(iconCache, route.Key())
iconCache.Delete(route.Key())
}

func loadIconCache(key string) *FetchResult {
iconCacheMu.RLock()
defer iconCacheMu.RUnlock()

icon, ok := iconCache[key]
iconMu.RLock()
defer iconMu.RUnlock()
icon, ok := iconCache.Load(key)
if ok && len(icon.Icon) > 0 {
logging.Debug().
Str("key", key).
Msg("icon found in cache")
icon.LastAccess.Store(time.Now())
icon.LastAccess.Store(utils.TimeNow())
return &FetchResult{Icon: icon.Icon, contentType: icon.ContentType}
}
return nil
@@ -123,15 +98,17 @@ func loadIconCache(key string) *FetchResult {

func storeIconCache(key string, result *FetchResult) {
icon := result.Icon
if len(icon) > maxCacheSize {
if len(icon) > maxIconSize {
logging.Debug().Int("size", len(icon)).Msg("icon cache size exceeds max cache size")
return
}
iconCacheMu.Lock()
defer iconCacheMu.Unlock()

iconMu.Lock()
defer iconMu.Unlock()

entry := &cacheEntry{Icon: icon, ContentType: result.contentType}
entry.LastAccess.Store(time.Now())
iconCache[key] = entry
iconCache.Store(key, entry)
logging.Debug().Str("key", key).Int("size", len(icon)).Msg("stored icon cache")
}

@@ -140,12 +117,20 @@ func (e *cacheEntry) IsExpired() bool {
}

func (e *cacheEntry) UnmarshalJSON(data []byte) error {
var tmp struct {
Icon []byte `json:"icon"`
ContentType string `json:"content_type,omitempty"`
LastAccess time.Time `json:"last_access"`
}
// check if data is json
if json.Valid(data) {
err := json.Unmarshal(data, &e)
err := json.Unmarshal(data, &tmp)
// return only if unmarshal is successful
// otherwise fallback to base64
if err == nil {
e.Icon = tmp.Icon
e.ContentType = tmp.ContentType
e.LastAccess.Store(tmp.LastAccess)
return nil
}
}
@@ -9,68 +9,78 @@ import (

type (
IconURL struct {
Value string `json:"value"`
FullValue string `json:"full_value"`
FullURL *string `json:"value,omitempty"` // only for absolute/relative icons
Extra *IconExtra `json:"extra,omitempty"` // only for walkxcode/selfhst icons
IconSource `json:"source"`
Extra *IconExtra `json:"extra"`
}

IconExtra struct {
FileType string `json:"file_type"`
Name string `json:"name"`
Key IconKey `json:"key"`
Ref string `json:"ref"`
FileType string `json:"file_type"`
IsLight bool `json:"is_light"`
IsDark bool `json:"is_dark"`
}

IconSource int
IconSource string
)

const (
IconSourceAbsolute IconSource = iota
IconSourceRelative
IconSourceWalkXCode
IconSourceSelfhSt
IconSourceAbsolute IconSource = "https://"
IconSourceRelative IconSource = "@target"
IconSourceWalkXCode IconSource = "@walkxcode"
IconSourceSelfhSt IconSource = "@selfhst"
)

var ErrInvalidIconURL = gperr.New("invalid icon url")

func NewSelfhStIconURL(reference, format string) *IconURL {
func NewIconURL(source IconSource, refOrName, format string) *IconURL {
switch source {
case IconSourceWalkXCode, IconSourceSelfhSt:
default:
panic("invalid icon source")
}
isLight, isDark := false, false
if strings.HasSuffix(refOrName, "-light") {
isLight = true
refOrName = strings.TrimSuffix(refOrName, "-light")
} else if strings.HasSuffix(refOrName, "-dark") {
isDark = true
refOrName = strings.TrimSuffix(refOrName, "-dark")
}
return &IconURL{
Value: reference + "." + format,
FullValue: fmt.Sprintf("@selfhst/%s.%s", reference, format),
IconSource: IconSourceSelfhSt,
IconSource: source,
Extra: &IconExtra{
Key: NewIconKey(source, refOrName),
FileType: format,
Name: reference,
Ref: refOrName,
IsLight: isLight,
IsDark: isDark,
},
}
}

func NewSelfhStIconURL(refOrName, format string) *IconURL {
return NewIconURL(IconSourceSelfhSt, refOrName, format)
}

func NewWalkXCodeIconURL(name, format string) *IconURL {
return &IconURL{
Value: name + "." + format,
FullValue: fmt.Sprintf("@walkxcode/%s.%s", name, format),
IconSource: IconSourceWalkXCode,
Extra: &IconExtra{
FileType: format,
Name: name,
},
}
return NewIconURL(IconSourceWalkXCode, name, format)
}

// HasIcon checks if the icon referenced by the IconURL exists in the cache based on its source.
// Returns false if the icon does not exist for IconSourceSelfhSt or IconSourceWalkXCode,
// otherwise returns true.
func (u *IconURL) HasIcon() bool {
if u.IconSource == IconSourceSelfhSt {
return HasSelfhstIcon(u.Extra.Name, u.Extra.FileType)
}
if u.IconSource == IconSourceWalkXCode {
return HasWalkxCodeIcon(u.Extra.Name, u.Extra.FileType)
}
return true
return HasIcon(u)
}

// Parse implements strutils.Parser.
func (u *IconURL) Parse(v string) error {
return u.parse(v, true)
}

func (u *IconURL) parse(v string, checkExists bool) error {
if v == "" {
return ErrInvalidIconURL
}
@@ -78,33 +88,25 @@ func (u *IconURL) Parse(v string) error {
if slashIndex == -1 {
return ErrInvalidIconURL
}
u.FullValue = v
beforeSlash := v[:slashIndex]
switch beforeSlash {
case "http:", "https:":
u.Value = v
u.FullURL = &v
u.IconSource = IconSourceAbsolute
case "@target", "": // @target/favicon.ico, /favicon.ico
u.Value = v[slashIndex:]
u.IconSource = IconSourceRelative
if u.Value == "/" {
url := v[slashIndex:]
if url == "/" {
return ErrInvalidIconURL.Withf("%s", "empty path")
}
case "png", "svg", "webp": // walkxcode Icons
u.Value = v
u.IconSource = IconSourceWalkXCode
u.Extra = &IconExtra{
FileType: beforeSlash,
Name: strings.TrimSuffix(v[slashIndex+1:], "."+beforeSlash),
}
u.FullURL = &url
u.IconSource = IconSourceRelative
case "@selfhst", "@walkxcode": // selfh.st / walkxcode Icons, @selfhst/<reference>.<format>
u.Value = v[slashIndex+1:]
if beforeSlash == "@selfhst" {
u.IconSource = IconSourceSelfhSt
} else {
u.IconSource = IconSourceWalkXCode
}
parts := strings.Split(u.Value, ".")
parts := strings.Split(v[slashIndex+1:], ".")
if len(parts) != 2 {
return ErrInvalidIconURL.Withf("expect @%s/<reference>.<format>, e.g. @%s/adguard-home.webp", beforeSlash, beforeSlash)
}
@@ -117,40 +119,67 @@ func (u *IconURL) Parse(v string) error {
default:
return ErrInvalidIconURL.Withf("%s", "invalid image format, expect svg/png/webp")
}
isLight, isDark := false, false
if strings.HasSuffix(reference, "-light") {
isLight = true
reference = strings.TrimSuffix(reference, "-light")
} else if strings.HasSuffix(reference, "-dark") {
isDark = true
reference = strings.TrimSuffix(reference, "-dark")
}
u.Extra = &IconExtra{
Key: NewIconKey(u.IconSource, reference),
FileType: format,
Name: reference,
Ref: reference,
IsLight: isLight,
IsDark: isDark,
}
if checkExists && !u.HasIcon() {
return ErrInvalidIconURL.Withf("no such icon %s.%s from %s", reference, format, u.IconSource)
}
default:
return ErrInvalidIconURL.Withf("%s", v)
return ErrInvalidIconURL.Subject(v)
}

if u.Value == "" {
return ErrInvalidIconURL.Withf("%s", "empty")
}

if !u.HasIcon() {
return ErrInvalidIconURL.Withf("no such icon %s from %s", u.Value, beforeSlash)
}
return nil
}

func (u *IconURL) URL() string {
if u.FullURL != nil {
return *u.FullURL
}
if u.Extra == nil {
return ""
}
filename := u.Extra.Ref
if u.Extra.IsLight {
filename += "-light"
} else if u.Extra.IsDark {
filename += "-dark"
}
switch u.IconSource {
case IconSourceAbsolute:
return u.Value
case IconSourceRelative:
return "/" + u.Value
case IconSourceWalkXCode:
return fmt.Sprintf("https://cdn.jsdelivr.net/gh/walkxcode/dashboard-icons/%s/%s.%s", u.Extra.FileType, u.Extra.Name, u.Extra.FileType)
return fmt.Sprintf("https://cdn.jsdelivr.net/gh/walkxcode/dashboard-icons/%s/%s.%s", u.Extra.FileType, filename, u.Extra.FileType)
case IconSourceSelfhSt:
return fmt.Sprintf("https://cdn.jsdelivr.net/gh/selfhst/icons/%s/%s.%s", u.Extra.FileType, u.Extra.Name, u.Extra.FileType)
return fmt.Sprintf("https://cdn.jsdelivr.net/gh/selfhst/icons/%s/%s.%s", u.Extra.FileType, filename, u.Extra.FileType)
}
return ""
}

func (u *IconURL) String() string {
return u.FullValue
if u.FullURL != nil {
return *u.FullURL
}
if u.Extra == nil {
return ""
}
var suffix string
if u.Extra.IsLight {
suffix = "-light"
} else if u.Extra.IsDark {
suffix = "-dark"
}
return fmt.Sprintf("%s/%s%s.%s", u.IconSource, u.Extra.Ref, suffix, u.Extra.FileType)
}

func (u *IconURL) MarshalText() ([]byte, error) {
@@ -159,5 +188,5 @@ func (u *IconURL) MarshalText() ([]byte, error) {

// UnmarshalText implements encoding.TextUnmarshaler.
func (u *IconURL) UnmarshalText(data []byte) error {
return u.Parse(string(data))
return u.parse(string(data), false)
}
@@ -1,11 +1,16 @@
package homepage
package homepage_test

import (
"testing"

. "github.com/yusing/go-proxy/internal/homepage"
expect "github.com/yusing/go-proxy/internal/utils/testing"
)

func strPtr(s string) *string {
return &s
}

func TestIconURL(t *testing.T) {
tests := []struct {
name string
@@ -17,7 +22,7 @@ func TestIconURL(t *testing.T) {
name: "absolute",
input: "http://example.com/icon.png",
wantValue: &IconURL{
Value: "http://example.com/icon.png",
FullURL: strPtr("http://example.com/icon.png"),
IconSource: IconSourceAbsolute,
},
},
@@ -25,7 +30,7 @@ func TestIconURL(t *testing.T) {
name: "relative",
input: "@target/icon.png",
wantValue: &IconURL{
Value: "/icon.png",
FullURL: strPtr("/icon.png"),
IconSource: IconSourceRelative,
},
},
@@ -33,7 +38,7 @@ func TestIconURL(t *testing.T) {
name: "relative2",
input: "/icon.png",
wantValue: &IconURL{
Value: "/icon.png",
FullURL: strPtr("/icon.png"),
IconSource: IconSourceRelative,
},
},
@@ -49,25 +54,26 @@ func TestIconURL(t *testing.T) {
},
{
name: "walkxcode",
input: "png/adguard-home.png",
input: "@walkxcode/adguard-home.png",
wantValue: &IconURL{
Value: "png/adguard-home.png",
IconSource: IconSourceWalkXCode,
Extra: &IconExtra{
Key: NewIconKey(IconSourceWalkXCode, "adguard-home"),
FileType: "png",
Name: "adguard-home",
Ref: "adguard-home",
},
},
},
{
name: "walkxcode_alt",
input: "@walkxcode/adguard-home.png",
name: "walkxcode_light",
input: "@walkxcode/pfsense-light.png",
wantValue: &IconURL{
Value: "adguard-home.png",
IconSource: IconSourceWalkXCode,
Extra: &IconExtra{
Key: NewIconKey(IconSourceWalkXCode, "pfsense"),
FileType: "png",
Name: "adguard-home",
Ref: "pfsense",
IsLight: true,
},
},
},
@@ -78,13 +84,39 @@ func TestIconURL(t *testing.T) {
},
{
name: "selfh.st_valid",
input: "@selfhst/adguard-home.png",
input: "@selfhst/adguard-home.webp",
wantValue: &IconURL{
Value: "adguard-home.png",
IconSource: IconSourceSelfhSt,
Extra: &IconExtra{
Key: NewIconKey(IconSourceSelfhSt, "adguard-home"),
FileType: "webp",
Ref: "adguard-home",
},
},
},
{
name: "selfh.st_light",
input: "@selfhst/adguard-home-light.png",
wantValue: &IconURL{
IconSource: IconSourceSelfhSt,
Extra: &IconExtra{
Key: NewIconKey(IconSourceSelfhSt, "adguard-home"),
FileType: "png",
Name: "adguard-home",
Ref: "adguard-home",
IsLight: true,
},
},
},
{
name: "selfh.st_dark",
input: "@selfhst/adguard-home-dark.svg",
wantValue: &IconURL{
IconSource: IconSourceSelfhSt,
Extra: &IconExtra{
Key: NewIconKey(IconSourceSelfhSt, "adguard-home"),
FileType: "svg",
Ref: "adguard-home",
IsDark: true,
},
},
},
@@ -116,7 +148,6 @@ func TestIconURL(t *testing.T) {
if tc.wantErr {
expect.ErrorIs(t, ErrInvalidIconURL, err)
} else {
tc.wantValue.FullValue = tc.input
expect.NoError(t, err)
expect.Equal(t, u, tc.wantValue)
}
68 internal/homepage/integrations/qbittorrent/client.go Normal file
@@ -0,0 +1,68 @@
package qbittorrent

import (
"context"
"encoding/json"
"io"
"net/http"
"net/url"

"github.com/yusing/go-proxy/internal/gperr"
"github.com/yusing/go-proxy/internal/homepage/widgets"
)

type Client struct {
URL string
Username string
Password string
}

func (c *Client) Initialize(ctx context.Context, url string, cfg map[string]any) error {
c.URL = url
c.Username = cfg["username"].(string)
c.Password = cfg["password"].(string)

_, err := c.Version(ctx)
if err != nil {
return err
}

return nil
}

func (c *Client) doRequest(ctx context.Context, method, endpoint string, query url.Values, body io.Reader) (*http.Response, error) {
req, err := http.NewRequestWithContext(ctx, method, c.URL+endpoint+query.Encode(), body)
if err != nil {
return nil, err
}

if c.Username != "" && c.Password != "" {
req.SetBasicAuth(c.Username, c.Password)
}

resp, err := widgets.HTTPClient.Do(req)
if err != nil {
return nil, err
}

if resp.StatusCode != http.StatusOK {
return nil, gperr.Errorf("%w: %d %s", widgets.ErrHTTPStatus, resp.StatusCode, resp.Status)
}

return resp, nil
}

func jsonRequest[T any](ctx context.Context, client *Client, endpoint string, query url.Values) (result T, err error) {
resp, err := client.doRequest(ctx, http.MethodGet, endpoint, query, nil)
if err != nil {
return result, err
}
defer resp.Body.Close()

err = json.NewDecoder(resp.Body).Decode(&result)
if err != nil {
return result, err
}

return result, nil
}
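For context, a minimal sketch of how this client might be driven from elsewhere in the module, assuming only the Initialize and Version methods shown in this diff (the endpoint and credentials are placeholders):

    package main

    import (
        "context"
        "fmt"

        "github.com/yusing/go-proxy/internal/homepage/integrations/qbittorrent"
    )

    func main() {
        var c qbittorrent.Client
        // Placeholder URL and credentials; Initialize validates them by calling Version.
        err := c.Initialize(context.Background(), "http://127.0.0.1:8080", map[string]any{
            "username": "admin",
            "password": "adminadmin",
        })
        if err != nil {
            panic(err)
        }
        version, err := c.Version(context.Background())
        if err != nil {
            panic(err)
        }
        fmt.Println("qBittorrent version:", version)
    }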
102 internal/homepage/integrations/qbittorrent/logs.go Normal file
@@ -0,0 +1,102 @@
package qbittorrent

import (
"context"
"encoding/json"
"net/url"
"strconv"
"time"
)

const endpointLogs = "/api/v2/log/main"

type LogEntry struct {
ID int `json:"id"`
Timestamp int `json:"timestamp"`
Type int `json:"type"`
Message string `json:"message"`
}

const (
LogSeverityNormal = 1 << iota
LogSeverityInfo
LogSeverityWarning
LogSeverityCritical
)

func (l *LogEntry) Time() time.Time {
return time.Unix(int64(l.Timestamp), 0)
}

func (l *LogEntry) Level() string {
switch l.Type {
case LogSeverityNormal:
return "Normal"
case LogSeverityInfo:
return "Info"
case LogSeverityWarning:
return "Warning"
case LogSeverityCritical:
return "Critical"
default:
return "Unknown"
}
}

func (l *LogEntry) MarshalJSON() ([]byte, error) {
return json.Marshal(map[string]any{
"id": l.ID,
"timestamp": l.Timestamp,
"level": l.Level(),
"message": l.Message,
})
}

// params:
//
// normal: bool
// info: bool
// warning: bool
// critical: bool
// last_known_id: int
func (c *Client) GetLogs(ctx context.Context, lastKnownID int) ([]*LogEntry, error) {
return jsonRequest[[]*LogEntry](ctx, c, endpointLogs, url.Values{
"last_known_id": {strconv.Itoa(lastKnownID)},
})
}

func (c *Client) WatchLogs(ctx context.Context) (<-chan *LogEntry, <-chan error) {
ch := make(chan *LogEntry)
errCh := make(chan error)

lastKnownID := -1

go func() {
defer close(ch)
defer close(errCh)

for {
select {
case <-ctx.Done():
return
default:
logs, err := c.GetLogs(ctx, lastKnownID)
if err != nil {
errCh <- err
}

if len(logs) == 0 {
time.Sleep(1 * time.Second)
continue
}

for _, log := range logs {
ch <- log
}
lastKnownID = logs[len(logs)-1].ID
}
}
}()

return ch, errCh
}
32 internal/homepage/integrations/qbittorrent/transfer_info.go Normal file
@@ -0,0 +1,32 @@
package qbittorrent

import (
"context"

"github.com/yusing/go-proxy/internal/homepage/widgets"
"github.com/yusing/go-proxy/internal/utils/strutils"
)

const endpointTransferInfo = "/api/v2/transfer/info"

type TransferInfo struct {
ConnectionStatus string `json:"connection_status"`
SessionDownloads uint64 `json:"dl_info_data"`
SessionUploads uint64 `json:"up_info_data"`
DownloadSpeed uint64 `json:"dl_info_speed"`
UploadSpeed uint64 `json:"up_info_speed"`
}

func (c *Client) Data(ctx context.Context) ([]widgets.NameValue, error) {
info, err := jsonRequest[TransferInfo](ctx, c, endpointTransferInfo, nil)
if err != nil {
return nil, err
}
return []widgets.NameValue{
{Name: "Status", Value: info.ConnectionStatus},
{Name: "Download", Value: strutils.FormatByteSize(info.SessionDownloads)},
{Name: "Upload", Value: strutils.FormatByteSize(info.SessionUploads)},
{Name: "Download Speed", Value: strutils.FormatByteSize(info.DownloadSpeed) + "/s"},
{Name: "Upload Speed", Value: strutils.FormatByteSize(info.UploadSpeed) + "/s"},
}, nil
}
21 internal/homepage/integrations/qbittorrent/version.go Normal file
@@ -0,0 +1,21 @@
package qbittorrent

import (
"context"
"io"
)

func (c *Client) Version(ctx context.Context) (string, error) {
resp, err := c.doRequest(ctx, "GET", "/api/v2/app/version", nil, nil)
if err != nil {
return "", err
}
defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}

return string(body), nil
}
@@ -1,297 +0,0 @@
|
||||
package homepage
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/lithammer/fuzzysearch/fuzzy"
|
||||
"github.com/yusing/go-proxy/internal/common"
|
||||
"github.com/yusing/go-proxy/internal/logging"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
)
|
||||
|
||||
type GitHubContents struct { //! keep this, may reuse in future
|
||||
Type string `json:"type"`
|
||||
Path string `json:"path"`
|
||||
Name string `json:"name"`
|
||||
Sha string `json:"sha"`
|
||||
Size int `json:"size"`
|
||||
}
|
||||
|
||||
type (
|
||||
IconsMap map[string]map[string]struct{}
|
||||
IconList []string
|
||||
Cache struct {
|
||||
WalkxCode, Selfhst IconsMap
|
||||
DisplayNames ReferenceDisplayNameMap
|
||||
IconList IconList // combined into a single list
|
||||
}
|
||||
ReferenceDisplayNameMap map[string]string
|
||||
)
|
||||
|
||||
func (icons *Cache) needUpdate() bool {
|
||||
return len(icons.WalkxCode) == 0 || len(icons.Selfhst) == 0 || len(icons.IconList) == 0 || len(icons.DisplayNames) == 0
|
||||
}
|
||||
|
||||
const updateInterval = 2 * time.Hour
|
||||
|
||||
var (
|
||||
iconsCache *Cache
|
||||
iconsCahceMu sync.RWMutex
|
||||
lastUpdate time.Time
|
||||
)
|
||||
|
||||
const (
|
||||
walkxcodeIcons = "https://cdn.jsdelivr.net/gh/walkxcode/dashboard-icons@master/tree.json"
|
||||
selfhstIcons = "https://cdn.selfh.st/directory/icons.json"
|
||||
)
|
||||
|
||||
func InitIconListCache() {
|
||||
iconsCahceMu.Lock()
|
||||
defer iconsCahceMu.Unlock()
|
||||
|
||||
iconsCache = &Cache{
|
||||
WalkxCode: make(IconsMap),
|
||||
Selfhst: make(IconsMap),
|
||||
DisplayNames: make(ReferenceDisplayNameMap),
|
||||
IconList: []string{},
|
||||
}
|
||||
err := utils.LoadJSONIfExist(common.IconListCachePath, iconsCache)
|
||||
if err != nil {
|
||||
logging.Error().Err(err).Msg("failed to load icon list cache config")
|
||||
} else if len(iconsCache.IconList) > 0 {
|
||||
logging.Info().
|
||||
Int("icons", len(iconsCache.IconList)).
|
||||
Int("display_names", len(iconsCache.DisplayNames)).
|
||||
Msg("icon list cache loaded")
|
||||
}
|
||||
}
|
||||
|
||||
func ListAvailableIcons() (*Cache, error) {
|
||||
iconsCahceMu.RLock()
|
||||
if time.Since(lastUpdate) < updateInterval {
|
||||
if !iconsCache.needUpdate() {
|
||||
iconsCahceMu.RUnlock()
|
||||
return iconsCache, nil
|
||||
}
|
||||
}
|
||||
iconsCahceMu.RUnlock()
|
||||
|
||||
iconsCahceMu.Lock()
|
||||
defer iconsCahceMu.Unlock()
|
||||
|
||||
logging.Info().Msg("updating icon data")
|
||||
icons, err := fetchIconData()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logging.Info().
|
||||
Int("icons", len(icons.IconList)).
|
||||
Int("display_names", len(icons.DisplayNames)).
|
||||
Msg("icons list updated")
|
||||
|
||||
iconsCache = icons
|
||||
lastUpdate = time.Now()
|
||||
|
||||
err = utils.SaveJSON(common.IconListCachePath, iconsCache, 0o644)
|
||||
if err != nil {
|
||||
logging.Warn().Err(err).Msg("failed to save icon list cache")
|
||||
}
|
||||
return icons, nil
|
||||
}
|
||||
|
||||
func SearchIcons(keyword string, limit int) ([]string, error) {
|
||||
icons, err := ListAvailableIcons()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if keyword == "" {
|
||||
return utils.Slice(icons.IconList, limit), nil
|
||||
}
|
||||
return utils.Slice(fuzzy.Find(keyword, icons.IconList), limit), nil
|
||||
}
|
||||
|
||||
func HasWalkxCodeIcon(name string, filetype string) bool {
|
||||
icons, err := ListAvailableIcons()
|
||||
if err != nil {
|
||||
logging.Error().Err(err).Msg("failed to list icons")
|
||||
return false
|
||||
}
|
||||
if _, ok := icons.WalkxCode[filetype]; !ok {
|
||||
return false
|
||||
}
|
||||
_, ok := icons.WalkxCode[filetype][name+"."+filetype]
|
||||
return ok
|
||||
}
|
||||
|
||||
func HasSelfhstIcon(name string, filetype string) bool {
|
||||
icons, err := ListAvailableIcons()
|
||||
if err != nil {
|
||||
logging.Error().Err(err).Msg("failed to list icons")
|
||||
return false
|
||||
}
|
||||
if _, ok := icons.Selfhst[filetype]; !ok {
|
||||
return false
|
||||
}
|
||||
_, ok := icons.Selfhst[filetype][name+"."+filetype]
|
||||
return ok
|
||||
}
|
||||
|
||||
func GetDisplayName(reference string) (string, bool) {
|
||||
icons, err := ListAvailableIcons()
|
||||
if err != nil {
|
||||
logging.Error().Err(err).Msg("failed to list icons")
|
||||
return "", false
|
||||
}
|
||||
displayName, ok := icons.DisplayNames[reference]
|
||||
return displayName, ok
|
||||
}
|
||||
|
||||
func fetchIconData() (*Cache, error) {
|
||||
walkxCodeIconMap, walkxCodeIconList, err := fetchWalkxCodeIcons()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n := 0
|
||||
for _, items := range walkxCodeIconMap {
|
||||
n += len(items)
|
||||
}
|
||||
|
||||
selfhstIconMap, selfhstIconList, referenceToNames, err := fetchSelfhstIcons()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Cache{
|
||||
WalkxCode: walkxCodeIconMap,
|
||||
Selfhst: selfhstIconMap,
|
||||
DisplayNames: referenceToNames,
|
||||
IconList: append(walkxCodeIconList, selfhstIconList...),
|
||||
}, nil
|
||||
}
|
||||
|
||||
/*
|
||||
format:
|
||||
|
||||
{
|
||||
"png": [
|
||||
"*.png",
|
||||
],
|
||||
"svg": [
|
||||
"*.svg",
|
||||
],
|
||||
"webp": [
|
||||
"*.webp",
|
||||
]
|
||||
}
|
||||
*/
|
||||
func fetchWalkxCodeIcons() (IconsMap, IconList, error) {
|
||||
req, err := http.NewRequest(http.MethodGet, walkxcodeIcons, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
data := make(map[string][]string)
|
||||
err = json.Unmarshal(body, &data)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
icons := make(IconsMap, len(data))
|
||||
iconList := make(IconList, 0, 2000)
|
||||
for fileType, files := range data {
|
||||
icons[fileType] = make(map[string]struct{}, len(files))
|
||||
for _, icon := range files {
|
||||
icons[fileType][icon] = struct{}{}
|
||||
iconList = append(iconList, "@walkxcode/"+icon)
|
||||
}
|
||||
}
|
||||
return icons, iconList, nil
|
||||
}
|
||||
|
||||
/*
|
||||
format:
|
||||
|
||||
{
|
||||
"Name": "2FAuth",
|
||||
"Reference": "2fauth",
|
||||
"SVG": "Yes",
|
||||
"PNG": "Yes",
|
||||
"WebP": "Yes",
|
||||
"Light": "Yes",
|
||||
"Category": "Self-Hosted",
|
||||
"CreatedAt": "2024-08-16 00:27:23+00:00"
|
||||
}
|
||||
*/
|
||||
func fetchSelfhstIcons() (IconsMap, IconList, ReferenceDisplayNameMap, error) {
|
||||
type SelfhStIcon struct {
|
||||
Name string `json:"Name"`
|
||||
Reference string `json:"Reference"`
|
||||
SVG string `json:"SVG"`
|
||||
PNG string `json:"PNG"`
|
||||
WebP string `json:"WebP"`
|
||||
// Light string
|
||||
// Category string
|
||||
// CreatedAt string
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, selfhstIcons, nil)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
data := make([]SelfhStIcon, 0, 2000)
|
||||
err = json.Unmarshal(body, &data)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
iconList := make(IconList, 0, len(data)*3)
|
||||
icons := make(IconsMap)
|
||||
icons["svg"] = make(map[string]struct{}, len(data))
|
||||
icons["png"] = make(map[string]struct{}, len(data))
|
||||
icons["webp"] = make(map[string]struct{}, len(data))
|
||||
|
||||
referenceToNames := make(ReferenceDisplayNameMap, len(data))
|
||||
|
||||
for _, item := range data {
|
||||
if item.SVG == "Yes" {
|
||||
icons["svg"][item.Reference+".svg"] = struct{}{}
|
||||
iconList = append(iconList, "@selfhst/"+item.Reference+".svg")
|
||||
}
|
||||
if item.PNG == "Yes" {
|
||||
icons["png"][item.Reference+".png"] = struct{}{}
|
||||
iconList = append(iconList, "@selfhst/"+item.Reference+".png")
|
||||
}
|
||||
if item.WebP == "Yes" {
|
||||
icons["webp"][item.Reference+".webp"] = struct{}{}
|
||||
iconList = append(iconList, "@selfhst/"+item.Reference+".webp")
|
||||
}
|
||||
referenceToNames[item.Reference] = item.Name
|
||||
}
|
||||
|
||||
return icons, iconList, referenceToNames, nil
|
||||
}
|
||||
373
internal/homepage/list_icons.go
Normal file
373
internal/homepage/list_icons.go
Normal file
@@ -0,0 +1,373 @@
|
||||
package homepage
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/lithammer/fuzzysearch/fuzzy"
|
||||
"github.com/yusing/go-proxy/internal/common"
|
||||
"github.com/yusing/go-proxy/internal/logging"
|
||||
"github.com/yusing/go-proxy/internal/task"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
"github.com/yusing/go-proxy/internal/utils/strutils"
|
||||
)
|
||||
|
||||
type (
|
||||
IconKey string
|
||||
IconMap map[IconKey]*IconMeta
|
||||
IconList []string
|
||||
IconMeta struct {
|
||||
SVG, PNG, WebP bool
|
||||
Light, Dark bool
|
||||
DisplayName string
|
||||
Tag string
|
||||
}
|
||||
IconMetaSearch struct {
|
||||
Source IconSource
|
||||
Ref string
|
||||
SVG bool
|
||||
PNG bool
|
||||
WebP bool
|
||||
Light bool
|
||||
Dark bool
|
||||
}
|
||||
Cache struct {
|
||||
Icons IconMap
|
||||
LastUpdate time.Time
|
||||
sync.RWMutex `json:"-"`
|
||||
}
|
||||
)
|
||||
|
||||
func (icon *IconMeta) Filenames(ref string) []string {
|
||||
filenames := make([]string, 0)
|
||||
if icon.SVG {
|
||||
filenames = append(filenames, fmt.Sprintf("%s.svg", ref))
|
||||
if icon.Light {
|
||||
filenames = append(filenames, fmt.Sprintf("%s-light.svg", ref))
|
||||
}
|
||||
if icon.Dark {
|
||||
filenames = append(filenames, fmt.Sprintf("%s-dark.svg", ref))
|
||||
}
|
||||
}
|
||||
if icon.PNG {
|
||||
filenames = append(filenames, fmt.Sprintf("%s.png", ref))
|
||||
if icon.Light {
|
||||
filenames = append(filenames, fmt.Sprintf("%s-light.png", ref))
|
||||
}
|
||||
if icon.Dark {
|
||||
filenames = append(filenames, fmt.Sprintf("%s-dark.png", ref))
|
||||
}
|
||||
}
|
||||
if icon.WebP {
|
||||
filenames = append(filenames, fmt.Sprintf("%s.webp", ref))
|
||||
if icon.Light {
|
||||
filenames = append(filenames, fmt.Sprintf("%s-light.webp", ref))
|
||||
}
|
||||
if icon.Dark {
|
||||
filenames = append(filenames, fmt.Sprintf("%s-dark.webp", ref))
|
||||
}
|
||||
}
|
||||
return filenames
|
||||
}
|
||||
|
||||
const updateInterval = 2 * time.Hour
|
||||
|
||||
var iconsCache = &Cache{
|
||||
Icons: make(IconMap),
|
||||
}
|
||||
|
||||
const (
|
||||
walkxcodeIcons = "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons@master/tree.json"
|
||||
selfhstIcons = "https://cdn.selfh.st/directory/icons.json"
|
||||
)
|
||||
|
||||
func NewIconKey(source IconSource, reference string) IconKey {
|
||||
return IconKey(fmt.Sprintf("%s/%s", source, reference))
|
||||
}
|
||||
|
||||
func (k IconKey) SourceRef() (IconSource, string) {
|
||||
parts := strings.Split(string(k), "/")
|
||||
return IconSource(parts[0]), parts[1]
|
||||
}
|
||||
|
||||
func InitIconListCache() {
|
||||
iconsCache.Lock()
|
||||
defer iconsCache.Unlock()
|
||||
|
||||
err := utils.LoadJSONIfExist(common.IconListCachePath, iconsCache)
|
||||
if err != nil {
|
||||
logging.Error().Err(err).Msg("failed to load icons")
|
||||
} else if len(iconsCache.Icons) > 0 {
|
||||
logging.Info().
|
||||
Int("icons", len(iconsCache.Icons)).
|
||||
Msg("icons loaded")
|
||||
}
|
||||
|
||||
if err = updateIcons(); err != nil {
|
||||
logging.Error().Err(err).Msg("failed to update icons")
|
||||
}
|
||||
|
||||
task.OnProgramExit("save_icons_cache", func() {
|
||||
utils.SaveJSON(common.IconListCachePath, iconsCache, 0o644)
|
||||
})
|
||||
}
|
||||
|
||||
func ListAvailableIcons() (*Cache, error) {
|
||||
if common.IsTest {
|
||||
return iconsCache, nil
|
||||
}
|
||||
|
||||
iconsCache.RLock()
|
||||
if time.Since(iconsCache.LastUpdate) < updateInterval {
|
||||
if len(iconsCache.Icons) == 0 {
|
||||
iconsCache.RUnlock()
|
||||
return iconsCache, nil
|
||||
}
|
||||
}
|
||||
iconsCache.RUnlock()
|
||||
|
||||
iconsCache.Lock()
|
||||
defer iconsCache.Unlock()
|
||||
|
||||
logging.Info().Msg("updating icon data")
|
||||
if err := updateIcons(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logging.Info().Int("icons", len(iconsCache.Icons)).Msg("icons list updated")
|
||||
|
||||
iconsCache.LastUpdate = time.Now()
|
||||
|
||||
err := utils.SaveJSON(common.IconListCachePath, iconsCache, 0o644)
|
||||
if err != nil {
|
||||
logging.Warn().Err(err).Msg("failed to save icons")
|
||||
}
|
||||
return iconsCache, nil
|
||||
}
|
||||
|
||||
func SearchIcons(keyword string, limit int) ([]IconMetaSearch, error) {
|
||||
if keyword == "" {
|
||||
return make([]IconMetaSearch, 0), nil
|
||||
}
|
||||
iconsCache.RLock()
|
||||
defer iconsCache.RUnlock()
|
||||
result := make([]IconMetaSearch, 0)
|
||||
for k, icon := range iconsCache.Icons {
|
||||
if fuzzy.MatchFold(keyword, string(k)) {
|
||||
source, ref := k.SourceRef()
|
||||
result = append(result, IconMetaSearch{
|
||||
Source: source,
|
||||
Ref: ref,
|
||||
SVG: icon.SVG,
|
||||
PNG: icon.PNG,
|
||||
WebP: icon.WebP,
|
||||
Light: icon.Light,
|
||||
Dark: icon.Dark,
|
||||
})
|
||||
}
|
||||
if len(result) >= limit {
|
||||
break
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func HasIcon(icon *IconURL) bool {
|
||||
if icon.Extra == nil {
|
||||
return false
|
||||
}
|
||||
if common.IsTest {
|
||||
return true
|
||||
}
|
||||
iconsCache.RLock()
|
||||
defer iconsCache.RUnlock()
|
||||
key := NewIconKey(icon.IconSource, icon.Extra.Ref)
|
||||
meta, ok := iconsCache.Icons[key]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
switch icon.Extra.FileType {
|
||||
case "png":
|
||||
return meta.PNG && (!icon.Extra.IsLight || meta.Light) && (!icon.Extra.IsDark || meta.Dark)
|
||||
case "svg":
|
||||
return meta.SVG && (!icon.Extra.IsLight || meta.Light) && (!icon.Extra.IsDark || meta.Dark)
|
||||
case "webp":
|
||||
return meta.WebP && (!icon.Extra.IsLight || meta.Light) && (!icon.Extra.IsDark || meta.Dark)
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
type HomepageMeta struct {
|
||||
DisplayName string
|
||||
Tag string
|
||||
}
|
||||
|
||||
func GetHomepageMeta(ref string) (HomepageMeta, bool) {
|
||||
iconsCache.RLock()
|
||||
defer iconsCache.RUnlock()
|
||||
meta, ok := iconsCache.Icons[NewIconKey(IconSourceSelfhSt, ref)]
|
||||
if !ok {
|
||||
return HomepageMeta{}, false
|
||||
}
|
||||
return HomepageMeta{
|
||||
DisplayName: meta.DisplayName,
|
||||
Tag: meta.Tag,
|
||||
}, true
|
||||
}
|
||||
|
||||
func updateIcons() error {
|
||||
clear(iconsCache.Icons)
|
||||
if err := UpdateWalkxCodeIcons(); err != nil {
|
||||
return err
|
||||
}
|
||||
return UpdateSelfhstIcons()
|
||||
}
|
||||
|
||||
var httpGet = httpGetImpl
|
||||
|
||||
func MockHttpGet(body []byte) {
|
||||
httpGet = func(_ string) ([]byte, error) {
|
||||
return body, nil
|
||||
}
|
||||
}
|
||||
|
||||
func httpGetImpl(url string) ([]byte, error) {
|
||||
req, err := http.NewRequest(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return body, nil
|
||||
}
|
||||
|
||||
/*
|
||||
format:
|
||||
|
||||
{
|
||||
"png": [
|
||||
"*.png",
|
||||
],
|
||||
"svg": [
|
||||
"*.svg",
|
||||
],
|
||||
"webp": [
|
||||
"*.webp",
|
||||
]
|
||||
}
|
||||
*/
|
||||
func UpdateWalkxCodeIcons() error {
|
||||
body, err := httpGet(walkxcodeIcons)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
data := make(map[string][]string)
|
||||
err = json.Unmarshal(body, &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for fileType, files := range data {
|
||||
var setExt func(icon *IconMeta)
|
||||
switch fileType {
|
||||
case "png":
|
||||
setExt = func(icon *IconMeta) { icon.PNG = true }
|
||||
case "svg":
|
||||
setExt = func(icon *IconMeta) { icon.SVG = true }
|
||||
case "webp":
|
||||
setExt = func(icon *IconMeta) { icon.WebP = true }
|
||||
}
|
||||
for _, f := range files {
|
||||
f = strings.TrimSuffix(f, "."+fileType)
|
||||
isLight := strings.HasSuffix(f, "-light")
|
||||
if isLight {
|
||||
f = strings.TrimSuffix(f, "-light")
|
||||
}
|
||||
key := NewIconKey(IconSourceWalkXCode, f)
|
||||
icon, ok := iconsCache.Icons[key]
|
||||
if !ok {
|
||||
icon = new(IconMeta)
|
||||
iconsCache.Icons[key] = icon
|
||||
}
|
||||
setExt(icon)
|
||||
if isLight {
|
||||
icon.Light = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
format:
|
||||
|
||||
{
|
||||
"Name": "2FAuth",
|
||||
"Reference": "2fauth",
|
||||
"SVG": "Yes",
|
||||
"PNG": "Yes",
|
||||
"WebP": "Yes",
|
||||
"Light": "Yes",
|
||||
"Dark": "Yes",
|
||||
"Tag": "",
|
||||
"Category": "Self-Hosted",
|
||||
"CreatedAt": "2024-08-16 00:27:23+00:00"
|
||||
}
|
||||
*/
|
||||
|
||||
func UpdateSelfhstIcons() error {
|
||||
type SelfhStIcon struct {
|
||||
Name string
|
||||
Reference string
|
||||
SVG string
|
||||
PNG string
|
||||
WebP string
|
||||
Light string
|
||||
Dark string
|
||||
Tags string
|
||||
}
|
||||
|
||||
body, err := httpGet(selfhstIcons)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
data := make([]SelfhStIcon, 0)
|
||||
err = json.Unmarshal(body, &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, item := range data {
|
||||
var tag string
|
||||
if item.Tags != "" {
|
||||
tag = strutils.CommaSeperatedList(item.Tags)[0]
|
||||
}
|
||||
icon := &IconMeta{
|
||||
DisplayName: item.Name,
|
||||
Tag: tag,
|
||||
SVG: item.SVG == "Yes",
|
||||
PNG: item.PNG == "Yes",
|
||||
WebP: item.WebP == "Yes",
|
||||
Light: item.Light == "Yes",
|
||||
Dark: item.Dark == "Yes",
|
||||
}
|
||||
key := NewIconKey(IconSourceSelfhSt, item.Reference)
|
||||
iconsCache.Icons[key] = icon
|
||||
}
|
||||
return nil
|
||||
}
|
||||
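Not part of the diff: a minimal sketch of how the keyed icon cache above might be queried from another package, assuming the cache has already been initialized and populated (via the init path and ListAvailableIcons), and assuming imports of "fmt" and github.com/yusing/go-proxy/internal/homepage. Names match the functions in this file; the wrapper function itself is illustrative only.

// illustrative usage only — not part of this commit
func printIconMatches(keyword string) error {
	results, err := homepage.SearchIcons(keyword, 5) // fuzzy match against "source/ref" keys
	if err != nil {
		return err
	}
	for _, r := range results {
		fmt.Println(r.Source, r.Ref, r.SVG, r.PNG, r.WebP)
	}
	if meta, ok := homepage.GetHomepageMeta("2fauth"); ok {
		fmt.Println(meta.DisplayName, meta.Tag) // display name and first tag from selfh.st
	}
	return nil
}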
175
internal/homepage/list_icons_test.go
Normal file
@@ -0,0 +1,175 @@
package homepage_test

import (
	"testing"

	. "github.com/yusing/go-proxy/internal/homepage"
)

const walkxcodeIcons = `{
	"png": [
		"app1.png",
		"app1-light.png",
		"app2.png"
	],
	"svg": [
		"app1.svg",
		"app1-light.svg"
	],
	"webp": [
		"app1.webp",
		"app1-light.webp",
		"app2.webp"
	]
}`

const selfhstIcons = `[
	{
		"Name": "2FAuth",
		"Reference": "2fauth",
		"SVG": "Yes",
		"PNG": "Yes",
		"WebP": "Yes",
		"Light": "Yes",
		"Dark": "Yes",
		"Category": "Self-Hosted",
		"Tags": "",
		"CreatedAt": "2024-08-16 00:27:23+00:00"
	},
	{
		"Name": "Dittofeed",
		"Reference": "dittofeed",
		"SVG": "No",
		"PNG": "Yes",
		"WebP": "Yes",
		"Light": "No",
		"Dark": "No",
		"Category": "Self-Hosted",
		"Tags": "",
		"CreatedAt": "2024-08-22 11:33:37+00:00"
	},
	{
		"Name": "Ars Technica",
		"Reference": "ars-technica",
		"SVG": "Yes",
		"PNG": "Yes",
		"WebP": "Yes",
		"Light": "Yes",
		"Dark": "Yes",
		"Category": "Other",
		"Tags": "News",
		"CreatedAt": "2025-04-09 11:15:01+00:00"
	}
]`

type testCases struct {
	Key IconKey
	IconMeta
}

func runTests(t *testing.T, iconsCache *Cache, test []testCases) {
	for _, item := range test {
		icon, ok := iconsCache.Icons[item.Key]
		if !ok {
			t.Fatalf("icon %s not found", item.Key)
		}
		if icon.PNG != item.PNG || icon.SVG != item.SVG || icon.WebP != item.WebP {
			t.Fatalf("icon %s file format mismatch", item.Key)
		}
		if icon.Light != item.Light || icon.Dark != item.Dark {
			t.Fatalf("icon %s variant mismatch", item.Key)
		}
		if icon.DisplayName != item.DisplayName {
			t.Fatalf("icon %s display name mismatch, expect %s, got %s", item.Key, item.DisplayName, icon.DisplayName)
		}
		if icon.Tag != item.Tag {
			t.Fatalf("icon %s tag mismatch, expect %s, got %s", item.Key, item.Tag, icon.Tag)
		}
	}
}

func TestListWalkxCodeIcons(t *testing.T) {
	MockHttpGet([]byte(walkxcodeIcons))
	if err := UpdateWalkxCodeIcons(); err != nil {
		t.Fatal(err)
	}
	iconsCache, err := ListAvailableIcons()
	if err != nil {
		t.Fatal(err)
	}
	if len(iconsCache.Icons) != 2 {
		t.Fatalf("expect 2 icons, got %d", len(iconsCache.Icons))
	}
	test := []testCases{
		{
			Key: NewIconKey(IconSourceWalkXCode, "2fauth"),
			IconMeta: IconMeta{
				SVG:         true,
				PNG:         true,
				WebP:        true,
				Light:       true,
				DisplayName: "2FAuth",
			},
		},
		{
			Key: NewIconKey(IconSourceWalkXCode, "dittofeed"),
			IconMeta: IconMeta{
				PNG:         true,
				WebP:        true,
				DisplayName: "Dittofeed",
			},
		},
	}
	runTests(t, iconsCache, test)
}

func TestListSelfhstIcons(t *testing.T) {
	MockHttpGet([]byte(selfhstIcons))
	if err := UpdateSelfhstIcons(); err != nil {
		t.Fatal(err)
	}
	iconsCache, err := ListAvailableIcons()
	if err != nil {
		t.Fatal(err)
	}
	if len(iconsCache.Icons) != 3 {
		t.Fatalf("expect 3 icons, got %d", len(iconsCache.Icons))
	}
	// if len(iconsCache.IconList) != 8 {
	// 	t.Fatalf("expect 8 icons, got %d", len(iconsCache.IconList))
	// }
	test := []testCases{
		{
			Key: NewIconKey(IconSourceSelfhSt, "2fauth"),
			IconMeta: IconMeta{
				SVG:         true,
				PNG:         true,
				WebP:        true,
				Light:       true,
				Dark:        true,
				DisplayName: "2FAuth",
			},
		},
		{
			Key: NewIconKey(IconSourceSelfhSt, "dittofeed"),
			IconMeta: IconMeta{
				PNG:         true,
				WebP:        true,
				DisplayName: "Dittofeed",
			},
		},
		{
			Key: NewIconKey(IconSourceSelfhSt, "ars-technica"),
			IconMeta: IconMeta{
				SVG:         true,
				PNG:         true,
				WebP:        true,
				Light:       true,
				Dark:        true,
				DisplayName: "Ars Technica",
				Tag:         "News",
			},
		},
	}
	runTests(t, iconsCache, test)
}
14
internal/homepage/widgets/http.go
Normal file
@@ -0,0 +1,14 @@
package widgets

import (
	"net/http"
	"time"

	"github.com/yusing/go-proxy/internal/gperr"
)

var HTTPClient = &http.Client{
	Timeout: 10 * time.Second,
}

var ErrHTTPStatus = gperr.New("http status")
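Not in the diff: a sketch of how a widget implementation might combine the shared HTTPClient and ErrHTTPStatus above. The fetchJSON helper and its imports (context, io, net/http) are assumptions; the .Subject call mirrors how gperr errors are used elsewhere in this changeset.

// illustrative only — not part of this commit
func fetchJSON(ctx context.Context, url string) ([]byte, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := HTTPClient.Do(req) // shared client with the 10s timeout defined above
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, ErrHTTPStatus.Subject(resp.Status)
	}
	return io.ReadAll(resp.Body)
}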
49
internal/homepage/widgets/widgets.go
Normal file
@@ -0,0 +1,49 @@
package widgets

import (
	"context"

	"github.com/yusing/go-proxy/internal/gperr"
	"github.com/yusing/go-proxy/internal/utils"
)

type (
	Config struct {
		Provider string `json:"provider"`
		Config   Widget `json:"config"`
	}
	Widget interface {
		Initialize(ctx context.Context, url string, cfg map[string]any) error
		Data(ctx context.Context) ([]NameValue, error)
	}
	NameValue struct {
		Name  string `json:"name"`
		Value string `json:"value"`
	}
)

const (
	WidgetProviderQbittorrent = "qbittorrent"
)

var widgetProviders = map[string]struct{}{
	WidgetProviderQbittorrent: {},
}

var ErrInvalidProvider = gperr.New("invalid provider")

func (cfg *Config) UnmarshalMap(m map[string]any) error {
	cfg.Provider = m["provider"].(string)
	if _, ok := widgetProviders[cfg.Provider]; !ok {
		return ErrInvalidProvider.Subject(cfg.Provider)
	}
	delete(m, "provider")
	m, ok := m["config"].(map[string]any)
	if !ok {
		return gperr.New("invalid config")
	}
	if err := utils.MapUnmarshalValidate(m, &cfg.Config); err != nil {
		return err
	}
	return nil
}
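For reference (not in the diff): the input shape UnmarshalMap above expects. The provider key selects one of the registered providers and everything under config is handed to utils.MapUnmarshalValidate. The qbittorrent field names in the sketch are assumptions, not taken from this changeset.

// illustrative only — not part of this commit
widgetCfg := new(Config)
err := widgetCfg.UnmarshalMap(map[string]any{
	"provider": WidgetProviderQbittorrent,
	"config": map[string]any{
		// decoded into the provider-specific Widget implementation; exact keys are assumed
		"url":      "http://qbittorrent:8080",
		"username": "admin",
		"password": "changeme",
	},
})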
@@ -38,6 +38,10 @@ func (w *Watcher) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
		return
	default:
		f := &ForceCacheControl{expires: w.expires().Format(http.TimeFormat), ResponseWriter: rw}
		w, ok := watcherMap[w.Key()] // could've been reloaded
		if !ok {
			return
		}
		w.rp.ServeHTTP(f, r)
	}
}

@@ -131,7 +131,7 @@ func NewWatcher(parent task.Parent, r routes.Route) (*Watcher, error) {
	case routes.StreamRoute:
		w.stream = r
	default:
		return nil, gperr.New("unexpected route type")
		return nil, gperr.Errorf("unexpected route type: %T", r)
	}

	ctx, cancel := context.WithTimeout(parent.Context(), reqTimeout)
@@ -262,11 +262,14 @@ func (w *Watcher) watchUntilDestroy() (returnCause gperr.Error) {
		case <-w.task.Context().Done():
			return gperr.Wrap(w.task.FinishCause())
		case err := <-errCh:
			return err
			gperr.LogError("watcher error", err, &w.l)
		case e := <-eventCh:
			w.l.Debug().Stringer("action", e.Action).Msg("state changed")
			if e.Action == events.ActionContainerDestroy {
			switch e.Action {
			case events.ActionContainerDestroy:
				return causeContainerDestroy
			case events.ActionForceReload:
				continue
			}
			w.resetIdleTimer()
			switch {

@@ -13,15 +13,16 @@ func TestNewJSON(t *testing.T) {
}

func TestSaveLoadStore(t *testing.T) {
	defer clear(stores)

	storesPath = t.TempDir()
	store := Store[string]("test")
	store.Store("a", "1")
	if err := save(); err != nil {
		t.Fatal(err)
	}
	if err := load(); err != nil {
		t.Fatal(err)
	}
	// reload
	clear(stores)
	loaded := Store[string]("test")
	v, ok := loaded.Load("a")
	if !ok {
@@ -43,6 +44,8 @@ type testObject struct {
func (*testObject) Initialize() {}

func TestSaveLoadObject(t *testing.T) {
	defer clear(stores)

	storesPath = t.TempDir()
	obj := Object[*testObject]("test")
	obj.I = 1
@@ -50,9 +53,8 @@ func TestSaveLoadObject(t *testing.T) {
	if err := save(); err != nil {
		t.Fatal(err)
	}
	if err := load(); err != nil {
		t.Fatal(err)
	}
	// reload
	clear(stores)
	loaded := Object[*testObject]("test")
	if loaded.I != 1 || loaded.S != "1" {
		t.Fatalf("expected 1, got %d, %s", loaded.I, loaded.S)

@@ -1,297 +0,0 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/lithammer/fuzzysearch/fuzzy"
|
||||
"github.com/yusing/go-proxy/internal/common"
|
||||
"github.com/yusing/go-proxy/internal/logging"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
)
|
||||
|
||||
type GitHubContents struct { //! keep this, may reuse in future
|
||||
Type string `json:"type"`
|
||||
Path string `json:"path"`
|
||||
Name string `json:"name"`
|
||||
Sha string `json:"sha"`
|
||||
Size int `json:"size"`
|
||||
}
|
||||
|
||||
type (
|
||||
IconsMap map[string]map[string]struct{}
|
||||
IconList []string
|
||||
Cache struct {
|
||||
WalkxCode, Selfhst IconsMap
|
||||
DisplayNames ReferenceDisplayNameMap
|
||||
IconList IconList // combined into a single list
|
||||
}
|
||||
ReferenceDisplayNameMap map[string]string
|
||||
)
|
||||
|
||||
func (icons *Cache) needUpdate() bool {
|
||||
return len(icons.WalkxCode) == 0 || len(icons.Selfhst) == 0 || len(icons.IconList) == 0 || len(icons.DisplayNames) == 0
|
||||
}
|
||||
|
||||
const updateInterval = 2 * time.Hour
|
||||
|
||||
var (
|
||||
iconsCache *Cache
|
||||
iconsCahceMu sync.RWMutex
|
||||
lastUpdate time.Time
|
||||
)
|
||||
|
||||
const (
|
||||
walkxcodeIcons = "https://cdn.jsdelivr.net/gh/walkxcode/dashboard-icons@master/tree.json"
|
||||
selfhstIcons = "https://cdn.selfh.st/directory/icons.json"
|
||||
)
|
||||
|
||||
func InitIconListCache() {
|
||||
iconsCahceMu.Lock()
|
||||
defer iconsCahceMu.Unlock()
|
||||
|
||||
iconsCache = &Cache{
|
||||
WalkxCode: make(IconsMap),
|
||||
Selfhst: make(IconsMap),
|
||||
DisplayNames: make(ReferenceDisplayNameMap),
|
||||
IconList: []string{},
|
||||
}
|
||||
err := utils.LoadJSONIfExist(common.IconListCachePath, iconsCache)
|
||||
if err != nil {
|
||||
logging.Error().Err(err).Msg("failed to load icon list cache config")
|
||||
} else if len(iconsCache.IconList) > 0 {
|
||||
logging.Info().
|
||||
Int("icons", len(iconsCache.IconList)).
|
||||
Int("display_names", len(iconsCache.DisplayNames)).
|
||||
Msg("icon list cache loaded")
|
||||
}
|
||||
}
|
||||
|
||||
func ListAvailableIcons() (*Cache, error) {
|
||||
iconsCahceMu.RLock()
|
||||
if time.Since(lastUpdate) < updateInterval {
|
||||
if !iconsCache.needUpdate() {
|
||||
iconsCahceMu.RUnlock()
|
||||
return iconsCache, nil
|
||||
}
|
||||
}
|
||||
iconsCahceMu.RUnlock()
|
||||
|
||||
iconsCahceMu.Lock()
|
||||
defer iconsCahceMu.Unlock()
|
||||
|
||||
logging.Info().Msg("updating icon data")
|
||||
icons, err := fetchIconData()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logging.Info().
|
||||
Int("icons", len(icons.IconList)).
|
||||
Int("display_names", len(icons.DisplayNames)).
|
||||
Msg("icons list updated")
|
||||
|
||||
iconsCache = icons
|
||||
lastUpdate = time.Now()
|
||||
|
||||
err = utils.SaveJSON(common.IconListCachePath, iconsCache, 0o644)
|
||||
if err != nil {
|
||||
logging.Warn().Err(err).Msg("failed to save icon list cache")
|
||||
}
|
||||
return icons, nil
|
||||
}
|
||||
|
||||
func SearchIcons(keyword string, limit int) ([]string, error) {
|
||||
icons, err := ListAvailableIcons()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if keyword == "" {
|
||||
return utils.Slice(icons.IconList, limit), nil
|
||||
}
|
||||
return utils.Slice(fuzzy.Find(keyword, icons.IconList), limit), nil
|
||||
}
|
||||
|
||||
func HasWalkxCodeIcon(name string, filetype string) bool {
|
||||
icons, err := ListAvailableIcons()
|
||||
if err != nil {
|
||||
logging.Error().Err(err).Msg("failed to list icons")
|
||||
return false
|
||||
}
|
||||
if _, ok := icons.WalkxCode[filetype]; !ok {
|
||||
return false
|
||||
}
|
||||
_, ok := icons.WalkxCode[filetype][name+"."+filetype]
|
||||
return ok
|
||||
}
|
||||
|
||||
func HasSelfhstIcon(name string, filetype string) bool {
|
||||
icons, err := ListAvailableIcons()
|
||||
if err != nil {
|
||||
logging.Error().Err(err).Msg("failed to list icons")
|
||||
return false
|
||||
}
|
||||
if _, ok := icons.Selfhst[filetype]; !ok {
|
||||
return false
|
||||
}
|
||||
_, ok := icons.Selfhst[filetype][name+"."+filetype]
|
||||
return ok
|
||||
}
|
||||
|
||||
func GetDisplayName(reference string) (string, bool) {
|
||||
icons, err := ListAvailableIcons()
|
||||
if err != nil {
|
||||
logging.Error().Err(err).Msg("failed to list icons")
|
||||
return "", false
|
||||
}
|
||||
displayName, ok := icons.DisplayNames[reference]
|
||||
return displayName, ok
|
||||
}
|
||||
|
||||
func fetchIconData() (*Cache, error) {
|
||||
walkxCodeIconMap, walkxCodeIconList, err := fetchWalkxCodeIcons()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n := 0
|
||||
for _, items := range walkxCodeIconMap {
|
||||
n += len(items)
|
||||
}
|
||||
|
||||
selfhstIconMap, selfhstIconList, referenceToNames, err := fetchSelfhstIcons()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Cache{
|
||||
WalkxCode: walkxCodeIconMap,
|
||||
Selfhst: selfhstIconMap,
|
||||
DisplayNames: referenceToNames,
|
||||
IconList: append(walkxCodeIconList, selfhstIconList...),
|
||||
}, nil
|
||||
}
|
||||
|
||||
/*
|
||||
format:
|
||||
|
||||
{
|
||||
"png": [
|
||||
"*.png",
|
||||
],
|
||||
"svg": [
|
||||
"*.svg",
|
||||
],
|
||||
"webp": [
|
||||
"*.webp",
|
||||
]
|
||||
}
|
||||
*/
|
||||
func fetchWalkxCodeIcons() (IconsMap, IconList, error) {
|
||||
req, err := http.NewRequest(http.MethodGet, walkxcodeIcons, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
data := make(map[string][]string)
|
||||
err = json.Unmarshal(body, &data)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
icons := make(IconsMap, len(data))
|
||||
iconList := make(IconList, 0, 2000)
|
||||
for fileType, files := range data {
|
||||
icons[fileType] = make(map[string]struct{}, len(files))
|
||||
for _, icon := range files {
|
||||
icons[fileType][icon] = struct{}{}
|
||||
iconList = append(iconList, "@walkxcode/"+icon)
|
||||
}
|
||||
}
|
||||
return icons, iconList, nil
|
||||
}
|
||||
|
||||
/*
|
||||
format:
|
||||
|
||||
{
|
||||
"Name": "2FAuth",
|
||||
"Reference": "2fauth",
|
||||
"SVG": "Yes",
|
||||
"PNG": "Yes",
|
||||
"WebP": "Yes",
|
||||
"Light": "Yes",
|
||||
"Category": "Self-Hosted",
|
||||
"CreatedAt": "2024-08-16 00:27:23+00:00"
|
||||
}
|
||||
*/
|
||||
func fetchSelfhstIcons() (IconsMap, IconList, ReferenceDisplayNameMap, error) {
|
||||
type SelfhStIcon struct {
|
||||
Name string `json:"Name"`
|
||||
Reference string `json:"Reference"`
|
||||
SVG string `json:"SVG"`
|
||||
PNG string `json:"PNG"`
|
||||
WebP string `json:"WebP"`
|
||||
// Light string
|
||||
// Category string
|
||||
// CreatedAt string
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, selfhstIcons, nil)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
data := make([]SelfhStIcon, 0, 2000)
|
||||
err = json.Unmarshal(body, &data)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
iconList := make(IconList, 0, len(data)*3)
|
||||
icons := make(IconsMap)
|
||||
icons["svg"] = make(map[string]struct{}, len(data))
|
||||
icons["png"] = make(map[string]struct{}, len(data))
|
||||
icons["webp"] = make(map[string]struct{}, len(data))
|
||||
|
||||
referenceToNames := make(ReferenceDisplayNameMap, len(data))
|
||||
|
||||
for _, item := range data {
|
||||
if item.SVG == "Yes" {
|
||||
icons["svg"][item.Reference+".svg"] = struct{}{}
|
||||
iconList = append(iconList, "@selfhst/"+item.Reference+".svg")
|
||||
}
|
||||
if item.PNG == "Yes" {
|
||||
icons["png"][item.Reference+".png"] = struct{}{}
|
||||
iconList = append(iconList, "@selfhst/"+item.Reference+".png")
|
||||
}
|
||||
if item.WebP == "Yes" {
|
||||
icons["webp"][item.Reference+".webp"] = struct{}{}
|
||||
iconList = append(iconList, "@selfhst/"+item.Reference+".webp")
|
||||
}
|
||||
referenceToNames[item.Reference] = item.Name
|
||||
}
|
||||
|
||||
return icons, iconList, referenceToNames, nil
|
||||
}
|
||||
@@ -4,15 +4,16 @@ import (
|
||||
"bufio"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
acl "github.com/yusing/go-proxy/internal/acl/types"
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/logging"
|
||||
maxmind "github.com/yusing/go-proxy/internal/maxmind/types"
|
||||
"github.com/yusing/go-proxy/internal/task"
|
||||
"github.com/yusing/go-proxy/internal/utils/strutils"
|
||||
"github.com/yusing/go-proxy/internal/utils/synk"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
@@ -22,12 +23,17 @@ type (
|
||||
task *task.Task
|
||||
cfg *Config
|
||||
|
||||
rawWriter io.Writer
|
||||
closer []io.Closer
|
||||
supportRotate []supportRotate
|
||||
writer *bufio.Writer
|
||||
writeLock sync.Mutex
|
||||
closed bool
|
||||
|
||||
wps int64
|
||||
bufSize int
|
||||
lastAdjust time.Time
|
||||
|
||||
lineBufPool *synk.BytesPool // buffer pool for formatting a single log line
|
||||
|
||||
errRateLimiter *rate.Limiter
|
||||
@@ -43,26 +49,30 @@ type (
|
||||
Name() string // file name or path
|
||||
}
|
||||
|
||||
SupportRotate interface {
|
||||
io.Writer
|
||||
supportRotate
|
||||
Name() string
|
||||
}
|
||||
|
||||
RequestFormatter interface {
|
||||
// AppendRequestLog appends a log line to line with or without a trailing newline
|
||||
AppendRequestLog(line []byte, req *http.Request, res *http.Response) []byte
|
||||
}
|
||||
ACLFormatter interface {
|
||||
// AppendACLLog appends a log line to line with or without a trailing newline
|
||||
AppendACLLog(line []byte, info *acl.IPInfo, blocked bool) []byte
|
||||
AppendACLLog(line []byte, info *maxmind.IPInfo, blocked bool) []byte
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
StdoutbufSize = 64
|
||||
MinBufferSize = 4 * kilobyte
|
||||
MaxBufferSize = 1 * megabyte
|
||||
MaxBufferSize = 8 * megabyte
|
||||
|
||||
bufferAdjustInterval = time.Second // How often we check & adjust
|
||||
)
|
||||
|
||||
const (
|
||||
flushInterval = 30 * time.Second
|
||||
rotateInterval = time.Hour
|
||||
)
|
||||
const defaultRotateInterval = time.Hour
|
||||
|
||||
const (
|
||||
errRateLimit = 200 * time.Millisecond
|
||||
@@ -99,24 +109,17 @@ func unwrap[Writer any](w io.Writer) []Writer {
|
||||
|
||||
func NewAccessLoggerWithIO(parent task.Parent, writer WriterWithName, anyCfg AnyConfig) *AccessLogger {
|
||||
cfg := anyCfg.ToConfig()
|
||||
if cfg.BufferSize == 0 {
|
||||
cfg.BufferSize = DefaultBufferSize
|
||||
}
|
||||
if cfg.BufferSize < MinBufferSize {
|
||||
cfg.BufferSize = MinBufferSize
|
||||
}
|
||||
if cfg.BufferSize > MaxBufferSize {
|
||||
cfg.BufferSize = MaxBufferSize
|
||||
}
|
||||
if _, ok := writer.(*os.File); ok {
|
||||
cfg.BufferSize = StdoutbufSize
|
||||
if cfg.RotateInterval == 0 {
|
||||
cfg.RotateInterval = defaultRotateInterval
|
||||
}
|
||||
|
||||
l := &AccessLogger{
|
||||
task: parent.Subtask("accesslog."+writer.Name(), true),
|
||||
cfg: cfg,
|
||||
writer: bufio.NewWriterSize(writer, cfg.BufferSize),
|
||||
lineBufPool: synk.NewBytesPool(512, 8192),
|
||||
rawWriter: writer,
|
||||
writer: bufio.NewWriterSize(writer, MinBufferSize),
|
||||
bufSize: MinBufferSize,
|
||||
lineBufPool: synk.NewBytesPool(256, 768), // for common/combined usually < 256B; for json < 512B
|
||||
errRateLimiter: rate.NewLimiter(rate.Every(errRateLimit), errBurst),
|
||||
logger: logging.With().Str("file", writer.Name()).Logger(),
|
||||
}
|
||||
@@ -176,7 +179,7 @@ func (l *AccessLogger) LogError(req *http.Request, err error) {
|
||||
l.Log(req, &http.Response{StatusCode: http.StatusInternalServerError, Status: err.Error()})
|
||||
}
|
||||
|
||||
func (l *AccessLogger) LogACL(info *acl.IPInfo, blocked bool) {
|
||||
func (l *AccessLogger) LogACL(info *maxmind.IPInfo, blocked bool) {
|
||||
line := l.lineBufPool.Get()
|
||||
defer l.lineBufPool.Put(line)
|
||||
line = l.ACLFormatter.AppendACLLog(line, info, blocked)
|
||||
@@ -214,9 +217,9 @@ func (l *AccessLogger) Rotate() (result *RotateResult, err error) {
|
||||
|
||||
func (l *AccessLogger) handleErr(err error) {
|
||||
if l.errRateLimiter.Allow() {
|
||||
gperr.LogError("failed to write access log", err)
|
||||
gperr.LogError("failed to write access log", err, &l.logger)
|
||||
} else {
|
||||
gperr.LogError("too many errors, stopping access log", err)
|
||||
gperr.LogError("too many errors, stopping access log", err, &l.logger)
|
||||
l.task.Finish(err)
|
||||
}
|
||||
}
|
||||
@@ -228,19 +231,16 @@ func (l *AccessLogger) start() {
|
||||
l.task.Finish(nil)
|
||||
}()
|
||||
|
||||
// flushes the buffer every 30 seconds
|
||||
flushTicker := time.NewTicker(30 * time.Second)
|
||||
defer flushTicker.Stop()
|
||||
|
||||
rotateTicker := time.NewTicker(rotateInterval)
|
||||
rotateTicker := time.NewTicker(l.cfg.RotateInterval)
|
||||
defer rotateTicker.Stop()
|
||||
|
||||
bufAdjTicker := time.NewTicker(bufferAdjustInterval)
|
||||
defer bufAdjTicker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-l.task.Context().Done():
|
||||
return
|
||||
case <-flushTicker.C:
|
||||
l.Flush()
|
||||
case <-rotateTicker.C:
|
||||
if !l.ShouldRotate() {
|
||||
continue
|
||||
@@ -253,6 +253,8 @@ func (l *AccessLogger) start() {
|
||||
} else {
|
||||
l.logger.Info().Msg("no rotation needed")
|
||||
}
|
||||
case <-bufAdjTicker.C:
|
||||
l.adjustBuffer()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -289,8 +291,55 @@ func (l *AccessLogger) write(data []byte) {
|
||||
if l.closed {
|
||||
return
|
||||
}
|
||||
_, err := l.writer.Write(data)
|
||||
n, err := l.writer.Write(data)
|
||||
if err != nil {
|
||||
l.handleErr(err)
|
||||
} else if n < len(data) {
|
||||
l.handleErr(gperr.Errorf("%w, writing %d bytes, only %d written", io.ErrShortWrite, len(data), n))
|
||||
}
|
||||
atomic.AddInt64(&l.wps, int64(n))
|
||||
}
|
||||
|
||||
func (l *AccessLogger) adjustBuffer() {
|
||||
wps := int(atomic.SwapInt64(&l.wps, 0))
|
||||
origBufSize := l.bufSize
|
||||
newBufSize := origBufSize
|
||||
|
||||
halfDiff := (wps - origBufSize) / 2
|
||||
if halfDiff < 0 {
|
||||
halfDiff = -halfDiff
|
||||
}
|
||||
step := max(halfDiff, wps/2)
|
||||
|
||||
switch {
|
||||
case origBufSize < wps:
|
||||
newBufSize += step
|
||||
if newBufSize > MaxBufferSize {
|
||||
newBufSize = MaxBufferSize
|
||||
}
|
||||
case origBufSize > wps:
|
||||
newBufSize -= step
|
||||
if newBufSize < MinBufferSize {
|
||||
newBufSize = MinBufferSize
|
||||
}
|
||||
}
|
||||
|
||||
if newBufSize == origBufSize {
|
||||
return
|
||||
}
|
||||
|
||||
l.writeLock.Lock()
|
||||
defer l.writeLock.Unlock()
|
||||
if l.closed {
|
||||
return
|
||||
}
|
||||
|
||||
l.logger.Debug().
|
||||
Str("wps", strutils.FormatByteSize(wps)).
|
||||
Str("old", strutils.FormatByteSize(origBufSize)).
|
||||
Str("new", strutils.FormatByteSize(newBufSize)).
|
||||
Msg("adjusted buffer size")
|
||||
|
||||
l.writer = bufio.NewWriterSize(l.rawWriter, newBufSize)
|
||||
l.bufSize = newBufSize
|
||||
}
|
||||
|
||||
@@ -26,12 +26,8 @@ type BackScanner struct {
|
||||
|
||||
// NewBackScanner creates a new Scanner to read the file backward.
|
||||
// chunkSize determines the size of each read chunk from the end of the file.
|
||||
func NewBackScanner(file ReaderAtSeeker, chunkSize int) *BackScanner {
|
||||
size, err := file.Seek(0, io.SeekEnd)
|
||||
if err != nil {
|
||||
return &BackScanner{err: err}
|
||||
}
|
||||
return newBackScanner(file, size, make([]byte, chunkSize))
|
||||
func NewBackScanner(file ReaderAtSeeker, fileSize int64, chunkSize int) *BackScanner {
|
||||
return newBackScanner(file, fileSize, make([]byte, chunkSize))
|
||||
}
|
||||
|
||||
func newBackScanner(file ReaderAtSeeker, fileSize int64, buf []byte) *BackScanner {
|
||||
@@ -111,11 +107,6 @@ func (s *BackScanner) Bytes() []byte {
|
||||
return s.line
|
||||
}
|
||||
|
||||
// FileSize returns the size of the file.
|
||||
func (s *BackScanner) FileSize() int64 {
|
||||
return s.size
|
||||
}
|
||||
|
||||
// Err returns the first non-EOF error encountered by the scanner.
|
||||
func (s *BackScanner) Err() error {
|
||||
return s.err
|
||||
|
||||
@@ -67,7 +67,7 @@ func TestBackScanner(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create scanner with small chunk size to test chunking
|
||||
scanner := NewBackScanner(mockFile, 10)
|
||||
scanner := NewBackScanner(mockFile, mockFile.MustSize(), 10)
|
||||
|
||||
// Collect all lines
|
||||
var lines [][]byte
|
||||
@@ -108,7 +108,7 @@ func TestBackScannerWithVaryingChunkSizes(t *testing.T) {
|
||||
t.Fatalf("failed to write to mock file: %v", err)
|
||||
}
|
||||
|
||||
scanner := NewBackScanner(mockFile, chunkSize)
|
||||
scanner := NewBackScanner(mockFile, mockFile.MustSize(), chunkSize)
|
||||
|
||||
var lines [][]byte
|
||||
for scanner.Scan() {
|
||||
@@ -170,7 +170,8 @@ func TestReset(t *testing.T) {
|
||||
}
|
||||
}
|
||||
linesRead := 0
|
||||
s := NewBackScanner(file, defaultChunkSize)
|
||||
stat, _ := file.Stat()
|
||||
s := NewBackScanner(file, stat.Size(), defaultChunkSize)
|
||||
for s.Scan() {
|
||||
linesRead++
|
||||
}
|
||||
@@ -199,7 +200,7 @@ func BenchmarkBackScanner(b *testing.B) {
|
||||
}
|
||||
for i := range 14 {
|
||||
chunkSize := (2 << i) * kilobyte
|
||||
scanner := NewBackScanner(mockFile, chunkSize)
|
||||
scanner := NewBackScanner(mockFile, mockFile.MustSize(), chunkSize)
|
||||
name := strutils.FormatByteSize(chunkSize)
|
||||
b.ResetTimer()
|
||||
b.Run(name, func(b *testing.B) {
|
||||
@@ -226,7 +227,8 @@ func BenchmarkBackScannerRealFile(b *testing.B) {
|
||||
}
|
||||
}
|
||||
|
||||
scanner := NewBackScanner(file, 256*kilobyte)
|
||||
stat, _ := file.Stat()
|
||||
scanner := NewBackScanner(file, stat.Size(), 256*kilobyte)
|
||||
b.ResetTimer()
|
||||
for scanner.Scan() {
|
||||
}
|
||||
|
||||
@@ -1,16 +1,19 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
)
|
||||
|
||||
type (
|
||||
ConfigBase struct {
|
||||
BufferSize int `json:"buffer_size"`
|
||||
Path string `json:"path"`
|
||||
Stdout bool `json:"stdout"`
|
||||
Retention *Retention `json:"retention" aliases:"keep"`
|
||||
B int `json:"buffer_size"` // Deprecated: buffer size is adjusted dynamically
|
||||
Path string `json:"path"`
|
||||
Stdout bool `json:"stdout"`
|
||||
Retention *Retention `json:"retention" aliases:"keep"`
|
||||
RotateInterval time.Duration `json:"rotate_interval,omitempty"`
|
||||
}
|
||||
ACLLoggerConfig struct {
|
||||
ConfigBase
|
||||
@@ -41,9 +44,9 @@ type (
|
||||
CIDR LogFilter[*CIDR] `json:"cidr"`
|
||||
}
|
||||
Fields struct {
|
||||
Headers FieldConfig `json:"headers"`
|
||||
Query FieldConfig `json:"query"`
|
||||
Cookies FieldConfig `json:"cookies"`
|
||||
Headers FieldConfig `json:"headers" aliases:"header"`
|
||||
Query FieldConfig `json:"query" aliases:"queries"`
|
||||
Cookies FieldConfig `json:"cookies" aliases:"cookie"`
|
||||
}
|
||||
)
|
||||
|
||||
@@ -55,8 +58,6 @@ var (
|
||||
ReqLoggerFormats = []Format{FormatCommon, FormatCombined, FormatJSON}
|
||||
)
|
||||
|
||||
const DefaultBufferSize = 64 * kilobyte // 64KB
|
||||
|
||||
func (cfg *ConfigBase) Validate() gperr.Error {
|
||||
if cfg.Path == "" && !cfg.Stdout {
|
||||
return gperr.New("path or stdout is required")
|
||||
@@ -99,8 +100,7 @@ func (cfg *RequestLoggerConfig) ToConfig() *Config {
|
||||
func DefaultRequestLoggerConfig() *RequestLoggerConfig {
|
||||
return &RequestLoggerConfig{
|
||||
ConfigBase: ConfigBase{
|
||||
BufferSize: DefaultBufferSize,
|
||||
Retention: &Retention{Days: 30},
|
||||
Retention: &Retention{Days: 30},
|
||||
},
|
||||
Format: FormatCombined,
|
||||
Fields: Fields{
|
||||
@@ -120,8 +120,7 @@ func DefaultRequestLoggerConfig() *RequestLoggerConfig {
|
||||
func DefaultACLLoggerConfig() *ACLLoggerConfig {
|
||||
return &ACLLoggerConfig{
|
||||
ConfigBase: ConfigBase{
|
||||
BufferSize: DefaultBufferSize,
|
||||
Retention: &Retention{Days: 30},
|
||||
Retention: &Retention{Days: 30},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
|
||||
func TestNewConfig(t *testing.T) {
|
||||
labels := map[string]string{
|
||||
"proxy.buffer_size": "10",
|
||||
"proxy.format": "combined",
|
||||
"proxy.path": "/tmp/access.log",
|
||||
"proxy.filters.status_codes.values": "200-299",
|
||||
@@ -30,10 +29,9 @@ func TestNewConfig(t *testing.T) {
|
||||
expect.NoError(t, err)
|
||||
|
||||
var config RequestLoggerConfig
|
||||
err = utils.Deserialize(parsed, &config)
|
||||
err = utils.MapUnmarshalValidate(parsed, &config)
|
||||
expect.NoError(t, err)
|
||||
|
||||
expect.Equal(t, config.BufferSize, 10)
|
||||
expect.Equal(t, config.Format, FormatCombined)
|
||||
expect.Equal(t, config.Path, "/tmp/access.log")
|
||||
expect.Equal(t, config.Filters.StatusCodes.Values, []*StatusCodeRange{{Start: 200, End: 299}})
|
||||
|
||||
@@ -2,8 +2,9 @@ package accesslog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
pathPkg "path"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/yusing/go-proxy/internal/logging"
|
||||
@@ -11,7 +12,7 @@ import (
|
||||
)
|
||||
|
||||
type File struct {
|
||||
*os.File
|
||||
f *os.File
|
||||
|
||||
// os.File.Name() may not equal to key of `openedFiles`.
|
||||
// Store it for later delete from `openedFiles`.
|
||||
@@ -25,21 +26,25 @@ var (
|
||||
openedFilesMu sync.Mutex
|
||||
)
|
||||
|
||||
func newFileIO(path string) (WriterWithName, error) {
|
||||
func newFileIO(path string) (SupportRotate, error) {
|
||||
openedFilesMu.Lock()
|
||||
defer openedFilesMu.Unlock()
|
||||
|
||||
var file *File
|
||||
path = pathPkg.Clean(path)
|
||||
path = filepath.Clean(path)
|
||||
if opened, ok := openedFiles[path]; ok {
|
||||
opened.refCount.Add()
|
||||
return opened, nil
|
||||
} else {
|
||||
f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0o644)
|
||||
// cannot open as O_APPEND as we need Seek and WriteAt
|
||||
f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("access log open error: %w", err)
|
||||
}
|
||||
file = &File{File: f, path: path, refCount: utils.NewRefCounter()}
|
||||
if _, err := f.Seek(0, io.SeekEnd); err != nil {
|
||||
return nil, fmt.Errorf("access log seek error: %w", err)
|
||||
}
|
||||
file = &File{f: f, path: path, refCount: utils.NewRefCounter()}
|
||||
openedFiles[path] = file
|
||||
go file.closeOnZero()
|
||||
}
|
||||
@@ -47,6 +52,38 @@ func newFileIO(path string) (WriterWithName, error) {
|
||||
return file, nil
|
||||
}
|
||||
|
||||
func (f *File) Name() string {
|
||||
return f.f.Name()
|
||||
}
|
||||
|
||||
func (f *File) Write(p []byte) (n int, err error) {
|
||||
return f.f.Write(p)
|
||||
}
|
||||
|
||||
func (f *File) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
return f.f.ReadAt(p, off)
|
||||
}
|
||||
|
||||
func (f *File) WriteAt(p []byte, off int64) (n int, err error) {
|
||||
return f.f.WriteAt(p, off)
|
||||
}
|
||||
|
||||
func (f *File) Seek(offset int64, whence int) (int64, error) {
|
||||
return f.f.Seek(offset, whence)
|
||||
}
|
||||
|
||||
func (f *File) Size() (int64, error) {
|
||||
stat, err := f.f.Stat()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return stat.Size(), nil
|
||||
}
|
||||
|
||||
func (f *File) Truncate(size int64) error {
|
||||
return f.f.Truncate(size)
|
||||
}
|
||||
|
||||
func (f *File) Close() error {
|
||||
f.refCount.Sub()
|
||||
return nil
|
||||
@@ -62,5 +99,5 @@ func (f *File) closeOnZero() {
|
||||
openedFilesMu.Lock()
|
||||
delete(openedFiles, f.path)
|
||||
openedFilesMu.Unlock()
|
||||
f.File.Close()
|
||||
f.f.Close()
|
||||
}
|
||||
|
||||
@@ -50,7 +50,6 @@ func TestConcurrentAccessLoggerLogAndFlush(t *testing.T) {
|
||||
file := NewMockFile()
|
||||
|
||||
cfg := DefaultRequestLoggerConfig()
|
||||
cfg.BufferSize = 1024
|
||||
parent := task.RootTask("test", false)
|
||||
|
||||
loggerCount := 5
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"strconv"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
acl "github.com/yusing/go-proxy/internal/acl/types"
|
||||
maxmind "github.com/yusing/go-proxy/internal/maxmind/types"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
)
|
||||
|
||||
@@ -158,7 +158,7 @@ func (f *JSONFormatter) AppendRequestLog(line []byte, req *http.Request, res *ht
|
||||
return writer.Bytes()
|
||||
}
|
||||
|
||||
func (f ACLLogFormatter) AppendACLLog(line []byte, info *acl.IPInfo, blocked bool) []byte {
|
||||
func (f ACLLogFormatter) AppendACLLog(line []byte, info *maxmind.IPInfo, blocked bool) []byte {
|
||||
writer := bytes.NewBuffer(line)
|
||||
logger := zerolog.New(writer)
|
||||
event := logger.Info().
|
||||
|
||||
@@ -17,8 +17,11 @@ type MockFile struct {
|
||||
noLock
|
||||
}
|
||||
|
||||
var _ SupportRotate = (*MockFile)(nil)
|
||||
|
||||
func NewMockFile() *MockFile {
|
||||
f, _ := afero.TempFile(afero.NewMemMapFs(), "", "")
|
||||
f.Seek(0, io.SeekEnd)
|
||||
return &MockFile{
|
||||
File: f,
|
||||
}
|
||||
@@ -47,3 +50,13 @@ func (m *MockFile) NumLines() int {
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func (m *MockFile) Size() (int64, error) {
|
||||
stat, _ := m.Stat()
|
||||
return stat.Size(), nil
|
||||
}
|
||||
|
||||
func (m *MockFile) MustSize() int64 {
|
||||
size, _ := m.Size()
|
||||
return size
|
||||
}
|
||||
|
||||
@@ -6,16 +6,18 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
"github.com/yusing/go-proxy/internal/utils/strutils"
|
||||
"github.com/yusing/go-proxy/internal/utils/synk"
|
||||
)
|
||||
|
||||
type supportRotate interface {
|
||||
io.ReadSeeker
|
||||
io.Seeker
|
||||
io.ReaderAt
|
||||
io.WriterAt
|
||||
Truncate(size int64) error
|
||||
Size() (int64, error)
|
||||
}
|
||||
|
||||
type RotateResult struct {
|
||||
@@ -29,17 +31,29 @@ type RotateResult struct {
|
||||
}
|
||||
|
||||
func (r *RotateResult) Print(logger *zerolog.Logger) {
|
||||
logger.Info().
|
||||
Str("original_size", strutils.FormatByteSize(r.OriginalSize)).
|
||||
Str("bytes_read", strutils.FormatByteSize(r.NumBytesRead)).
|
||||
Str("bytes_keep", strutils.FormatByteSize(r.NumBytesKeep)).
|
||||
Int("lines_read", r.NumLinesRead).
|
||||
Int("lines_keep", r.NumLinesKeep).
|
||||
Int("lines_invalid", r.NumLinesInvalid).
|
||||
event := logger.Info().
|
||||
Str("original_size", strutils.FormatByteSize(r.OriginalSize))
|
||||
if r.NumBytesRead > 0 {
|
||||
event.Str("bytes_read", strutils.FormatByteSize(r.NumBytesRead))
|
||||
}
|
||||
if r.NumBytesKeep > 0 {
|
||||
event.Str("bytes_keep", strutils.FormatByteSize(r.NumBytesKeep))
|
||||
}
|
||||
if r.NumLinesRead > 0 {
|
||||
event.Int("lines_read", r.NumLinesRead)
|
||||
}
|
||||
if r.NumLinesKeep > 0 {
|
||||
event.Int("lines_keep", r.NumLinesKeep)
|
||||
}
|
||||
if r.NumLinesInvalid > 0 {
|
||||
event.Int("lines_invalid", r.NumLinesInvalid)
|
||||
}
|
||||
event.Str("saved", strutils.FormatByteSize(r.OriginalSize-r.NumBytesKeep)).
|
||||
Msg("log rotate result")
|
||||
}
|
||||
|
||||
func (r *RotateResult) Add(other *RotateResult) {
|
||||
r.OriginalSize += other.OriginalSize
|
||||
r.NumBytesRead += other.NumBytesRead
|
||||
r.NumBytesKeep += other.NumBytesKeep
|
||||
r.NumLinesRead += other.NumLinesRead
|
||||
@@ -66,9 +80,23 @@ var rotateBytePool = synk.NewBytesPool(0, 16*1024*1024)
|
||||
// If the file does not need to be rotated, it returns nil, nil.
|
||||
func rotateLogFile(file supportRotate, config *Retention) (result *RotateResult, err error) {
|
||||
if config.KeepSize > 0 {
|
||||
return rotateLogFileBySize(file, config)
|
||||
result, err = rotateLogFileBySize(file, config)
|
||||
} else {
|
||||
result, err = rotateLogFileByPolicy(file, config)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err := file.Seek(0, io.SeekEnd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func rotateLogFileByPolicy(file supportRotate, config *Retention) (result *RotateResult, err error) {
|
||||
var shouldStop func() bool
|
||||
t := utils.TimeNow()
|
||||
|
||||
@@ -82,16 +110,21 @@ func rotateLogFile(file supportRotate, config *Retention) (result *RotateResult,
|
||||
return nil, nil // should not happen
|
||||
}
|
||||
|
||||
s := NewBackScanner(file, defaultChunkSize)
|
||||
result = &RotateResult{
|
||||
OriginalSize: s.FileSize(),
|
||||
fileSize, err := file.Size()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// nothing to rotate, return the nothing
|
||||
if result.OriginalSize == 0 {
|
||||
if fileSize == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
s := NewBackScanner(file, fileSize, defaultChunkSize)
|
||||
result = &RotateResult{
|
||||
OriginalSize: fileSize,
|
||||
}
|
||||
|
||||
// Store the line positions and sizes we want to keep
|
||||
linesToKeep := make([]lineInfo, 0)
|
||||
lastLineValid := false
|
||||
@@ -169,6 +202,8 @@ func rotateLogFile(file supportRotate, config *Retention) (result *RotateResult,
|
||||
// Write it to the new position
|
||||
if _, err := file.WriteAt(buf, writePos); err != nil {
|
||||
return nil, err
|
||||
} else if n < line.Size {
|
||||
return nil, gperr.Errorf("%w, writing %d bytes, only %d written", io.ErrShortWrite, line.Size, n)
|
||||
}
|
||||
writePos += n
|
||||
}
|
||||
@@ -187,7 +222,7 @@ func rotateLogFile(file supportRotate, config *Retention) (result *RotateResult,
|
||||
//
|
||||
// Invalid lines will not be detected and included in the result.
|
||||
func rotateLogFileBySize(file supportRotate, config *Retention) (result *RotateResult, err error) {
|
||||
filesize, err := file.Seek(0, io.SeekEnd)
|
||||
filesize, err := file.Size()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -234,7 +269,6 @@ var timeJSON = []byte(`"time":"`)
|
||||
//
|
||||
// The returned time is not validated.
|
||||
func ExtractTime(line []byte) []byte {
|
||||
//TODO: optimize this
|
||||
switch line[0] {
|
||||
case '{': // JSON format
|
||||
if i := bytes.Index(line, timeJSON); i != -1 {
|
||||
|
||||
@@ -1,13 +1,12 @@
package acl
package maxmind

import (
	"github.com/puzpuzpuz/xsync/v3"
	acl "github.com/yusing/go-proxy/internal/acl/types"
)

var cityCache = xsync.NewMapOf[string, *acl.City]()
var cityCache = xsync.NewMapOf[string, *City]()

func (cfg *MaxMindConfig) lookupCity(ip *acl.IPInfo) (*acl.City, bool) {
func (cfg *MaxMind) lookupCity(ip *IPInfo) (*City, bool) {
	if ip.City != nil {
		return ip.City, true
	}
@@ -25,7 +24,7 @@ func (cfg *MaxMindConfig) lookupCity(ip *acl.IPInfo) (*acl.City, bool) {
	cfg.db.RLock()
	defer cfg.db.RUnlock()

	city = new(acl.City)
	city = new(City)
	err := cfg.db.Lookup(ip.IP, city)
	if err != nil {
		return nil, false
31
internal/maxmind/instance.go
Normal file
@@ -0,0 +1,31 @@
package maxmind

import (
	"github.com/yusing/go-proxy/internal/gperr"
	"github.com/yusing/go-proxy/internal/task"
)

var instance *MaxMind

func SetInstance(parent task.Parent, cfg *Config) gperr.Error {
	newInstance := &MaxMind{Config: cfg}
	if err := newInstance.LoadMaxMindDB(parent); err != nil {
		return err
	}
	if instance != nil {
		instance.task.Finish("updated")
	}
	instance = newInstance
	return nil
}

func HasInstance() bool {
	return instance != nil
}

func LookupCity(ip *IPInfo) (*City, bool) {
	if instance == nil {
		return nil, false
	}
	return instance.lookupCity(ip)
}
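Not part of the diff: a small sketch of how the package-level instance above is expected to be wired up and queried. The account credentials, parent (a task.Parent), and ipInfo (a *IPInfo) are placeholders, and the logging/fmt imports are assumed; only SetInstance, LookupCity and the Config fields come from this changeset.

// illustrative only — not part of this commit
cfg := &Config{
	AccountID:  "123456",      // placeholder
	LicenseKey: "license-key", // placeholder
	Database:   "geolite",
}
if err := SetInstance(parent, cfg); err != nil { // parent is some task.Parent
	logging.Error().Err(err).Msg("failed to load MaxMind DB")
}
if city, ok := LookupCity(ipInfo); ok { // ipInfo is a *IPInfo from the ACL layer
	fmt.Println(city.Location)
}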
321
internal/maxmind/maxmind.go
Normal file
@@ -0,0 +1,321 @@
|
||||
package maxmind
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"compress/gzip"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/oschwald/maxminddb-golang"
|
||||
"github.com/yusing/go-proxy/internal/common"
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
maxmind "github.com/yusing/go-proxy/internal/maxmind/types"
|
||||
"github.com/yusing/go-proxy/internal/task"
|
||||
)
|
||||
|
||||
type MaxMind struct {
|
||||
*Config
|
||||
lastUpdate time.Time
|
||||
task *task.Task
|
||||
db struct {
|
||||
*maxminddb.Reader
|
||||
sync.RWMutex
|
||||
}
|
||||
}
|
||||
|
||||
type (
|
||||
Config = maxmind.Config
|
||||
IPInfo = maxmind.IPInfo
|
||||
City = maxmind.City
|
||||
)
|
||||
|
||||
var (
|
||||
updateInterval = 24 * time.Hour
|
||||
httpClient = &http.Client{
|
||||
Timeout: 10 * time.Second,
|
||||
}
|
||||
ErrResponseNotOK = gperr.New("response not OK")
|
||||
ErrDownloadFailure = gperr.New("download failure")
|
||||
)
|
||||
|
||||
func (cfg *MaxMind) dbPath() string {
|
||||
if cfg.Database == maxmind.MaxMindGeoLite {
|
||||
return filepath.Join(dataDir, "GeoLite2-City.mmdb")
|
||||
}
|
||||
return filepath.Join(dataDir, "GeoIP2-City.mmdb")
|
||||
}
|
||||
|
||||
func (cfg *MaxMind) dbURL() string {
|
||||
if cfg.Database == maxmind.MaxMindGeoLite {
|
||||
return "https://download.maxmind.com/geoip/databases/GeoLite2-City/download?suffix=tar.gz"
|
||||
}
|
||||
return "https://download.maxmind.com/geoip/databases/GeoIP2-City/download?suffix=tar.gz"
|
||||
}
|
||||
|
||||
func (cfg *MaxMind) dbFilename() string {
|
||||
if cfg.Database == maxmind.MaxMindGeoLite {
|
||||
return "GeoLite2-City.mmdb"
|
||||
}
|
||||
return "GeoIP2-City.mmdb"
|
||||
}
|
||||
|
||||
func (cfg *MaxMind) LoadMaxMindDB(parent task.Parent) gperr.Error {
|
||||
if cfg.Database == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
cfg.task = parent.Subtask("maxmind_db", true)
|
||||
path := dbPath(cfg)
|
||||
reader, err := maxmindDBOpen(path)
|
||||
valid := true
|
||||
if err != nil {
|
||||
switch {
|
||||
case errors.Is(err, os.ErrNotExist):
|
||||
default:
|
||||
// ignore invalid error, just download it again
|
||||
var invalidErr maxminddb.InvalidDatabaseError
|
||||
if !errors.As(err, &invalidErr) {
|
||||
return gperr.Wrap(err)
|
||||
}
|
||||
}
|
||||
valid = false
|
||||
}
|
||||
|
||||
if !valid {
|
||||
cfg.Logger().Info().Msg("MaxMind DB not found/invalid, downloading...")
|
||||
if err = cfg.download(); err != nil {
|
||||
return ErrDownloadFailure.With(err)
|
||||
}
|
||||
} else {
|
||||
cfg.Logger().Info().Msg("MaxMind DB loaded")
|
||||
cfg.db.Reader = reader
|
||||
go cfg.scheduleUpdate(cfg.task)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cfg *MaxMind) loadLastUpdate() {
|
||||
f, err := os.Stat(cfg.dbPath())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
cfg.lastUpdate = f.ModTime()
|
||||
}
|
||||
|
||||
func (cfg *MaxMind) setLastUpdate(t time.Time) {
|
||||
cfg.lastUpdate = t
|
||||
_ = os.Chtimes(cfg.dbPath(), t, t)
|
||||
}
|
||||
|
||||
func (cfg *MaxMind) scheduleUpdate(parent task.Parent) {
|
||||
task := parent.Subtask("schedule_update", true)
|
||||
ticker := time.NewTicker(updateInterval)
|
||||
|
||||
cfg.loadLastUpdate()
|
||||
cfg.update()
|
||||
|
||||
defer func() {
|
||||
ticker.Stop()
|
||||
if cfg.db.Reader != nil {
|
||||
cfg.db.Reader.Close()
|
||||
}
|
||||
task.Finish(nil)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-task.Context().Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
cfg.update()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cfg *MaxMind) update() {
|
||||
// check for update
|
||||
cfg.Logger().Info().Msg("checking for MaxMind DB update...")
|
||||
remoteLastModified, err := cfg.checkLastest()
|
||||
if err != nil {
|
||||
cfg.Logger().Err(err).Msg("failed to check MaxMind DB update")
|
||||
return
|
||||
}
|
||||
if remoteLastModified.Equal(cfg.lastUpdate) {
|
||||
cfg.Logger().Info().Msg("MaxMind DB is up to date")
|
||||
return
|
||||
}
|
||||
|
||||
cfg.Logger().Info().
|
||||
Time("latest", remoteLastModified.Local()).
|
||||
Time("current", cfg.lastUpdate).
|
||||
Msg("MaxMind DB update available")
|
||||
if err = cfg.download(); err != nil {
|
||||
cfg.Logger().Err(err).Msg("failed to update MaxMind DB")
|
||||
return
|
||||
}
|
||||
cfg.Logger().Info().Msg("MaxMind DB updated")
|
||||
}
|
||||
|
||||
func (cfg *MaxMind) doReq(method string) (*http.Response, error) {
|
||||
req, err := http.NewRequest(method, cfg.dbURL(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.SetBasicAuth(cfg.AccountID, cfg.LicenseKey)
|
||||
resp, err := doReq(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (cfg *MaxMind) checkLastest() (lastModifiedT *time.Time, err error) {
|
||||
resp, err := cfg.doReq(http.MethodHead)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("%w: %d", ErrResponseNotOK, resp.StatusCode)
|
||||
}
|
||||
|
||||
lastModified := resp.Header.Get("Last-Modified")
|
||||
if lastModified == "" {
|
||||
cfg.Logger().Warn().Msg("MaxMind responded no last modified time, update skipped")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
lastModifiedTime, err := time.Parse(http.TimeFormat, lastModified)
|
||||
if err != nil {
|
||||
cfg.Logger().Warn().Err(err).Msg("MaxMind responded invalid last modified time, update skipped")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &lastModifiedTime, nil
|
||||
}
|
||||
|
||||
func (cfg *MaxMind) download() error {
|
||||
resp, err := cfg.doReq(http.MethodGet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("%w: %d", ErrResponseNotOK, resp.StatusCode)
|
||||
}
|
||||
|
||||
dbFile := dbPath(cfg)
|
||||
tmpGZPath := dbFile + "-tmp.tar.gz"
|
||||
tmpDBPath := dbFile + "-tmp"
|
||||
|
||||
tmpGZFile, err := os.OpenFile(tmpGZPath, os.O_CREATE|os.O_RDWR, 0o644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// cleanup the tar.gz file
|
||||
defer func() {
|
||||
_ = tmpGZFile.Close()
|
||||
_ = os.Remove(tmpGZPath)
|
||||
}()
|
||||
|
||||
cfg.Logger().Info().Msg("MaxMind DB downloading...")
|
||||
|
||||
_, err = io.Copy(tmpGZFile, resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := tmpGZFile.Seek(0, io.SeekStart); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// extract .tar.gz and to database
|
||||
err = extractFileFromTarGz(tmpGZFile, cfg.dbFilename(), tmpDBPath)
|
||||
|
||||
if err != nil {
|
||||
return gperr.New("failed to extract database from archive").With(err)
|
||||
}
|
||||
|
||||
// test if the downloaded database is valid
|
||||
db, err := maxmindDBOpen(tmpDBPath)
|
||||
if err != nil {
|
||||
_ = os.Remove(tmpDBPath)
|
||||
return err
|
||||
}
|
||||
|
||||
db.Close()
|
||||
err = os.Rename(tmpDBPath, dbFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cfg.db.Lock()
|
||||
defer cfg.db.Unlock()
|
||||
if cfg.db.Reader != nil {
|
||||
cfg.db.Reader.Close()
|
||||
}
|
||||
cfg.db.Reader, err = maxmindDBOpen(dbFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lastModifiedStr := resp.Header.Get("Last-Modified")
|
||||
lastModifiedTime, err := time.Parse(http.TimeFormat, lastModifiedStr)
|
||||
if err == nil {
|
||||
cfg.setLastUpdate(lastModifiedTime)
|
||||
}
|
||||
|
||||
cfg.Logger().Info().Msg("MaxMind DB downloaded")
|
||||
return nil
|
||||
}
|
||||
|
||||
func extractFileFromTarGz(tarGzFile *os.File, targetFilename, destPath string) error {
|
||||
defer tarGzFile.Close()
|
||||
|
||||
gzr, err := gzip.NewReader(tarGzFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer gzr.Close()
|
||||
|
||||
tr := tar.NewReader(gzr)
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break // End of archive
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Only extract the file that matches targetFilename (basename match)
|
||||
if filepath.Base(hdr.Name) == targetFilename {
|
||||
outFile, err := os.OpenFile(destPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, hdr.FileInfo().Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer outFile.Close()
|
||||
_, err = io.Copy(outFile, tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil // Done
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("file %s not found in archive", targetFilename)
|
||||
}
|
||||
|
||||
var (
|
||||
dataDir = common.DataDir
|
||||
dbPath = (*MaxMind).dbPath
|
||||
doReq = httpClient.Do
|
||||
maxmindDBOpen = maxminddb.Open
|
||||
)
|
||||
131
internal/maxmind/maxmind_test.go
Normal file
@@ -0,0 +1,131 @@
package maxmind

import (
	"archive/tar"
	"compress/gzip"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/oschwald/maxminddb-golang"
	"github.com/rs/zerolog"
	maxmind "github.com/yusing/go-proxy/internal/maxmind/types"
	"github.com/yusing/go-proxy/internal/task"
)

// --- Helper for MaxMindConfig ---
type testLogger struct{ zerolog.Logger }

func (testLogger) Info() *zerolog.Event       { return &zerolog.Event{} }
func (testLogger) Warn() *zerolog.Event       { return &zerolog.Event{} }
func (testLogger) Err(_ error) *zerolog.Event { return &zerolog.Event{} }

func testCfg() *MaxMind {
	return &MaxMind{
		Config: &Config{
			AccountID:  "testid",
			LicenseKey: "testkey",
			Database:   maxmind.MaxMindGeoLite,
		},
	}
}

var testLastMod = time.Now().UTC()

func testDoReq(cfg *MaxMind, w http.ResponseWriter, r *http.Request) {
	if u, p, ok := r.BasicAuth(); !ok || u != "testid" || p != "testkey" {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}
	w.Header().Set("Last-Modified", testLastMod.Format(http.TimeFormat))
	gz := gzip.NewWriter(w)
	t := tar.NewWriter(gz)
	t.WriteHeader(&tar.Header{
		Name: cfg.dbFilename(),
	})
	t.Write([]byte("1234"))
	t.Close()
	gz.Close()
	w.WriteHeader(http.StatusOK)
}

func mockDoReq(cfg *MaxMind, t *testing.T) {
	rw := httptest.NewRecorder()
	oldDoReq := doReq
	doReq = func(req *http.Request) (*http.Response, error) {
		testDoReq(cfg, rw, req)
		return rw.Result(), nil
	}
	t.Cleanup(func() { doReq = oldDoReq })
}

func mockDataDir(t *testing.T) {
	oldDataDir := dataDir
	dataDir = t.TempDir()
	t.Cleanup(func() { dataDir = oldDataDir })
}

func mockMaxMindDBOpen(t *testing.T) {
	oldMaxMindDBOpen := maxmindDBOpen
	maxmindDBOpen = func(path string) (*maxminddb.Reader, error) {
		return &maxminddb.Reader{}, nil
	}
	t.Cleanup(func() { maxmindDBOpen = oldMaxMindDBOpen })
}

func Test_MaxMindConfig_doReq(t *testing.T) {
	cfg := testCfg()
	mockDoReq(cfg, t)
	resp, err := cfg.doReq(http.MethodGet)
	if err != nil {
		t.Fatalf("newReq() error = %v", err)
	}
	if resp.StatusCode != http.StatusOK {
		t.Errorf("unexpected status: %v", resp.StatusCode)
	}
}

func Test_MaxMindConfig_checkLatest(t *testing.T) {
	cfg := testCfg()
	mockDoReq(cfg, t)

	latest, err := cfg.checkLastest()
	if err != nil {
		t.Fatalf("checkLatest() error = %v", err)
	}
	if latest.Equal(testLastMod) {
		t.Errorf("expected latest equal to testLastMod")
	}
}

func Test_MaxMindConfig_download(t *testing.T) {
	cfg := testCfg()
	mockDataDir(t)
	mockMaxMindDBOpen(t)
	mockDoReq(cfg, t)

	err := cfg.download()
	if err != nil {
		t.Fatalf("download() error = %v", err)
	}
	if cfg.db.Reader == nil {
		t.Error("expected db instance")
	}
}

func Test_MaxMindConfig_loadMaxMindDB(t *testing.T) {
	cfg := testCfg()
	mockDataDir(t)
	mockMaxMindDBOpen(t)

	task := task.RootTask("test")
	defer task.Finish(nil)
	err := cfg.LoadMaxMindDB(task)
	if err != nil {
		t.Errorf("loadMaxMindDB() error = %v", err)
	}
	if cfg.db.Reader == nil {
		t.Error("expected db instance")
	}
}
@@ -1,4 +1,4 @@
package acl
package maxmind

type City struct {
	Location struct {
internal/maxmind/types/config.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package maxmind

import (
	"github.com/rs/zerolog"
	"github.com/yusing/go-proxy/internal/gperr"
	"github.com/yusing/go-proxy/internal/logging"
)

type (
	DatabaseType string
	Config       struct {
		AccountID  string       `json:"account_id" validate:"required"`
		LicenseKey string       `json:"license_key" validate:"required"`
		Database   DatabaseType `json:"database" validate:"omitempty,oneof=geolite geoip2"`
	}
)

const (
	MaxMindGeoLite DatabaseType = "geolite"
	MaxMindGeoIP2  DatabaseType = "geoip2"
)

func (cfg *Config) Validate() gperr.Error {
	if cfg.Database == "" {
		cfg.Database = MaxMindGeoLite
	}
	return nil
}

func (cfg *Config) Logger() *zerolog.Logger {
	l := logging.With().Str("database", string(cfg.Database)).Logger()
	return &l
}
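For illustration, the defaulting above means a config that omits `database` ends up on GeoLite, and Logger() tags every log line with that choice. A minimal standalone sketch of the same behavior, with the repo-internal gperr/logging packages swapped for plain zerolog and made-up values:

```go
package main

import (
	"fmt"
	"os"

	"github.com/rs/zerolog"
)

// DatabaseType and Config are trimmed stand-ins for the new types in
// internal/maxmind/types/config.go.
type DatabaseType string

const (
	MaxMindGeoLite DatabaseType = "geolite"
	MaxMindGeoIP2  DatabaseType = "geoip2"
)

type Config struct {
	AccountID  string
	LicenseKey string
	Database   DatabaseType
}

// Validate mirrors the defaulting rule: an empty database means GeoLite.
func (cfg *Config) Validate() error {
	if cfg.Database == "" {
		cfg.Database = MaxMindGeoLite
	}
	return nil
}

func main() {
	cfg := &Config{AccountID: "123", LicenseKey: "key"} // database left empty on purpose
	_ = cfg.Validate()

	// like Logger(): scope the logger by the selected database
	logger := zerolog.New(os.Stdout).With().Str("database", string(cfg.Database)).Logger()
	logger.Info().Msg("maxmind config validated")
	fmt.Println(cfg.Database) // prints "geolite"
}
```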
@@ -1,4 +1,4 @@
package acl
package maxmind

import "net"
@@ -1,14 +1,12 @@
|
||||
package systeminfo // import github.com/yusing/go-proxy/internal/metrics/systeminfo
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/shirou/gopsutil/v4/cpu"
|
||||
@@ -16,56 +14,29 @@ import (
|
||||
"github.com/shirou/gopsutil/v4/mem"
|
||||
"github.com/shirou/gopsutil/v4/net"
|
||||
"github.com/shirou/gopsutil/v4/sensors"
|
||||
"github.com/shirou/gopsutil/v4/warning"
|
||||
"github.com/yusing/go-proxy/internal/common"
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/logging"
|
||||
"github.com/yusing/go-proxy/internal/metrics/period"
|
||||
"github.com/yusing/go-proxy/internal/utils/synk"
|
||||
)
|
||||
|
||||
// json tags are left for tests
|
||||
|
||||
type (
|
||||
MemoryUsage struct {
|
||||
Total uint64 `json:"total"`
|
||||
Available uint64 `json:"available"`
|
||||
Used uint64 `json:"used"`
|
||||
UsedPercent float64 `json:"used_percent"`
|
||||
}
|
||||
Disk struct {
|
||||
Path string `json:"path"`
|
||||
Fstype string `json:"fstype"`
|
||||
Total uint64 `json:"total"`
|
||||
Free uint64 `json:"free"`
|
||||
Used uint64 `json:"used"`
|
||||
UsedPercent float64 `json:"used_percent"`
|
||||
}
|
||||
DiskIO struct {
|
||||
ReadBytes uint64 `json:"read_bytes"`
|
||||
WriteBytes uint64 `json:"write_bytes"`
|
||||
ReadCount uint64 `json:"read_count"`
|
||||
WriteCount uint64 `json:"write_count"`
|
||||
ReadSpeed float64 `json:"read_speed"`
|
||||
WriteSpeed float64 `json:"write_speed"`
|
||||
Iops uint64 `json:"iops"`
|
||||
}
|
||||
Network struct {
|
||||
BytesSent uint64 `json:"bytes_sent"`
|
||||
BytesRecv uint64 `json:"bytes_recv"`
|
||||
UploadSpeed float64 `json:"upload_speed"`
|
||||
DownloadSpeed float64 `json:"download_speed"`
|
||||
}
|
||||
Sensors []sensors.TemperatureStat
|
||||
Aggregated []map[string]any
|
||||
)
|
||||
|
||||
type SystemInfo struct {
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
CPUAverage *float64 `json:"cpu_average"`
|
||||
Memory *MemoryUsage `json:"memory"`
|
||||
Disks map[string]*Disk `json:"disks"` // disk usage by partition
|
||||
DisksIO map[string]*DiskIO `json:"disks_io"` // disk IO by device
|
||||
Network *Network `json:"network"`
|
||||
Sensors Sensors `json:"sensors"` // sensor temperature by key
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
CPUAverage *float64 `json:"cpu_average"`
|
||||
Memory *mem.VirtualMemoryStat `json:"memory"`
|
||||
Disks map[string]*disk.UsageStat `json:"disks"` // disk usage by partition
|
||||
DisksIO map[string]*disk.IOCountersStat `json:"disks_io"` // disk IO by device
|
||||
Network *net.IOCountersStat `json:"network"`
|
||||
Sensors Sensors `json:"sensors"` // sensor temperature by key
|
||||
}
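For illustration, storing gopsutil's own stat types (*mem.VirtualMemoryStat, *disk.UsageStat, *disk.IOCountersStat, *net.IOCountersStat) means the collectors can assign library results as-is instead of copying them into hand-rolled structs. A standalone sketch of that collection pattern, not the project's actual collector, with error handling reduced to the bare minimum:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/shirou/gopsutil/v4/disk"
	"github.com/shirou/gopsutil/v4/mem"
)

// info mirrors the reworked SystemInfo fields: gopsutil stat structs are stored directly.
type info struct {
	Memory *mem.VirtualMemoryStat     `json:"memory"`
	Disks  map[string]*disk.UsageStat `json:"disks"`
}

func main() {
	ctx := context.Background()

	vm, err := mem.VirtualMemoryWithContext(ctx)
	if err != nil {
		panic(err)
	}

	parts, err := disk.PartitionsWithContext(ctx, false)
	if err != nil {
		panic(err)
	}

	s := info{Memory: vm, Disks: make(map[string]*disk.UsageStat, len(parts))}
	for _, p := range parts {
		usage, err := disk.UsageWithContext(ctx, p.Mountpoint)
		if err != nil {
			continue // one unreadable mount should not fail the whole snapshot
		}
		s.Disks[p.Device] = usage // assign the gopsutil struct directly
	}

	out, _ := json.MarshalIndent(s, "", "  ")
	fmt.Println(string(out))
}
```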
|
||||
|
||||
const (
|
||||
@@ -125,10 +96,7 @@ func getSystemInfo(ctx context.Context, lastResult *SystemInfo) (*SystemInfo, er
|
||||
allWarnings := gperr.NewBuilder("")
|
||||
allErrors := gperr.NewBuilder("failed to get system info")
|
||||
errs.ForEach(func(err error) {
|
||||
// disk.Warnings has the same type
|
||||
// all Warnings are alias of common.Warnings from "github.com/shirou/gopsutil/v4/internal/common"
|
||||
// see line 37
|
||||
warnings := new(sensors.Warnings)
|
||||
warnings := new(warning.Warning)
|
||||
if errors.As(err, &warnings) {
|
||||
for _, warning := range warnings.List {
|
||||
allWarnings.Add(warning)
|
||||
@@ -163,12 +131,7 @@ func (s *SystemInfo) collectMemoryInfo(ctx context.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.Memory = &MemoryUsage{
|
||||
Total: memoryInfo.Total,
|
||||
Available: memoryInfo.Available,
|
||||
Used: memoryInfo.Used,
|
||||
UsedPercent: memoryInfo.UsedPercent,
|
||||
}
|
||||
s.Memory = memoryInfo
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -177,43 +140,7 @@ func (s *SystemInfo) collectDisksInfo(ctx context.Context, lastResult *SystemInf
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.DisksIO = make(map[string]*DiskIO, len(ioCounters))
|
||||
for name, io := range ioCounters {
|
||||
// include only /dev/sd* and /dev/nvme* disk devices
|
||||
if len(name) < 3 {
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
case strings.HasPrefix(name, "nvme"),
|
||||
strings.HasPrefix(name, "mmcblk"): // NVMe/SD/MMC
|
||||
if name[len(name)-2] == 'p' {
|
||||
continue // skip partitions
|
||||
}
|
||||
default:
|
||||
switch name[0] {
|
||||
case 's', 'h', 'v': // SCSI/SATA/virtio disks
|
||||
if name[1] != 'd' {
|
||||
continue
|
||||
}
|
||||
case 'x': // Xen virtual disks
|
||||
if name[1:3] != "vd" {
|
||||
continue
|
||||
}
|
||||
default:
|
||||
continue
|
||||
}
|
||||
last := name[len(name)-1]
|
||||
if last >= '0' && last <= '9' {
|
||||
continue // skip partitions
|
||||
}
|
||||
}
|
||||
s.DisksIO[name] = &DiskIO{
|
||||
ReadBytes: io.ReadBytes,
|
||||
WriteBytes: io.WriteBytes,
|
||||
ReadCount: io.ReadCount,
|
||||
WriteCount: io.WriteCount,
|
||||
}
|
||||
}
|
||||
s.DisksIO = ioCounters
|
||||
if lastResult != nil {
|
||||
interval := float64(time.Now().Unix() - lastResult.Timestamp)
|
||||
for name, disk := range s.DisksIO {
|
||||
@@ -229,23 +156,15 @@ func (s *SystemInfo) collectDisksInfo(ctx context.Context, lastResult *SystemInf
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.Disks = make(map[string]*Disk, len(partitions))
|
||||
s.Disks = make(map[string]*disk.UsageStat, len(partitions))
|
||||
errs := gperr.NewBuilder("failed to get disks info")
|
||||
for _, partition := range partitions {
|
||||
d := &Disk{
|
||||
Path: partition.Mountpoint,
|
||||
Fstype: partition.Fstype,
|
||||
}
|
||||
diskInfo, err := disk.UsageWithContext(ctx, partition.Mountpoint)
|
||||
if err != nil {
|
||||
errs.Add(err)
|
||||
continue
|
||||
}
|
||||
d.Total = diskInfo.Total
|
||||
d.Free = diskInfo.Free
|
||||
d.Used = diskInfo.Used
|
||||
d.UsedPercent = diskInfo.UsedPercent
|
||||
s.Disks[partition.Device] = d
|
||||
s.Disks[partition.Device] = diskInfo
|
||||
}
|
||||
|
||||
if errs.HasError() {
|
||||
@@ -262,10 +181,7 @@ func (s *SystemInfo) collectNetworkInfo(ctx context.Context, lastResult *SystemI
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.Network = &Network{
|
||||
BytesSent: networkIO[0].BytesSent,
|
||||
BytesRecv: networkIO[0].BytesRecv,
|
||||
}
|
||||
s.Network = networkIO[0]
|
||||
if lastResult != nil {
|
||||
interval := float64(time.Now().Unix() - lastResult.Timestamp)
|
||||
s.Network.UploadSpeed = float64(networkIO[0].BytesSent-lastResult.Network.BytesSent) / interval
|
||||
@@ -276,53 +192,59 @@ func (s *SystemInfo) collectNetworkInfo(ctx context.Context, lastResult *SystemI
|
||||
|
||||
func (s *SystemInfo) collectSensorsInfo(ctx context.Context) error {
|
||||
sensorsInfo, err := sensors.TemperaturesWithContext(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.Sensors = sensorsInfo
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
|
||||
var bufPool = synk.NewBytesPool(1024, 16384)
|
||||
|
||||
// explicitly implement MarshalJSON to avoid reflection
|
||||
func (s *SystemInfo) MarshalJSON() ([]byte, error) {
|
||||
b := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
b := bufPool.Get()
|
||||
defer bufPool.Put(b)
|
||||
|
||||
b.WriteRune('{')
|
||||
b = append(b, '{')
|
||||
|
||||
// timestamp
|
||||
b.WriteString(`"timestamp":`)
|
||||
b.WriteString(strconv.FormatInt(s.Timestamp, 10))
|
||||
b = append(b, `"timestamp":`...)
|
||||
b = strconv.AppendInt(b, s.Timestamp, 10)
|
||||
|
||||
// cpu_average
|
||||
b.WriteString(`,"cpu_average":`)
|
||||
b = append(b, `,"cpu_average":`...)
|
||||
if s.CPUAverage != nil {
|
||||
b.WriteString(strconv.FormatFloat(*s.CPUAverage, 'f', 2, 64))
|
||||
b = strconv.AppendFloat(b, *s.CPUAverage, 'f', 2, 64)
|
||||
} else {
|
||||
b.WriteString("null")
|
||||
b = append(b, "null"...)
|
||||
}
|
||||
|
||||
// memory
|
||||
b.WriteString(`,"memory":`)
|
||||
b = append(b, `,"memory":`...)
|
||||
if s.Memory != nil {
|
||||
b.WriteString(fmt.Sprintf(
|
||||
`{"total":%d,"available":%d,"used":%d,"used_percent":%s}`,
|
||||
b = fmt.Appendf(b,
|
||||
`{"total":%d,"available":%d,"used":%d,"used_percent":%.2f}`,
|
||||
s.Memory.Total,
|
||||
s.Memory.Available,
|
||||
s.Memory.Used,
|
||||
strconv.FormatFloat(s.Memory.UsedPercent, 'f', 2, 64),
|
||||
))
|
||||
s.Memory.UsedPercent,
|
||||
)
|
||||
} else {
|
||||
b.WriteString("null")
|
||||
b = append(b, "null"...)
|
||||
}
|
||||
|
||||
// disk
|
||||
b.WriteString(`,"disks":`)
|
||||
b = append(b, `,"disks":`...)
|
||||
if len(s.Disks) > 0 {
|
||||
b.WriteString("{")
|
||||
b = append(b, '{')
|
||||
first := true
|
||||
for device, disk := range s.Disks {
|
||||
if !first {
|
||||
b.WriteRune(',')
|
||||
b = append(b, ',')
|
||||
}
|
||||
b.WriteString(fmt.Sprintf(
|
||||
`"%s":{"device":%q,"path":%q,"fstype":%q,"total":%d,"free":%d,"used":%d,"used_percent":%s}`,
|
||||
b = fmt.Appendf(b,
|
||||
`"%s":{"device":"%s","path":"%s","fstype":"%s","total":%d,"free":%d,"used":%d,"used_percent":%.2f}`,
|
||||
device,
|
||||
device,
|
||||
disk.Path,
|
||||
@@ -330,81 +252,81 @@ func (s *SystemInfo) MarshalJSON() ([]byte, error) {
|
||||
disk.Total,
|
||||
disk.Free,
|
||||
disk.Used,
|
||||
strconv.FormatFloat(float64(disk.UsedPercent), 'f', 2, 32),
|
||||
))
|
||||
disk.UsedPercent,
|
||||
)
|
||||
first = false
|
||||
}
|
||||
b.WriteRune('}')
|
||||
b = append(b, '}')
|
||||
} else {
|
||||
b.WriteString("null")
|
||||
b = append(b, "null"...)
|
||||
}
|
||||
|
||||
// disks_io
|
||||
b.WriteString(`,"disks_io":`)
|
||||
b = append(b, `,"disks_io":`...)
|
||||
if len(s.DisksIO) > 0 {
|
||||
b.WriteString("{")
|
||||
b = append(b, '{')
|
||||
first := true
|
||||
for name, usage := range s.DisksIO {
|
||||
if !first {
|
||||
b.WriteRune(',')
|
||||
b = append(b, ',')
|
||||
}
|
||||
b.WriteString(fmt.Sprintf(
|
||||
`"%s":{"name":%q,"read_bytes":%d,"write_bytes":%d,"read_speed":%s,"write_speed":%s,"iops":%d}`,
|
||||
b = fmt.Appendf(b,
|
||||
`"%s":{"name":"%s","read_bytes":%d,"write_bytes":%d,"read_speed":%.2f,"write_speed":%.2f,"iops":%d}`,
|
||||
name,
|
||||
name,
|
||||
usage.ReadBytes,
|
||||
usage.WriteBytes,
|
||||
strconv.FormatFloat(usage.ReadSpeed, 'f', 2, 64),
|
||||
strconv.FormatFloat(usage.WriteSpeed, 'f', 2, 64),
|
||||
usage.ReadSpeed,
|
||||
usage.WriteSpeed,
|
||||
usage.Iops,
|
||||
))
|
||||
)
|
||||
first = false
|
||||
}
|
||||
b.WriteRune('}')
|
||||
b = append(b, '}')
|
||||
} else {
|
||||
b.WriteString("null")
|
||||
b = append(b, "null"...)
|
||||
}
|
||||
|
||||
// network
|
||||
b.WriteString(`,"network":`)
|
||||
b = append(b, `,"network":`...)
|
||||
if s.Network != nil {
|
||||
b.WriteString(fmt.Sprintf(
|
||||
`{"bytes_sent":%d,"bytes_recv":%d,"upload_speed":%s,"download_speed":%s}`,
|
||||
b = fmt.Appendf(b,
|
||||
`{"bytes_sent":%d,"bytes_recv":%d,"upload_speed":%.2f,"download_speed":%.2f}`,
|
||||
s.Network.BytesSent,
|
||||
s.Network.BytesRecv,
|
||||
strconv.FormatFloat(s.Network.UploadSpeed, 'f', 2, 64),
|
||||
strconv.FormatFloat(s.Network.DownloadSpeed, 'f', 2, 64),
|
||||
))
|
||||
s.Network.UploadSpeed,
|
||||
s.Network.DownloadSpeed,
|
||||
)
|
||||
} else {
|
||||
b.WriteString("null")
|
||||
b = append(b, "null"...)
|
||||
}
|
||||
|
||||
// sensors
|
||||
b.WriteString(`,"sensors":`)
|
||||
b = append(b, `,"sensors":`...)
|
||||
if len(s.Sensors) > 0 {
|
||||
b.WriteString("{")
|
||||
b = append(b, '{')
|
||||
first := true
|
||||
for _, sensor := range s.Sensors {
|
||||
if !first {
|
||||
b.WriteRune(',')
|
||||
b = append(b, ',')
|
||||
}
|
||||
b.WriteString(fmt.Sprintf(
|
||||
`%q:{"name":%q,"temperature":%s,"high":%s,"critical":%s}`,
|
||||
b = fmt.Appendf(b,
|
||||
`"%s":{"name":"%s","temperature":%.2f,"high":%.2f,"critical":%.2f}`,
|
||||
sensor.SensorKey,
|
||||
sensor.SensorKey,
|
||||
strconv.FormatFloat(float64(sensor.Temperature), 'f', 2, 32),
|
||||
strconv.FormatFloat(float64(sensor.High), 'f', 2, 32),
|
||||
strconv.FormatFloat(float64(sensor.Critical), 'f', 2, 32),
|
||||
))
|
||||
sensor.Temperature,
|
||||
sensor.High,
|
||||
sensor.Critical,
|
||||
)
|
||||
first = false
|
||||
}
|
||||
b.WriteRune('}')
|
||||
b = append(b, '}')
|
||||
} else {
|
||||
b.WriteString("null")
|
||||
b = append(b, "null"...)
|
||||
}
|
||||
|
||||
b.WriteRune('}')
|
||||
return []byte(b.String()), nil
|
||||
b = append(b, '}')
|
||||
return b, nil
|
||||
}
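For illustration, the MarshalJSON above moves from bytes.Buffer calls to append-style building on a pooled []byte (the project's synk.NewBytesPool plus strconv.Append* and fmt.Appendf). A minimal standalone sketch of the same pattern using the standard library's sync.Pool instead of the synk pool; field names and sizes are made up:

```go
package main

import (
	"fmt"
	"strconv"
	"sync"
)

// bufPool stands in for synk.NewBytesPool: reuse one backing slice per call and
// build JSON with append helpers instead of a bytes.Buffer.
var bufPool = sync.Pool{
	New: func() any { return make([]byte, 0, 1024) },
}

func marshalSample(timestamp int64, cpu float64) []byte {
	b := bufPool.Get().([]byte)[:0]

	b = append(b, '{')
	b = append(b, `"timestamp":`...)
	b = strconv.AppendInt(b, timestamp, 10)
	b = append(b, `,"cpu_average":`...)
	b = strconv.AppendFloat(b, cpu, 'f', 2, 64)
	b = fmt.Appendf(b, `,"note":%q`, "built without reflection")
	b = append(b, '}')

	out := make([]byte, len(b))
	copy(out, b)   // hand back a copy so the pooled buffer can be reused safely
	bufPool.Put(b) // return the (possibly grown) slice to the pool
	return out
}

func main() {
	fmt.Println(string(marshalSample(123456, 45.678)))
}
```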
|
||||
|
||||
func (s *Sensors) UnmarshalJSON(data []byte) error {
|
||||
@@ -548,41 +470,42 @@ func aggregate(entries []*SystemInfo, query url.Values) (total int, result Aggre
|
||||
}
|
||||
|
||||
func (result Aggregated) MarshalJSON() ([]byte, error) {
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
buf := bufPool.Get()
|
||||
defer bufPool.Put(buf)
|
||||
|
||||
buf.WriteByte('[')
|
||||
buf = append(buf, '[')
|
||||
i := 0
|
||||
n := len(result)
|
||||
for _, entry := range result {
|
||||
buf.WriteRune('{')
|
||||
buf = append(buf, '{')
|
||||
j := 0
|
||||
m := len(entry)
|
||||
for k, v := range entry {
|
||||
buf.WriteByte('"')
|
||||
buf.WriteString(k)
|
||||
buf.WriteByte('"')
|
||||
buf.WriteByte(':')
|
||||
buf = append(buf, '"')
|
||||
buf = append(buf, k...)
|
||||
buf = append(buf, '"')
|
||||
buf = append(buf, ':')
|
||||
switch v := v.(type) {
|
||||
case float64:
|
||||
buf.WriteString(strconv.FormatFloat(v, 'f', 2, 64))
|
||||
buf = strconv.AppendFloat(buf, v, 'f', 2, 64)
|
||||
case uint64:
|
||||
buf.WriteString(strconv.FormatUint(v, 10))
|
||||
buf = strconv.AppendUint(buf, v, 10)
|
||||
case int64:
|
||||
buf.WriteString(strconv.FormatInt(v, 10))
|
||||
buf = strconv.AppendInt(buf, v, 10)
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected type: %T", v))
|
||||
}
|
||||
if j != m-1 {
|
||||
buf.WriteByte(',')
|
||||
buf = append(buf, ',')
|
||||
}
|
||||
j++
|
||||
}
|
||||
buf.WriteByte('}')
|
||||
buf = append(buf, '}')
|
||||
if i != n-1 {
|
||||
buf.WriteByte(',')
|
||||
buf = append(buf, ',')
|
||||
}
|
||||
i++
|
||||
}
|
||||
buf.WriteByte(']')
|
||||
return buf.Bytes(), nil
|
||||
buf = append(buf, ']')
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
@@ -6,6 +6,9 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/shirou/gopsutil/v4/disk"
|
||||
"github.com/shirou/gopsutil/v4/mem"
|
||||
"github.com/shirou/gopsutil/v4/net"
|
||||
"github.com/shirou/gopsutil/v4/sensors"
|
||||
. "github.com/yusing/go-proxy/internal/utils/testing"
|
||||
)
|
||||
@@ -15,13 +18,13 @@ var cpuAvg = 45.67
|
||||
var testInfo = &SystemInfo{
|
||||
Timestamp: 123456,
|
||||
CPUAverage: &cpuAvg,
|
||||
Memory: &MemoryUsage{
|
||||
Memory: &mem.VirtualMemoryStat{
|
||||
Total: 16000000000,
|
||||
Available: 8000000000,
|
||||
Used: 8000000000,
|
||||
UsedPercent: 50.0,
|
||||
},
|
||||
Disks: map[string]*Disk{
|
||||
Disks: map[string]*disk.UsageStat{
|
||||
"sda": {
|
||||
Path: "/",
|
||||
Fstype: "ext4",
|
||||
@@ -39,8 +42,9 @@ var testInfo = &SystemInfo{
|
||||
UsedPercent: 50.0,
|
||||
},
|
||||
},
|
||||
DisksIO: map[string]*DiskIO{
|
||||
DisksIO: map[string]*disk.IOCountersStat{
|
||||
"media": {
|
||||
Name: "media",
|
||||
ReadBytes: 1000000,
|
||||
WriteBytes: 2000000,
|
||||
ReadSpeed: 100.5,
|
||||
@@ -48,6 +52,7 @@ var testInfo = &SystemInfo{
|
||||
Iops: 1000,
|
||||
},
|
||||
"nvme0n1": {
|
||||
Name: "nvme0n1",
|
||||
ReadBytes: 1000000,
|
||||
WriteBytes: 2000000,
|
||||
ReadSpeed: 100.5,
|
||||
@@ -55,7 +60,7 @@ var testInfo = &SystemInfo{
|
||||
Iops: 1000,
|
||||
},
|
||||
},
|
||||
Network: &Network{
|
||||
Network: &net.IOCountersStat{
|
||||
BytesSent: 5000000,
|
||||
BytesRecv: 10000000,
|
||||
UploadSpeed: 1024.5,
|
||||
@@ -142,7 +147,7 @@ func TestSerialize(t *testing.T) {
|
||||
|
||||
func BenchmarkSerialize(b *testing.B) {
|
||||
entries := make([]*SystemInfo, b.N)
|
||||
for i := 0; i < b.N; i++ {
|
||||
for i := range b.N {
|
||||
entries[i] = testInfo
|
||||
}
|
||||
queries := map[string]Aggregated{}
|
||||
@@ -152,15 +157,25 @@ func BenchmarkSerialize(b *testing.B) {
|
||||
}
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
b.Run("optimized-non-query", func(b *testing.B) {
|
||||
for b.Loop() {
|
||||
_, _ = testInfo.MarshalJSON()
|
||||
}
|
||||
})
|
||||
b.Run("json-non-query", func(b *testing.B) {
|
||||
for b.Loop() {
|
||||
_, _ = json.Marshal(testInfo)
|
||||
}
|
||||
})
|
||||
b.Run("optimized", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
for _, query := range allQueries {
|
||||
_, _ = queries[query].MarshalJSON()
|
||||
}
|
||||
}
|
||||
})
|
||||
b.Run("json", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
for _, query := range allQueries {
|
||||
_, _ = json.Marshal([]map[string]any(queries[query]))
|
||||
}
|
||||
|
||||
@@ -10,6 +10,15 @@ type (
|
||||
AcceptContentType []ContentType
|
||||
)
|
||||
|
||||
const (
|
||||
ContentTypeJSON = ContentType("application/json")
|
||||
ContentTypeTextPlain = ContentType("text/plain")
|
||||
ContentTypeTextHTML = ContentType("text/html")
|
||||
ContentTypeTextMarkdown = ContentType("text/markdown")
|
||||
ContentTypeTextXML = ContentType("text/xml")
|
||||
ContentTypeXHTML = ContentType("application/xhtml+xml")
|
||||
)
|
||||
|
||||
func GetContentType(h http.Header) ContentType {
|
||||
ct := h.Get("Content-Type")
|
||||
if ct == "" {
|
||||
@@ -35,15 +44,15 @@ func GetAccept(h http.Header) AcceptContentType {
|
||||
}
|
||||
|
||||
func (ct ContentType) IsHTML() bool {
|
||||
return ct == "text/html" || ct == "application/xhtml+xml"
|
||||
return ct == ContentTypeTextHTML || ct == ContentTypeXHTML
|
||||
}
|
||||
|
||||
func (ct ContentType) IsJSON() bool {
|
||||
return ct == "application/json"
|
||||
return ct == ContentTypeJSON
|
||||
}
|
||||
|
||||
func (ct ContentType) IsPlainText() bool {
|
||||
return ct == "text/plain"
|
||||
return ct == ContentTypeTextPlain
|
||||
}
|
||||
|
||||
func (act AcceptContentType) IsEmpty() bool {
|
||||
@@ -68,6 +77,15 @@ func (act AcceptContentType) AcceptJSON() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (act AcceptContentType) AcceptMarkdown() bool {
|
||||
for _, v := range act {
|
||||
if v == ContentTypeTextMarkdown || v == "*/*" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (act AcceptContentType) AcceptPlainText() bool {
|
||||
for _, v := range act {
|
||||
if v.IsPlainText() || v == "text/*" || v == "*/*" {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"syscall"

@@ -40,15 +41,22 @@ func ServerError(w http.ResponseWriter, r *http.Request, err error, code ...int)
//
// For JSON marshallable errors (e.g. gperr.Error), it returns the error details as JSON.
// Otherwise, it returns the error details as plain text.
func ClientError(w http.ResponseWriter, err error, code ...int) {
func ClientError(w http.ResponseWriter, r *http.Request, err error, code ...int) {
	if len(code) == 0 {
		code = []int{http.StatusBadRequest}
	}
	if gperr.IsJSONMarshallable(err) {
	w.WriteHeader(code[0])
	accept := GetAccept(r.Header)
	switch {
	case accept.AcceptJSON():
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(err)
	} else {
		http.Error(w, err.Error(), code[0])
	case accept.AcceptMarkdown():
		w.Header().Set("Content-Type", "text/markdown")
		w.Write(gperr.Markdown(err))
	default:
		w.Header().Set("Content-Type", "text/plain")
		w.Write(gperr.Plain(err))
	}
}
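For illustration, the new signature lets handlers pass the request to ClientError so the error body follows the client's Accept header (JSON, markdown, or plain text). A self-contained handler-side sketch of the same negotiation idea; it does not use the package's GetAccept helper, and it sets Content-Type before WriteHeader as plain net/http requires:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

// writeClientError mimics the reworked ClientError: pick the body format from Accept.
func writeClientError(w http.ResponseWriter, r *http.Request, err error, code int) {
	accept := r.Header.Get("Accept")

	var contentType, body string
	switch {
	case strings.Contains(accept, "application/json"):
		buf, _ := json.Marshal(map[string]string{"error": err.Error()})
		contentType, body = "application/json", string(buf)
	case strings.Contains(accept, "text/markdown"):
		contentType, body = "text/markdown", "#### error\n"+err.Error()+"\n"
	default:
		contentType, body = "text/plain", err.Error()+"\n"
	}

	w.Header().Set("Content-Type", contentType)
	w.WriteHeader(code) // headers must be set before WriteHeader
	_, _ = w.Write([]byte(body))
}

func main() {
	req := httptest.NewRequest(http.MethodGet, "/demo", nil)
	req.Header.Set("Accept", "text/markdown")

	rec := httptest.NewRecorder()
	writeClientError(rec, req, errors.New(`route "foo" not found`), http.StatusNotFound)

	fmt.Println(rec.Code, rec.Header().Get("Content-Type"))
	fmt.Println(rec.Body.String())
}
```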
|
||||
|
||||
@@ -83,18 +91,18 @@ func NotFound(w http.ResponseWriter, err string) {
|
||||
BadRequest(w, err, http.StatusNotFound)
|
||||
}
|
||||
|
||||
func ErrMissingKey(k string) error {
|
||||
return gperr.New(k + " is required")
|
||||
func MissingKey(w http.ResponseWriter, k string) {
|
||||
BadRequest(w, k+" is required", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
func ErrInvalidKey(k string) error {
|
||||
return gperr.New(k + " is invalid")
|
||||
func InvalidKey(w http.ResponseWriter, k string) {
|
||||
BadRequest(w, k+" is invalid", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
func ErrAlreadyExists(k, v string) error {
|
||||
return gperr.Errorf("%s %q already exists", k, v)
|
||||
func KeyAlreadyExists(w http.ResponseWriter, k, v string) {
|
||||
BadRequest(w, fmt.Sprintf("%s %q already exists", k, v), http.StatusBadRequest)
|
||||
}
|
||||
|
||||
func ErrNotFound(k, v string) error {
|
||||
return gperr.Errorf("%s %q not found", k, v)
|
||||
func ValueNotFound(w http.ResponseWriter, k, v string) {
|
||||
BadRequest(w, fmt.Sprintf("%s %q not found", k, v), http.StatusNotFound)
|
||||
}
|
||||
|
||||
@@ -133,11 +133,6 @@ func (lb *LoadBalancer) AddServer(srv Server) {
|
||||
|
||||
lb.rebalance()
|
||||
lb.impl.OnAddServer(srv)
|
||||
|
||||
lb.l.Debug().
|
||||
Str("action", "add").
|
||||
Str("server", srv.Name()).
|
||||
Msgf("%d servers available", lb.pool.Size())
|
||||
}
|
||||
|
||||
func (lb *LoadBalancer) RemoveServer(srv Server) {
|
||||
|
||||
@@ -17,8 +17,7 @@ import (
|
||||
)
|
||||
|
||||
type cloudflareRealIP struct {
|
||||
realIP realIP
|
||||
Recursive bool
|
||||
realIP realIP
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -47,7 +46,7 @@ var CloudflareRealIP = NewMiddleware[cloudflareRealIP]()
|
||||
func (cri *cloudflareRealIP) setup() {
|
||||
cri.realIP.RealIPOpts = RealIPOpts{
|
||||
Header: "CF-Connecting-IP",
|
||||
Recursive: cri.Recursive,
|
||||
Recursive: true,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -118,7 +118,7 @@ func (m *Middleware) apply(optsRaw OptionsRaw) gperr.Error {
|
||||
} else {
|
||||
m.priority = DefaultPriority
|
||||
}
|
||||
return utils.Deserialize(optsRaw, m.impl)
|
||||
return utils.MapUnmarshalValidate(optsRaw, m.impl)
|
||||
}
|
||||
|
||||
func (m *Middleware) finalize() error {
|
||||
|
||||
@@ -35,8 +35,8 @@ var allMiddlewares = map[string]*Middleware{
|
||||
}
|
||||
|
||||
var (
|
||||
ErrUnknownMiddleware = gperr.New("unknown middleware")
|
||||
ErrDuplicatedMiddleware = gperr.New("duplicated middleware")
|
||||
ErrUnknownMiddleware = gperr.New("unknown middleware")
|
||||
ErrMiddlewareAlreadyExists = gperr.New("middleware with the same name already exists")
|
||||
)
|
||||
|
||||
func Get(name string) (*Middleware, Error) {
|
||||
@@ -44,7 +44,7 @@ func Get(name string) (*Middleware, Error) {
|
||||
if !ok {
|
||||
return nil, ErrUnknownMiddleware.
|
||||
Subject(name).
|
||||
Withf(strutils.DoYouMean(utils.NearestField(name, allMiddlewares)))
|
||||
With(gperr.DoYouMean(utils.NearestField(name, allMiddlewares)))
|
||||
}
|
||||
return middleware, nil
|
||||
}
|
||||
@@ -69,7 +69,7 @@ func LoadComposeFiles() {
|
||||
for name, m := range mws {
|
||||
name = strutils.ToLowerNoSnake(name)
|
||||
if _, ok := allMiddlewares[name]; ok {
|
||||
errs.Add(ErrDuplicatedMiddleware.Subject(name))
|
||||
errs.Add(ErrMiddlewareAlreadyExists.Subject(name))
|
||||
continue
|
||||
}
|
||||
allMiddlewares[name] = m
|
||||
|
||||
@@ -10,20 +10,28 @@ import (
|
||||
)
|
||||
|
||||
type ProviderBase struct {
|
||||
Name string `json:"name" validate:"required"`
|
||||
URL string `json:"url" validate:"url"`
|
||||
Token string `json:"token"`
|
||||
Name string `json:"name" validate:"required"`
|
||||
URL string `json:"url" validate:"url"`
|
||||
Token string `json:"token"`
|
||||
Format *LogFormat `json:"format"`
|
||||
}
|
||||
|
||||
type rawError []byte
|
||||
|
||||
func (e rawError) Error() string {
|
||||
return string(e)
|
||||
}
|
||||
|
||||
var (
|
||||
ErrMissingToken = gperr.New("token is required")
|
||||
ErrURLMissingScheme = gperr.New("url missing scheme, expect 'http://' or 'https://'")
|
||||
ErrUnknownError = gperr.New("unknown error")
|
||||
)
|
||||
|
||||
// Validate implements the utils.CustomValidator interface.
|
||||
func (base *ProviderBase) Validate() gperr.Error {
|
||||
if base.Token == "" {
|
||||
return ErrMissingToken
|
||||
if base.Format == nil {
|
||||
base.Format = LogFormatMarkdown
|
||||
}
|
||||
if !strings.HasPrefix(base.URL, "http://") && !strings.HasPrefix(base.URL, "https://") {
|
||||
return ErrURLMissingScheme
|
||||
@@ -60,10 +68,10 @@ func (base *ProviderBase) SetHeaders(logMsg *LogMessage, headers http.Header) {
|
||||
// no-op by default
|
||||
}
|
||||
|
||||
func (base *ProviderBase) makeRespError(resp *http.Response) error {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
return gperr.Errorf("%s status %d: %s", base.Name, resp.StatusCode, body)
|
||||
func (base *ProviderBase) fmtError(respBody io.Reader) error {
|
||||
body, err := io.ReadAll(respBody)
|
||||
if err == nil && len(body) > 0 {
|
||||
return rawError(body)
|
||||
}
|
||||
return gperr.Errorf("%s status %d", base.Name, resp.StatusCode)
|
||||
return ErrUnknownError
|
||||
}
|
||||
|
||||
internal/notif/body.go (new file, 130 lines)
@@ -0,0 +1,130 @@
|
||||
package notif
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
)
|
||||
|
||||
type (
|
||||
LogField struct {
|
||||
Name string `json:"name"`
|
||||
Value string `json:"value"`
|
||||
}
|
||||
LogFormat struct {
|
||||
string
|
||||
}
|
||||
LogBody interface {
|
||||
Format(format *LogFormat) ([]byte, error)
|
||||
}
|
||||
)
|
||||
|
||||
type (
|
||||
FieldsBody []LogField
|
||||
ListBody []string
|
||||
MessageBody string
|
||||
ErrorBody struct {
|
||||
Error error
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
LogFormatMarkdown = &LogFormat{"markdown"}
|
||||
LogFormatPlain = &LogFormat{"plain"}
|
||||
LogFormatRawJSON = &LogFormat{"json"} // internal use only
|
||||
)
|
||||
|
||||
func MakeLogFields(fields ...LogField) LogBody {
|
||||
return FieldsBody(fields)
|
||||
}
|
||||
|
||||
func (f *LogFormat) Parse(format string) error {
|
||||
switch format {
|
||||
case "":
|
||||
f.string = LogFormatMarkdown.string
|
||||
case LogFormatPlain.string, LogFormatMarkdown.string:
|
||||
f.string = format
|
||||
default:
|
||||
return gperr.Multiline().
|
||||
Addf("invalid log format %s, supported formats:", format).
|
||||
AddLines(
|
||||
LogFormatPlain,
|
||||
LogFormatMarkdown,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FieldsBody) Add(name, value string) {
|
||||
*f = append(*f, LogField{Name: name, Value: value})
|
||||
}
|
||||
|
||||
func (f FieldsBody) Format(format *LogFormat) ([]byte, error) {
|
||||
switch format {
|
||||
case LogFormatMarkdown:
|
||||
var msg bytes.Buffer
|
||||
for _, field := range f {
|
||||
msg.WriteString("#### ")
|
||||
msg.WriteString(field.Name)
|
||||
msg.WriteRune('\n')
|
||||
msg.WriteString(field.Value)
|
||||
msg.WriteRune('\n')
|
||||
}
|
||||
return msg.Bytes(), nil
|
||||
case LogFormatPlain:
|
||||
var msg bytes.Buffer
|
||||
for _, field := range f {
|
||||
msg.WriteString(field.Name)
|
||||
msg.WriteString(": ")
|
||||
msg.WriteString(field.Value)
|
||||
msg.WriteRune('\n')
|
||||
}
|
||||
return msg.Bytes(), nil
|
||||
case LogFormatRawJSON:
|
||||
return json.Marshal(f)
|
||||
}
|
||||
return nil, fmt.Errorf("unknown format: %v", format)
|
||||
}
|
||||
|
||||
func (l ListBody) Format(format *LogFormat) ([]byte, error) {
|
||||
switch format {
|
||||
case LogFormatPlain:
|
||||
return []byte(strings.Join(l, "\n")), nil
|
||||
case LogFormatMarkdown:
|
||||
var msg bytes.Buffer
|
||||
for _, item := range l {
|
||||
msg.WriteString("* ")
|
||||
msg.WriteString(item)
|
||||
msg.WriteRune('\n')
|
||||
}
|
||||
return msg.Bytes(), nil
|
||||
case LogFormatRawJSON:
|
||||
return json.Marshal(l)
|
||||
}
|
||||
return nil, fmt.Errorf("unknown format: %v", format)
|
||||
}
|
||||
|
||||
func (m MessageBody) Format(format *LogFormat) ([]byte, error) {
|
||||
switch format {
|
||||
case LogFormatPlain, LogFormatMarkdown:
|
||||
return []byte(m), nil
|
||||
case LogFormatRawJSON:
|
||||
return json.Marshal(m)
|
||||
}
|
||||
return nil, fmt.Errorf("unknown format: %v", format)
|
||||
}
|
||||
|
||||
func (e ErrorBody) Format(format *LogFormat) ([]byte, error) {
|
||||
switch format {
|
||||
case LogFormatRawJSON:
|
||||
return json.Marshal(e)
|
||||
case LogFormatPlain:
|
||||
return gperr.Plain(e.Error), nil
|
||||
case LogFormatMarkdown:
|
||||
return gperr.Markdown(e.Error), nil
|
||||
}
|
||||
return nil, fmt.Errorf("unknown format: %v", format)
|
||||
}
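For illustration, a single LogBody can now render as markdown, plain text, or raw JSON depending on the provider's configured format, so providers no longer carry their own formatting helpers. A standalone sketch of the FieldsBody-style rendering rules, with simplified stand-in types for notif.LogField:

```go
package main

import (
	"fmt"
	"strings"
)

// field is a stand-in for notif.LogField.
type field struct{ Name, Value string }

// formatFields mirrors FieldsBody.Format for the markdown and plain formats.
func formatFields(fields []field, markdown bool) string {
	var b strings.Builder
	for _, f := range fields {
		if markdown {
			// markdown: each field becomes a level-4 heading followed by its value
			b.WriteString("#### ")
			b.WriteString(f.Name)
			b.WriteString("\n")
			b.WriteString(f.Value)
			b.WriteString("\n")
		} else {
			// plain: simple "name: value" lines
			b.WriteString(f.Name)
			b.WriteString(": ")
			b.WriteString(f.Value)
			b.WriteString("\n")
		}
	}
	return b.String()
}

func main() {
	fields := []field{{"Container", "nginx"}, {"Status", "unhealthy"}}
	fmt.Print(formatFields(fields, true))
	fmt.Println("---")
	fmt.Print(formatFields(fields, false))
}
```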
|
||||
@@ -46,11 +46,5 @@ func (cfg *NotificationConfig) UnmarshalMap(m map[string]any) (err gperr.Error)
|
||||
Withf("expect %s or %s", ProviderWebhook, ProviderGotify)
|
||||
}
|
||||
|
||||
// unmarshal provider config
|
||||
if err := utils.Deserialize(m, cfg.Provider); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// validate provider
|
||||
return cfg.Provider.Validate()
|
||||
return utils.MapUnmarshalValidate(m, cfg.Provider)
|
||||
}
|
||||
|
||||
@@ -25,8 +25,9 @@ func TestNotificationConfig(t *testing.T) {
|
||||
},
|
||||
expected: &Webhook{
|
||||
ProviderBase: ProviderBase{
|
||||
Name: "test",
|
||||
URL: "https://example.com",
|
||||
Name: "test",
|
||||
URL: "https://example.com",
|
||||
Format: LogFormatMarkdown,
|
||||
},
|
||||
Template: "discord",
|
||||
Method: http.MethodPost,
|
||||
@@ -43,12 +44,32 @@ func TestNotificationConfig(t *testing.T) {
|
||||
"provider": "gotify",
|
||||
"url": "https://example.com",
|
||||
"token": "token",
|
||||
"format": "plain",
|
||||
},
|
||||
expected: &GotifyClient{
|
||||
ProviderBase: ProviderBase{
|
||||
Name: "test",
|
||||
URL: "https://example.com",
|
||||
Token: "token",
|
||||
Name: "test",
|
||||
URL: "https://example.com",
|
||||
Token: "token",
|
||||
Format: LogFormatPlain,
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "default_format",
|
||||
cfg: map[string]any{
|
||||
"name": "test",
|
||||
"provider": "gotify",
|
||||
"token": "token",
|
||||
"url": "https://example.com",
|
||||
},
|
||||
expected: &GotifyClient{
|
||||
ProviderBase: ProviderBase{
|
||||
Name: "test",
|
||||
URL: "https://example.com",
|
||||
Token: "token",
|
||||
Format: LogFormatMarkdown,
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
@@ -62,6 +83,16 @@ func TestNotificationConfig(t *testing.T) {
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid_format",
|
||||
cfg: map[string]any{
|
||||
"name": "test",
|
||||
"provider": "webhook",
|
||||
"url": "https://example.com",
|
||||
"format": "invalid",
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "missing_url",
|
||||
cfg: map[string]any{
|
||||
@@ -150,7 +181,7 @@ func TestNotificationConfig(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var cfg NotificationConfig
|
||||
provider := tt.cfg["provider"]
|
||||
err := utils.Deserialize(tt.cfg, &cfg)
|
||||
err := utils.MapUnmarshalValidate(tt.cfg, &cfg)
|
||||
if tt.wantErr {
|
||||
ExpectHasError(t, err)
|
||||
} else {
|
||||
|
||||
@@ -1,41 +1,55 @@
|
||||
package notif
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
"github.com/yusing/go-proxy/internal/logging"
|
||||
"github.com/yusing/go-proxy/internal/task"
|
||||
F "github.com/yusing/go-proxy/internal/utils/functional"
|
||||
)
|
||||
|
||||
type (
|
||||
Dispatcher struct {
|
||||
task *task.Task
|
||||
logCh chan *LogMessage
|
||||
providers F.Set[Provider]
|
||||
task *task.Task
|
||||
providers F.Set[Provider]
|
||||
logCh chan *LogMessage
|
||||
retryCh chan *RetryMessage
|
||||
retryTicker *time.Ticker
|
||||
}
|
||||
LogField struct {
|
||||
Name string `json:"name"`
|
||||
Value string `json:"value"`
|
||||
}
|
||||
LogFields []LogField
|
||||
LogMessage struct {
|
||||
Level zerolog.Level
|
||||
Title string
|
||||
Extras LogFields
|
||||
Color Color
|
||||
Level zerolog.Level
|
||||
Title string
|
||||
Body LogBody
|
||||
Color Color
|
||||
}
|
||||
RetryMessage struct {
|
||||
Message *LogMessage
|
||||
Trials int
|
||||
Provider Provider
|
||||
}
|
||||
)
|
||||
|
||||
var dispatcher *Dispatcher
|
||||
|
||||
const dispatchErr = "notification dispatch error"
|
||||
const retryInterval = 5 * time.Second
|
||||
|
||||
var maxRetries = map[zerolog.Level]int{
|
||||
zerolog.DebugLevel: 1,
|
||||
zerolog.InfoLevel: 1,
|
||||
zerolog.WarnLevel: 3,
|
||||
zerolog.ErrorLevel: 5,
|
||||
zerolog.FatalLevel: 10,
|
||||
zerolog.PanicLevel: 10,
|
||||
}
|
||||
|
||||
func StartNotifDispatcher(parent task.Parent) *Dispatcher {
|
||||
dispatcher = &Dispatcher{
|
||||
task: parent.Subtask("notification"),
|
||||
logCh: make(chan *LogMessage),
|
||||
providers: F.NewSet[Provider](),
|
||||
task: parent.Subtask("notification"),
|
||||
providers: F.NewSet[Provider](),
|
||||
logCh: make(chan *LogMessage),
|
||||
retryCh: make(chan *RetryMessage, 100),
|
||||
retryTicker: time.NewTicker(retryInterval),
|
||||
}
|
||||
go dispatcher.start()
|
||||
return dispatcher
|
||||
@@ -53,10 +67,6 @@ func Notify(msg *LogMessage) {
|
||||
}
|
||||
}
|
||||
|
||||
func (f *LogFields) Add(name, value string) {
|
||||
*f = append(*f, LogField{Name: name, Value: value})
|
||||
}
|
||||
|
||||
func (disp *Dispatcher) RegisterProvider(cfg *NotificationConfig) {
|
||||
disp.providers.Add(cfg.Provider)
|
||||
}
|
||||
@@ -66,6 +76,7 @@ func (disp *Dispatcher) start() {
|
||||
dispatcher = nil
|
||||
disp.providers.Clear()
|
||||
close(disp.logCh)
|
||||
close(disp.retryCh)
|
||||
disp.task.Finish(nil)
|
||||
}()
|
||||
|
||||
@@ -78,26 +89,59 @@ func (disp *Dispatcher) start() {
|
||||
return
|
||||
}
|
||||
go disp.dispatch(msg)
|
||||
case <-disp.retryTicker.C:
|
||||
if len(disp.retryCh) == 0 {
|
||||
continue
|
||||
}
|
||||
var msgs []*RetryMessage
|
||||
done := false
|
||||
for !done {
|
||||
select {
|
||||
case msg := <-disp.retryCh:
|
||||
msgs = append(msgs, msg)
|
||||
default:
|
||||
done = true
|
||||
}
|
||||
}
|
||||
if err := disp.retry(msgs); err != nil {
|
||||
gperr.LogError("notification retry failed", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (disp *Dispatcher) dispatch(msg *LogMessage) {
|
||||
if true {
|
||||
return
|
||||
}
|
||||
task := disp.task.Subtask("dispatcher")
|
||||
defer task.Finish("notif dispatched")
|
||||
|
||||
errs := gperr.NewBuilderWithConcurrency(dispatchErr)
|
||||
disp.providers.RangeAllParallel(func(p Provider) {
|
||||
if err := notifyProvider(task.Context(), p, msg); err != nil {
|
||||
errs.Add(gperr.PrependSubject(p.GetName(), err))
|
||||
if err := msg.notify(task.Context(), p); err != nil {
|
||||
disp.retryCh <- &RetryMessage{
|
||||
Message: msg,
|
||||
Trials: 0,
|
||||
Provider: p,
|
||||
}
|
||||
}
|
||||
})
|
||||
if errs.HasError() {
|
||||
gperr.LogError(errs.About(), errs.Error())
|
||||
} else {
|
||||
logging.Debug().Str("title", msg.Title).Msgf("dispatched notif")
|
||||
}
|
||||
}
|
||||
|
||||
func (disp *Dispatcher) retry(messages []*RetryMessage) error {
|
||||
task := disp.task.Subtask("retry")
|
||||
defer task.Finish("notif retried")
|
||||
|
||||
errs := gperr.NewBuilder("notification failure")
|
||||
for _, msg := range messages {
|
||||
err := msg.Message.notify(task.Context(), msg.Provider)
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
if msg.Trials > maxRetries[msg.Message.Level] {
|
||||
errs.Addf("notification provider %s failed after %d trials", msg.Provider.GetName(), msg.Trials)
|
||||
errs.Add(err)
|
||||
continue
|
||||
}
|
||||
msg.Trials++
|
||||
disp.retryCh <- msg
|
||||
}
|
||||
return errs.Error()
|
||||
}
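For illustration, the retry path above drains retryCh on each tick with a non-blocking select and re-queues failed deliveries until the per-level trial limit is exceeded. A compact standalone sketch of that drain-and-requeue pattern, with a made-up payload, an always-failing send, and a much shorter interval than the real 5-second ticker:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

type retryMsg struct {
	payload string
	trials  int
}

// drain empties the channel without blocking, the same select/default idea the
// dispatcher uses on every retry tick.
func drain(ch chan retryMsg) []retryMsg {
	var out []retryMsg
	for {
		select {
		case m := <-ch:
			out = append(out, m)
		default:
			return out
		}
	}
}

func main() {
	const maxTrials = 3
	retryCh := make(chan retryMsg, 100)
	retryCh <- retryMsg{payload: "notify gotify", trials: 0}

	ticker := time.NewTicker(50 * time.Millisecond) // stands in for the real retryTicker
	defer ticker.Stop()

	for range ticker.C {
		for _, m := range drain(retryCh) {
			err := errors.New("provider unreachable") // stands in for msg.notify(ctx, provider)
			if err == nil {
				continue // delivered, nothing to re-queue
			}
			if m.trials >= maxTrials {
				fmt.Printf("giving up on %q after %d trials: %v\n", m.payload, m.trials, err)
				return
			}
			m.trials++
			retryCh <- m // try again on the next tick
		}
	}
}
```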
|
||||
|
||||
@@ -1,26 +0,0 @@
|
||||
package notif
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
func formatMarkdown(extras LogFields) string {
|
||||
msg := bytes.NewBufferString("")
|
||||
for _, field := range extras {
|
||||
msg.WriteString("#### ")
|
||||
msg.WriteString(field.Name)
|
||||
msg.WriteRune('\n')
|
||||
msg.WriteString(field.Value)
|
||||
msg.WriteRune('\n')
|
||||
}
|
||||
return msg.String()
|
||||
}
|
||||
|
||||
func formatDiscord(extras LogFields) (string, error) {
|
||||
fields, err := json.Marshal(extras)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(fields), nil
|
||||
}
|
||||
@@ -1,11 +1,9 @@
|
||||
package notif
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/gotify/server/v2/model"
|
||||
"github.com/rs/zerolog"
|
||||
@@ -24,8 +22,8 @@ func (client *GotifyClient) GetURL() string {
|
||||
return client.URL + gotifyMsgEndpoint
|
||||
}
|
||||
|
||||
// MakeBody implements Provider.
|
||||
func (client *GotifyClient) MakeBody(logMsg *LogMessage) (io.Reader, error) {
|
||||
// MarshalMessage implements Provider.
|
||||
func (client *GotifyClient) MarshalMessage(logMsg *LogMessage) ([]byte, error) {
|
||||
var priority int
|
||||
|
||||
switch logMsg.Level {
|
||||
@@ -37,15 +35,23 @@ func (client *GotifyClient) MakeBody(logMsg *LogMessage) (io.Reader, error) {
|
||||
priority = 8
|
||||
}
|
||||
|
||||
body, err := logMsg.Body.Format(client.Format)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
msg := &GotifyMessage{
|
||||
Title: logMsg.Title,
|
||||
Message: formatMarkdown(logMsg.Extras),
|
||||
Message: string(body),
|
||||
Priority: &priority,
|
||||
Extras: map[string]interface{}{
|
||||
}
|
||||
|
||||
if client.Format == LogFormatMarkdown {
|
||||
msg.Extras = map[string]interface{}{
|
||||
"client::display": map[string]string{
|
||||
"contentType": "text/markdown",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
data, err := json.Marshal(msg)
|
||||
@@ -53,15 +59,15 @@ func (client *GotifyClient) MakeBody(logMsg *LogMessage) (io.Reader, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bytes.NewReader(data), nil
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// makeRespError implements Provider.
|
||||
func (client *GotifyClient) makeRespError(resp *http.Response) error {
|
||||
// fmtError implements Provider.
|
||||
func (client *GotifyClient) fmtError(respBody io.Reader) error {
|
||||
var errm model.Error
|
||||
err := json.NewDecoder(resp.Body).Decode(&errm)
|
||||
err := json.NewDecoder(respBody).Decode(&errm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s status %d, but failed to decode err response: %w", client.Name, resp.StatusCode, err)
|
||||
return fmt.Errorf("failed to decode err response: %w", err)
|
||||
}
|
||||
return fmt.Errorf("%s status %d %s: %s", client.Name, resp.StatusCode, errm.Error, errm.ErrorDescription)
|
||||
return fmt.Errorf("%s: %s", errm.Error, errm.ErrorDescription)
|
||||
}
|
||||
|
||||
@@ -1,10 +1,7 @@
|
||||
package notif
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
@@ -13,18 +10,14 @@ import (
|
||||
// See https://docs.ntfy.sh/publish
|
||||
type Ntfy struct {
|
||||
ProviderBase
|
||||
Topic string `json:"topic"`
|
||||
Style NtfyStyle `json:"style"`
|
||||
Topic string `json:"topic"`
|
||||
}
|
||||
|
||||
type NtfyStyle string
|
||||
|
||||
const (
|
||||
NtfyStyleMarkdown NtfyStyle = "markdown"
|
||||
NtfyStylePlain NtfyStyle = "plain"
|
||||
)
|
||||
|
||||
// Validate implements the utils.CustomValidator interface.
|
||||
func (n *Ntfy) Validate() gperr.Error {
|
||||
if err := n.ProviderBase.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if n.URL == "" {
|
||||
return gperr.New("url is required")
|
||||
}
|
||||
@@ -34,16 +27,10 @@ func (n *Ntfy) Validate() gperr.Error {
|
||||
if n.Topic[0] == '/' {
|
||||
return gperr.New("topic should not start with a slash")
|
||||
}
|
||||
switch n.Style {
|
||||
case "":
|
||||
n.Style = NtfyStyleMarkdown
|
||||
case NtfyStyleMarkdown, NtfyStylePlain:
|
||||
default:
|
||||
return gperr.Errorf("invalid style, expecting %q or %q, got %q", NtfyStyleMarkdown, NtfyStylePlain, n.Style)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetURL implements Provider.
|
||||
func (n *Ntfy) GetURL() string {
|
||||
if n.URL[len(n.URL)-1] == '/' {
|
||||
return n.URL + n.Topic
|
||||
@@ -51,23 +38,22 @@ func (n *Ntfy) GetURL() string {
|
||||
return n.URL + "/" + n.Topic
|
||||
}
|
||||
|
||||
// GetMIMEType implements Provider.
|
||||
func (n *Ntfy) GetMIMEType() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetToken implements Provider.
|
||||
func (n *Ntfy) GetToken() string {
|
||||
return n.Token
|
||||
}
|
||||
|
||||
func (n *Ntfy) MakeBody(logMsg *LogMessage) (io.Reader, error) {
|
||||
switch n.Style {
|
||||
case NtfyStyleMarkdown:
|
||||
return strings.NewReader(formatMarkdown(logMsg.Extras)), nil
|
||||
default:
|
||||
return &bytes.Buffer{}, nil
|
||||
}
|
||||
// MarshalMessage implements Provider.
|
||||
func (n *Ntfy) MarshalMessage(logMsg *LogMessage) ([]byte, error) {
|
||||
return logMsg.Body.Format(n.Format)
|
||||
}
|
||||
|
||||
// SetHeaders implements Provider.
|
||||
func (n *Ntfy) SetHeaders(logMsg *LogMessage, headers http.Header) {
|
||||
headers.Set("Title", logMsg.Title)
|
||||
|
||||
@@ -83,7 +69,7 @@ func (n *Ntfy) SetHeaders(logMsg *LogMessage, headers http.Header) {
|
||||
headers.Set("Priority", "min")
|
||||
}
|
||||
|
||||
if n.Style == NtfyStyleMarkdown {
|
||||
if n.Format == LogFormatMarkdown {
|
||||
headers.Set("Markdown", "yes")
|
||||
}
|
||||
}
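For illustration, with Style gone the Ntfy provider takes its format from ProviderBase and only sets the Markdown header when that format is markdown, while the title and priority still travel as ntfy headers. A rough standalone sketch of such a publish request against a hypothetical topic, using ntfy's documented Title/Priority/Markdown headers:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// The message body is the formatted LogBody; metadata goes into headers,
	// which is how the Ntfy provider publishes. The topic below is made up.
	body := "#### Uptime\nservice **godoxy** is back online\n"

	req, err := http.NewRequest(http.MethodPost, "https://ntfy.sh/godoxy-demo", strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Title", "GoDoxy notification")
	req.Header.Set("Priority", "default")
	req.Header.Set("Markdown", "yes") // only when the provider format is markdown
	// req.Header.Set("Authorization", "Bearer <token>") // when the topic is protected

	resp, err := http.DefaultClient.Do(req) // note: performs a live HTTP request
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("ntfy responded with", resp.Status)
}
```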
|
||||
|
||||
@@ -1,13 +1,15 @@
|
||||
package notif
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/yusing/go-proxy/internal/gperr"
|
||||
gphttp "github.com/yusing/go-proxy/internal/net/gphttp"
|
||||
"github.com/yusing/go-proxy/internal/logging"
|
||||
"github.com/yusing/go-proxy/internal/utils"
|
||||
)
|
||||
|
||||
@@ -21,10 +23,10 @@ type (
|
||||
GetMethod() string
|
||||
GetMIMEType() string
|
||||
|
||||
MakeBody(logMsg *LogMessage) (io.Reader, error)
|
||||
MarshalMessage(logMsg *LogMessage) ([]byte, error)
|
||||
SetHeaders(logMsg *LogMessage, headers http.Header)
|
||||
|
||||
makeRespError(resp *http.Response) error
|
||||
fmtError(respBody io.Reader) error
|
||||
}
|
||||
ProviderCreateFunc func(map[string]any) (Provider, gperr.Error)
|
||||
ProviderConfig map[string]any
|
||||
@@ -36,10 +38,10 @@ const (
|
||||
ProviderWebhook = "webhook"
|
||||
)
|
||||
|
||||
func notifyProvider(ctx context.Context, provider Provider, msg *LogMessage) error {
|
||||
body, err := provider.MakeBody(msg)
|
||||
func (msg *LogMessage) notify(ctx context.Context, provider Provider) error {
|
||||
body, err := provider.MarshalMessage(msg)
|
||||
if err != nil {
|
||||
return gperr.PrependSubject(provider.GetName(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
|
||||
@@ -49,10 +51,10 @@ func notifyProvider(ctx context.Context, provider Provider, msg *LogMessage) err
|
||||
ctx,
|
||||
http.MethodPost,
|
||||
provider.GetURL(),
|
||||
body,
|
||||
bytes.NewReader(body),
|
||||
)
|
||||
if err != nil {
|
||||
return gperr.PrependSubject(provider.GetName(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", provider.GetMIMEType())
|
||||
@@ -63,13 +65,22 @@ func notifyProvider(ctx context.Context, provider Provider, msg *LogMessage) err
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return gperr.PrependSubject(provider.GetName(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
if !gphttp.IsSuccess(resp.StatusCode) {
|
||||
return provider.makeRespError(resp)
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK, http.StatusCreated, http.StatusAccepted:
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
logging.Debug().
|
||||
Str("provider", provider.GetName()).
|
||||
Str("url", provider.GetURL()).
|
||||
Str("status", resp.Status).
|
||||
RawJSON("resp_body", body).
|
||||
Msg("notification sent")
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("http status %d: %w", resp.StatusCode, provider.fmtError(resp.Body))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package notif
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
@@ -88,24 +87,21 @@ func (webhook *Webhook) GetMIMEType() string {
|
||||
return webhook.MIMEType
|
||||
}
|
||||
|
||||
// makeRespError implements Provider.
|
||||
func (webhook *Webhook) makeRespError(resp *http.Response) error {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s status %d, failed to read body: %w", webhook.Name, resp.StatusCode, err)
|
||||
// fmtError implements Provider.
|
||||
func (webhook *Webhook) fmtError(respBody io.Reader) error {
|
||||
body, err := io.ReadAll(respBody)
|
||||
if err != nil || len(body) == 0 {
|
||||
return ErrUnknownError
|
||||
}
|
||||
if len(body) > 0 {
|
||||
return fmt.Errorf("%s status %d: %s", webhook.Name, resp.StatusCode, body)
|
||||
}
|
||||
return fmt.Errorf("%s status %d", webhook.Name, resp.StatusCode)
|
||||
return rawError(body)
|
||||
}
|
||||
|
||||
func (webhook *Webhook) MakeBody(logMsg *LogMessage) (io.Reader, error) {
|
||||
func (webhook *Webhook) MarshalMessage(logMsg *LogMessage) ([]byte, error) {
|
||||
title, err := json.Marshal(logMsg.Title)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fields, err := formatDiscord(logMsg.Extras)
|
||||
fields, err := logMsg.Body.Format(LogFormatRawJSON)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -115,14 +111,14 @@ func (webhook *Webhook) MakeBody(logMsg *LogMessage) (io.Reader, error) {
|
||||
} else {
|
||||
color = logMsg.Color.DecString()
|
||||
}
|
||||
message, err := json.Marshal(formatMarkdown(logMsg.Extras))
|
||||
message, err := logMsg.Body.Format(LogFormatMarkdown)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
plTempl := strings.NewReplacer(
|
||||
"$title", string(title),
|
||||
"$message", string(message),
|
||||
"$fields", fields,
|
||||
"$fields", string(fields),
|
||||
"$color", color,
|
||||
)
|
||||
var pl string
|
||||
@@ -132,5 +128,5 @@ func (webhook *Webhook) MakeBody(logMsg *LogMessage) (io.Reader, error) {
|
||||
pl = webhook.Payload
|
||||
}
|
||||
pl = plTempl.Replace(pl)
|
||||
return strings.NewReader(pl), nil
|
||||
return []byte(pl), nil
|
||||
}
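For illustration, MarshalMessage still builds the webhook payload by substituting $title, $message, $fields and $color into a template via strings.NewReplacer; only the sources of $message and $fields changed, since they now come from LogBody.Format. A rough self-contained sketch of that substitution with a made-up Discord-like template; the project's embedded template and its quoting rules differ:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// discordLikeTemplate is a stand-in for the embedded webhook template; the
// placeholder names match the ones Webhook.MarshalMessage replaces.
const discordLikeTemplate = `{
  "embeds": [{ "title": $title, "description": $message, "fields": $fields, "color": $color }]
}`

func main() {
	title, _ := json.Marshal("GoDoxy notification")            // JSON-quoted string
	message, _ := json.Marshal("#### Status\ncontainer down")  // markdown body, JSON-quoted here for validity
	fields, _ := json.Marshal([]map[string]string{
		{"name": "Container", "value": "nginx"},
	})

	payload := strings.NewReplacer(
		"$title", string(title),
		"$message", string(message),
		"$fields", string(fields),
		"$color", "15158332",
	).Replace(discordLikeTemplate)

	fmt.Println(payload)
}
```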
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.