Compare commits

..

63 Commits

Author SHA1 Message Date
yusing
8b985654ef fix(proxmox): improve journalctl with log tailing fallback for non-systemd systems
- Format tail command with fallback retry logic
- Add /var/log/messages fallback when no services specified

Improves log viewing reliability on systems without systemd support.
2026-01-28 22:41:11 +08:00
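A minimal Go sketch of the fallback described in the commit above; the helper name, flag choices, and the omission of shell quoting are assumptions for illustration:

```go
package proxmoxapi

import "fmt"

// formatLogCommand prefers journalctl and, when no services are specified,
// falls back to tailing /var/log/messages on systems without systemd.
func formatLogCommand(services []string, limit int) string {
	cmd := fmt.Sprintf("journalctl -n %d -f", limit)
	for _, svc := range services {
		cmd += " -u " + svc // shell quoting elided in this sketch
	}
	if len(services) == 0 {
		// no services specified: fall back when journalctl is unavailable or fails
		cmd += fmt.Sprintf(" || tail -n %d -f /var/log/messages", limit)
	}
	return cmd
}
```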
Yuzerion
1543ffa19f Create CODE_OF_CONDUCT.md 2026-01-28 16:24:06 +08:00
yusing
730e3a2ab4 fix(docker): improve error handling for missing Docker agent
Replaced panic with an error return in the NewClient function.
2026-01-27 00:37:55 +08:00
yusing
ba4af8fe77 refactor(proxmox): add validation for node name and VMID in provider initialization 2026-01-27 00:02:25 +08:00
yusing
b788e6e338 refactor(logging): add non-blocking writer for high-volume logging
Replace synchronous log writing with zerolog's diode-based non-blocking
writer to prevent logging from blocking the main application during
log bursts. The diode writer buffers up to 1024 messages and logs a
warning when messages are dropped.

- Extract multi-writer logic into separate `multiWriter` function
- Wrap with `diode.NewWriter` for async buffering
- Update both `NewLogger` and `NewLoggerWithFixedLevel` to use diode
2026-01-27 00:01:48 +08:00
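A minimal sketch of the diode-based non-blocking writer described above, using zerolog's `diode` package; the buffer size and poll interval here are illustrative, not necessarily the repository's exact values:

```go
package logging

import (
	"fmt"
	"io"
	"os"
	"time"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/diode"
)

// newNonBlockingLogger wraps the output writer with diode so log bursts
// never block the main application; dropped messages are reported.
func newNonBlockingLogger(out io.Writer) zerolog.Logger {
	w := diode.NewWriter(out, 1024, 10*time.Millisecond, func(missed int) {
		fmt.Fprintf(os.Stderr, "logger dropped %d messages\n", missed)
	})
	return zerolog.New(w).With().Timestamp().Logger()
}
```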
yusing
ef3aa146b5 refactor(config): simplify route provider loading with improved error handling
Streamlined the `loadRouteProviders()` function by:
- Replacing channel-based concurrency with a simpler sequential registration pattern after agent initialization
- Using `gperr.NewGroup` and `gperr.NewBuilder` for more idiomatic error handling
- Adding mutex protection for concurrent result building
- Removing the `storeProvider` helper method
2026-01-26 23:51:18 +08:00
yusing
e222e693d7 chore(config): make initialization timeout configurable via environment variable
Replaced hardcoded 10-second initialization timeout with a configurable `INIT_TIMEOUT` environment variable.
The new default is 1 minute, allowing operators to adjust startup behavior based on their infrastructure requirements.
2026-01-26 21:09:47 +08:00
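A sketch of how a configurable `INIT_TIMEOUT` with a 1-minute default might be read; the helper name is hypothetical:

```go
package common

import (
	"os"
	"time"
)

// InitTimeout replaces the former hardcoded 10-second startup timeout.
var InitTimeout = envDuration("INIT_TIMEOUT", time.Minute)

// envDuration parses a duration from the environment, falling back on error.
func envDuration(key string, fallback time.Duration) time.Duration {
	if v := os.Getenv(key); v != "" {
		if d, err := time.ParseDuration(v); err == nil {
			return d
		}
	}
	return fallback
}
```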
yusing
277a485afe feat(proxmox): add session refresh loop to maintain Proxmox API session
Introduced a new session refresh mechanism in the Proxmox configuration to ensure the API session remains active. This includes:
- Added `SessionRefreshInterval` constant for configurable session refresh timing.
- Implemented `refreshSessionLoop` method to periodically refresh the session and handle errors with exponential backoff.

This enhancement improves the reliability of interactions with the Proxmox API by preventing session expiry.
2026-01-26 14:17:41 +08:00
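A sketch of the session refresh loop described above; `SessionRefreshInterval`, the `Config` shape, and the backoff values are assumptions:

```go
package proxmox

import (
	"context"
	"time"
)

const SessionRefreshInterval = 5 * time.Minute // assumed value

type Config struct {
	login func(ctx context.Context) error // stand-in for the real re-auth call
}

// refreshSessionLoop periodically re-authenticates so the API session never
// expires, backing off exponentially when a refresh attempt fails.
func (cfg *Config) refreshSessionLoop(ctx context.Context) {
	backoff := time.Second
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(SessionRefreshInterval):
			if err := cfg.login(ctx); err != nil {
				time.Sleep(backoff) // back off before the next attempt
				if backoff < SessionRefreshInterval {
					backoff *= 2
				}
				continue
			}
			backoff = time.Second // reset after a successful refresh
		}
	}
}
```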
yusing
211c466fc3 feat(proxmox): add tail endpoint and enhance journalctl with multi-service support
Add new `/proxmox/tail` API endpoint for streaming file contents from Proxmox
nodes and LXC containers via WebSocket. Extend journalctl endpoint to support
filtering by multiple services simultaneously.

Changes:
- Add `GET /proxmox/tail` endpoint supporting node-level and LXC container file tailing
- Change `service` parameter from string to array in journalctl endpoints
- Add input validation (`checkValidInput`) to prevent command injection
- Refactor command formatting with proper shell quoting

Security: All command inputs are validated for dangerous characters before execution.
2026-01-25 22:21:35 +08:00
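A sketch of the kind of input validation (`checkValidInput`) the commit describes; the exact rejected character set is an assumption:

```go
package proxmoxapi

import (
	"fmt"
	"strings"
)

// checkValidInput rejects values containing shell metacharacters before they
// are interpolated into a command executed on a Proxmox node or container.
func checkValidInput(name, value string) error {
	if strings.ContainsAny(value, "`$&|;<>(){}[]\"'\\\n") {
		return fmt.Errorf("invalid character in %s: %q", name, value)
	}
	return nil
}
```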
yusing
f96884c62b feat(proxmox): better node-level routes auto-discovery with pointer VMID
- Add BaseURL field to Client for node-level route configuration
- Change VMID from int to *int to support three states:
  - nil: auto-discover node or VM from hostname/IP/alias
  - 0: node-level route (direct to Proxmox node API)
  - >0: LXC/QEMU resource route with container control
- Change Service string to Services []string for multi-service support
- Implement proper node-level route handling: HTTPS scheme,
  hostname from node BaseURL, default port 8006
- Move initial UpdateResources call to Init before starting loop
- Move proxmox auto-discovery earlier in route validation

BREAKING: NodeConfig.VMID is now a pointer type; NodeConfig.Service
renamed to Services (backward compatible via alias)
2026-01-25 22:19:26 +08:00
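A sketch of the three-state pointer VMID described in the commit; field tags and the helper are illustrative:

```go
package proxmox

// NodeConfig shows why VMID is a pointer: nil, 0, and >0 mean different things.
type NodeConfig struct {
	Node     string   `json:"node"`
	VMID     *int     `json:"vmid,omitempty"`
	Services []string `json:"services,omitempty"`
}

func (c *NodeConfig) kind() string {
	switch {
	case c.VMID == nil:
		return "auto-discover" // resolve node or VM from hostname/IP/alias
	case *c.VMID == 0:
		return "node" // route directly to the Proxmox node API (HTTPS, port 8006)
	default:
		return "resource" // LXC/QEMU route with container control
	}
}
```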
yusing
8b4f10f15a feat(api): support query parameters for proxmox journalctl endpoint
Refactored the journalctl API to accept `node`, `vmid`, and `service` parameters as query strings in addition to path parameters. Added a new route `/proxmox/journalctl` that accepts all parameters via query string while maintaining backward compatibility with existing path-parameter routes.

- Changed `JournalctlRequest` struct binding from URI-only to query+URI
- Simplified Swagger documentation by consolidating multiple route definitions
- Existing path-parameter routes remain functional for backward compatibility
2026-01-25 19:55:11 +08:00
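A sketch of combined query+URI binding with gin, as the commit describes; tag names and the handler shape are assumptions:

```go
package proxmoxapi

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// JournalctlRequest binds the same fields from either the URI or the query
// string by tagging them with both `uri` and `form`.
type JournalctlRequest struct {
	Node    string   `uri:"node" form:"node"`
	VMID    *int     `uri:"vmid" form:"vmid"`
	Service []string `form:"service"`
}

func journalctl(c *gin.Context) {
	var req JournalctlRequest
	_ = c.ShouldBindUri(&req)   // existing path-parameter routes keep working
	_ = c.ShouldBindQuery(&req) // query parameters may supply the same values
	if req.Node == "" {
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "node is required"})
		return
	}
	// ... stream journalctl output for req.Node / req.VMID / req.Service
}
```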
yusing
6c9b1fe45c refactor(swagger): rename DockerConfig and ProxmoxNodeConfig to IdlewatcherDockerConfig and IdlewatcherProxmoxNodeConfig 2026-01-25 19:28:01 +08:00
yusing
73cba8b508 refactor: improve error handling, validation and proper cleanup 2026-01-25 19:18:14 +08:00
yusing
0633cacb2a fix(proxmox): concurrent map write in UpdateResources 2026-01-25 18:01:22 +08:00
yusing
bf5b231e52 chore(docker): add go-proxmox module dependencies to Dockerfile 2026-01-25 17:23:30 +08:00
yusing
9cda7febb4 chore(deps): upgrade dependencies 2026-01-25 17:21:15 +08:00
yusing
b3d4255868 refactor(types): decouple Proxmox config from proxmox package
Decouple the types package from the internal/proxmox package by defining
a standalone ProxmoxConfig struct. This reduces circular dependencies
and allows the types package to define its own configuration structures
without importing the proxmox package.

The route validation logic now converts between types.ProxmoxConfig and
proxmox.NodeConfig where needed for internal operations.
2026-01-25 17:19:25 +08:00
yusing
9c2051840f chore(docs): enhance README with Proxmox integration details
Added sections for Proxmox integration, including automatic route binding, WebUI management, and API endpoints. Updated existing content to reflect LXC lifecycle control and real-time logging capabilities for both Docker and Proxmox environments.
2026-01-25 17:13:26 +08:00
yusing
1a3810db3a fix(scripts/update-wiki): add "internal/go-proxmox/" to skipSubmodules list 2026-01-25 13:51:10 +08:00
yusing
2335ef0fb1 feat(proxmox): add node-level stats endpoint with streaming support
Add new `/proxmox/stats/{node}` API endpoint for retrieving Proxmox node
statistics in JSON format. The endpoint returns kernel version, CPU
usage/model, memory usage, rootfs usage, uptime, and load averages.

The existing `/proxmox/stats/{node}/{vmid}` endpoint (`VMStats`) has been corrected to return `text/plain` instead of `application/json`.

Both endpoints support WebSocket streaming for real-time stats updates
with a 1-second poll interval.
2026-01-25 13:50:37 +08:00
yusing
fc73803bc1 refactor(proxmox): move NodeCommand to node_command.go 2026-01-25 13:14:56 +08:00
yusing
59953fed30 chore(docs): update package docs for proxmox and route/routes 2026-01-25 13:06:47 +08:00
yusing
57a2ca26db feat(proxmox): support node-level routes and journalctl access
This change enables Proxmox node-level operations without requiring a specific
LXC container VMID.

**Features added:**
- New `/proxmox/journalctl/{node}` API endpoint for streaming node journalctl
- Route configuration support for Proxmox nodes (VMID = 0)
- `ReverseLookupNode` function for node discovery by hostname/IP/alias
- `NodeJournalctl` method for executing journalctl on nodes

**Behavior changes:**
- VMID parameter in journalctl endpoints is now optional
- Routes targeting nodes (without specific containers) are now valid

**Bug fixes:**
- Fixed error message variable reference in route validation
2026-01-25 13:04:09 +08:00
yusing
09ddb925a3 refactor(proxmox): extract websocket command execution into reusable NodeCommand method
The LXCCommand method contained duplicate websocket handling logic for connecting to Proxmox's VNC terminal proxy. This refactoring extracts the common websocket connection, streaming, and cleanup logic into a new NodeCommand method on the Node type, allowing LXCCommand to simply format the pct command and delegate.

The go-proxmox submodule was also updated to access the NewNode constructor, which provides a cleaner API for creating node instances with the HTTP client.

- Moves ~100 lines of websocket handling from lxc_command.go to node.go
- Adds reusable NodeCommand method for executing commands via VNC websocket
- LXCCommand now simply calls NodeCommand with formatted command
- Maintains identical behavior and output streaming semantics
2026-01-25 12:43:26 +08:00
yusing
55e09c02b1 fix(proxmox): prevent goroutine leaks by closing idle HTTP connections
Added a function to close idle HTTP connections in the LXCCommand method. This addresses potential goroutine leaks caused by the go-proxmox library's TermWebSocket not closing underlying HTTP/2 connections. The websocket closer is now wrapped to ensure proper cleanup of transport connections when the command execution is finished.
2026-01-25 12:28:51 +08:00
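A sketch of the cleanup described above: wrap the websocket closer so the HTTP transport's idle connections are also closed. Names are assumptions:

```go
package proxmox

import (
	"io"
	"net/http"
)

type closerFunc func() error

func (f closerFunc) Close() error { return f() }

// wrapCloser closes the websocket as before, then closes idle connections on
// the HTTP transport so no HTTP/2 goroutines are left behind.
func wrapCloser(ws io.Closer, transport *http.Transport) io.Closer {
	return closerFunc(func() error {
		err := ws.Close()
		transport.CloseIdleConnections()
		return err
	})
}
```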
yusing
9adeb3e3dd fix(swagger): remove /api/v1 prefix from Proxmox endpoints
Streamline Proxmox API route paths by removing incorrect /api/v1 prefix.

Changed endpoints:
- /api/v1/proxmox/journalctl/{node}/{vmid} → /proxmox/journalctl/{node}/{vmid}
- /api/v1/proxmox/journalctl/{node}/{vmid}/{service} → /proxmox/journalctl/{node}/{vmid}/{service}
- /api/v1/proxmox/lxc/:node/:vmid/restart → /proxmox/lxc/:node/:vmid/restart
- /api/v1/proxmox/lxc/:node/:vmid/start → /proxmox/lxc/:node/:vmid/start
- /api/v1/proxmox/lxc/:node/:vmid/stop → /proxmox/lxc/:node/:vmid/stop
- /api/v1/proxmox/stats/{node}/{vmid} → /proxmox/stats/{node}/{vmid}

Updated:
- Swagger annotations in 5 Go source files
- Generated swagger.json and swagger.yaml documentation
2026-01-25 12:22:39 +08:00
yusing
0f087edfd6 fix: add startup timeout guard to prevent indefinite hangs
Add a 10-second timeout mechanism during application initialization. If initialization
fails to complete within the timeout window, the application logs a fatal error and exits.
This prevents the proxy from becoming unresponsive during startup due to blocking operations
in parallel initialization tasks (DNS providers, icon cache, system info poller, middleware
loading, Docker client, API server, debug server, config watcher).

The timeout guard uses a background goroutine that listens for either a completion signal
(via closing the done channel) or the timeout expiration, providing a safety net for
long-running or blocked initialization scenarios.
2026-01-25 12:20:47 +08:00
yusing
c29798a48b feat(proxmox): add LXC container control endpoints
Add start, stop, and restart endpoints for LXC containers via the Proxmox API:
- POST /api/v1/proxmox/lxc/:node/:vmid/start
- POST /api/v1/proxmox/lxc/:node/:vmid/stop
- POST /api/v1/proxmox/lxc/:node/:vmid/restart
2026-01-25 12:13:08 +08:00
yusing
c202e26559 feat(proxmox): add journalctl endpoint without service; add limit parameter
Added new Proxmox journalctl endpoint `/journalctl/:node/:vmid` for viewing all
journalctl output without requiring a service name. Made the service parameter
optional across both endpoints.

Introduced configurable `limit` query parameter (1-1000, default 100) to both
proxmox journalctl and docker logs APIs, replacing hardcoded 100-line tail.

Added container status check in LXCCommand to prevent command execution on
stopped containers, returning a clear status message instead.

Refactored route validation to use pre-fetched IPs and improved References()
method for proxmox routes with better alias handling.
2026-01-25 12:03:50 +08:00
yusing
568d24d746 chore(docs): update proxmox package docs 2026-01-25 02:26:26 +08:00
yusing
cdd1353102 feat(proxmox): enhance VM resource tracking with auto-discovery and cached IPs
- Add VMResource wrapper type with cached IP addresses for efficient lookups
- Implement concurrent IP fetching during resource updates (limited concurrency)
- Add ReverseLookupResource for discovering VMs by IP, hostname, or alias
- Prioritize interfaces API over config for IP retrieval (offline container fallback)
- Enable routes to auto-discover Proxmox resources when no explicit config provided
- Fix configuration type from value to pointer slice for correct proxmox client retrieval
- Ensure Proxmox providers are initialized before route validation
2026-01-25 02:25:07 +08:00
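A sketch of bounded-concurrency IP fetching during resource updates, assuming `golang.org/x/sync/errgroup`; the types and the limit are illustrative:

```go
package proxmox

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// VMResource caches the IP addresses discovered for a VM or LXC container.
type VMResource struct {
	VMID int
	IPs  []string
}

// updateCachedIPs fetches IPs for all resources with a cap on concurrent
// Proxmox API calls, as described in the commit above.
func updateCachedIPs(ctx context.Context, resources []*VMResource, fetchIPs func(context.Context, int) ([]string, error)) error {
	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(5) // limited concurrency
	for _, res := range resources {
		g.Go(func() error {
			ips, err := fetchIPs(ctx, res.VMID)
			if err != nil {
				return err
			}
			res.IPs = ips
			return nil
		})
	}
	return g.Wait()
}
```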
yusing
b4646b665f feat(proxmox): add LXC container stats endpoint with streaming support
Implement a new API endpoint to retrieve real-time statistics for Proxmox
LXC containers, similar to `docker stats` functionality.

Changes:
- Add `GET /api/v1/proxmox/stats/:node/:vmid` endpoint with HTTP and WebSocket support
- Implement resource polling loop to cache VM metadata every 3 seconds
- Create `LXCStats()` method with streaming (websocket) and single-shot modes
- Format output as: STATUS|CPU%|MEM USAGE/LIMIT|MEM%|NET I/O|BLOCK I/O
- Add `GetResource()` method for efficient VM resource lookup by kind and ID
- Fix task creation bug using correct client reference

Example response:
  running|31.1%|9.6GiB/20GiB|48.87%|4.7GiB/3.3GiB|25GiB/36GiB
2026-01-25 01:37:13 +08:00
yusing
c191676565 fix(proxmox): enhance LXCCommand skip logic
Updated the LXCCommand function to skip output until `\x1b[H` and `\x1b[?2004`, ensuring no garbage output.
2026-01-24 23:38:29 +08:00
yusing
9a96f3cc53 refactor(proxmox): consolidate NodeConfig and add service field
Centralize Proxmox node configuration by moving `ProxmoxConfig` from `internal/types/idlewatcher.go` to a new `NodeConfig` struct in `internal/proxmox/node.go`.

- Added `proxmox` field to routes, allowing `proxy.app.proxmox` labels and corresponding route file config
- Added `service` optional field to NodeConfig for service identification
- Integrated Proxmox config directly into Route struct with proper validation
- Propagate Proxmox settings to Idlewatcher during route validation
- Updated swagger documentation to reflect schema changes
2026-01-24 21:33:47 +08:00
yusing
95a72930b5 feat(proxmox): add journalctl streaming API endpoint for LXC containers
Add new /api/v1/proxmox/journalctl/:node/:vmid/:service endpoint that
streams real-time journalctl output from Proxmox LXC containers via
WebSocket connection. This enables live monitoring of container services
from the GoDoxy WebUI.

Implementation includes:
- New proxmox API handler with path parameter validation
- WebSocket upgrade for streaming output
- LXCCommand helper for executing commands over Proxmox VNC websocket
- LXCJournalctl wrapper for convenient journalctl -u service -f invocation
- Updated API documentation with proxmox integration
2026-01-24 21:26:07 +08:00
yusing
71e5a507ba refactor(proxmox): support for PAM authentication
Requires an authenticated Proxmox session with username/password configured.

- Added support for username and password authentication alongside existing token-based authentication.
- Updated validation rules to require either token or username/password for authentication.
- Modified the Init function to handle session creation based on the selected authentication method.
- Increased timeout duration for context in the Init function.
2026-01-24 21:25:52 +08:00
yusing
8f7ef5a015 feat(proxmox): add go-proxmox submodule for customized Proxmox integration
Add the go-proxmox library as a Git submodule to enable Proxmox
integration for container/VM management.

Submodule: https://github.com/yusing/go-proxmox
2026-01-24 21:25:45 +08:00
yusing
a824e4c8c2 refactor(metrics): remove unused fields from RouteAggregate and update related documentation
- Removed `display_name`, `is_docker`, and `is_excluded` fields from the `RouteAggregate` struct and corresponding Swagger documentation.
- Updated references in the README and code to reflect the removal of these fields, ensuring consistency across the codebase.
2026-01-24 15:55:46 +08:00
yusing
62fb690417 refactor(query): remove SearchRoute function and related documentation 2026-01-24 01:42:03 +08:00
yusing
9f036a61f8 refactor(routes): replace route retrieval with GetIncludeExcluded
- Updated route retrieval in the API and idle watcher to use GetIncludeExcluded, allowing for the inclusion of excluded routes.
- Simplified the route status aggregation logic by directly using GetIncludeExcluded for display name resolution.
- Removed redundant code that separately handled excluded routes, streamlining the route management process.
2026-01-24 01:40:24 +08:00
yusing
cdd60d99cd feat(api): add endpoint to retrieve container stats
- Introduced a new GET endpoint `/docker/stats/:id` to fetch statistics for a specified container by its ID or route alias.
- Implemented the `Stats` function in the `dockerapi` package to handle the request and return container stats in both JSON and WebSocket formats.
- Added error handling for invalid requests and container not found scenarios.
2026-01-24 00:12:34 +08:00
yusing
e718cd4c4a feat(ci): separate cache for different tags; utilize gha cache 2026-01-22 16:24:11 +08:00
yusing
8ce821adb9 feat(ci): pass BRANCH to Makefile for correct build tag 2026-01-22 16:24:08 +08:00
yusing
92598e05a2 feat(ci): enhance Docker image workflow to compute version based on Git tags and branches
- Added a step to checkout the repository for accurate tag resolution.
- Implemented logic to determine the build version based on the Git reference type, supporting tags and branch names.
- Updated the Docker build arguments to use the computed version for better versioning in images.
2026-01-22 16:23:54 +08:00
yusing
1c0cd1ff03 fix(Makefile): no longer add sonic tag to compat build 2026-01-22 16:08:09 +08:00
yusing
630629a3fd refactor(watcher): simplify config file watcher initialization using sync.Once 2026-01-22 15:27:01 +08:00
yusing
a1f7375e7b refactor(memlogger): remove HTTP/WebSocket handler and simplify buffer management
Removes the embedded HTTP handler and WebSocket streaming capability from the
in-memory logger, leaving only the core io.Writer interface and event subscription
via Events(). Simplifies buffer management by eliminating position-based tracking
and using slices.Clone() for safe message passing to listeners.

- Removes HandlerFunc(), ServeHTTP(), wsInitial(), wsStreamLog() methods
- Removes logEntryRange struct and connChans map (no longer needed)
- Refactors buffer field from embedded to explicit buf with named mutexes
- Adds buffered channel (64) for event listeners to prevent blocking
- Improves concurrency with double-checked locking in truncation logic
2026-01-22 15:25:50 +08:00
yusing
dba6a4fedf fix(config): update JSON tags in ACL and access log configurations to omit empty values
Modified JSON tags in the Notify struct of ACL config and the ConfigBase and Retention structs in access log config to include 'omitempty'
2026-01-22 00:18:58 +08:00
yusing
6b752059da fix(loadbalancer): change pool type from value to pointer 2026-01-21 23:54:23 +08:00
yusing
262d386a97 fix(logging): update JSON tags in access log configuration to omit zero values
Modified JSON tags in the Filters and Fields structs to include 'omitzero', ensuring that zero values are not included in the serialized output.
2026-01-21 23:53:36 +08:00
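A sketch of the JSON-tag changes from the two `fix(config)`/`fix(logging)` commits above; field and struct names are illustrative, and `omitzero` requires Go 1.24+ or a compatible encoder such as sonic:

```go
package accesslog

// omitempty drops empty values (nil, "", 0, empty slice/map) from output.
type Retention struct {
	Days     int    `json:"days,omitempty"`
	KeepSize string `json:"keep_size,omitempty"`
}

// omitzero drops fields whose value is the zero value for their type,
// which also works for nested structs like LogFilter.
type Filters struct {
	StatusCodes LogFilter `json:"status_codes,omitzero"`
	Method      LogFilter `json:"method,omitzero"`
}

type LogFilter struct {
	Negative bool     `json:"negative,omitempty"`
	Values   []string `json:"values,omitempty"`
}
```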
yusing
8df7eb2fe5 fix(logging): correct variable shadowing in NewLoggerWithFixedLevel causing incorrect log level being assigned 2026-01-21 23:52:37 +08:00
yusing
b0dc0e714d feat(pool): introduce tombstone-based deletion with soft-delete mechanism
Refactored the pool implementation to use a tombstone-based deletion strategy
instead of immediate removal. This allows the pool to correctly log "reload"
instead of "removed" + "added" when an item is quickly deleted
and re-added within a short time window.

Changes:
- Items are now marked as tombstones upon deletion and retained for 1 second
- Added `PurgeExpiredTombs()` method for cleanup of expired tombstones
- Updated `Get`, `Iter`, and `Slice` to skip tombstoned entries
- Updated `Del` and `DelKey` to cleanup tombstones when exceeding threshold
- `AddIfNotExists` can now "reload" recently deleted items within the TTL
- Added tomb counter for tracking active tombstones and triggering purge
2026-01-21 23:44:56 +08:00
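A minimal sketch of the tombstone-based soft delete described above; the names and the 1-second TTL follow the commit text, the rest is assumed:

```go
package pool

import (
	"sync"
	"time"
)

const tombTTL = time.Second // retention window for tombstoned items

type entry[T any] struct {
	value     T
	tombstone time.Time // zero when the entry is live
}

type Pool[T any] struct {
	mu    sync.Mutex
	items map[string]*entry[T]
}

func NewPool[T any]() *Pool[T] {
	return &Pool[T]{items: make(map[string]*entry[T])}
}

// Del marks the item as a tombstone instead of removing it immediately.
func (p *Pool[T]) Del(key string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if e, ok := p.items[key]; ok {
		e.tombstone = time.Now()
	}
}

// AddIfNotExists reports a "reload" when a recently tombstoned item comes back.
func (p *Pool[T]) AddIfNotExists(key string, v T) (reloaded bool) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if e, ok := p.items[key]; ok && !e.tombstone.IsZero() && time.Since(e.tombstone) < tombTTL {
		e.value, e.tombstone = v, time.Time{}
		return true
	}
	p.items[key] = &entry[T]{value: v}
	return false
}
```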
yusing
01b8554c0a fix(acl): correctly marshal matchers instead of plain '{}'
- Introduced a raw field in the Matcher struct to store the original string representation.
- Implemented MarshalText method for Matcher
2026-01-21 22:53:00 +08:00
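A sketch of the fix described above: keep the raw matcher string and return it from `MarshalText` so matchers no longer serialize as `{}`:

```go
package acl

// Matcher keeps the original string it was parsed from so serialization
// can round-trip it instead of emitting an empty object.
type Matcher struct {
	raw string // original representation, e.g. "cidr:10.0.0.0/8" (assumed format)
	// ... parsed fields
}

func (m Matcher) MarshalText() ([]byte, error) {
	return []byte(m.raw), nil
}
```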
yusing
5e32627363 chore(deps): upgrade dependencies 2026-01-21 22:39:51 +08:00
yusing
f5047f4dfa feat(api): enhance API handler to support unauthenticated local access
- Updated NewHandler function to accept a requireAuth parameter for authentication control.
- Introduced a new local API server that allows unauthenticated access when LocalAPIHTTPAddr is set.
- Adjusted server startup logic to handle both authenticated and unauthenticated API routes.
2026-01-21 22:36:22 +08:00
yusing
92f8590edd fix(config): no longer show "http_route: added <route>" on startup 2026-01-21 14:33:55 +08:00
yusing
17f87d6ece fix(websocket): log errors only for non-normal closure codes 2026-01-19 15:03:00 +08:00
yusing
92bf8b196f refactor(accesslog): restructure access logging; enhance console output format
Major refactoring of the access logging infrastructure to improve code organization and add proper console/stdout logging support.

- Renamed `Writer` interface to `File` and consolidated with `SupportRotate`
- Renamed `Log(req, res)` to `LogRequest(req, res)` for clarity
- Added new `ConsoleLogger` with zerolog console writer for formatted stdout output
- Moved type definitions to new `types.go` file
- Changed buffer handling from `[]byte` returns to `*bytes.Buffer` parameters
- Renamed internal files for clarity (`access_logger.go` → `file_access_logger.go`)
- Fixed fileserver access logging timing: moved logging after handler execution with defer
- Corrected response handling in Fileserver
- Removed deprecated field `buffer_size`
- Simplified and removed unnecessary code

All callers have been updated to use the new APIs.
2026-01-19 15:00:37 +08:00
yusing
077e0bc03b perf(accesslog): use buffer pool in BackScanner to reduce allocations
Replace per-scan byte slice allocations with a sized buffer pool,
significantly reducing memory pressure during log file scanning.

- Add Release() method to return buffers to pool (callers must invoke)
- Remove Reset() method - create new scanner instead for simpler lifecycle
- Refactor chunk prepending to reuse pooled buffers instead of append

Benchmark results show allocations dropped from ~26k to 1 per scan
for small chunk sizes, with better throughput.

BREAKING CHANGE: Reset() removed; callers must call Release() and
create a new BackScanner instance instead.
2026-01-19 14:32:42 +08:00
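A sketch of the pooling approach described above, using `sync.Pool` and an explicit `Release()`; the structure is an assumption based on the commit text:

```go
package accesslog

import "sync"

// BackScanner borrows fixed-size chunks from a pool while scanning a log file
// backwards; callers must call Release when done with the scanned lines.
type BackScanner struct {
	chunkSize int
	pool      *sync.Pool
	bufs      [][]byte // buffers currently borrowed by this scanner
}

func NewBackScanner(chunkSize int) *BackScanner {
	return &BackScanner{
		chunkSize: chunkSize,
		pool: &sync.Pool{
			New: func() any { return make([]byte, chunkSize) },
		},
	}
}

func (s *BackScanner) getChunk() []byte {
	buf := s.pool.Get().([]byte)
	s.bufs = append(s.bufs, buf)
	return buf
}

// Release returns every borrowed buffer to the pool.
func (s *BackScanner) Release() {
	for _, buf := range s.bufs {
		s.pool.Put(buf) //nolint:staticcheck // fixed-size slices, fine to pool
	}
	s.bufs = nil
}
```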
yusing
1b55573cc4 fix(config): rename initAccessLogger to initACL 2026-01-18 11:32:49 +08:00
yusing
243a9dc388 fix(acl): ensure acl behind proxy protocol for TCP; fix acl not working for TCP/UDP by replacing ActiveConfig with context value 2026-01-18 11:23:40 +08:00
yusing
cfe4587ec4 fix(acl): deny rules now have higher precedence than allow rules 2026-01-18 10:50:46 +08:00
FrozenFrog
f01cfd8459 feat(middleware): implement CrowdSec WAF bouncer middleware (#196)
* crowdsec middleware
2026-01-18 01:16:35 +08:00
91 changed files with 5565 additions and 1110 deletions

View File

@@ -45,11 +45,37 @@ jobs:
attestations: write
steps:
- name: Checkout (for tag resolution)
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Compute VERSION for build
run: |
if [ "${GITHUB_REF_TYPE}" = "tag" ]; then
version="${GITHUB_REF_NAME}"
cache_variant="release"
elif [ "${GITHUB_REF_NAME}" = "main" ] || [ "${GITHUB_REF_NAME}" = "compat" ]; then
git fetch --tags origin main
version="$(git describe --tags --abbrev=0 origin/main 2>/dev/null || git describe --tags --abbrev=0 main 2>/dev/null || echo v0.0.0)"
cache_variant="${GITHUB_REF_NAME}"
else
version="v$(date -u +'%Y%m%d-%H%M')"
cache_variant="nightly"
fi
echo "VERSION_FOR_BUILD=$version" >> $GITHUB_ENV
echo "CACHE_VARIANT=$cache_variant" >> $GITHUB_ENV
if [ "${GITHUB_REF_TYPE}" = "branch" ]; then
echo "BRANCH_FOR_BUILD=${GITHUB_REF_NAME}" >> $GITHUB_ENV
else
echo "BRANCH_FOR_BUILD=" >> $GITHUB_ENV
fi
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
@@ -80,14 +106,15 @@ jobs:
file: ${{ env.DOCKERFILE }}
outputs: type=image,name=${{ env.REGISTRY }}/${{ inputs.image_name }},push-by-digest=true,name-canonical=true,push=true
cache-from: |
type=registry,ref=${{ env.REGISTRY }}/${{ inputs.image_name }}:buildcache-${{ env.PLATFORM_PAIR }}
# type=gha,scope=${{ github.workflow }}-${{ env.PLATFORM_PAIR }}
type=gha,scope=${{ github.workflow }}-${{ env.CACHE_VARIANT }}-${{ env.PLATFORM_PAIR }}
type=registry,ref=${{ env.REGISTRY }}/${{ inputs.image_name }}:buildcache-${{ env.CACHE_VARIANT }}-${{ env.PLATFORM_PAIR }}
cache-to: |
type=registry,ref=${{ env.REGISTRY }}/${{ inputs.image_name }}:buildcache-${{ env.PLATFORM_PAIR }},mode=max
# type=gha,scope=${{ github.workflow }}-${{ env.PLATFORM_PAIR }},mode=max
type=gha,scope=${{ github.workflow }}-${{ env.CACHE_VARIANT }}-${{ env.PLATFORM_PAIR }},mode=max
type=registry,ref=${{ env.REGISTRY }}/${{ inputs.image_name }}:buildcache-${{ env.CACHE_VARIANT }}-${{ env.PLATFORM_PAIR }},mode=max
build-args: |
VERSION=${{ github.ref_name }}
VERSION=${{ env.VERSION_FOR_BUILD }}
MAKE_ARGS=${{ env.MAKE_ARGS }}
BRANCH=${{ env.BRANCH_FOR_BUILD }}
- name: Generate artifact attestation
uses: actions/attest-build-provenance@v1

6
.gitignore vendored
View File

@@ -40,4 +40,8 @@ tsconfig.tsbuildinfo
!agent.compose.yml
!agent/pkg/**
dev-data/
dev-data/
RELEASE_NOTES.md
CLAUDE.md
.kilocode/**

3
.gitmodules vendored
View File

@@ -7,3 +7,6 @@
[submodule "goutils"]
path = goutils
url = https://github.com/yusing/goutils.git
[submodule "internal/go-proxmox"]
path = internal/go-proxmox
url = https://github.com/yusing/go-proxmox

128
CODE_OF_CONDUCT.md Normal file
View File

@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
yusing@6uo.me.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.

View File

@@ -14,6 +14,7 @@ WORKDIR /src
COPY goutils/go.mod goutils/go.sum ./goutils/
COPY internal/go-oidc/go.mod internal/go-oidc/go.sum ./internal/go-oidc/
COPY internal/gopsutil/go.mod internal/gopsutil/go.sum ./internal/gopsutil/
COPY internal/go-proxmox/go.mod internal/go-proxmox/go.sum ./internal/go-proxmox/
COPY go.mod go.sum ./
# remove godoxy stuff from go.mod first
@@ -43,6 +44,9 @@ ENV VERSION=${VERSION}
ARG MAKE_ARGS
ENV MAKE_ARGS=${MAKE_ARGS}
ARG BRANCH
ENV BRANCH=${BRANCH}
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/root/go/pkg/mod \
make ${MAKE_ARGS} docker=1 build

View File

@@ -1,5 +1,6 @@
shell := /bin/sh
export VERSION ?= $(shell git describe --tags --abbrev=0)
export BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
export BUILD_DATE ?= $(shell date -u +'%Y%m%d-%H%M')
export GOOS = linux
@@ -8,7 +9,12 @@ REPO_URL ?= https://github.com/yusing/godoxy
WEBUI_DIR ?= ../godoxy-webui
DOCS_DIR ?= ${WEBUI_DIR}/wiki
GO_TAGS = sonic
ifneq ($(BRANCH), compat)
GO_TAGS = sonic
else
GO_TAGS =
endif
LDFLAGS = -X github.com/yusing/goutils/version.version=${VERSION} -checklinkname=0
ifeq ($(agent), 1)
@@ -137,9 +143,6 @@ benchmark:
dev-run: build
cd dev-data && ${BIN_PATH}
mtrace:
${BIN_PATH} debug-ls-mtrace > mtrace.json
rapid-crash:
docker run --restart=always --name test_crash -p 80 debian:bookworm-slim /bin/cat &&\
sleep 3 &&\
@@ -156,7 +159,7 @@ cloc:
scc -w -i go --not-match '_test.go$$'
push-github:
git push origin $(shell git rev-parse --abbrev-ref HEAD)
git push origin $(BRANCH)
gen-swagger:
# go install github.com/swaggo/swag/cmd/swag@latest

View File

@@ -33,6 +33,10 @@ Have questions? Ask [ChatGPT](https://chatgpt.com/g/g-6825390374b481919ad482f2e4
- [Prerequisites](#prerequisites)
- [Setup](#setup)
- [How does GoDoxy work](#how-does-godoxy-work)
- [Proxmox Integration](#proxmox-integration)
- [Automatic Route Binding](#automatic-route-binding)
- [WebUI Management](#webui-management)
- [API Endpoints](#api-endpoints)
- [Update / Uninstall system agent](#update--uninstall-system-agent)
- [Screenshots](#screenshots)
- [idlesleeper](#idlesleeper)
@@ -67,7 +71,11 @@ Have questions? Ask [ChatGPT](https://chatgpt.com/g/g-6825390374b481919ad482f2e4
- Podman
- **Idle-sleep**: stop and wake containers based on traffic _(see [screenshots](#idlesleeper))_
- Docker containers
- Proxmox LXCs
- Proxmox LXC containers
- **Proxmox Integration**
- **Automatic route binding**: Routes automatically bind to Proxmox nodes or LXC containers by matching hostname, IP, or alias
- **LXC lifecycle control**: Start, stop, restart containers directly from WebUI
- **Real-time logs**: Stream journalctl logs from nodes and LXC containers via WebSocket
- **Traffic Management**
- HTTP reverse proxy
- TCP/UDP port forwarding
@@ -80,7 +88,12 @@ Have questions? Ask [ChatGPT](https://chatgpt.com/g/g-6825390374b481919ad482f2e4
- App Dashboard
- Config Editor
- Uptime and System Metrics
- Docker Logs Viewer
- **Docker**
- Container lifecycle management (start, stop, restart)
- Real-time container logs via WebSocket
- **Proxmox**
- LXC container lifecycle management (start, stop, restart)
- Real-time node and LXC journalctl logs via WebSocket
- **Cross-Platform support**
- Supports **linux/amd64** and **linux/arm64**
- **Efficient and Performant**
@@ -128,6 +141,50 @@ Configure Wildcard DNS Record(s) to point to machine running `GoDoxy`, e.g.
>
> For example, with the label `proxy.aliases: qbt` you can access your app via `qbt.domain.com`.
## Proxmox Integration
GoDoxy can automatically discover and manage Proxmox nodes and LXC containers through configured providers.
### Automatic Route Binding
Routes are automatically linked to Proxmox resources through reverse lookup:
1. **Node-level routes** (VMID = 0): When hostname, IP, or alias matches a Proxmox node name or IP
2. **Container-level routes** (VMID > 0): When hostname, IP, or alias matches an LXC container
This enables seamless proxy configuration without manual binding:
```yaml
routes:
pve-node-01:
host: pve-node-01.internal
port: 8006
# Automatically links to Proxmox node pve-node-01
```
### WebUI Management
From the WebUI, you can:
- **LXC Lifecycle Control**: Start, stop, restart containers
- **Node Logs**: Stream real-time journalctl output from nodes
- **LXC Logs**: Stream real-time journalctl output from containers
### API Endpoints
```http
# Node journalctl (WebSocket)
GET /api/v1/proxmox/journalctl/:node
# LXC journalctl (WebSocket)
GET /api/v1/proxmox/journalctl/:node/:vmid
# LXC lifecycle control
POST /api/v1/proxmox/lxc/:node/:vmid/start
POST /api/v1/proxmox/lxc/:node/:vmid/stop
POST /api/v1/proxmox/lxc/:node/:vmid/restart
```
## Update / Uninstall system agent
Update:

View File

@@ -34,6 +34,10 @@
- [安裝](#安裝)
- [手動安裝](#手動安裝)
- [資料夾結構](#資料夾結構)
- [Proxmox 整合](#proxmox-整合)
- [自動路由綁定](#自動路由綁定)
- [WebUI 管理](#webui-管理)
- [API 端點](#api-端點)
- [更新 / 卸載系統代理 (System Agent)](#更新--卸載系統代理-system-agent)
- [截圖](#截圖)
- [閒置休眠](#閒置休眠)
@@ -67,6 +71,10 @@
- **閒置休眠**:根據流量停止和喚醒容器 _(參見[截圖](#閒置休眠))_
- Docker 容器
- Proxmox LXC 容器
- **Proxmox 整合**
- **自動路由綁定**:透過比對主機名稱、IP 或別名自動將路由綁定至 Proxmox 節點或 LXC 容器
- **LXC 生命週期控制**:可直接從 WebUI 啟動、停止、重新啟動容器
- **即時日誌**:透過 WebSocket 串流節點和 LXC 容器的 journalctl 日誌
- **流量管理**
- HTTP 反向代理
- TCP/UDP 連接埠轉送
@@ -79,7 +87,12 @@
- 應用程式一覽
- 設定編輯器
- 執行時間與系統指標
- Docker 日誌檢視器
- **Docker**
- 容器生命週期管理 (啟動、停止、重新啟動)
- 透過 WebSocket 即時串流容器日誌
- **Proxmox**
- LXC 容器生命週期管理 (啟動、停止、重新啟動)
- 透過 WebSocket 即時串流節點和 LXC 容器 journalctl 日誌
- **跨平台支援**
- 支援 **linux/amd64** 與 **linux/arm64**
- **高效能**
@@ -144,6 +157,50 @@
└── .env
```
## Proxmox 整合
GoDoxy 可透過配置的提供者自動探索和管理 Proxmox 節點和 LXC 容器。
### 自動路由綁定
路由透過反向查詢自動連結至 Proxmox 資源:
1. **節點級路由** (VMID = 0):當主機名稱、IP 或別名符合 Proxmox 節點名稱或 IP 時
2. **容器級路由** (VMID > 0):當主機名稱、IP 或別名符合 LXC 容器時
這可實現無需手動綁定的無縫代理配置:
```yaml
routes:
pve-node-01:
host: pve-node-01.internal
port: 8006
# 自動連結至 Proxmox 節點 pve-node-01
```
### WebUI 管理
您可以從 WebUI:
- **LXC 生命週期控制**:啟動、停止、重新啟動容器
- **節點日誌**:串流來自節點的即時 journalctl 輸出
- **LXC 日誌**:串流來自容器的即時 journalctl 輸出
### API 端點
```http
# 節點 journalctl (WebSocket)
GET /api/v1/proxmox/journalctl/:node
# LXC journalctl (WebSocket)
GET /api/v1/proxmox/journalctl/:node/:vmid
# LXC 生命週期控制
POST /api/v1/proxmox/lxc/:node/:vmid/start
POST /api/v1/proxmox/lxc/:node/:vmid/stop
POST /api/v1/proxmox/lxc/:node/:vmid/restart
```
## 更新 / 卸載系統代理 (System Agent)
更新:

View File

@@ -15,14 +15,14 @@ replace (
exclude github.com/containerd/nerdctl/mod/tigron v0.0.0
require (
github.com/bytedance/sonic v1.14.2
github.com/bytedance/sonic v1.15.0
github.com/gin-gonic/gin v1.11.0
github.com/gorilla/websocket v1.5.3
github.com/pion/dtls/v3 v3.0.10
github.com/pion/transport/v3 v3.1.1
github.com/rs/zerolog v1.34.0
github.com/stretchr/testify v1.11.1
github.com/yusing/godoxy v0.24.1
github.com/yusing/godoxy v0.25.0
github.com/yusing/godoxy/socketproxy v0.0.0-00010101000000-000000000000
github.com/yusing/goutils v0.7.0
)
@@ -31,14 +31,14 @@ require (
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/andybalholm/brotli v1.2.0 // indirect
github.com/bytedance/gopkg v0.1.3 // indirect
github.com/bytedance/sonic/loader v0.4.0 // indirect
github.com/bytedance/sonic/loader v0.5.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cloudwego/base64x v0.1.6 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/cli v29.1.4+incompatible // indirect
github.com/docker/cli v29.1.5+incompatible // indirect
github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/ebitengine/purego v0.9.1 // indirect
@@ -73,7 +73,7 @@ require (
github.com/pion/transport/v4 v4.0.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/puzpuzpuz/xsync/v4 v4.3.0 // indirect
github.com/puzpuzpuz/xsync/v4 v4.4.0 // indirect
github.com/quic-go/qpack v0.6.0 // indirect
github.com/quic-go/quic-go v0.59.0 // indirect
github.com/shirou/gopsutil/v4 v4.25.12 // indirect
@@ -86,8 +86,8 @@ require (
github.com/valyala/fasthttp v1.69.0 // indirect
github.com/yusing/ds v0.4.1 // indirect
github.com/yusing/gointernals v0.1.16 // indirect
github.com/yusing/goutils/http/reverseproxy v0.0.0-20260116021320-b12ef77f3743 // indirect
github.com/yusing/goutils/http/websocket v0.0.0-20260116021320-b12ef77f3743 // indirect
github.com/yusing/goutils/http/reverseproxy v0.0.0-20260125040745-bcc4b498f878 // indirect
github.com/yusing/goutils/http/websocket v0.0.0-20260125040745-bcc4b498f878 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect

View File

@@ -10,10 +10,10 @@ github.com/buger/goterm v1.0.4 h1:Z9YvGmOih81P0FbVtEYTFF6YsSgxSUKEhf/f9bTMXbY=
github.com/buger/goterm v1.0.4/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE=
github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM=
github.com/bytedance/sonic v1.14.2 h1:k1twIoe97C1DtYUo+fZQy865IuHia4PR5RPiuGPPIIE=
github.com/bytedance/sonic v1.14.2/go.mod h1:T80iDELeHiHKSc0C9tubFygiuXoGzrkjKzX2quAx980=
github.com/bytedance/sonic/loader v0.4.0 h1:olZ7lEqcxtZygCK9EKYKADnpQoYkRQxaeY2NYzevs+o=
github.com/bytedance/sonic/loader v0.4.0/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo=
github.com/bytedance/sonic v1.15.0 h1:/PXeWFaR5ElNcVE84U0dOHjiMHQOwNIx3K4ymzh/uSE=
github.com/bytedance/sonic v1.15.0/go.mod h1:tFkWrPz0/CUCLEF4ri4UkHekCIcdnkqXw9VduqpJh0k=
github.com/bytedance/sonic/loader v0.5.0 h1:gXH3KVnatgY7loH5/TkeVyXPfESoqSBSBEiDd5VjlgE=
github.com/bytedance/sonic/loader v0.5.0/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
@@ -37,8 +37,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
github.com/docker/cli v29.1.4+incompatible h1:AI8fwZhqsAsrqZnVv9h6lbexeW/LzNTasf6A4vcNN8M=
github.com/docker/cli v29.1.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v29.1.5+incompatible h1:GckbANUt3j+lsnQ6eCcQd70mNSOismSHWt8vk2AX8ao=
github.com/docker/cli v29.1.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -124,8 +124,8 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/miekg/dns v1.1.70 h1:DZ4u2AV35VJxdD9Fo9fIWm119BsQL5cZU1cQ9s0LkqA=
github.com/miekg/dns v1.1.70/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs=
github.com/miekg/dns v1.1.72 h1:vhmr+TF2A3tuoGNkLDFK9zi36F2LS+hKTRW0Uf8kbzI=
github.com/miekg/dns v1.1.72/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/moby/api v1.52.0 h1:00BtlJY4MXkkt84WhUZPRqt5TvPbgig2FZvTbe3igYg=
@@ -153,16 +153,16 @@ github.com/pion/transport/v3 v3.1.1 h1:Tr684+fnnKlhPceU+ICdrw6KKkTms+5qHMgw6bIkY
github.com/pion/transport/v3 v3.1.1/go.mod h1:+c2eewC5WJQHiAA46fkMMzoYZSuGzA/7E2FPrOYHctQ=
github.com/pion/transport/v4 v4.0.1 h1:sdROELU6BZ63Ab7FrOLn13M6YdJLY20wldXW2Cu2k8o=
github.com/pion/transport/v4 v4.0.1/go.mod h1:nEuEA4AD5lPdcIegQDpVLgNoDGreqM/YqmEx3ovP4jM=
github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0=
github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU=
github.com/pires/go-proxyproto v0.9.1 h1:wTPjpyk41pJm1Im9BqHtPLuhxfjxL+qNfSikx9ux0WY=
github.com/pires/go-proxyproto v0.9.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/puzpuzpuz/xsync/v4 v4.3.0 h1:w/bWkEJdYuRNYhHn5eXnIT8LzDM1O629X1I9MJSkD7Q=
github.com/puzpuzpuz/xsync/v4 v4.3.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/puzpuzpuz/xsync/v4 v4.4.0 h1:vlSN6/CkEY0pY8KaB0yqo/pCLZvp9nhdbBdjipT4gWo=
github.com/puzpuzpuz/xsync/v4 v4.4.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw=

View File

@@ -3,6 +3,7 @@ package main
import (
"os"
"sync"
"time"
"github.com/rs/zerolog/log"
"github.com/yusing/godoxy/internal/api"
@@ -32,6 +33,16 @@ func parallel(fns ...func()) {
}
func main() {
done := make(chan struct{}, 1)
go func() {
select {
case <-done:
return
case <-time.After(common.InitTimeout):
log.Fatal().Msgf("timeout waiting for initialization to complete, exiting...")
}
}()
initProfiling()
logging.InitLogger(os.Stderr, memlogger.GetMemLogger())
@@ -69,14 +80,25 @@ func main() {
server.StartServer(task.RootTask("api_server", false), server.Options{
Name: "api",
HTTPAddr: common.APIHTTPAddr,
Handler: api.NewHandler(),
Handler: api.NewHandler(true),
})
// Local API Handler is used for unauthenticated access.
if common.LocalAPIHTTPAddr != "" {
server.StartServer(task.RootTask("local_api_server", false), server.Options{
Name: "local_api",
HTTPAddr: common.LocalAPIHTTPAddr,
Handler: api.NewHandler(false),
})
}
listenDebugServer()
uptime.Poller.Start()
config.WatchChanges()
close(done)
task.WaitExit(config.Value().TimeoutShutdown)
}

33
go.mod
View File

@@ -4,6 +4,7 @@ go 1.25.6
replace (
github.com/coreos/go-oidc/v3 => ./internal/go-oidc
github.com/luthermonson/go-proxmox => ./internal/go-proxmox
github.com/shirou/gopsutil/v4 => ./internal/gopsutil
github.com/yusing/godoxy/agent => ./agent
github.com/yusing/godoxy/internal/dnsproviders => ./internal/dnsproviders
@@ -24,8 +25,8 @@ require (
github.com/gorilla/websocket v1.5.3 // websocket for API and agent
github.com/gotify/server/v2 v2.8.0 // reference the Message struct for json response
github.com/lithammer/fuzzysearch v1.1.8 // fuzzy search for searching icons and filtering metrics
github.com/pires/go-proxyproto v0.8.1 // proxy protocol support
github.com/puzpuzpuz/xsync/v4 v4.3.0 // lock free map for concurrent operations
github.com/pires/go-proxyproto v0.9.1 // proxy protocol support
github.com/puzpuzpuz/xsync/v4 v4.4.0 // lock free map for concurrent operations
github.com/rs/zerolog v1.34.0 // logging
github.com/vincent-petithory/dataurl v1.0.0 // data url for fav icon
golang.org/x/crypto v0.47.0 // encrypting password with bcrypt
@@ -37,8 +38,8 @@ require (
require (
github.com/bytedance/gopkg v0.1.3 // xxhash64 for fast hash
github.com/bytedance/sonic v1.14.2 // fast json parsing
github.com/docker/cli v29.1.4+incompatible // needs docker/cli/cli/connhelper connection helper for docker client
github.com/bytedance/sonic v1.15.0 // fast json parsing
github.com/docker/cli v29.1.5+incompatible // needs docker/cli/cli/connhelper connection helper for docker client
github.com/goccy/go-yaml v1.19.2 // yaml parsing for different config files
github.com/golang-jwt/jwt/v5 v5.3.0 // jwt authentication
github.com/luthermonson/go-proxmox v0.3.2 // proxmox API client
@@ -51,17 +52,17 @@ require (
github.com/stretchr/testify v1.11.1 // testing framework
github.com/valyala/fasthttp v1.69.0 // fast http for health check
github.com/yusing/ds v0.4.1 // data structures and algorithms
github.com/yusing/godoxy/agent v0.0.0-20260116020954-edcde00dcc3a
github.com/yusing/godoxy/internal/dnsproviders v0.0.0-20260116020954-edcde00dcc3a
github.com/yusing/godoxy/agent v0.0.0-20260125091326-9c2051840fd9
github.com/yusing/godoxy/internal/dnsproviders v0.0.0-20260124133347-9a96f3cc539e
github.com/yusing/gointernals v0.1.16
github.com/yusing/goutils v0.7.0
github.com/yusing/goutils/http/reverseproxy v0.0.0-20260116021320-b12ef77f3743
github.com/yusing/goutils/http/websocket v0.0.0-20260116021320-b12ef77f3743
github.com/yusing/goutils/server v0.0.0-20260116021320-b12ef77f3743
github.com/yusing/goutils/http/reverseproxy v0.0.0-20260125040745-bcc4b498f878
github.com/yusing/goutils/http/websocket v0.0.0-20260125040745-bcc4b498f878
github.com/yusing/goutils/server v0.0.0-20260125040745-bcc4b498f878
)
require (
cloud.google.com/go/auth v0.18.0 // indirect
cloud.google.com/go/auth v0.18.1 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.9.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 // indirect
@@ -103,7 +104,7 @@ require (
github.com/magefile/mage v1.15.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/miekg/dns v1.1.70 // indirect
github.com/miekg/dns v1.1.72 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -135,8 +136,8 @@ require (
golang.org/x/sys v0.40.0 // indirect
golang.org/x/text v0.33.0 // indirect
golang.org/x/tools v0.41.0 // indirect
google.golang.org/api v0.260.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3 // indirect
google.golang.org/api v0.262.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d // indirect
google.golang.org/grpc v1.78.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/ini.v1 v1.67.1 // indirect
@@ -148,7 +149,7 @@ require (
github.com/akamai/AkamaiOPEN-edgegrid-golang/v11 v11.1.0 // indirect
github.com/andybalholm/brotli v1.2.0 // indirect
github.com/boombuler/barcode v1.1.0 // indirect
github.com/bytedance/sonic/loader v0.4.0 // indirect
github.com/bytedance/sonic/loader v0.5.0 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cloudwego/base64x v0.1.6 // indirect
@@ -169,8 +170,8 @@ require (
github.com/linode/linodego v1.64.0 // indirect
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect
github.com/nrdcg/goinwx v0.12.0 // indirect
github.com/nrdcg/oci-go-sdk/common/v1065 v1065.106.0 // indirect
github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.106.0 // indirect
github.com/nrdcg/oci-go-sdk/common/v1065 v1065.106.1 // indirect
github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.106.1 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pion/dtls/v3 v3.0.10 // indirect
github.com/pion/logging v0.2.4 // indirect

46
go.sum
View File

@@ -1,5 +1,5 @@
cloud.google.com/go/auth v0.18.0 h1:wnqy5hrv7p3k7cShwAU/Br3nzod7fxoqG+k0VZ+/Pk0=
cloud.google.com/go/auth v0.18.0/go.mod h1:wwkPM1AgE1f2u6dG443MiWoD8C3BtOywNsUMcUTVDRo=
cloud.google.com/go/auth v0.18.1 h1:IwTEx92GFUo2pJ6Qea0EU3zYvKnTAeRCODxfA/G5UWs=
cloud.google.com/go/auth v0.18.1/go.mod h1:GfTYoS9G3CWpRA3Va9doKN9mjPGRS+v41jmZAhBzbrA=
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
@@ -51,10 +51,10 @@ github.com/buger/goterm v1.0.4 h1:Z9YvGmOih81P0FbVtEYTFF6YsSgxSUKEhf/f9bTMXbY=
github.com/buger/goterm v1.0.4/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE=
github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM=
github.com/bytedance/sonic v1.14.2 h1:k1twIoe97C1DtYUo+fZQy865IuHia4PR5RPiuGPPIIE=
github.com/bytedance/sonic v1.14.2/go.mod h1:T80iDELeHiHKSc0C9tubFygiuXoGzrkjKzX2quAx980=
github.com/bytedance/sonic/loader v0.4.0 h1:olZ7lEqcxtZygCK9EKYKADnpQoYkRQxaeY2NYzevs+o=
github.com/bytedance/sonic/loader v0.4.0/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo=
github.com/bytedance/sonic v1.15.0 h1:/PXeWFaR5ElNcVE84U0dOHjiMHQOwNIx3K4ymzh/uSE=
github.com/bytedance/sonic v1.15.0/go.mod h1:tFkWrPz0/CUCLEF4ri4UkHekCIcdnkqXw9VduqpJh0k=
github.com/bytedance/sonic/loader v0.5.0 h1:gXH3KVnatgY7loH5/TkeVyXPfESoqSBSBEiDd5VjlgE=
github.com/bytedance/sonic/loader v0.5.0/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
@@ -76,8 +76,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
github.com/docker/cli v29.1.4+incompatible h1:AI8fwZhqsAsrqZnVv9h6lbexeW/LzNTasf6A4vcNN8M=
github.com/docker/cli v29.1.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v29.1.5+incompatible h1:GckbANUt3j+lsnQ6eCcQd70mNSOismSHWt8vk2AX8ao=
github.com/docker/cli v29.1.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -197,8 +197,6 @@ github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8
github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4=
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 h1:PwQumkgq4/acIiZhtifTV5OUqqiP82UAl0h87xj/l9k=
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
github.com/luthermonson/go-proxmox v0.3.2 h1:/zUg6FCl9cAABx0xU3OIgtDtClY0gVXxOCsrceDNylc=
github.com/luthermonson/go-proxmox v0.3.2/go.mod h1:oyFgg2WwTEIF0rP6ppjiixOHa5ebK1p8OaRiFhvICBQ=
github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
@@ -210,8 +208,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/maxatome/go-testdeep v1.14.0 h1:rRlLv1+kI8eOI3OaBXZwb3O7xY3exRzdW5QyX48g9wI=
github.com/maxatome/go-testdeep v1.14.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM=
github.com/miekg/dns v1.1.70 h1:DZ4u2AV35VJxdD9Fo9fIWm119BsQL5cZU1cQ9s0LkqA=
github.com/miekg/dns v1.1.70/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs=
github.com/miekg/dns v1.1.72 h1:vhmr+TF2A3tuoGNkLDFK9zi36F2LS+hKTRW0Uf8kbzI=
github.com/miekg/dns v1.1.72/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
@@ -229,10 +227,10 @@ github.com/nrdcg/goacmedns v0.2.0 h1:ADMbThobzEMnr6kg2ohs4KGa3LFqmgiBA22/6jUWJR0
github.com/nrdcg/goacmedns v0.2.0/go.mod h1:T5o6+xvSLrQpugmwHvrSNkzWht0UGAwj2ACBMhh73Cg=
github.com/nrdcg/goinwx v0.12.0 h1:ujdUqDBnaRSFwzVnImvPHYw3w3m9XgmGImNUw1GyMb4=
github.com/nrdcg/goinwx v0.12.0/go.mod h1:IrVKd3ZDbFiMjdPgML4CSxZAY9wOoqLvH44zv3NodJ0=
github.com/nrdcg/oci-go-sdk/common/v1065 v1065.106.0 h1:4MRzV6spwPHKct+4/ETqkEtr39Hq+0KvxhsgqbgQ2Bo=
github.com/nrdcg/oci-go-sdk/common/v1065 v1065.106.0/go.mod h1:Gcs8GCaZXL3FdiDWgdnMxlOLEdRprJJnPYB22TX1jw8=
github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.106.0 h1:RxraLVYX3eMUfQ1pDtJVvykEFGheky2YsrUt2HHRDcw=
github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.106.0/go.mod h1:JLMEKMX8IYPZ1TUSVHAVAbtnNSfP/I8OZQkAnfEMA0I=
github.com/nrdcg/oci-go-sdk/common/v1065 v1065.106.1 h1:+fx2mbWeR8XX/vidwpRMepJMtRIYQP44Iezm2oeObVM=
github.com/nrdcg/oci-go-sdk/common/v1065 v1065.106.1/go.mod h1:Gcs8GCaZXL3FdiDWgdnMxlOLEdRprJJnPYB22TX1jw8=
github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.106.1 h1:GDhBiaIAm/QXLzHJ0ASDdY/6R/9w60+gk8lY5rgfxEQ=
github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.106.1/go.mod h1:EHScJdbM0gg5Is7e3C0ceRYAFMMsfP4Vf8sBRoxoTgk=
github.com/nrdcg/porkbun v0.4.0 h1:rWweKlwo1PToQ3H+tEO9gPRW0wzzgmI/Ob3n2Guticw=
github.com/nrdcg/porkbun v0.4.0/go.mod h1:/QMskrHEIM0IhC/wY7iTCUgINsxdT2WcOphktJ9+Q54=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@@ -253,8 +251,8 @@ github.com/pion/logging v0.2.4 h1:tTew+7cmQ+Mc1pTBLKH2puKsOvhm32dROumOZ655zB8=
github.com/pion/logging v0.2.4/go.mod h1:DffhXTKYdNZU+KtJ5pyQDjvOAh/GsNSyv1lbkFbe3so=
github.com/pion/transport/v4 v4.0.1 h1:sdROELU6BZ63Ab7FrOLn13M6YdJLY20wldXW2Cu2k8o=
github.com/pion/transport/v4 v4.0.1/go.mod h1:nEuEA4AD5lPdcIegQDpVLgNoDGreqM/YqmEx3ovP4jM=
github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0=
github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU=
github.com/pires/go-proxyproto v0.9.1 h1:wTPjpyk41pJm1Im9BqHtPLuhxfjxL+qNfSikx9ux0WY=
github.com/pires/go-proxyproto v0.9.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -267,8 +265,8 @@ github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs=
github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
github.com/puzpuzpuz/xsync/v4 v4.3.0 h1:w/bWkEJdYuRNYhHn5eXnIT8LzDM1O629X1I9MJSkD7Q=
github.com/puzpuzpuz/xsync/v4 v4.3.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/puzpuzpuz/xsync/v4 v4.4.0 h1:vlSN6/CkEY0pY8KaB0yqo/pCLZvp9nhdbBdjipT4gWo=
github.com/puzpuzpuz/xsync/v4 v4.4.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw=
@@ -449,14 +447,14 @@ golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/api v0.260.0 h1:XbNi5E6bOVEj/uLXQRlt6TKuEzMD7zvW/6tNwltE4P4=
google.golang.org/api v0.260.0/go.mod h1:Shj1j0Phr/9sloYrKomICzdYgsSDImpTxME8rGLaZ/o=
google.golang.org/api v0.262.0 h1:4B+3u8He2GwyN8St3Jhnd3XRHlIvc//sBmgHSp78oNY=
google.golang.org/api v0.262.0/go.mod h1:jNwmH8BgUBJ/VrUG6/lIl9YiildyLd09r9ZLHiQ6cGI=
google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb0NcTum6itIWtdoglGX+rnGxm2934=
google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0=
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls=
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3 h1:C4WAdL+FbjnGlpp2S+HMVhBeCq2Lcib4xZqfPNF6OoQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d h1:xXzuihhT3gL/ntduUZwHECzAn57E8dA6l8SOtYWdD8Q=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=

Submodule goutils updated: 900faa77c8...272bc53439

View File

@@ -4,7 +4,6 @@ import (
"fmt"
"math"
"net"
"sync/atomic"
"time"
"github.com/puzpuzpuz/xsync/v4"
@@ -27,9 +26,9 @@ type Config struct {
Log *accesslog.ACLLoggerConfig `json:"log"`
Notify struct {
To []string `json:"to"` // list of notification providers
Interval time.Duration `json:"interval"` // interval between notifications
IncludeAllowed *bool `json:"include_allowed"` // default: false
To []string `json:"to,omitempty"` // list of notification providers
Interval time.Duration `json:"interval,omitempty"` // interval between notifications
IncludeAllowed *bool `json:"include_allowed,omitzero"` // default: false
} `json:"notify"`
config
@@ -75,8 +74,7 @@ type ipLog struct {
allowed bool
}
// could be nil
var ActiveConfig atomic.Pointer[Config]
type ContextKey struct{}
const cacheTTL = 1 * time.Minute
@@ -108,7 +106,7 @@ func (c *Config) Validate() gperr.Error {
c.allowLocal = true
}
if c.Notify.Interval < 0 {
if c.Notify.Interval <= 0 {
c.Notify.Interval = defaultNotifyInterval
}
@@ -292,16 +290,16 @@ func (c *Config) IPAllowed(ip net.IP) bool {
}
ipAndStr := &maxmind.IPInfo{IP: ip, Str: ipStr}
if c.Allow.Match(ipAndStr) {
c.logAndNotify(ipAndStr, true)
c.cacheRecord(ipAndStr, true)
return true
}
if c.Deny.Match(ipAndStr) {
c.logAndNotify(ipAndStr, false)
c.cacheRecord(ipAndStr, false)
return false
}
if c.Allow.Match(ipAndStr) {
c.logAndNotify(ipAndStr, true)
c.cacheRecord(ipAndStr, true)
return true
}
c.logAndNotify(ipAndStr, c.defaultAllow)
c.cacheRecord(ipAndStr, c.defaultAllow)

View File

@@ -1,6 +1,7 @@
package acl
import (
"bytes"
"net"
"strings"
@@ -12,6 +13,7 @@ type MatcherFunc func(*maxmind.IPInfo) bool
type Matcher struct {
match MatcherFunc
raw string
}
type Matchers []Matcher
@@ -46,6 +48,7 @@ func (matcher *Matcher) Parse(s string) error {
if len(parts) != 2 {
return errSyntax
}
matcher.raw = s
switch parts[0] {
case MatcherTypeIP:
@@ -79,6 +82,18 @@ func (matchers Matchers) Match(ip *maxmind.IPInfo) bool {
return false
}
func (matchers Matchers) MarshalText() ([]byte, error) {
if len(matchers) == 0 {
return []byte("[]"), nil
}
var buf bytes.Buffer
for _, m := range matchers {
buf.WriteString(m.raw)
buf.WriteByte('\n')
}
return buf.Bytes(), nil
}
func matchIP(ip net.IP) MatcherFunc {
return func(ip2 *maxmind.IPInfo) bool {
return ip.Equal(ip2.IP)

View File

@@ -16,6 +16,7 @@ import (
fileApi "github.com/yusing/godoxy/internal/api/v1/file"
homepageApi "github.com/yusing/godoxy/internal/api/v1/homepage"
metricsApi "github.com/yusing/godoxy/internal/api/v1/metrics"
proxmoxApi "github.com/yusing/godoxy/internal/api/v1/proxmox"
routeApi "github.com/yusing/godoxy/internal/api/v1/route"
"github.com/yusing/godoxy/internal/auth"
"github.com/yusing/godoxy/internal/common"
@@ -38,7 +39,7 @@ import (
// @externalDocs.description GoDoxy Docs
// @externalDocs.url https://docs.godoxy.dev
func NewHandler() *gin.Engine {
func NewHandler(requireAuth bool) *gin.Engine {
if !common.IsDebug {
gin.SetMode("release")
}
@@ -51,7 +52,7 @@ func NewHandler() *gin.Engine {
r.GET("/api/v1/version", apiV1.Version)
if auth.IsEnabled() {
if auth.IsEnabled() && requireAuth {
v1Auth := r.Group("/api/v1/auth")
{
v1Auth.HEAD("/check", authApi.Check)
@@ -64,7 +65,7 @@ func NewHandler() *gin.Engine {
}
v1 := r.Group("/api/v1")
if auth.IsEnabled() {
if auth.IsEnabled() && requireAuth {
v1.Use(AuthMiddleware())
}
if common.APISkipOriginCheck {
@@ -140,6 +141,21 @@ func NewHandler() *gin.Engine {
docker.POST("/start", dockerApi.Start)
docker.POST("/stop", dockerApi.Stop)
docker.POST("/restart", dockerApi.Restart)
docker.GET("/stats/:id", dockerApi.Stats)
}
proxmox := v1.Group("/proxmox")
{
proxmox.GET("/tail", proxmoxApi.Tail)
proxmox.GET("/journalctl", proxmoxApi.Journalctl)
proxmox.GET("/journalctl/:node", proxmoxApi.Journalctl)
proxmox.GET("/journalctl/:node/:vmid", proxmoxApi.Journalctl)
proxmox.GET("/journalctl/:node/:vmid/:service", proxmoxApi.Journalctl)
proxmox.GET("/stats/:node", proxmoxApi.NodeStats)
proxmox.GET("/stats/:node/:vmid", proxmoxApi.VMStats)
proxmox.POST("/lxc/:node/:vmid/start", proxmoxApi.Start)
proxmox.POST("/lxc/:node/:vmid/stop", proxmoxApi.Stop)
proxmox.POST("/lxc/:node/:vmid/restart", proxmoxApi.Restart)
}
}

View File

@@ -44,6 +44,7 @@ Types are defined in `goutils/apitypes`:
| `file` | Configuration file read/write operations |
| `auth` | Authentication and session management |
| `agent` | Remote agent creation and management |
| `proxmox` | Proxmox API management and monitoring |
## Architecture
@@ -77,15 +78,16 @@ API listening address is configured with `GODOXY_API_ADDR` environment variable.
### Internal Dependencies
| Package | Purpose |
| ----------------------- | --------------------------- |
| `internal/route/routes` | Route storage and iteration |
| `internal/docker` | Docker client management |
| `internal/config` | Configuration access |
| `internal/metrics` | System metrics collection |
| `internal/homepage` | Homepage item generation |
| `internal/agentpool` | Remote agent management |
| `internal/auth` | Authentication services |
| Package | Purpose |
| ----------------------- | ------------------------------------- |
| `internal/route/routes` | Route storage and iteration |
| `internal/docker` | Docker client management |
| `internal/config` | Configuration access |
| `internal/metrics` | System metrics collection |
| `internal/homepage` | Homepage item generation |
| `internal/agentpool` | Remote agent management |
| `internal/auth` | Authentication services |
| `internal/proxmox` | Proxmox API management and monitoring |
### External Dependencies

View File

@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"net/http"
"strconv"
"github.com/gin-gonic/gin"
"github.com/moby/moby/api/pkg/stdcopy"
@@ -22,6 +23,7 @@ type LogsQueryParams struct {
Since string `form:"from"`
Until string `form:"to"`
Levels string `form:"levels"`
Limit int `form:"limit,default=100" binding:"min=1,max=1000"`
} // @name LogsQueryParams
// @x-id "logs"
@@ -34,9 +36,10 @@ type LogsQueryParams struct {
// @Param id path string true "container id"
// @Param stdout query bool false "show stdout"
// @Param stderr query bool false "show stderr"
// @Param from query string false "from timestamp"
// @Param to query string false "to timestamp"
// @Param from query string false "from timestamp"
// @Param to query string false "to timestamp"
// @Param levels query string false "levels"
// @Param limit query int false "limit"
// @Success 200
// @Failure 400 {object} apitypes.ErrorResponse
// @Failure 403 {object} apitypes.ErrorResponse
@@ -77,7 +80,7 @@ func Logs(c *gin.Context) {
Until: queryParams.Until,
Timestamps: true,
Follow: true,
Tail: "100",
Tail: strconv.Itoa(queryParams.Limit),
}
if queryParams.Levels != "" {
opts.Details = true

View File

@@ -0,0 +1,117 @@
package dockerapi
import (
"context"
"errors"
"io"
"net/http"
"github.com/gin-gonic/gin"
"github.com/moby/moby/api/types/container"
"github.com/moby/moby/client"
"github.com/yusing/godoxy/internal/docker"
"github.com/yusing/godoxy/internal/route/routes"
"github.com/yusing/godoxy/internal/types"
apitypes "github.com/yusing/goutils/apitypes"
"github.com/yusing/goutils/http/httpheaders"
"github.com/yusing/goutils/http/websocket"
"github.com/yusing/goutils/synk"
"github.com/yusing/goutils/task"
)
type ContainerStatsResponse container.StatsResponse // @name ContainerStatsResponse
// @x-id "stats"
// @BasePath /api/v1
// @Summary Get container stats
// @Description Get container stats by container id
// @Tags docker,websocket
// @Produce json
// @Param id path string true "Container ID or route alias"
// @Success 200 {object} ContainerStatsResponse
// @Failure 400 {object} apitypes.ErrorResponse "Invalid request: id is required or route is not a docker container"
// @Failure 403 {object} apitypes.ErrorResponse
// @Failure 404 {object} apitypes.ErrorResponse "Container not found"
// @Failure 500 {object} apitypes.ErrorResponse
// @Router /docker/stats/{id} [get]
func Stats(c *gin.Context) {
id := c.Param("id")
if id == "" {
c.JSON(http.StatusBadRequest, apitypes.Error("id is required"))
return
}
dockerCfg, ok := docker.GetDockerCfgByContainerID(id)
if !ok {
var route types.Route
route, ok = routes.GetIncludeExcluded(id)
if ok {
cont := route.ContainerInfo()
if cont == nil {
c.JSON(http.StatusBadRequest, apitypes.Error("route is not a docker container"))
return
}
dockerCfg = cont.DockerCfg
id = cont.ContainerID
}
}
if !ok {
c.JSON(http.StatusNotFound, apitypes.Error("container or route not found"))
return
}
dockerClient, err := docker.NewClient(dockerCfg)
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to create docker client"))
return
}
defer dockerClient.Close()
if httpheaders.IsWebsocket(c.Request.Header) {
stats, err := dockerClient.ContainerStats(c.Request.Context(), id, client.ContainerStatsOptions{Stream: true})
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to get container stats"))
return
}
defer stats.Body.Close()
manager, err := websocket.NewManagerWithUpgrade(c)
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to create websocket manager"))
return
}
defer manager.Close()
buf := synk.GetSizedBytesPool().GetSized(4096)
defer synk.GetSizedBytesPool().Put(buf)
for {
select {
case <-manager.Done():
return
default:
_, err = io.CopyBuffer(manager.NewWriter(websocket.TextMessage), stats.Body, buf)
if err != nil {
if errors.Is(err, context.Canceled) || errors.Is(err, task.ErrProgramExiting) {
return
}
c.Error(apitypes.InternalServerError(err, "failed to copy container stats"))
return
}
}
}
}
stats, err := dockerClient.ContainerStats(c.Request.Context(), id, client.ContainerStatsOptions{Stream: false})
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to get container stats"))
return
}
defer stats.Body.Close()
_, err = io.Copy(c.Writer, stats.Body)
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to copy container stats"))
return
}
}
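
For context (not part of this diff), the single-shot path of the new `GET /api/v1/docker/stats/{id}` endpoint can be consumed with nothing but the standard library. A minimal sketch follows: the address matches the default `API_ADDR` of `127.0.0.1:8888`, the container id is a placeholder, and a real deployment would also need the API auth cookie or header.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Placeholder container id / route alias; auth is omitted for brevity.
	resp, err := http.Get("http://127.0.0.1:8888/api/v1/docker/stats/my-container")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// With Stream: false the handler copies back a single JSON stats object.
	var stats map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
		log.Fatal(err)
	}
	fmt.Println(stats)
}
```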

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,6 @@
package proxmoxapi
type ActionRequest struct {
Node string `uri:"node" binding:"required"`
VMID int `uri:"vmid" binding:"required"`
} // @name ProxmoxVMActionRequest

View File

@@ -0,0 +1,85 @@
package proxmoxapi
import (
"errors"
"io"
"net/http"
"github.com/gin-gonic/gin"
"github.com/yusing/godoxy/internal/proxmox"
"github.com/yusing/goutils/apitypes"
"github.com/yusing/goutils/http/websocket"
)
// e.g. ws://localhost:8889/api/v1/proxmox/journalctl?node=pve&vmid=127&service=pveproxy&service=pvedaemon&limit=10
// e.g. ws://localhost:8889/api/v1/proxmox/journalctl/pve/127?service=pveproxy&service=pvedaemon&limit=10
type JournalctlRequest struct {
Node string `form:"node" uri:"node" binding:"required"` // Node name
VMID *int `form:"vmid" uri:"vmid"` // Container VMID (optional - if not provided, streams node journalctl)
Services []string `form:"service" uri:"service"` // Service names
Limit *int `form:"limit" uri:"limit" default:"100" binding:"min=1,max=1000"` // Limit output lines (1-1000)
} // @name ProxmoxJournalctlRequest
// @x-id "journalctl"
// @BasePath /api/v1
// @Summary Get journalctl output
// @Description Get journalctl output for node or LXC container. If vmid is not provided, streams node journalctl.
// @Tags proxmox,websocket
// @Accept json
// @Produce application/json
// @Param query query JournalctlRequest true "Request"
// @Param path path JournalctlRequest true "Request"
// @Success 200 string plain "Journalctl output"
// @Failure 400 {object} apitypes.ErrorResponse "Invalid request"
// @Failure 403 {object} apitypes.ErrorResponse "Unauthorized"
// @Failure 404 {object} apitypes.ErrorResponse "Node not found"
// @Failure 500 {object} apitypes.ErrorResponse "Internal server error"
// @Router /proxmox/journalctl [get]
// @Router /proxmox/journalctl/{node} [get]
// @Router /proxmox/journalctl/{node}/{vmid} [get]
// @Router /proxmox/journalctl/{node}/{vmid}/{service} [get]
func Journalctl(c *gin.Context) {
var request JournalctlRequest
uriErr := c.ShouldBindUri(&request)
queryErr := c.ShouldBindQuery(&request)
if uriErr != nil && queryErr != nil { // allow both uri and query parameters to be set
c.JSON(http.StatusBadRequest, apitypes.Error("invalid request", errors.Join(uriErr, queryErr)))
return
}
node, ok := proxmox.Nodes.Get(request.Node)
if !ok {
c.JSON(http.StatusNotFound, apitypes.Error("node not found"))
return
}
c.Status(http.StatusContinue)
var reader io.ReadCloser
var err error
if request.VMID == nil {
reader, err = node.NodeJournalctl(c.Request.Context(), request.Services, *request.Limit)
} else {
reader, err = node.LXCJournalctl(c.Request.Context(), *request.VMID, request.Services, *request.Limit)
}
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to get journalctl output"))
return
}
defer reader.Close()
manager, err := websocket.NewManagerWithUpgrade(c)
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to upgrade to websocket"))
return
}
defer manager.Close()
writer := manager.NewWriter(websocket.TextMessage)
_, err = io.Copy(writer, reader)
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to copy journalctl output"))
return
}
}
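
A minimal client sketch (not part of this diff) for the websocket path of this endpoint, reusing the example URL from the comment above. gorilla/websocket is only an assumption here; any websocket client works, and a real deployment would also need authentication.

```go
package main

import (
	"fmt"
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	url := "ws://localhost:8889/api/v1/proxmox/journalctl/pve/127?service=pveproxy&service=pvedaemon&limit=10"
	conn, _, err := websocket.DefaultDialer.Dial(url, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Each websocket message carries a chunk of journalctl output.
	for {
		_, msg, err := conn.ReadMessage()
		if err != nil {
			return // stream closed by the server or the client context
		}
		fmt.Print(string(msg))
	}
}
```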

View File

@@ -0,0 +1,42 @@
package proxmoxapi
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/yusing/godoxy/internal/proxmox"
apitypes "github.com/yusing/goutils/apitypes"
)
// @x-id "lxcRestart"
// @BasePath /api/v1
// @Summary Restart LXC container
// @Description Restart LXC container by node and vmid
// @Tags proxmox
// @Produce json
// @Param path path ActionRequest true "Request"
// @Success 200 {object} apitypes.SuccessResponse
// @Failure 400 {object} apitypes.ErrorResponse "Invalid request"
// @Failure 404 {object} apitypes.ErrorResponse "Node not found"
// @Failure 500 {object} apitypes.ErrorResponse
// @Router /proxmox/lxc/:node/:vmid/restart [post]
func Restart(c *gin.Context) {
var req ActionRequest
if err := c.ShouldBindUri(&req); err != nil {
c.JSON(http.StatusBadRequest, apitypes.Error("invalid request", err))
return
}
node, ok := proxmox.Nodes.Get(req.Node)
if !ok {
c.JSON(http.StatusNotFound, apitypes.Error("node not found"))
return
}
if err := node.LXCAction(c.Request.Context(), req.VMID, proxmox.LXCReboot); err != nil {
c.Error(apitypes.InternalServerError(err, "failed to restart container"))
return
}
c.JSON(http.StatusOK, apitypes.Success("container restarted"))
}

View File

@@ -0,0 +1,42 @@
package proxmoxapi
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/yusing/godoxy/internal/proxmox"
apitypes "github.com/yusing/goutils/apitypes"
)
// @x-id "lxcStart"
// @BasePath /api/v1
// @Summary Start LXC container
// @Description Start LXC container by node and vmid
// @Tags proxmox
// @Produce json
// @Param path path ActionRequest true "Request"
// @Success 200 {object} apitypes.SuccessResponse
// @Failure 400 {object} apitypes.ErrorResponse "Invalid request"
// @Failure 404 {object} apitypes.ErrorResponse "Node not found"
// @Failure 500 {object} apitypes.ErrorResponse
// @Router /proxmox/lxc/:node/:vmid/start [post]
func Start(c *gin.Context) {
var req ActionRequest
if err := c.ShouldBindUri(&req); err != nil {
c.JSON(http.StatusBadRequest, apitypes.Error("invalid request", err))
return
}
node, ok := proxmox.Nodes.Get(req.Node)
if !ok {
c.JSON(http.StatusNotFound, apitypes.Error("node not found"))
return
}
if err := node.LXCAction(c.Request.Context(), req.VMID, proxmox.LXCStart); err != nil {
c.Error(apitypes.InternalServerError(err, "failed to start container"))
return
}
c.JSON(http.StatusOK, apitypes.Success("container started"))
}

View File

@@ -0,0 +1,139 @@
package proxmoxapi
import (
"io"
"net/http"
"github.com/gin-gonic/gin"
"github.com/yusing/godoxy/internal/proxmox"
"github.com/yusing/goutils/apitypes"
"github.com/yusing/goutils/http/httpheaders"
"github.com/yusing/goutils/http/websocket"
)
type StatsRequest struct {
Node string `uri:"node" binding:"required"`
VMID int `uri:"vmid" binding:"required"`
}
// @x-id "nodeStats"
// @BasePath /api/v1
// @Summary Get proxmox node stats
// @Description Get proxmox node stats in json
// @Tags proxmox,websocket
// @Produce application/json
// @Param node path string true "Node name"
// @Success 200 {object} proxmox.NodeStats "Stats output"
// @Failure 400 {object} apitypes.ErrorResponse "Invalid request"
// @Failure 403 {object} apitypes.ErrorResponse "Unauthorized"
// @Failure 404 {object} apitypes.ErrorResponse "Node not found"
// @Failure 500 {object} apitypes.ErrorResponse "Internal server error"
// @Router /proxmox/stats/{node} [get]
func NodeStats(c *gin.Context) {
nodeName := c.Param("node")
if nodeName == "" {
c.JSON(http.StatusBadRequest, apitypes.Error("node name is required"))
return
}
node, ok := proxmox.Nodes.Get(nodeName)
if !ok {
c.JSON(http.StatusNotFound, apitypes.Error("node not found"))
return
}
isWs := httpheaders.IsWebsocket(c.Request.Header)
reader, err := node.NodeStats(c.Request.Context(), isWs)
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to get stats"))
return
}
defer reader.Close()
if !isWs {
var line [512]byte
n, err := reader.Read(line[:])
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to copy stats"))
return
}
c.Data(http.StatusOK, "application/json", line[:n])
return
}
manager, err := websocket.NewManagerWithUpgrade(c)
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to upgrade to websocket"))
return
}
defer manager.Close()
writer := manager.NewWriter(websocket.TextMessage)
_, err = io.Copy(writer, reader)
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to copy stats"))
return
}
}
// @x-id "vmStats"
// @BasePath /api/v1
// @Summary Get proxmox VM stats
// @Description Get proxmox VM stats in format of "STATUS|CPU%%|MEM USAGE/LIMIT|MEM%%|NET I/O|BLOCK I/O"
// @Tags proxmox,websocket
// @Produce text/plain
// @Param path path StatsRequest true "Request"
// @Success 200 string plain "Stats output"
// @Failure 400 {object} apitypes.ErrorResponse "Invalid request"
// @Failure 403 {object} apitypes.ErrorResponse "Unauthorized"
// @Failure 404 {object} apitypes.ErrorResponse "Node not found"
// @Failure 500 {object} apitypes.ErrorResponse "Internal server error"
// @Router /proxmox/stats/{node}/{vmid} [get]
func VMStats(c *gin.Context) {
var request StatsRequest
if err := c.ShouldBindUri(&request); err != nil {
c.JSON(http.StatusBadRequest, apitypes.Error("invalid request", err))
return
}
node, ok := proxmox.Nodes.Get(request.Node)
if !ok {
c.JSON(http.StatusNotFound, apitypes.Error("node not found"))
return
}
isWs := httpheaders.IsWebsocket(c.Request.Header)
reader, err := node.LXCStats(c.Request.Context(), request.VMID, isWs)
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to get stats"))
return
}
defer reader.Close()
if !isWs {
var line [128]byte
n, err := reader.Read(line[:])
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to copy stats"))
return
}
c.Data(http.StatusOK, "text/plain; charset=utf-8", line[:n])
return
}
manager, err := websocket.NewManagerWithUpgrade(c)
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to upgrade to websocket"))
return
}
defer manager.Close()
writer := manager.NewWriter(websocket.TextMessage)
_, err = io.Copy(writer, reader)
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to copy stats"))
return
}
}

View File

@@ -0,0 +1,42 @@
package proxmoxapi
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/yusing/godoxy/internal/proxmox"
apitypes "github.com/yusing/goutils/apitypes"
)
// @x-id "lxcStop"
// @BasePath /api/v1
// @Summary Stop LXC container
// @Description Stop LXC container by node and vmid
// @Tags proxmox
// @Produce json
// @Param path path ActionRequest true "Request"
// @Success 200 {object} apitypes.SuccessResponse
// @Failure 400 {object} apitypes.ErrorResponse "Invalid request"
// @Failure 404 {object} apitypes.ErrorResponse "Node not found"
// @Failure 500 {object} apitypes.ErrorResponse
// @Router /proxmox/lxc/:node/:vmid/stop [post]
func Stop(c *gin.Context) {
var req ActionRequest
if err := c.ShouldBindUri(&req); err != nil {
c.JSON(http.StatusBadRequest, apitypes.Error("invalid request", err))
return
}
node, ok := proxmox.Nodes.Get(req.Node)
if !ok {
c.JSON(http.StatusNotFound, apitypes.Error("node not found"))
return
}
if err := node.LXCAction(c.Request.Context(), req.VMID, proxmox.LXCShutdown); err != nil {
c.Error(apitypes.InternalServerError(err, "failed to stop container"))
return
}
c.JSON(http.StatusOK, apitypes.Success("container stopped"))
}
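
The start, stop, and restart handlers share the same shape, so a single sketch covers all three (not part of this diff; node `pve`, VMID `127`, and the default `API_ADDR` of `127.0.0.1:8888` are placeholders, and auth is omitted):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// POST /api/v1/proxmox/lxc/{node}/{vmid}/stop — start and restart are analogous.
	resp, err := http.Post("http://127.0.0.1:8888/api/v1/proxmox/lxc/pve/127/stop", "application/json", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // 200 with a success message on the happy path
}
```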

View File

@@ -0,0 +1,77 @@
package proxmoxapi
import (
"io"
"net/http"
"github.com/gin-gonic/gin"
"github.com/yusing/godoxy/internal/proxmox"
"github.com/yusing/goutils/apitypes"
"github.com/yusing/goutils/http/websocket"
)
// e.g. ws://localhost:8889/api/v1/proxmox/tail?node=pve&vmid=127&file=/var/log/immich/web.log&file=/var/log/immich/ml.log&limit=10
type TailRequest struct {
Node string `form:"node" binding:"required"` // Node name
VMID *int `form:"vmid"` // Container VMID (optional - if not provided, streams node journalctl)
Files []string `form:"file" binding:"required,dive,filepath"` // File paths
Limit int `form:"limit" default:"100" binding:"min=1,max=1000"` // Limit output lines (1-1000)
} // @name ProxmoxTailRequest
// @x-id "tail"
// @BasePath /api/v1
// @Summary Get tail output
// @Description Get tail output for node or LXC container. If vmid is not provided, streams node tail.
// @Tags proxmox,websocket
// @Accept json
// @Produce application/json
// @Param query query TailRequest true "Request"
// @Success 200 string plain "Tail output"
// @Failure 400 {object} apitypes.ErrorResponse "Invalid request"
// @Failure 403 {object} apitypes.ErrorResponse "Unauthorized"
// @Failure 404 {object} apitypes.ErrorResponse "Node not found"
// @Failure 500 {object} apitypes.ErrorResponse "Internal server error"
// @Router /proxmox/tail [get]
func Tail(c *gin.Context) {
var request TailRequest
if err := c.ShouldBindQuery(&request); err != nil {
c.JSON(http.StatusBadRequest, apitypes.Error("invalid request", err))
return
}
node, ok := proxmox.Nodes.Get(request.Node)
if !ok {
c.JSON(http.StatusNotFound, apitypes.Error("node not found"))
return
}
c.Status(http.StatusContinue)
var reader io.ReadCloser
var err error
if request.VMID == nil {
reader, err = node.NodeTail(c.Request.Context(), request.Files, request.Limit)
} else {
reader, err = node.LXCTail(c.Request.Context(), *request.VMID, request.Files, request.Limit)
}
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to get journalctl output"))
return
}
defer reader.Close()
manager, err := websocket.NewManagerWithUpgrade(c)
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to upgrade to websocket"))
return
}
defer manager.Close()
writer := manager.NewWriter(websocket.TextMessage)
_, err = io.Copy(writer, reader)
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to copy journalctl output"))
return
}
}

View File

@@ -4,7 +4,6 @@ import (
"net/http"
"github.com/gin-gonic/gin"
statequery "github.com/yusing/godoxy/internal/config/query"
"github.com/yusing/godoxy/internal/route/routes"
apitypes "github.com/yusing/goutils/apitypes"
)
@@ -33,17 +32,10 @@ func Route(c *gin.Context) {
return
}
route, ok := routes.Get(request.Which)
route, ok := routes.GetIncludeExcluded(request.Which)
if ok {
c.JSON(http.StatusOK, route)
return
}
// also search for excluded routes
route = statequery.SearchRoute(request.Which)
if route != nil {
c.JSON(http.StatusOK, route)
return
}
c.JSON(http.StatusNotFound, nil)
c.JSON(http.StatusNotFound, apitypes.Error("route not found"))
}

View File

@@ -13,6 +13,8 @@ var (
IsDebug = env.GetEnvBool("DEBUG", IsTest)
IsTrace = env.GetEnvBool("TRACE", false) && IsDebug
InitTimeout = env.GetEnvDuation("INIT_TIMEOUT", 1*time.Minute)
ShortLinkPrefix = env.GetEnvString("SHORTLINK_PREFIX", "go")
ProxyHTTPAddr,
@@ -30,6 +32,11 @@ var (
APIHTTPPort,
APIHTTPURL = env.GetAddrEnv("API_ADDR", "127.0.0.1:8888", "http")
LocalAPIHTTPAddr,
LocalAPIHTTPHost,
LocalAPIHTTPPort,
LocalAPIHTTPURL = env.GetAddrEnv("LOCAL_API_ADDR", "", "http")
APIJWTSecure = env.GetEnvBool("API_JWT_SECURE", true)
APIJWTSecret = decodeJWTKey(env.GetEnvString("API_JWT_SECRET", ""))
APIJWTTokenTTL = env.GetEnvDuation("API_JWT_TOKEN_TTL", 24*time.Hour)

View File

@@ -10,6 +10,7 @@ import (
"github.com/yusing/godoxy/internal/common"
config "github.com/yusing/godoxy/internal/config/types"
"github.com/yusing/godoxy/internal/notif"
"github.com/yusing/godoxy/internal/route/routes"
"github.com/yusing/godoxy/internal/watcher"
"github.com/yusing/godoxy/internal/watcher/events"
gperr "github.com/yusing/goutils/errs"
@@ -59,6 +60,15 @@ func Load() error {
cfgWatcher = watcher.NewConfigFileWatcher(common.ConfigFileName)
// disable pool logging temporary since we already have pretty logging
routes.HTTP.DisableLog(true)
routes.Stream.DisableLog(true)
defer func() {
routes.HTTP.DisableLog(false)
routes.Stream.DisableLog(false)
}()
initErr := state.InitFromFile(common.ConfigPath)
err := errors.Join(initErr, state.StartProviders())
if err != nil {

View File

@@ -54,12 +54,6 @@ Returns all route providers as a map keyed by their short name. Thread-safe acce
func RouteProviderList() []RouteProviderListResponse
```
Returns a list of route providers with their short and full names. Useful for API responses.
```go
func SearchRoute(alias string) types.Route
```
Searches for a route by alias across all providers. Returns `nil` if not found.
```go
@@ -179,15 +173,6 @@ for shortName, provider := range providers {
}
```
### Searching for a route
```go
route := statequery.SearchRoute("my-service")
if route != nil {
fmt.Printf("Found route: %s\n", route.Alias())
}
```
### Getting system statistics
```go
@@ -213,14 +198,4 @@ func handleGetStats(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(stats)
}
func handleFindRoute(w http.ResponseWriter, r *http.Request) {
alias := r.URL.Query().Get("alias")
route := statequery.SearchRoute(alias)
if route == nil {
http.NotFound(w, r)
return
}
json.NewEncoder(w).Encode(route)
}
```

View File

@@ -30,13 +30,3 @@ func RouteProviderList() []RouteProviderListResponse {
}
return list
}
func SearchRoute(alias string) types.Route {
state := config.ActiveState.Load()
for _, p := range state.IterProviders() {
if r, ok := p.GetRoute(alias); ok {
return r
}
}
return nil
}

View File

@@ -28,7 +28,6 @@ import (
"github.com/yusing/godoxy/internal/maxmind"
"github.com/yusing/godoxy/internal/notif"
route "github.com/yusing/godoxy/internal/route/provider"
"github.com/yusing/godoxy/internal/route/routes"
"github.com/yusing/godoxy/internal/serialization"
"github.com/yusing/godoxy/internal/types"
gperr "github.com/yusing/goutils/errs"
@@ -74,7 +73,6 @@ func SetState(state config.State) {
cfg := state.Value()
config.ActiveState.Store(state)
acl.ActiveConfig.Store(cfg.ACL)
entrypoint.ActiveConfig.Store(&cfg.Entrypoint)
homepage.ActiveConfig.Store(&cfg.Homepage)
if autocertProvider := state.AutoCertProvider(); autocertProvider != nil {
@@ -113,14 +111,14 @@ func (state *state) Init(data []byte) error {
g := gperr.NewGroup("config load error")
g.Go(state.initMaxMind)
g.Go(state.initProxmox)
g.Go(state.loadRouteProviders)
g.Go(state.initAutoCert)
errs := g.Wait()
// these won't benefit from running on goroutines
errs.Add(state.initNotification())
errs.Add(state.initAccessLogger())
errs.Add(state.initACL())
errs.Add(state.initEntrypoint())
errs.Add(state.loadRouteProviders())
return errs.Error()
}
@@ -192,12 +190,17 @@ func (state *state) FlushTmpLog() {
state.tmpLogBuf.Reset()
}
// this one is connection level access logger, different from entrypoint access logger
func (state *state) initAccessLogger() error {
// initACL initializes the ACL.
func (state *state) initACL() error {
if !state.ACL.Valid() {
return nil
}
return state.ACL.Start(state.task)
err := state.ACL.Start(state.task)
if err != nil {
return err
}
state.task.SetValue(acl.ContextKey{}, state.ACL)
return nil
}
func (state *state) initEntrypoint() error {
@@ -314,76 +317,50 @@ func (state *state) initProxmox() error {
return errs.Wait().Error()
}
func (state *state) storeProvider(p types.RouteProvider) {
state.providers.Store(p.String(), p)
}
func (state *state) loadRouteProviders() error {
// disable pool logging temporary since we will have pretty logging below
routes.HTTP.DisableLog(true)
routes.Stream.DisableLog(true)
defer func() {
routes.HTTP.DisableLog(false)
routes.Stream.DisableLog(false)
}()
providers := &state.Providers
providers := state.Providers
errs := gperr.NewGroup("route provider errors")
results := gperr.NewGroup("loaded route providers")
agentpool.RemoveAll()
numProviders := len(providers.Agents) + len(providers.Files) + len(providers.Docker)
providersCh := make(chan types.RouteProvider, numProviders)
// start providers concurrently
var providersConsumer sync.WaitGroup
providersConsumer.Go(func() {
for p := range providersCh {
if actual, loaded := state.providers.LoadOrStore(p.String(), p); loaded {
errs.Add(gperr.Errorf("provider %s already exists, first: %s, second: %s", p.String(), actual.GetType(), p.GetType()))
continue
}
state.storeProvider(p)
registerProvider := func(p types.RouteProvider) {
if actual, loaded := state.providers.LoadOrStore(p.String(), p); loaded {
errs.Addf("provider %s already exists, first: %s, second: %s", p.String(), actual.GetType(), p.GetType())
}
})
}
var providersProducer sync.WaitGroup
agentErrs := gperr.NewGroup("agent init errors")
for _, a := range providers.Agents {
providersProducer.Go(func() {
agentErrs.Go(func() error {
if err := a.Init(state.task.Context()); err != nil {
errs.Add(gperr.PrependSubject(a.String(), err))
return
return gperr.PrependSubject(a.String(), err)
}
agentpool.Add(a)
p := route.NewAgentProvider(a)
providersCh <- p
return nil
})
}
if err := agentErrs.Wait().Error(); err != nil {
errs.Add(err)
}
for _, a := range providers.Agents {
registerProvider(route.NewAgentProvider(a))
}
for _, filename := range providers.Files {
providersProducer.Go(func() {
p, err := route.NewFileProvider(filename)
if err != nil {
errs.Add(gperr.PrependSubject(filename, err))
} else {
providersCh <- p
}
})
p, err := route.NewFileProvider(filename)
if err != nil {
errs.Add(gperr.PrependSubject(filename, err))
return err
}
registerProvider(p)
}
for name, dockerCfg := range providers.Docker {
providersProducer.Go(func() {
providersCh <- route.NewDockerProvider(name, dockerCfg)
})
registerProvider(route.NewDockerProvider(name, dockerCfg))
}
providersProducer.Wait()
close(providersCh)
providersConsumer.Wait()
lenLongestName := 0
for k := range state.providers.Range {
if len(k) > lenLongestName {
@@ -392,18 +369,26 @@ func (state *state) loadRouteProviders() error {
}
// load routes concurrently
var providersLoader sync.WaitGroup
loadErrs := gperr.NewGroup("route load errors")
results := gperr.NewBuilder("loaded route providers")
resultsMu := sync.Mutex{}
for _, p := range state.providers.Range {
providersLoader.Go(func() {
loadErrs.Go(func() error {
if err := p.LoadRoutes(); err != nil {
errs.Add(err.Subject(p.String()))
return err.Subject(p.String())
}
resultsMu.Lock()
results.Addf("%-"+strconv.Itoa(lenLongestName)+"s %d routes", p.String(), p.NumRoutes())
resultsMu.Unlock()
return nil
})
}
providersLoader.Wait()
if err := loadErrs.Wait().Error(); err != nil {
errs.Add(err)
}
state.tmpLog.Info().Msg(results.Wait().String())
state.tmpLog.Info().Msg(results.String())
state.printRoutesByProvider(lenLongestName)
state.printState()
return errs.Wait().Error()

View File

@@ -36,7 +36,7 @@ type (
Docker map[string]types.DockerProviderConfig `json:"docker" yaml:"docker,omitempty" validate:"non_empty_docker_keys"`
Agents []*agent.AgentConfig `json:"agents" yaml:"agents,omitempty"`
Notification []*notif.NotificationConfig `json:"notification" yaml:"notification,omitempty"`
Proxmox []proxmox.Config `json:"proxmox" yaml:"proxmox,omitempty"`
Proxmox []*proxmox.Config `json:"proxmox" yaml:"proxmox,omitempty"`
MaxMind *maxmind.Config `json:"maxmind" yaml:"maxmind,omitempty"`
}
)

View File

@@ -6,11 +6,11 @@ replace github.com/yusing/godoxy => ../..
require (
github.com/go-acme/lego/v4 v4.31.0
github.com/yusing/godoxy v0.24.1
github.com/yusing/godoxy v0.25.0
)
require (
cloud.google.com/go/auth v0.18.0 // indirect
cloud.google.com/go/auth v0.18.1 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.9.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 // indirect
@@ -24,8 +24,8 @@ require (
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/boombuler/barcode v1.1.0 // indirect
github.com/bytedance/gopkg v0.1.3 // indirect
github.com/bytedance/sonic v1.14.2 // indirect
github.com/bytedance/sonic/loader v0.4.0 // indirect
github.com/bytedance/sonic v1.15.0 // indirect
github.com/bytedance/sonic/loader v0.5.0 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cloudwego/base64x v0.1.6 // indirect
@@ -61,18 +61,18 @@ require (
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/maxatome/go-testdeep v1.14.0 // indirect
github.com/miekg/dns v1.1.70 // indirect
github.com/miekg/dns v1.1.72 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/nrdcg/goacmedns v0.2.0 // indirect
github.com/nrdcg/goinwx v0.12.0 // indirect
github.com/nrdcg/oci-go-sdk/common/v1065 v1065.106.0 // indirect
github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.106.0 // indirect
github.com/nrdcg/oci-go-sdk/common/v1065 v1065.106.1 // indirect
github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.106.1 // indirect
github.com/nrdcg/porkbun v0.4.0 // indirect
github.com/ovh/go-ovh v1.9.0 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/pquerna/otp v1.5.0 // indirect
github.com/puzpuzpuz/xsync/v4 v4.3.0 // indirect
github.com/puzpuzpuz/xsync/v4 v4.4.0 // indirect
github.com/rs/zerolog v1.34.0 // indirect
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36 // indirect
github.com/sony/gobreaker v1.0.0 // indirect
@@ -98,8 +98,8 @@ require (
golang.org/x/sys v0.40.0 // indirect
golang.org/x/text v0.33.0 // indirect
golang.org/x/tools v0.41.0 // indirect
google.golang.org/api v0.260.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3 // indirect
google.golang.org/api v0.262.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d // indirect
google.golang.org/grpc v1.78.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/ini.v1 v1.67.1 // indirect

View File

@@ -1,5 +1,5 @@
cloud.google.com/go/auth v0.18.0 h1:wnqy5hrv7p3k7cShwAU/Br3nzod7fxoqG+k0VZ+/Pk0=
cloud.google.com/go/auth v0.18.0/go.mod h1:wwkPM1AgE1f2u6dG443MiWoD8C3BtOywNsUMcUTVDRo=
cloud.google.com/go/auth v0.18.1 h1:IwTEx92GFUo2pJ6Qea0EU3zYvKnTAeRCODxfA/G5UWs=
cloud.google.com/go/auth v0.18.1/go.mod h1:GfTYoS9G3CWpRA3Va9doKN9mjPGRS+v41jmZAhBzbrA=
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
@@ -39,10 +39,10 @@ github.com/boombuler/barcode v1.1.0 h1:ChaYjBR63fr4LFyGn8E8nt7dBSt3MiU3zMOZqFvVk
github.com/boombuler/barcode v1.1.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM=
github.com/bytedance/sonic v1.14.2 h1:k1twIoe97C1DtYUo+fZQy865IuHia4PR5RPiuGPPIIE=
github.com/bytedance/sonic v1.14.2/go.mod h1:T80iDELeHiHKSc0C9tubFygiuXoGzrkjKzX2quAx980=
github.com/bytedance/sonic/loader v0.4.0 h1:olZ7lEqcxtZygCK9EKYKADnpQoYkRQxaeY2NYzevs+o=
github.com/bytedance/sonic/loader v0.4.0/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo=
github.com/bytedance/sonic v1.15.0 h1:/PXeWFaR5ElNcVE84U0dOHjiMHQOwNIx3K4ymzh/uSE=
github.com/bytedance/sonic v1.15.0/go.mod h1:tFkWrPz0/CUCLEF4ri4UkHekCIcdnkqXw9VduqpJh0k=
github.com/bytedance/sonic/loader v0.5.0 h1:gXH3KVnatgY7loH5/TkeVyXPfESoqSBSBEiDd5VjlgE=
github.com/bytedance/sonic/loader v0.5.0/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
@@ -142,18 +142,18 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/maxatome/go-testdeep v1.14.0 h1:rRlLv1+kI8eOI3OaBXZwb3O7xY3exRzdW5QyX48g9wI=
github.com/maxatome/go-testdeep v1.14.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM=
github.com/miekg/dns v1.1.70 h1:DZ4u2AV35VJxdD9Fo9fIWm119BsQL5cZU1cQ9s0LkqA=
github.com/miekg/dns v1.1.70/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs=
github.com/miekg/dns v1.1.72 h1:vhmr+TF2A3tuoGNkLDFK9zi36F2LS+hKTRW0Uf8kbzI=
github.com/miekg/dns v1.1.72/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/nrdcg/goacmedns v0.2.0 h1:ADMbThobzEMnr6kg2ohs4KGa3LFqmgiBA22/6jUWJR0=
github.com/nrdcg/goacmedns v0.2.0/go.mod h1:T5o6+xvSLrQpugmwHvrSNkzWht0UGAwj2ACBMhh73Cg=
github.com/nrdcg/goinwx v0.12.0 h1:ujdUqDBnaRSFwzVnImvPHYw3w3m9XgmGImNUw1GyMb4=
github.com/nrdcg/goinwx v0.12.0/go.mod h1:IrVKd3ZDbFiMjdPgML4CSxZAY9wOoqLvH44zv3NodJ0=
github.com/nrdcg/oci-go-sdk/common/v1065 v1065.106.0 h1:4MRzV6spwPHKct+4/ETqkEtr39Hq+0KvxhsgqbgQ2Bo=
github.com/nrdcg/oci-go-sdk/common/v1065 v1065.106.0/go.mod h1:Gcs8GCaZXL3FdiDWgdnMxlOLEdRprJJnPYB22TX1jw8=
github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.106.0 h1:RxraLVYX3eMUfQ1pDtJVvykEFGheky2YsrUt2HHRDcw=
github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.106.0/go.mod h1:JLMEKMX8IYPZ1TUSVHAVAbtnNSfP/I8OZQkAnfEMA0I=
github.com/nrdcg/oci-go-sdk/common/v1065 v1065.106.1 h1:+fx2mbWeR8XX/vidwpRMepJMtRIYQP44Iezm2oeObVM=
github.com/nrdcg/oci-go-sdk/common/v1065 v1065.106.1/go.mod h1:Gcs8GCaZXL3FdiDWgdnMxlOLEdRprJJnPYB22TX1jw8=
github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.106.1 h1:GDhBiaIAm/QXLzHJ0ASDdY/6R/9w60+gk8lY5rgfxEQ=
github.com/nrdcg/oci-go-sdk/dns/v1065 v1065.106.1/go.mod h1:EHScJdbM0gg5Is7e3C0ceRYAFMMsfP4Vf8sBRoxoTgk=
github.com/nrdcg/porkbun v0.4.0 h1:rWweKlwo1PToQ3H+tEO9gPRW0wzzgmI/Ob3n2Guticw=
github.com/nrdcg/porkbun v0.4.0/go.mod h1:/QMskrHEIM0IhC/wY7iTCUgINsxdT2WcOphktJ9+Q54=
github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE=
@@ -166,8 +166,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs=
github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
github.com/puzpuzpuz/xsync/v4 v4.3.0 h1:w/bWkEJdYuRNYhHn5eXnIT8LzDM1O629X1I9MJSkD7Q=
github.com/puzpuzpuz/xsync/v4 v4.3.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/puzpuzpuz/xsync/v4 v4.4.0 h1:vlSN6/CkEY0pY8KaB0yqo/pCLZvp9nhdbBdjipT4gWo=
github.com/puzpuzpuz/xsync/v4 v4.4.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
@@ -249,14 +249,14 @@ golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/api v0.260.0 h1:XbNi5E6bOVEj/uLXQRlt6TKuEzMD7zvW/6tNwltE4P4=
google.golang.org/api v0.260.0/go.mod h1:Shj1j0Phr/9sloYrKomICzdYgsSDImpTxME8rGLaZ/o=
google.golang.org/api v0.262.0 h1:4B+3u8He2GwyN8St3Jhnd3XRHlIvc//sBmgHSp78oNY=
google.golang.org/api v0.262.0/go.mod h1:jNwmH8BgUBJ/VrUG6/lIl9YiildyLd09r9ZLHiQ6cGI=
google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb0NcTum6itIWtdoglGX+rnGxm2934=
google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0=
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls=
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3 h1:C4WAdL+FbjnGlpp2S+HMVhBeCq2Lcib4xZqfPNF6OoQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d h1:xXzuihhT3gL/ntduUZwHECzAn57E8dA6l8SOtYWdD8Q=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=

View File

@@ -152,7 +152,7 @@ func NewClient(cfg types.DockerProviderConfig, unique ...bool) (*SharedClient, e
if agent.IsDockerHostAgent(host) {
a, ok := agentpool.Get(host)
if !ok {
panic(fmt.Errorf("agent %q not found", host))
return nil, fmt.Errorf("agent %q not found", host)
}
opt = []client.Opt{
client.WithHost(agent.DockerHost),

View File

@@ -100,7 +100,7 @@ func (ep *Entrypoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {
rec := accesslog.GetResponseRecorder(w)
w = rec
defer func() {
ep.accessLogger.Log(r, rec.Response())
ep.accessLogger.LogRequest(r, rec.Response())
accesslog.PutResponseRecorder(rec)
}()
}

internal/go-proxmox Submodule

Submodule internal/go-proxmox added at 7a07c21f07

View File

@@ -26,6 +26,10 @@ const proxmoxStateCheckInterval = 1 * time.Second
var ErrNodeNotFound = gperr.New("node not found in pool")
func NewProxmoxProvider(ctx context.Context, nodeName string, vmid int) (idlewatcher.Provider, error) {
if nodeName == "" || vmid == 0 {
return nil, gperr.New("node name and vmid are required")
}
node, ok := proxmox.Nodes.Get(nodeName)
if !ok {
return nil, ErrNodeNotFound.Subject(nodeName).

View File

@@ -1 +0,0 @@
package idlewatcher

View File

@@ -173,7 +173,7 @@ func NewWatcher(parent task.Parent, r types.Route, cfg *types.IdlewatcherConfig)
}
if !ok {
depRoute, ok = routes.Get(dep)
depRoute, ok = routes.GetIncludeExcluded(dep)
if !ok {
depErrors.Addf("dependency %q not found", dep)
continue

View File

@@ -13,10 +13,9 @@ type ReaderAtSeeker interface {
// BackScanner provides an interface to read a file backward line by line.
type BackScanner struct {
file ReaderAtSeeker
size int64
chunkSize int
chunkBuf []byte
file ReaderAtSeeker
size int64
chunkBuf []byte
offset int64
chunk []byte
@@ -27,16 +26,25 @@ type BackScanner struct {
// NewBackScanner creates a new Scanner to read the file backward.
// chunkSize determines the size of each read chunk from the end of the file.
func NewBackScanner(file ReaderAtSeeker, fileSize int64, chunkSize int) *BackScanner {
return newBackScanner(file, fileSize, make([]byte, chunkSize))
return newBackScanner(file, fileSize, sizedPool.GetSized(chunkSize))
}
func newBackScanner(file ReaderAtSeeker, fileSize int64, buf []byte) *BackScanner {
return &BackScanner{
file: file,
size: fileSize,
offset: fileSize,
chunkSize: len(buf),
chunkBuf: buf,
file: file,
size: fileSize,
offset: fileSize,
chunkBuf: buf,
}
}
// Release releases the buffer back to the pool.
func (s *BackScanner) Release() {
sizedPool.Put(s.chunkBuf)
s.chunkBuf = nil
if s.chunk != nil {
sizedPool.Put(s.chunk)
s.chunk = nil
}
}
@@ -64,13 +72,14 @@ func (s *BackScanner) Scan() bool {
// No more data to read; check remaining buffer
if len(s.chunk) > 0 {
s.line = s.chunk
sizedPool.Put(s.chunk)
s.chunk = nil
return true
}
return false
}
newOffset := max(0, s.offset-int64(s.chunkSize))
newOffset := max(0, s.offset-int64(len(s.chunkBuf)))
chunkSize := s.offset - newOffset
chunk := s.chunkBuf[:chunkSize]
@@ -85,8 +94,19 @@ func (s *BackScanner) Scan() bool {
}
// Prepend the chunk to the buffer
clone := append([]byte{}, chunk[:n]...)
s.chunk = append(clone, s.chunk...)
if s.chunk == nil { // first chunk
s.chunk = sizedPool.GetSized(2 * len(s.chunkBuf))
copy(s.chunk, chunk[:n])
s.chunk = s.chunk[:n]
} else {
neededSize := n + len(s.chunk)
newChunk := sizedPool.GetSized(max(neededSize, 2*len(s.chunkBuf)))
copy(newChunk, chunk[:n])
copy(newChunk[n:], s.chunk)
sizedPool.Put(s.chunk)
s.chunk = newChunk[:neededSize]
}
s.offset = newOffset
// Check for newline in the updated buffer
@@ -111,12 +131,3 @@ func (s *BackScanner) Bytes() []byte {
func (s *BackScanner) Err() error {
return s.err
}
func (s *BackScanner) Reset() error {
_, err := s.file.Seek(0, io.SeekStart)
if err != nil {
return err
}
*s = *newBackScanner(s.file, s.size, s.chunkBuf)
return nil
}
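
A usage sketch of the reworked scanner (not part of this diff), written as if inside the `accesslog` package: chunk buffers now come from the sized pool, so callers release the scanner when done instead of calling the removed `Reset`. The `os` and `fmt` imports and the log path are assumed.

```go
func tailExample(path string) error {
	f, err := os.Open(path) // *os.File satisfies ReaderAtSeeker
	if err != nil {
		return err
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		return err
	}

	s := NewBackScanner(f, st.Size(), 256*1024)
	defer s.Release() // returns the pooled chunk buffers

	for s.Scan() {
		fmt.Println(string(s.Bytes())) // lines are yielded newest-first
	}
	return s.Err()
}
```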

View File

@@ -1,15 +1,17 @@
package accesslog
import (
"bytes"
"fmt"
"math/rand/v2"
"net/http"
"net/http/httptest"
"os"
"strconv"
"strings"
"testing"
"github.com/spf13/afero"
expect "github.com/yusing/goutils/testing"
strutils "github.com/yusing/goutils/strings"
"github.com/yusing/goutils/task"
@@ -135,88 +137,40 @@ func TestBackScannerWithVaryingChunkSizes(t *testing.T) {
}
}
func logEntry() []byte {
var logEntry = func() func() []byte {
accesslog := NewMockAccessLogger(task.RootTask("test", false), &RequestLoggerConfig{
Format: FormatJSON,
})
contentTypes := []string{"application/json", "text/html", "text/plain", "application/xml", "application/x-www-form-urlencoded"}
userAgents := []string{"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/120.0", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/120.0", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/120.0"}
methods := []string{"GET", "POST", "PUT", "DELETE", "PATCH", "OPTIONS", "HEAD"}
paths := []string{"/", "/about", "/contact", "/login", "/logout", "/register", "/profile"}
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, _ = w.Write([]byte("hello"))
allocSize := rand.IntN(8192)
w.Header().Set("Content-Type", contentTypes[rand.IntN(len(contentTypes))])
w.Header().Set("Content-Length", strconv.Itoa(allocSize))
w.WriteHeader(http.StatusOK)
}))
srv.URL = "http://localhost:8080"
defer srv.Close()
// make a request to the server
req, _ := http.NewRequest(http.MethodGet, srv.URL, nil)
res := httptest.NewRecorder()
// server the request
srv.Config.Handler.ServeHTTP(res, req)
b := accesslog.(RequestFormatter).AppendRequestLog(nil, req, res.Result())
if b[len(b)-1] != '\n' {
b = append(b, '\n')
}
return b
}
func TestReset(t *testing.T) {
file, err := afero.TempFile(afero.NewOsFs(), "", "accesslog")
if err != nil {
t.Fatalf("failed to create temp file: %v", err)
return func() []byte {
// make a request to the server
req, _ := http.NewRequest(http.MethodGet, srv.URL, nil)
res := httptest.NewRecorder()
req.Header.Set("User-Agent", userAgents[rand.IntN(len(userAgents))])
req.Method = methods[rand.IntN(len(methods))]
req.URL.Path = paths[rand.IntN(len(paths))]
// server the request
srv.Config.Handler.ServeHTTP(res, req)
b := bytes.NewBuffer(make([]byte, 0, 1024))
accesslog.(RequestFormatter).AppendRequestLog(b, req, res.Result())
return b.Bytes()
}
defer os.Remove(file.Name())
line := logEntry()
nLines := 1000
for range nLines {
_, err := file.Write(line)
if err != nil {
t.Fatalf("failed to write to temp file: %v", err)
}
}
linesRead := 0
stat, _ := file.Stat()
s := NewBackScanner(file, stat.Size(), defaultChunkSize)
for s.Scan() {
linesRead++
}
if err := s.Err(); err != nil {
t.Errorf("scanner error: %v", err)
}
expect.Equal(t, linesRead, nLines)
err = s.Reset()
if err != nil {
t.Errorf("failed to reset scanner: %v", err)
}
linesRead = 0
for s.Scan() {
linesRead++
}
if err := s.Err(); err != nil {
t.Errorf("scanner error: %v", err)
}
expect.Equal(t, linesRead, nLines)
}
}()
// 100000 log entries.
func BenchmarkBackScanner(b *testing.B) {
mockFile := NewMockFile(false)
line := logEntry()
for range 100000 {
_, _ = mockFile.Write(line)
}
for i := range 14 {
chunkSize := (2 << i) * kilobyte
scanner := NewBackScanner(mockFile, mockFile.MustSize(), chunkSize)
name := strutils.FormatByteSize(chunkSize)
b.ResetTimer()
b.Run(name, func(b *testing.B) {
for b.Loop() {
_ = scanner.Reset()
for scanner.Scan() {
}
}
})
}
}
func BenchmarkBackScannerRealFile(b *testing.B) {
file, err := afero.TempFile(afero.NewOsFs(), "", "accesslog")
if err != nil {
@@ -224,51 +178,58 @@ func BenchmarkBackScannerRealFile(b *testing.B) {
}
defer os.Remove(file.Name())
for range 10000 {
_, err = file.Write(logEntry())
if err != nil {
b.Fatalf("failed to write to temp file: %v", err)
}
buf := bytes.NewBuffer(nil)
for range 100000 {
buf.Write(logEntry())
}
stat, _ := file.Stat()
scanner := NewBackScanner(file, stat.Size(), 256*kilobyte)
b.ResetTimer()
for scanner.Scan() {
fSize := int64(buf.Len())
_, err = file.Write(buf.Bytes())
if err != nil {
b.Fatalf("failed to write to file: %v", err)
}
if err := scanner.Err(); err != nil {
b.Errorf("scanner error: %v", err)
// file position does not matter, Seek not needed
for i := range 12 {
chunkSize := (2 << i) * kilobyte
name := strutils.FormatByteSize(chunkSize)
b.ResetTimer()
b.Run(name, func(b *testing.B) {
for b.Loop() {
scanner := NewBackScanner(file, fSize, chunkSize)
for scanner.Scan() {
}
scanner.Release()
}
})
}
}
/*
BenchmarkBackScanner
BenchmarkBackScanner/2_KiB
BenchmarkBackScanner/2_KiB-20 52 23254071 ns/op 67596663 B/op 26420 allocs/op
BenchmarkBackScanner/4_KiB
BenchmarkBackScanner/4_KiB-20 55 20961059 ns/op 62529378 B/op 13211 allocs/op
BenchmarkBackScanner/8_KiB
BenchmarkBackScanner/8_KiB-20 64 18242460 ns/op 62951141 B/op 6608 allocs/op
BenchmarkBackScanner/16_KiB
BenchmarkBackScanner/16_KiB-20 52 20162076 ns/op 62940256 B/op 3306 allocs/op
BenchmarkBackScanner/32_KiB
BenchmarkBackScanner/32_KiB-20 54 19247968 ns/op 67553645 B/op 1656 allocs/op
BenchmarkBackScanner/64_KiB
BenchmarkBackScanner/64_KiB-20 60 20909046 ns/op 64053342 B/op 827 allocs/op
BenchmarkBackScanner/128_KiB
BenchmarkBackScanner/128_KiB-20 68 17759890 ns/op 62201945 B/op 414 allocs/op
BenchmarkBackScanner/256_KiB
BenchmarkBackScanner/256_KiB-20 52 19531877 ns/op 61030487 B/op 208 allocs/op
BenchmarkBackScanner/512_KiB
BenchmarkBackScanner/512_KiB-20 54 19124656 ns/op 61030485 B/op 208 allocs/op
BenchmarkBackScanner/1_MiB
BenchmarkBackScanner/1_MiB-20 67 17078936 ns/op 61030495 B/op 208 allocs/op
BenchmarkBackScanner/2_MiB
BenchmarkBackScanner/2_MiB-20 66 18467421 ns/op 61030492 B/op 208 allocs/op
BenchmarkBackScanner/4_MiB
BenchmarkBackScanner/4_MiB-20 68 17214573 ns/op 61030486 B/op 208 allocs/op
BenchmarkBackScanner/8_MiB
BenchmarkBackScanner/8_MiB-20 57 18235229 ns/op 61030492 B/op 208 allocs/op
BenchmarkBackScanner/16_MiB
BenchmarkBackScanner/16_MiB-20 57 19343441 ns/op 61030499 B/op 208 allocs/op
BenchmarkBackScannerRealFile
BenchmarkBackScannerRealFile/2_KiB
BenchmarkBackScannerRealFile/2_KiB-10 21 51796773 ns/op 619 B/op 1 allocs/op
BenchmarkBackScannerRealFile/4_KiB
BenchmarkBackScannerRealFile/4_KiB-10 36 32081281 ns/op 699 B/op 1 allocs/op
BenchmarkBackScannerRealFile/8_KiB
BenchmarkBackScannerRealFile/8_KiB-10 57 22155619 ns/op 847 B/op 1 allocs/op
BenchmarkBackScannerRealFile/16_KiB
BenchmarkBackScannerRealFile/16_KiB-10 62 21323125 ns/op 1449 B/op 1 allocs/op
BenchmarkBackScannerRealFile/32_KiB
BenchmarkBackScannerRealFile/32_KiB-10 63 17534883 ns/op 2729 B/op 1 allocs/op
BenchmarkBackScannerRealFile/64_KiB
BenchmarkBackScannerRealFile/64_KiB-10 73 17877029 ns/op 4617 B/op 1 allocs/op
BenchmarkBackScannerRealFile/128_KiB
BenchmarkBackScannerRealFile/128_KiB-10 75 17797267 ns/op 8866 B/op 1 allocs/op
BenchmarkBackScannerRealFile/256_KiB
BenchmarkBackScannerRealFile/256_KiB-10 67 16732108 ns/op 19691 B/op 1 allocs/op
BenchmarkBackScannerRealFile/512_KiB
BenchmarkBackScannerRealFile/512_KiB-10 70 17121683 ns/op 37577 B/op 1 allocs/op
BenchmarkBackScannerRealFile/1_MiB
BenchmarkBackScannerRealFile/1_MiB-10 51 19615791 ns/op 102930 B/op 1 allocs/op
BenchmarkBackScannerRealFile/2_MiB
BenchmarkBackScannerRealFile/2_MiB-10 26 41744928 ns/op 77595287 B/op 57 allocs/op
BenchmarkBackScannerRealFile/4_MiB
BenchmarkBackScannerRealFile/4_MiB-10 22 48081521 ns/op 79692224 B/op 49 allocs/op
*/

View File

@@ -1,6 +1,7 @@
package accesslog
import (
"net/http"
"time"
"github.com/yusing/godoxy/internal/serialization"
@@ -9,16 +10,15 @@ import (
type (
ConfigBase struct {
B int `json:"buffer_size"` // Deprecated: buffer size is adjusted dynamically
Path string `json:"path"`
Path string `json:"path,omitempty"`
Stdout bool `json:"stdout"`
Retention *Retention `json:"retention" aliases:"keep"`
RotateInterval time.Duration `json:"rotate_interval,omitempty" swaggertype:"primitive,integer"`
}
} // @name AccessLoggerConfigBase
ACLLoggerConfig struct {
ConfigBase
LogAllowed bool `json:"log_allowed"`
}
} // @name ACLLoggerConfig
RequestLoggerConfig struct {
ConfigBase
Format Format `json:"format" validate:"oneof=common combined json"`
@@ -32,21 +32,21 @@ type (
}
AnyConfig interface {
ToConfig() *Config
Writers() ([]Writer, error)
Writers() ([]File, error)
}
Format string
Filters struct {
StatusCodes LogFilter[*StatusCodeRange] `json:"status_codes"`
Method LogFilter[HTTPMethod] `json:"method"`
Host LogFilter[Host] `json:"host"`
Headers LogFilter[*HTTPHeader] `json:"headers"` // header exists or header == value
CIDR LogFilter[*CIDR] `json:"cidr"`
StatusCodes LogFilter[*StatusCodeRange] `json:"status_codes,omitzero"`
Method LogFilter[HTTPMethod] `json:"method,omitzero"`
Host LogFilter[Host] `json:"host,omitzero"`
Headers LogFilter[*HTTPHeader] `json:"headers,omitzero"` // header exists or header == value
CIDR LogFilter[*CIDR] `json:"cidr,omitzero"`
}
Fields struct {
Headers FieldConfig `json:"headers" aliases:"header"`
Query FieldConfig `json:"query" aliases:"queries"`
Cookies FieldConfig `json:"cookies" aliases:"cookie"`
Headers FieldConfig `json:"headers,omitzero" aliases:"header"`
Query FieldConfig `json:"query,omitzero" aliases:"queries"`
Cookies FieldConfig `json:"cookies,omitzero" aliases:"cookie"`
}
)
@@ -66,17 +66,17 @@ func (cfg *ConfigBase) Validate() gperr.Error {
}
// Writers returns a list of writers for the config.
func (cfg *ConfigBase) Writers() ([]Writer, error) {
writers := make([]Writer, 0, 2)
func (cfg *ConfigBase) Writers() ([]File, error) {
writers := make([]File, 0, 2)
if cfg.Path != "" {
io, err := NewFileIO(cfg.Path)
f, err := OpenFile(cfg.Path)
if err != nil {
return nil, err
}
writers = append(writers, io)
writers = append(writers, f)
}
if cfg.Stdout {
writers = append(writers, NewStdout())
writers = append(writers, stdout)
}
return writers, nil
}
@@ -95,6 +95,16 @@ func (cfg *RequestLoggerConfig) ToConfig() *Config {
}
}
func (cfg *Config) ShouldLogRequest(req *http.Request, res *http.Response) bool {
if cfg.req == nil {
return true
}
return cfg.req.Filters.StatusCodes.CheckKeep(req, res) &&
cfg.req.Filters.Method.CheckKeep(req, res) &&
cfg.req.Filters.Headers.CheckKeep(req, res) &&
cfg.req.Filters.CIDR.CheckKeep(req, res)
}
func DefaultRequestLoggerConfig() *RequestLoggerConfig {
return &RequestLoggerConfig{
ConfigBase: ConfigBase{

View File

@@ -0,0 +1,73 @@
package accesslog
import (
"net/http"
"os"
"github.com/rs/zerolog"
maxmind "github.com/yusing/godoxy/internal/maxmind/types"
)
type ConsoleLogger struct {
cfg *Config
formatter ConsoleFormatter
}
var stdoutLogger = func() *zerolog.Logger {
l := zerolog.New(zerolog.NewConsoleWriter(func(w *zerolog.ConsoleWriter) {
w.Out = os.Stdout
w.TimeFormat = zerolog.TimeFieldFormat
w.FieldsOrder = []string{
"uri", "protocol", "type", "size",
"useragent", "query", "headers", "cookies",
"error", "iso_code", "time_zone"}
})).With().Str("level", zerolog.InfoLevel.String()).Timestamp().Logger()
return &l
}()
// placeholder for console logger
var stdout File = &sharedFileHandle{}
func NewConsoleLogger(cfg *Config) AccessLogger {
if cfg == nil {
panic("accesslog: NewConsoleLogger called with nil config")
}
l := &ConsoleLogger{
cfg: cfg,
}
if cfg.req != nil {
l.formatter = ConsoleFormatter{cfg: &cfg.req.Fields}
}
return l
}
func (l *ConsoleLogger) Config() *Config {
return l.cfg
}
func (l *ConsoleLogger) LogRequest(req *http.Request, res *http.Response) {
if !l.cfg.ShouldLogRequest(req, res) {
return
}
l.formatter.LogRequestZeroLog(stdoutLogger, req, res)
}
func (l *ConsoleLogger) LogError(req *http.Request, err error) {
log := stdoutLogger.With().Err(err).Logger()
l.formatter.LogRequestZeroLog(&log, req, internalErrorResponse)
}
func (l *ConsoleLogger) LogACL(info *maxmind.IPInfo, blocked bool) {
ConsoleACLFormatter{}.LogACLZeroLog(stdoutLogger, info, blocked)
}
func (l *ConsoleLogger) Flush() {
// No-op for console logger
}
func (l *ConsoleLogger) Close() error {
// No-op for console logger
return nil
}

View File

@@ -20,25 +20,20 @@ import (
)
type (
AccessLogger interface {
Log(req *http.Request, res *http.Response)
LogError(req *http.Request, err error)
LogACL(info *maxmind.IPInfo, blocked bool)
Config() *Config
Flush()
Close() error
File interface {
io.WriteCloser
supportRotate
Name() string
}
accessLogger struct {
fileAccessLogger struct {
task *task.Task
cfg *Config
writer BufferedWriter
supportRotate SupportRotate
writeLock *sync.Mutex
closed bool
writer BufferedWriter
file File
writeLock *sync.Mutex
closed bool
writeCount int64
bufSize int
@@ -48,32 +43,7 @@ type (
logger zerolog.Logger
RequestFormatter
ACLFormatter
}
Writer interface {
io.WriteCloser
ShouldBeBuffered() bool
Name() string // file name or path
}
SupportRotate interface {
io.Writer
supportRotate
Name() string
}
AccessLogRotater interface {
Rotate(result *RotateResult) (rotated bool, err error)
}
RequestFormatter interface {
// AppendRequestLog appends a log line to line with or without a trailing newline
AppendRequestLog(line []byte, req *http.Request, res *http.Response) []byte
}
ACLFormatter interface {
// AppendACLLog appends a log line to line with or without a trailing newline
AppendACLLog(line []byte, info *maxmind.IPInfo, blocked bool) []byte
ACLLogFormatter
}
)
@@ -96,112 +66,87 @@ const (
var bytesPool = synk.GetUnsizedBytesPool()
var sizedPool = synk.GetSizedBytesPool()
func NewAccessLogger(parent task.Parent, cfg AnyConfig) (AccessLogger, error) {
writers, err := cfg.Writers()
if err != nil {
return nil, err
}
return NewMultiAccessLogger(parent, cfg, writers), nil
}
func NewMockAccessLogger(parent task.Parent, cfg *RequestLoggerConfig) AccessLogger {
return NewAccessLoggerWithIO(parent, NewMockFile(true), cfg)
}
func NewAccessLoggerWithIO(parent task.Parent, writer Writer, anyCfg AnyConfig) AccessLogger {
func NewFileAccessLogger(parent task.Parent, file File, anyCfg AnyConfig) AccessLogger {
cfg := anyCfg.ToConfig()
if cfg.RotateInterval == 0 {
cfg.RotateInterval = defaultRotateInterval
}
l := &accessLogger{
task: parent.Subtask("accesslog."+writer.Name(), true),
name := file.Name()
l := &fileAccessLogger{
task: parent.Subtask("accesslog."+name, true),
cfg: cfg,
bufSize: InitialBufferSize,
errRateLimiter: rate.NewLimiter(rate.Every(errRateLimit), errBurst),
logger: log.With().Str("file", writer.Name()).Logger(),
logger: log.With().Str("file", name).Logger(),
}
l.writeLock, _ = writerLocks.LoadOrStore(writer.Name(), &sync.Mutex{})
l.writeLock, _ = writerLocks.LoadOrStore(name, &sync.Mutex{})
if writer.ShouldBeBuffered() {
l.writer = ioutils.NewBufferedWriter(writer, InitialBufferSize)
} else {
l.writer = NewUnbufferedWriter(writer)
}
if supportRotate, ok := writer.(SupportRotate); ok {
l.supportRotate = supportRotate
}
l.writer = ioutils.NewBufferedWriter(file, InitialBufferSize)
l.file = file
if cfg.req != nil {
fmt := CommonFormatter{cfg: &cfg.req.Fields}
switch cfg.req.Format {
case FormatCommon:
l.RequestFormatter = &fmt
l.RequestFormatter = CommonFormatter{cfg: &cfg.req.Fields}
case FormatCombined:
l.RequestFormatter = &CombinedFormatter{fmt}
l.RequestFormatter = CombinedFormatter{CommonFormatter{cfg: &cfg.req.Fields}}
case FormatJSON:
l.RequestFormatter = &JSONFormatter{fmt}
l.RequestFormatter = JSONFormatter{cfg: &cfg.req.Fields}
default: // should not happen, validation is done by the validate tags
panic("invalid access log format")
}
} else {
l.ACLFormatter = ACLLogFormatter{}
}
go l.start()
return l
}
func (l *accessLogger) Config() *Config {
func (l *fileAccessLogger) Config() *Config {
return l.cfg
}
func (l *accessLogger) shouldLog(req *http.Request, res *http.Response) bool {
if !l.cfg.req.Filters.StatusCodes.CheckKeep(req, res) ||
!l.cfg.req.Filters.Method.CheckKeep(req, res) ||
!l.cfg.req.Filters.Headers.CheckKeep(req, res) ||
!l.cfg.req.Filters.CIDR.CheckKeep(req, res) {
return false
}
return true
}
func (l *accessLogger) Log(req *http.Request, res *http.Response) {
if !l.shouldLog(req, res) {
func (l *fileAccessLogger) LogRequest(req *http.Request, res *http.Response) {
if !l.cfg.ShouldLogRequest(req, res) {
return
}
line := bytesPool.Get()
line = l.AppendRequestLog(line, req, res)
if line[len(line)-1] != '\n' {
line = append(line, '\n')
line := bytesPool.GetBuffer()
defer bytesPool.PutBuffer(line)
l.AppendRequestLog(line, req, res)
// line is never empty
if line.Bytes()[line.Len()-1] != '\n' {
line.WriteByte('\n')
}
l.write(line)
bytesPool.Put(line)
l.write(line.Bytes())
}
func (l *accessLogger) LogError(req *http.Request, err error) {
l.Log(req, &http.Response{StatusCode: http.StatusInternalServerError, Status: err.Error()})
var internalErrorResponse = &http.Response{
StatusCode: http.StatusInternalServerError,
Status: http.StatusText(http.StatusInternalServerError),
}
func (l *accessLogger) LogACL(info *maxmind.IPInfo, blocked bool) {
line := bytesPool.Get()
line = l.AppendACLLog(line, info, blocked)
if line[len(line)-1] != '\n' {
line = append(line, '\n')
func (l *fileAccessLogger) LogError(req *http.Request, err error) {
l.LogRequest(req, internalErrorResponse)
}
func (l *fileAccessLogger) LogACL(info *maxmind.IPInfo, blocked bool) {
line := bytesPool.GetBuffer()
defer bytesPool.PutBuffer(line)
l.AppendACLLog(line, info, blocked)
// line is never empty
if line.Bytes()[line.Len()-1] != '\n' {
line.WriteByte('\n')
}
l.write(line)
bytesPool.Put(line)
l.write(line.Bytes())
}
func (l *accessLogger) ShouldRotate() bool {
return l.supportRotate != nil && l.cfg.Retention.IsValid()
func (l *fileAccessLogger) ShouldRotate() bool {
return l.cfg.Retention.IsValid()
}
func (l *accessLogger) Rotate(result *RotateResult) (rotated bool, err error) {
func (l *fileAccessLogger) Rotate(result *RotateResult) (rotated bool, err error) {
if !l.ShouldRotate() {
return false, nil
}
@@ -210,11 +155,11 @@ func (l *accessLogger) Rotate(result *RotateResult) (rotated bool, err error) {
l.writeLock.Lock()
defer l.writeLock.Unlock()
rotated, err = rotateLogFile(l.supportRotate, l.cfg.Retention, result)
rotated, err = rotateLogFile(l.file, l.cfg.Retention, result)
return
}
func (l *accessLogger) handleErr(err error) {
func (l *fileAccessLogger) handleErr(err error) {
if l.errRateLimiter.Allow() {
gperr.LogError("failed to write access log", err, &l.logger)
} else {
@@ -223,7 +168,7 @@ func (l *accessLogger) handleErr(err error) {
}
}
func (l *accessLogger) start() {
func (l *fileAccessLogger) start() {
defer func() {
l.Flush()
l.Close()
@@ -259,7 +204,7 @@ func (l *accessLogger) start() {
}
}
func (l *accessLogger) Close() error {
func (l *fileAccessLogger) Close() error {
l.writeLock.Lock()
defer l.writeLock.Unlock()
if l.closed {
@@ -270,7 +215,7 @@ func (l *accessLogger) Close() error {
return l.writer.Close()
}
func (l *accessLogger) Flush() {
func (l *fileAccessLogger) Flush() {
l.writeLock.Lock()
defer l.writeLock.Unlock()
if l.closed {
@@ -279,7 +224,7 @@ func (l *accessLogger) Flush() {
l.writer.Flush()
}
func (l *accessLogger) write(data []byte) {
func (l *fileAccessLogger) write(data []byte) {
l.writeLock.Lock()
defer l.writeLock.Unlock()
if l.closed {
@@ -294,7 +239,7 @@ func (l *accessLogger) write(data []byte) {
atomic.AddInt64(&l.writeCount, int64(n))
}
func (l *accessLogger) adjustBuffer() {
func (l *fileAccessLogger) adjustBuffer() {
wps := int(atomic.SwapInt64(&l.writeCount, 0)) / int(bufferAdjustInterval.Seconds())
origBufSize := l.bufSize
newBufSize := origBufSize

View File

@@ -1,6 +1,7 @@
package accesslog_test
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
@@ -53,13 +54,13 @@ var (
)
func fmtLog(cfg *RequestLoggerConfig) (ts string, line string) {
buf := make([]byte, 0, 1024)
buf := bytes.NewBuffer(make([]byte, 0, 1024))
t := time.Now()
logger := NewMockAccessLogger(testTask, cfg)
mockable.MockTimeNow(t)
buf = logger.(RequestFormatter).AppendRequestLog(buf, req, resp)
return t.Format(LogTimeFormat), string(buf)
logger.(RequestFormatter).AppendRequestLog(buf, req, resp)
return t.Format(LogTimeFormat), buf.String()
}
func TestAccessLoggerCommon(t *testing.T) {
@@ -141,9 +142,6 @@ func TestAccessLoggerJSON(t *testing.T) {
expect.Equal(t, entry.UserAgent, ua)
expect.Equal(t, len(entry.Headers), 0)
expect.Equal(t, len(entry.Cookies), 0)
if status >= 400 {
expect.Equal(t, entry.Error, http.StatusText(status))
}
}
func BenchmarkAccessLoggerJSON(b *testing.B) {
@@ -152,7 +150,7 @@ func BenchmarkAccessLoggerJSON(b *testing.B) {
logger := NewMockAccessLogger(testTask, config)
b.ResetTimer()
for b.Loop() {
logger.Log(req, resp)
logger.LogRequest(req, resp)
}
}
@@ -162,6 +160,6 @@ func BenchmarkAccessLoggerCombined(b *testing.B) {
logger := NewMockAccessLogger(testTask, config)
b.ResetTimer()
for b.Loop() {
logger.Log(req, resp)
logger.LogRequest(req, resp)
}
}

View File

@@ -16,9 +16,11 @@ type (
CommonFormatter struct {
cfg *Fields
}
CombinedFormatter struct{ CommonFormatter }
JSONFormatter struct{ CommonFormatter }
ACLLogFormatter struct{}
CombinedFormatter struct{ CommonFormatter }
JSONFormatter struct{ cfg *Fields }
ConsoleFormatter struct{ cfg *Fields }
ACLLogFormatter struct{}
ConsoleACLFormatter struct{}
)
const LogTimeFormat = "02/Jan/2006:15:04:05 -0700"
@@ -30,24 +32,26 @@ func scheme(req *http.Request) string {
return "http"
}
func appendRequestURI(line []byte, req *http.Request, query iter.Seq2[string, []string]) []byte {
func appendRequestURI(line *bytes.Buffer, req *http.Request, query iter.Seq2[string, []string]) {
uri := req.URL.EscapedPath()
line = append(line, uri...)
line.WriteString(uri)
isFirst := true
for k, v := range query {
if isFirst {
line = append(line, '?')
line.WriteByte('?')
isFirst = false
} else {
line = append(line, '&')
line.WriteByte('&')
}
line = append(line, k...)
line = append(line, '=')
for _, v := range v {
line = append(line, v...)
for i, val := range v {
if i > 0 {
line.WriteByte('&')
}
line.WriteString(k)
line.WriteByte('=')
line.WriteString(val)
}
}
return line
}
func clientIP(req *http.Request) string {
@@ -58,50 +62,51 @@ func clientIP(req *http.Request) string {
return req.RemoteAddr
}
func (f *CommonFormatter) AppendRequestLog(line []byte, req *http.Request, res *http.Response) []byte {
func (f CommonFormatter) AppendRequestLog(line *bytes.Buffer, req *http.Request, res *http.Response) {
query := f.cfg.Query.IterQuery(req.URL.Query())
line = append(line, req.Host...)
line = append(line, ' ')
line.WriteString(req.Host)
line.WriteByte(' ')
line = append(line, clientIP(req)...)
line = append(line, " - - ["...)
line.WriteString(clientIP(req))
line.WriteString(" - - [")
line = mockable.TimeNow().AppendFormat(line, LogTimeFormat)
line = append(line, `] "`...)
line.WriteString(mockable.TimeNow().Format(LogTimeFormat))
line.WriteString("] \"")
line = append(line, req.Method...)
line = append(line, ' ')
line = appendRequestURI(line, req, query)
line = append(line, ' ')
line = append(line, req.Proto...)
line = append(line, '"')
line = append(line, ' ')
line.WriteString(req.Method)
line.WriteByte(' ')
appendRequestURI(line, req, query)
line.WriteByte(' ')
line.WriteString(req.Proto)
line.WriteByte('"')
line.WriteByte(' ')
line = strconv.AppendInt(line, int64(res.StatusCode), 10)
line = append(line, ' ')
line = strconv.AppendInt(line, res.ContentLength, 10)
return line
line.WriteString(strconv.FormatInt(int64(res.StatusCode), 10))
line.WriteByte(' ')
line.WriteString(strconv.FormatInt(res.ContentLength, 10))
}
func (f *CombinedFormatter) AppendRequestLog(line []byte, req *http.Request, res *http.Response) []byte {
line = f.CommonFormatter.AppendRequestLog(line, req, res)
line = append(line, " \""...)
line = append(line, req.Referer()...)
line = append(line, "\" \""...)
line = append(line, req.UserAgent()...)
line = append(line, '"')
return line
func (f CombinedFormatter) AppendRequestLog(line *bytes.Buffer, req *http.Request, res *http.Response) {
f.CommonFormatter.AppendRequestLog(line, req, res)
line.WriteString(" \"")
line.WriteString(req.Referer())
line.WriteString("\" \"")
line.WriteString(req.UserAgent())
line.WriteByte('"')
}
func (f *JSONFormatter) AppendRequestLog(line []byte, req *http.Request, res *http.Response) []byte {
func (f JSONFormatter) AppendRequestLog(line *bytes.Buffer, req *http.Request, res *http.Response) {
logger := zerolog.New(line)
f.LogRequestZeroLog(&logger, req, res)
}
func (f JSONFormatter) LogRequestZeroLog(logger *zerolog.Logger, req *http.Request, res *http.Response) {
query := f.cfg.Query.ZerologQuery(req.URL.Query())
headers := f.cfg.Headers.ZerologHeaders(req.Header)
cookies := f.cfg.Cookies.ZerologCookies(req.Cookies())
contentType := res.Header.Get("Content-Type")
writer := bytes.NewBuffer(line)
logger := zerolog.New(writer)
event := logger.Info().
Str("time", mockable.TimeNow().Format(LogTimeFormat)).
Str("ip", clientIP(req)).
@@ -119,22 +124,33 @@ func (f *JSONFormatter) AppendRequestLog(line []byte, req *http.Request, res *ht
Object("headers", headers).
Object("cookies", cookies)
if res.StatusCode >= 400 {
if res.Status != "" {
event.Str("error", res.Status)
} else {
event.Str("error", http.StatusText(res.StatusCode))
}
}
// NOTE: zerolog will append a newline to the buffer
event.Send()
return writer.Bytes()
}
func (f ACLLogFormatter) AppendACLLog(line []byte, info *maxmind.IPInfo, blocked bool) []byte {
writer := bytes.NewBuffer(line)
logger := zerolog.New(writer)
func (f ConsoleFormatter) LogRequestZeroLog(logger *zerolog.Logger, req *http.Request, res *http.Response) {
contentType := res.Header.Get("Content-Type")
var reqURI bytes.Buffer
appendRequestURI(&reqURI, req, f.cfg.Query.IterQuery(req.URL.Query()))
event := logger.Info().
Bytes("uri", reqURI.Bytes()).
Str("protocol", req.Proto).
Str("type", contentType).
Int64("size", res.ContentLength).
Str("useragent", req.UserAgent())
// NOTE: zerolog will append a newline to the buffer
event.Msgf("[%d] %s %s://%s from %s", res.StatusCode, req.Method, scheme(req), req.Host, clientIP(req))
}
func (f ACLLogFormatter) AppendACLLog(line *bytes.Buffer, info *maxmind.IPInfo, blocked bool) {
logger := zerolog.New(line)
f.LogACLZeroLog(&logger, info, blocked)
}
func (f ACLLogFormatter) LogACLZeroLog(logger *zerolog.Logger, info *maxmind.IPInfo, blocked bool) {
event := logger.Info().
Str("time", mockable.TimeNow().Format(LogTimeFormat)).
Str("ip", info.Str)
@@ -144,10 +160,32 @@ func (f ACLLogFormatter) AppendACLLog(line []byte, info *maxmind.IPInfo, blocked
event.Str("action", "allow")
}
if info.City != nil {
event.Str("iso_code", info.City.Country.IsoCode)
event.Str("time_zone", info.City.Location.TimeZone)
if isoCode := info.City.Country.IsoCode; isoCode != "" {
event.Str("iso_code", isoCode)
}
if timeZone := info.City.Location.TimeZone; timeZone != "" {
event.Str("time_zone", timeZone)
}
}
// NOTE: zerolog will append a newline to the buffer
event.Send()
return writer.Bytes()
}
func (f ConsoleACLFormatter) LogACLZeroLog(logger *zerolog.Logger, info *maxmind.IPInfo, blocked bool) {
event := logger.Info()
if info.City != nil {
if isoCode := info.City.Country.IsoCode; isoCode != "" {
event.Str("iso_code", isoCode)
}
if timeZone := info.City.Location.TimeZone; timeZone != "" {
event.Str("time_zone", timeZone)
}
}
action := "accepted"
if blocked {
action = "denied"
}
// NOTE: zerolog will append a newline to the buffer
event.Msgf("request %s from %s", action, info.Str)
}

View File

@@ -13,7 +13,7 @@ type MockFile struct {
buffered bool
}
var _ SupportRotate = (*MockFile)(nil)
var _ File = (*MockFile)(nil)
func NewMockFile(buffered bool) *MockFile {
f, _ := afero.TempFile(afero.NewMemMapFs(), "", "")
@@ -52,14 +52,9 @@ func (m *MockFile) NumLines() int {
return count
}
func (m *MockFile) Size() (int64, error) {
stat, _ := m.Stat()
return stat.Size(), nil
}
func (m *MockFile) MustSize() int64 {
size, _ := m.Size()
return size
stat, _ := m.Stat()
return stat.Size()
}
func (m *MockFile) Close() error {

View File

@@ -15,14 +15,21 @@ type MultiAccessLogger struct {
//
// If there is only one writer, it will return a single AccessLogger.
// Otherwise, it will return a MultiAccessLogger that writes to all the writers.
func NewMultiAccessLogger(parent task.Parent, cfg AnyConfig, writers []Writer) AccessLogger {
func NewMultiAccessLogger(parent task.Parent, cfg AnyConfig, writers []File) AccessLogger {
if len(writers) == 1 {
return NewAccessLoggerWithIO(parent, writers[0], cfg)
if writers[0] == stdout {
return NewConsoleLogger(cfg.ToConfig())
}
return NewFileAccessLogger(parent, writers[0], cfg)
}
accessLoggers := make([]AccessLogger, len(writers))
for i, writer := range writers {
accessLoggers[i] = NewAccessLoggerWithIO(parent, writer, cfg)
if writer == stdout {
accessLoggers[i] = NewConsoleLogger(cfg.ToConfig())
} else {
accessLoggers[i] = NewFileAccessLogger(parent, writer, cfg)
}
}
return &MultiAccessLogger{accessLoggers}
}
@@ -31,9 +38,9 @@ func (m *MultiAccessLogger) Config() *Config {
return m.accessLoggers[0].Config()
}
func (m *MultiAccessLogger) Log(req *http.Request, res *http.Response) {
func (m *MultiAccessLogger) LogRequest(req *http.Request, res *http.Response) {
for _, accessLogger := range m.accessLoggers {
accessLogger.Log(req, res)
accessLogger.LogRequest(req, res)
}
}

View File

@@ -16,7 +16,7 @@ func TestNewMultiAccessLogger(t *testing.T) {
testTask := task.RootTask("test", false)
cfg := DefaultRequestLoggerConfig()
writers := []Writer{
writers := []File{
NewMockFile(true),
NewMockFile(true),
}
@@ -30,7 +30,7 @@ func TestMultiAccessLoggerConfig(t *testing.T) {
cfg := DefaultRequestLoggerConfig()
cfg.Format = FormatCommon
writers := []Writer{
writers := []File{
NewMockFile(true),
NewMockFile(true),
}
@@ -48,7 +48,7 @@ func TestMultiAccessLoggerLog(t *testing.T) {
writer1 := NewMockFile(true)
writer2 := NewMockFile(true)
writers := []Writer{writer1, writer2}
writers := []File{writer1, writer2}
logger := NewMultiAccessLogger(testTask, cfg, writers)
@@ -68,7 +68,7 @@ func TestMultiAccessLoggerLog(t *testing.T) {
ContentLength: 100,
}
logger.Log(req, resp)
logger.LogRequest(req, resp)
logger.Flush()
expect.Equal(t, writer1.NumLines(), 1)
@@ -81,7 +81,7 @@ func TestMultiAccessLoggerLogError(t *testing.T) {
writer1 := NewMockFile(true)
writer2 := NewMockFile(true)
writers := []Writer{writer1, writer2}
writers := []File{writer1, writer2}
logger := NewMultiAccessLogger(testTask, cfg, writers)
@@ -107,7 +107,7 @@ func TestMultiAccessLoggerLogACL(t *testing.T) {
writer1 := NewMockFile(true)
writer2 := NewMockFile(true)
writers := []Writer{writer1, writer2}
writers := []File{writer1, writer2}
logger := NewMultiAccessLogger(testTask, cfg, writers)
@@ -129,7 +129,7 @@ func TestMultiAccessLoggerFlush(t *testing.T) {
writer1 := NewMockFile(true)
writer2 := NewMockFile(true)
writers := []Writer{writer1, writer2}
writers := []File{writer1, writer2}
logger := NewMultiAccessLogger(testTask, cfg, writers)
@@ -143,7 +143,7 @@ func TestMultiAccessLoggerFlush(t *testing.T) {
StatusCode: http.StatusOK,
}
logger.Log(req, resp)
logger.LogRequest(req, resp)
logger.Flush()
expect.Equal(t, writer1.NumLines(), 1)
@@ -156,7 +156,7 @@ func TestMultiAccessLoggerClose(t *testing.T) {
writer1 := NewMockFile(true)
writer2 := NewMockFile(true)
writers := []Writer{writer1, writer2}
writers := []File{writer1, writer2}
logger := NewMultiAccessLogger(testTask, cfg, writers)
@@ -170,7 +170,7 @@ func TestMultiAccessLoggerMultipleLogs(t *testing.T) {
writer1 := NewMockFile(true)
writer2 := NewMockFile(true)
writers := []Writer{writer1, writer2}
writers := []File{writer1, writer2}
logger := NewMultiAccessLogger(testTask, cfg, writers)
@@ -185,7 +185,7 @@ func TestMultiAccessLoggerMultipleLogs(t *testing.T) {
resp := &http.Response{
StatusCode: http.StatusOK,
}
logger.Log(req, resp)
logger.LogRequest(req, resp)
}
logger.Flush()
@@ -199,7 +199,7 @@ func TestMultiAccessLoggerSingleWriter(t *testing.T) {
cfg := DefaultRequestLoggerConfig()
writer := NewMockFile(true)
writers := []Writer{writer}
writers := []File{writer}
logger := NewMultiAccessLogger(testTask, cfg, writers)
expect.NotNil(t, logger)
@@ -214,7 +214,7 @@ func TestMultiAccessLoggerSingleWriter(t *testing.T) {
StatusCode: http.StatusOK,
}
logger.Log(req, resp)
logger.LogRequest(req, resp)
logger.Flush()
expect.Equal(t, writer.NumLines(), 1)
@@ -226,7 +226,7 @@ func TestMultiAccessLoggerMixedOperations(t *testing.T) {
writer1 := NewMockFile(true)
writer2 := NewMockFile(true)
writers := []Writer{writer1, writer2}
writers := []File{writer1, writer2}
logger := NewMultiAccessLogger(testTask, cfg, writers)
@@ -241,7 +241,7 @@ func TestMultiAccessLoggerMixedOperations(t *testing.T) {
StatusCode: http.StatusOK,
}
logger.Log(req, resp)
logger.LogRequest(req, resp)
logger.Flush()
info := &maxmind.IPInfo{

View File

@@ -9,9 +9,9 @@ import (
)
type Retention struct {
Days uint64 `json:"days"`
Last uint64 `json:"last"`
KeepSize uint64 `json:"keep_size"`
Days uint64 `json:"days,omitempty"`
Last uint64 `json:"last,omitempty"`
KeepSize uint64 `json:"keep_size,omitempty"`
} // @name LogRetention
var (
@@ -20,7 +20,7 @@ var (
)
// see back_scanner_test.go#L210 for benchmarks
var defaultChunkSize = 256 * kilobyte
var defaultChunkSize = 32 * kilobyte
// Syntax:
//

View File

@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"io"
"io/fs"
"time"
"github.com/rs/zerolog"
@@ -17,7 +18,7 @@ type supportRotate interface {
io.ReaderAt
io.WriterAt
Truncate(size int64) error
Size() (int64, error)
Stat() (fs.FileInfo, error)
}
type RotateResult struct {
@@ -93,10 +94,11 @@ func rotateLogFileByPolicy(file supportRotate, config *Retention, result *Rotate
return false, nil // should not happen
}
fileSize, err := file.Size()
stat, err := file.Stat()
if err != nil {
return false, err
}
fileSize := stat.Size()
// nothing to rotate, return early
if fileSize == 0 {
@@ -104,6 +106,7 @@ func rotateLogFileByPolicy(file supportRotate, config *Retention, result *Rotate
}
s := NewBackScanner(file, fileSize, defaultChunkSize)
defer s.Release()
result.OriginalSize = fileSize
// Store the line positions and sizes we want to keep
@@ -216,16 +219,17 @@ func fileContentMove(file supportRotate, srcPos, dstPos int64, size int) error {
//
// Invalid lines will not be detected and included in the result.
func rotateLogFileBySize(file supportRotate, config *Retention, result *RotateResult) (rotated bool, err error) {
filesize, err := file.Size()
stat, err := file.Stat()
if err != nil {
return false, err
}
fileSize := stat.Size()
result.OriginalSize = filesize
result.OriginalSize = fileSize
keepSize := int64(config.KeepSize)
if keepSize >= filesize {
result.NumBytesKeep = filesize
if keepSize >= fileSize {
result.NumBytesKeep = fileSize
return false, nil
}
result.NumBytesKeep = keepSize

View File

@@ -57,13 +57,13 @@ func TestRotateKeepLast(t *testing.T) {
t.Run(string(format)+" keep last", func(t *testing.T) {
file := NewMockFile(true)
mockable.MockTimeNow(testTime)
logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &RequestLoggerConfig{
logger := NewFileAccessLogger(task.RootTask("test", false), file, &RequestLoggerConfig{
Format: format,
})
expect.Nil(t, logger.Config().Retention)
for range 10 {
logger.Log(req, resp)
logger.LogRequest(req, resp)
}
logger.Flush()
@@ -87,14 +87,14 @@ func TestRotateKeepLast(t *testing.T) {
t.Run(string(format)+" keep days", func(t *testing.T) {
file := NewMockFile(true)
logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &RequestLoggerConfig{
logger := NewFileAccessLogger(task.RootTask("test", false), file, &RequestLoggerConfig{
Format: format,
})
expect.Nil(t, logger.Config().Retention)
nLines := 10
for i := range nLines {
mockable.MockTimeNow(testTime.AddDate(0, 0, -nLines+i+1))
logger.Log(req, resp)
logger.LogRequest(req, resp)
}
logger.Flush()
expect.Equal(t, file.NumLines(), nLines)
@@ -133,14 +133,14 @@ func TestRotateKeepFileSize(t *testing.T) {
for _, format := range ReqLoggerFormats {
t.Run(string(format)+" keep size no rotation", func(t *testing.T) {
file := NewMockFile(true)
logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &RequestLoggerConfig{
logger := NewFileAccessLogger(task.RootTask("test", false), file, &RequestLoggerConfig{
Format: format,
})
expect.Nil(t, logger.Config().Retention)
nLines := 10
for i := range nLines {
mockable.MockTimeNow(testTime.AddDate(0, 0, -nLines+i+1))
logger.Log(req, resp)
logger.LogRequest(req, resp)
}
logger.Flush()
expect.Equal(t, file.NumLines(), nLines)
@@ -165,14 +165,14 @@ func TestRotateKeepFileSize(t *testing.T) {
t.Run("keep size with rotation", func(t *testing.T) {
file := NewMockFile(true)
logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &RequestLoggerConfig{
logger := NewFileAccessLogger(task.RootTask("test", false), file, &RequestLoggerConfig{
Format: FormatJSON,
})
expect.Nil(t, logger.Config().Retention)
nLines := 100
for i := range nLines {
mockable.MockTimeNow(testTime.AddDate(0, 0, -nLines+i+1))
logger.Log(req, resp)
logger.LogRequest(req, resp)
}
logger.Flush()
expect.Equal(t, file.NumLines(), nLines)
@@ -199,14 +199,14 @@ func TestRotateSkipInvalidTime(t *testing.T) {
for _, format := range ReqLoggerFormats {
t.Run(string(format), func(t *testing.T) {
file := NewMockFile(true)
logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &RequestLoggerConfig{
logger := NewFileAccessLogger(task.RootTask("test", false), file, &RequestLoggerConfig{
Format: format,
})
expect.Nil(t, logger.Config().Retention)
nLines := 10
for i := range nLines {
mockable.MockTimeNow(testTime.AddDate(0, 0, -nLines+i+1))
logger.Log(req, resp)
logger.LogRequest(req, resp)
logger.Flush()
n, err := file.Write([]byte("invalid time\n"))
@@ -241,7 +241,7 @@ func BenchmarkRotate(b *testing.B) {
for _, retention := range tests {
b.Run(fmt.Sprintf("retention_%s", retention.String()), func(b *testing.B) {
file := NewMockFile(true)
logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &RequestLoggerConfig{
logger := NewFileAccessLogger(task.RootTask("test", false), file, &RequestLoggerConfig{
ConfigBase: ConfigBase{
Retention: retention,
},
@@ -249,7 +249,7 @@ func BenchmarkRotate(b *testing.B) {
})
for i := range 100 {
mockable.MockTimeNow(testTime.AddDate(0, 0, -100+i+1))
logger.Log(req, resp)
logger.LogRequest(req, resp)
}
logger.Flush()
content := file.Content()
@@ -275,7 +275,7 @@ func BenchmarkRotateWithInvalidTime(b *testing.B) {
for _, retention := range tests {
b.Run(fmt.Sprintf("retention_%s", retention.String()), func(b *testing.B) {
file := NewMockFile(true)
logger := NewAccessLoggerWithIO(task.RootTask("test", false), file, &RequestLoggerConfig{
logger := NewFileAccessLogger(task.RootTask("test", false), file, &RequestLoggerConfig{
ConfigBase: ConfigBase{
Retention: retention,
},
@@ -283,7 +283,7 @@ func BenchmarkRotateWithInvalidTime(b *testing.B) {
})
for i := range 10000 {
mockable.MockTimeNow(testTime.AddDate(0, 0, -10000+i+1))
logger.Log(req, resp)
logger.LogRequest(req, resp)
if i%10 == 0 {
_, _ = file.Write([]byte("invalid time\n"))
}

View File

@@ -11,8 +11,8 @@ import (
"github.com/yusing/goutils/synk"
)
type File struct {
f *os.File
type sharedFileHandle struct {
*os.File
// os.File.Name() may not equal the key of `openedFiles`.
// Store it so it can be deleted from `openedFiles` later.
@@ -22,18 +22,18 @@ type File struct {
}
var (
openedFiles = make(map[string]*File)
openedFiles = make(map[string]*sharedFileHandle)
openedFilesMu sync.Mutex
)
// NewFileIO creates a new file writer with cleaned path.
// OpenFile creates a new file writer with a cleaned path.
//
// If the file is already opened, it will be returned.
func NewFileIO(path string) (Writer, error) {
func OpenFile(path string) (File, error) {
openedFilesMu.Lock()
defer openedFilesMu.Unlock()
var file *File
var file *sharedFileHandle
var err error
// make it an absolute path so it can be used as the key of `openedFiles` and the shared lock
@@ -53,65 +53,38 @@ func NewFileIO(path string) (Writer, error) {
return nil, fmt.Errorf("access log open error: %w", err)
}
if _, err := f.Seek(0, io.SeekEnd); err != nil {
f.Close()
return nil, fmt.Errorf("access log seek error: %w", err)
}
file = &File{f: f, path: path, refCount: synk.NewRefCounter()}
file = &sharedFileHandle{File: f, path: path, refCount: synk.NewRefCounter()}
openedFiles[path] = file
log.Debug().Str("path", path).Msg("file opened")
go file.closeOnZero()
return file, nil
}
// Name returns the absolute path of the file.
func (f *File) Name() string {
func (f *sharedFileHandle) Name() string {
return f.path
}
func (f *File) ShouldBeBuffered() bool {
return true
}
func (f *File) Write(p []byte) (n int, err error) {
return f.f.Write(p)
}
func (f *File) ReadAt(p []byte, off int64) (n int, err error) {
return f.f.ReadAt(p, off)
}
func (f *File) WriteAt(p []byte, off int64) (n int, err error) {
return f.f.WriteAt(p, off)
}
func (f *File) Seek(offset int64, whence int) (int64, error) {
return f.f.Seek(offset, whence)
}
func (f *File) Size() (int64, error) {
stat, err := f.f.Stat()
if err != nil {
return 0, err
}
return stat.Size(), nil
}
func (f *File) Truncate(size int64) error {
return f.f.Truncate(size)
}
func (f *File) Close() error {
func (f *sharedFileHandle) Close() error {
f.refCount.Sub()
return nil
}
func (f *File) closeOnZero() {
defer log.Debug().
Str("path", f.path).
Msg("access log closed")
func (f *sharedFileHandle) closeOnZero() {
defer log.Debug().Str("path", f.path).Msg("file closed")
<-f.refCount.Zero()
openedFilesMu.Lock()
delete(openedFiles, f.path)
openedFilesMu.Unlock()
f.f.Close()
err := f.File.Close()
if err != nil {
log.Error().Str("path", f.path).Err(err).Msg("failed to close file")
}
}

View File

@@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/yusing/goutils/task"
"golang.org/x/sync/errgroup"
)
func TestConcurrentFileLoggersShareSameAccessLogIO(t *testing.T) {
@@ -18,7 +19,7 @@ func TestConcurrentFileLoggersShareSameAccessLogIO(t *testing.T) {
cfg.Path = "test.log"
loggerCount := runtime.GOMAXPROCS(0)
accessLogIOs := make([]Writer, loggerCount)
accessLogIOs := make([]File, loggerCount)
// make test log file
file, err := os.Create(cfg.Path)
@@ -28,16 +29,20 @@ func TestConcurrentFileLoggersShareSameAccessLogIO(t *testing.T) {
assert.NoError(t, os.Remove(cfg.Path))
})
var wg sync.WaitGroup
var errs errgroup.Group
for i := range loggerCount {
wg.Go(func() {
file, err := NewFileIO(cfg.Path)
assert.NoError(t, err)
errs.Go(func() error {
file, err := OpenFile(cfg.Path)
if err != nil {
return err
}
accessLogIOs[i] = file
return nil
})
}
wg.Wait()
err = errs.Wait()
assert.NoError(t, err)
firstIO := accessLogIOs[0]
for _, io := range accessLogIOs {
@@ -58,7 +63,7 @@ func TestConcurrentAccessLoggerLogAndFlush(t *testing.T) {
loggers := make([]AccessLogger, loggerCount)
for i := range loggerCount {
loggers[i] = NewAccessLoggerWithIO(parent, file, cfg)
loggers[i] = NewFileAccessLogger(parent, file, cfg)
}
req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil)
@@ -87,7 +92,7 @@ func concurrentLog(logger AccessLogger, req *http.Request, resp *http.Response,
var wg sync.WaitGroup
for range n {
wg.Go(func() {
logger.Log(req, resp)
logger.LogRequest(req, resp)
if rand.IntN(2) == 0 {
logger.Flush()
}

View File

@@ -1,32 +0,0 @@
package accesslog
import (
"os"
"github.com/rs/zerolog"
"github.com/yusing/godoxy/internal/logging"
)
type Stdout struct {
logger zerolog.Logger
}
func NewStdout() Writer {
return &Stdout{logger: logging.NewLoggerWithFixedLevel(zerolog.InfoLevel, os.Stdout)}
}
func (s Stdout) Name() string {
return "stdout"
}
func (s Stdout) ShouldBeBuffered() bool {
return false
}
func (s Stdout) Write(p []byte) (n int, err error) {
return s.logger.Write(p)
}
func (s Stdout) Close() error {
return nil
}

View File

@@ -0,0 +1,55 @@
package accesslog
import (
"bytes"
"net/http"
"github.com/rs/zerolog"
maxmind "github.com/yusing/godoxy/internal/maxmind/types"
"github.com/yusing/goutils/task"
)
type (
AccessLogger interface {
LogRequest(req *http.Request, res *http.Response)
LogError(req *http.Request, err error)
LogACL(info *maxmind.IPInfo, blocked bool)
Config() *Config
Flush()
Close() error
}
AccessLogRotater interface {
Rotate(result *RotateResult) (rotated bool, err error)
}
RequestFormatter interface {
// AppendRequestLog appends a log line to line with or without a trailing newline
AppendRequestLog(line *bytes.Buffer, req *http.Request, res *http.Response)
}
RequestFormatterZeroLog interface {
// LogRequestZeroLog logs a request log to the logger
LogRequestZeroLog(logger *zerolog.Logger, req *http.Request, res *http.Response)
}
ACLFormatter interface {
// AppendACLLog appends a log line to line with or without a trailing newline
AppendACLLog(line *bytes.Buffer, info *maxmind.IPInfo, blocked bool)
// LogACLZeroLog logs an ACL log to the logger
LogACLZeroLog(logger *zerolog.Logger, info *maxmind.IPInfo, blocked bool)
}
)
func NewAccessLogger(parent task.Parent, cfg AnyConfig) (AccessLogger, error) {
writers, err := cfg.Writers()
if err != nil {
return nil, err
}
return NewMultiAccessLogger(parent, cfg, writers), nil
}
func NewMockAccessLogger(parent task.Parent, cfg *RequestLoggerConfig) AccessLogger {
return NewFileAccessLogger(parent, NewMockFile(true), cfg)
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/rs/zerolog"
"github.com/yusing/godoxy/internal/common"
"github.com/rs/zerolog/diode"
zerologlog "github.com/rs/zerolog/log"
)
@@ -68,7 +69,13 @@ func fmtMessage(msg string) string {
return sb.String()
}
func multiLevelWriter(out ...io.Writer) io.Writer {
func diodeMultiWriter(out ...io.Writer) io.Writer {
return diode.NewWriter(multiWriter(out...), 1024, 0, func(missed int) {
zerologlog.Warn().Int("missed", missed).Msg("missed log messages")
})
}
func multiWriter(out ...io.Writer) io.Writer {
if len(out) == 0 {
return os.Stdout
}
@@ -80,7 +87,7 @@ func multiLevelWriter(out ...io.Writer) io.Writer {
func NewLogger(out ...io.Writer) zerolog.Logger {
writer := zerolog.NewConsoleWriter(func(w *zerolog.ConsoleWriter) {
w.Out = multiLevelWriter(out...)
w.Out = diodeMultiWriter(out...)
w.TimeFormat = timeFmt
w.FormatMessage = func(msgI any) string { // pad spaces for each line
if msgI == nil {
@@ -92,9 +99,9 @@ func NewLogger(out ...io.Writer) zerolog.Logger {
return zerolog.New(writer).Level(level).With().Timestamp().Logger()
}
func NewLoggerWithFixedLevel(level zerolog.Level, out ...io.Writer) zerolog.Logger {
func NewLoggerWithFixedLevel(lvl zerolog.Level, out ...io.Writer) zerolog.Logger {
writer := zerolog.NewConsoleWriter(func(w *zerolog.ConsoleWriter) {
w.Out = multiLevelWriter(out...)
w.Out = diodeMultiWriter(out...)
w.TimeFormat = timeFmt
w.FormatMessage = func(msgI any) string { // pad spaces for each line
if msgI == nil {
@@ -103,5 +110,5 @@ func NewLoggerWithFixedLevel(level zerolog.Level, out ...io.Writer) zerolog.Logg
return fmtMessage(msgI.(string))
}
})
return zerolog.New(writer).Level(level).With().Str("level", level.String()).Timestamp().Logger()
return zerolog.New(writer).Level(level).With().Str("level", lvl.String()).Timestamp().Logger()
}

View File

@@ -2,42 +2,31 @@ package memlogger
import (
"bytes"
"context"
"io"
"slices"
"sync"
"time"
"github.com/gin-gonic/gin"
"github.com/puzpuzpuz/xsync/v4"
apitypes "github.com/yusing/goutils/apitypes"
"github.com/yusing/goutils/http/websocket"
)
type logEntryRange struct {
Start, End int
}
type memLogger struct {
*bytes.Buffer
sync.RWMutex
buf *bytes.Buffer
bufLock sync.RWMutex
notifyLock sync.RWMutex
connChans *xsync.Map[chan *logEntryRange, struct{}]
listeners *xsync.Map[chan []byte, struct{}]
channelLock sync.RWMutex
listeners *xsync.Map[chan []byte, struct{}]
}
type MemLogger io.Writer
const (
maxMemLogSize = 16 * 1024
truncateSize = maxMemLogSize / 2
initialWriteChunkSize = 4 * 1024
writeTimeout = 10 * time.Second
maxMemLogSize = 16 * 1024
truncateSize = maxMemLogSize / 2
listenerChanBufSize = 64
)
var memLoggerInstance = &memLogger{
Buffer: bytes.NewBuffer(make([]byte, maxMemLogSize)),
connChans: xsync.NewMap[chan *logEntryRange, struct{}](),
buf: bytes.NewBuffer(make([]byte, 0, maxMemLogSize)),
listeners: xsync.NewMap[chan []byte, struct{}](),
}
@@ -45,10 +34,6 @@ func GetMemLogger() MemLogger {
return memLoggerInstance
}
func HandlerFunc() gin.HandlerFunc {
return memLoggerInstance.ServeHTTP
}
func Events() (<-chan []byte, func()) {
return memLoggerInstance.events()
}
@@ -56,136 +41,90 @@ func Events() (<-chan []byte, func()) {
// Write implements io.Writer.
func (m *memLogger) Write(p []byte) (n int, err error) {
n = len(p)
if n == 0 {
return 0, nil
}
m.truncateIfNeeded(n)
pos, err := m.writeBuf(p)
err = m.writeBuf(p)
if err != nil {
// not logging the error here; doing so would cause Run to be called again, creating an infinite loop
return n, err
}
m.notifyWS(pos, n)
return n, err
}
func (m *memLogger) ServeHTTP(c *gin.Context) {
manager, err := websocket.NewManagerWithUpgrade(c)
if err != nil {
c.Error(apitypes.InternalServerError(err, "failed to create websocket manager"))
return
if m.listeners.Size() == 0 {
return n, nil
}
logCh := make(chan *logEntryRange)
m.connChans.Store(logCh, struct{}{})
defer func() {
manager.Close()
m.notifyLock.Lock()
m.connChans.Delete(logCh)
close(logCh)
m.notifyLock.Unlock()
}()
if err := m.wsInitial(manager); err != nil {
c.Error(apitypes.InternalServerError(err, "failed to send initial log"))
return
}
m.wsStreamLog(c.Request.Context(), manager, logCh)
msg := slices.Clone(p)
m.notifyWS(msg)
return n, nil
}
func (m *memLogger) truncateIfNeeded(n int) {
m.RLock()
needTruncate := m.Len()+n > maxMemLogSize
m.RUnlock()
m.bufLock.RLock()
needTruncate := m.buf.Len()+n > maxMemLogSize
m.bufLock.RUnlock()
if needTruncate {
m.Lock()
defer m.Unlock()
needTruncate = m.Len()+n > maxMemLogSize
if !needTruncate {
return
}
m.Truncate(truncateSize)
}
}
func (m *memLogger) notifyWS(pos, n int) {
if m.connChans.Size() == 0 && m.listeners.Size() == 0 {
if !needTruncate {
return
}
timeout := time.NewTimer(3 * time.Second)
defer timeout.Stop()
m.bufLock.Lock()
defer m.bufLock.Unlock()
m.notifyLock.RLock()
defer m.notifyLock.RUnlock()
m.connChans.Range(func(ch chan *logEntryRange, _ struct{}) bool {
select {
case ch <- &logEntryRange{pos, pos + n}:
return true
case <-timeout.C:
return false
}
})
if m.listeners.Size() > 0 {
msg := m.Bytes()[pos : pos+n]
m.listeners.Range(func(ch chan []byte, _ struct{}) bool {
select {
case <-timeout.C:
return false
case ch <- msg:
return true
}
})
discard := m.buf.Len() - truncateSize
if discard > 0 {
_ = m.buf.Next(discard)
}
}
func (m *memLogger) writeBuf(b []byte) (pos int, err error) {
m.Lock()
defer m.Unlock()
pos = m.Len()
_, err = m.Buffer.Write(b)
return pos, err
func (m *memLogger) notifyWS(msg []byte) {
if len(msg) == 0 || m.listeners.Size() == 0 {
return
}
m.channelLock.RLock()
defer m.channelLock.RUnlock()
for ch := range m.listeners.Range {
select {
case ch <- msg:
default:
}
}
}
func (m *memLogger) writeBuf(b []byte) (err error) {
m.bufLock.Lock()
defer m.bufLock.Unlock()
_, err = m.buf.Write(b)
if err != nil {
return err
}
if m.buf.Len() > maxMemLogSize {
discard := m.buf.Len() - maxMemLogSize
if discard > 0 {
_ = m.buf.Next(discard)
}
}
return nil
}
func (m *memLogger) events() (logs <-chan []byte, cancel func()) {
ch := make(chan []byte)
m.notifyLock.Lock()
defer m.notifyLock.Unlock()
ch := make(chan []byte, listenerChanBufSize)
m.channelLock.Lock()
defer m.channelLock.Unlock()
m.listeners.Store(ch, struct{}{})
return ch, func() {
m.notifyLock.Lock()
defer m.notifyLock.Unlock()
m.channelLock.Lock()
defer m.channelLock.Unlock()
m.listeners.Delete(ch)
close(ch)
}
}
func (m *memLogger) wsInitial(manager *websocket.Manager) error {
m.Lock()
defer m.Unlock()
return manager.WriteData(websocket.TextMessage, m.Bytes(), writeTimeout)
}
func (m *memLogger) wsStreamLog(ctx context.Context, manager *websocket.Manager, ch <-chan *logEntryRange) {
for {
select {
case <-ctx.Done():
return
case logRange := <-ch:
m.RLock()
msg := m.Bytes()[logRange.Start:logRange.End]
err := manager.WriteData(websocket.TextMessage, msg, writeTimeout)
m.RUnlock()
if err != nil {
return
}
}
}
}

View File

@@ -55,13 +55,10 @@ Individual route status at a point in time.
```go
type RouteAggregate struct {
Alias string `json:"alias"`
DisplayName string `json:"display_name"`
Uptime float32 `json:"uptime"`
Downtime float32 `json:"downtime"`
Idle float32 `json:"idle"`
AvgLatency float32 `json:"avg_latency"`
IsDocker bool `json:"is_docker"`
IsExcluded bool `json:"is_excluded"`
CurrentStatus types.HealthStatus `json:"current_status" swaggertype:"string" enums:"healthy,unhealthy,unknown,napping,starting"`
Statuses []Status `json:"statuses"`
}
@@ -312,7 +309,7 @@ const ws = new WebSocket(
ws.onmessage = (event) => {
const data = JSON.parse(event.data);
data.data.forEach((route) => {
console.log(`${route.display_name}: ${route.uptime * 100}% uptime`);
console.log(`${route.alias}: ${route.uptime * 100}% uptime`);
});
};
```
@@ -336,7 +333,7 @@ _, agg := uptime.aggregateStatuses(entries, url.Values{
for _, route := range agg {
fmt.Printf("%s: %.1f%% uptime, %.1fms avg latency\n",
route.DisplayName, route.Uptime*100, route.AvgLatency)
route.Alias, route.Uptime*100, route.AvgLatency)
}
```
@@ -365,13 +362,10 @@ for _, route := range agg {
"data": [
{
"alias": "api-server",
"display_name": "API Server",
"uptime": 0.98,
"downtime": 0.02,
"idle": 0.0,
"avg_latency": 45.5,
"is_docker": true,
"is_excluded": false,
"current_status": "healthy",
"statuses": [
{ "status": "healthy", "latency": 45, "timestamp": 1704892800 }

View File

@@ -27,13 +27,10 @@ type (
RouteStatuses map[string][]Status // @name RouteStatuses
RouteAggregate struct {
Alias string `json:"alias"`
DisplayName string `json:"display_name"`
Uptime float32 `json:"uptime"`
Downtime float32 `json:"downtime"`
Idle float32 `json:"idle"`
AvgLatency float32 `json:"avg_latency"`
IsDocker bool `json:"is_docker"`
IsExcluded bool `json:"is_excluded"`
CurrentStatus types.HealthStatus `json:"current_status" swaggertype:"string" enums:"healthy,unhealthy,unknown,napping,starting"`
Statuses []Status `json:"statuses"`
} // @name RouteUptimeAggregate
@@ -129,18 +126,9 @@ func (rs RouteStatuses) aggregate(limit int, offset int) Aggregated {
statuses := rs[alias]
up, down, idle, latency := rs.calculateInfo(statuses)
displayName := alias
r, ok := routes.Get(alias)
if !ok {
// also search for excluded routes
r, ok = routes.Excluded.Get(alias)
}
if r != nil {
displayName = r.DisplayName()
}
status := types.StatusUnknown
if r != nil {
r, ok := routes.GetIncludeExcluded(alias)
if ok {
mon := r.HealthMonitor()
if mon != nil {
status = mon.Status()
@@ -149,15 +137,12 @@ func (rs RouteStatuses) aggregate(limit int, offset int) Aggregated {
result[i] = RouteAggregate{
Alias: alias,
DisplayName: displayName,
Uptime: up,
Downtime: down,
Idle: idle,
AvgLatency: latency,
CurrentStatus: status,
Statuses: statuses,
IsDocker: r != nil && r.IsDocker(),
IsExcluded: r == nil || r.ShouldExclude(),
}
}
return result

View File

@@ -33,7 +33,7 @@ type (
task *task.Task
pool pool.Pool[types.LoadBalancerServer]
pool *pool.Pool[types.LoadBalancerServer]
poolMu sync.Mutex
sumWeight int

View File

@@ -0,0 +1,203 @@
package middleware
import (
"bytes"
"context"
"fmt"
"io"
"net"
"net/http"
"strconv"
"strings"
"time"
"github.com/yusing/godoxy/internal/route/routes"
httputils "github.com/yusing/goutils/http"
ioutils "github.com/yusing/goutils/io"
)
type (
crowdsecMiddleware struct {
CrowdsecMiddlewareOpts
}
CrowdsecMiddlewareOpts struct {
Route string `json:"route" validate:"required"` // route name (alias) or IP address
Port int `json:"port"` // port number (optional if using route name)
APIKey string `json:"api_key" validate:"required"` // API key for CrowdSec AppSec (mandatory)
Endpoint string `json:"endpoint"` // default: "/"
LogBlocked bool `json:"log_blocked"` // default: false
Timeout time.Duration `json:"timeout"` // default: 5 seconds
httpClient *http.Client
}
)
var Crowdsec = NewMiddleware[crowdsecMiddleware]()
func (m *crowdsecMiddleware) setup() {
m.CrowdsecMiddlewareOpts = CrowdsecMiddlewareOpts{
Route: "",
Port: 7422, // default port for CrowdSec AppSec
APIKey: "",
Endpoint: "/",
LogBlocked: false,
Timeout: 5 * time.Second,
}
}
func (m *crowdsecMiddleware) finalize() error {
if !strings.HasPrefix(m.Endpoint, "/") {
return fmt.Errorf("endpoint must start with /")
}
if m.Timeout == 0 {
m.Timeout = 5 * time.Second
}
m.httpClient = &http.Client{
Timeout: m.Timeout,
// do not follow redirects
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
return nil
}
// before implements RequestModifier.
func (m *crowdsecMiddleware) before(w http.ResponseWriter, r *http.Request) (proceed bool) {
// Build CrowdSec URL
crowdsecURL, err := m.buildCrowdSecURL()
if err != nil {
Crowdsec.LogError(r).Err(err).Msg("failed to build CrowdSec URL")
w.WriteHeader(http.StatusInternalServerError)
return false
}
// Determine HTTP method: GET for requests without body, POST for requests with body
method := http.MethodGet
var body io.Reader
if r.Body != nil && r.Body != http.NoBody {
method = http.MethodPost
// Read the body
bodyBytes, release, err := httputils.ReadAllRequestBody(r)
if err != nil {
Crowdsec.LogError(r).Err(err).Msg("failed to read request body")
w.WriteHeader(http.StatusInternalServerError)
return false
}
r.Body = ioutils.NewHookReadCloser(io.NopCloser(bytes.NewReader(bodyBytes)), func() {
release(bodyBytes)
})
body = bytes.NewReader(bodyBytes)
}
ctx, cancel := context.WithTimeout(r.Context(), m.Timeout)
defer cancel()
req, err := http.NewRequestWithContext(ctx, method, crowdsecURL, body)
if err != nil {
Crowdsec.LogError(r).Err(err).Msg("failed to create CrowdSec request")
w.WriteHeader(http.StatusInternalServerError)
return false
}
// Get remote IP
remoteIP, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
remoteIP = r.RemoteAddr
}
// Get HTTP version in integer form (10, 11, 20, etc.)
httpVersion := m.getHTTPVersion(r)
// Copy original headers
req.Header = r.Header.Clone()
// Overwrite CrowdSec required headers to prevent spoofing
req.Header.Set("X-Crowdsec-Appsec-Ip", remoteIP)
req.Header.Set("X-Crowdsec-Appsec-Uri", r.URL.RequestURI())
req.Header.Set("X-Crowdsec-Appsec-Host", r.Host)
req.Header.Set("X-Crowdsec-Appsec-Verb", r.Method)
req.Header.Set("X-Crowdsec-Appsec-Api-Key", m.APIKey)
req.Header.Set("X-Crowdsec-Appsec-User-Agent", r.UserAgent())
req.Header.Set("X-Crowdsec-Appsec-Http-Version", httpVersion)
// Make request to CrowdSec
resp, err := m.httpClient.Do(req)
if err != nil {
Crowdsec.LogError(r).Err(err).Msg("failed to connect to CrowdSec server")
w.WriteHeader(http.StatusInternalServerError)
return false
}
defer resp.Body.Close()
// Handle response codes
switch resp.StatusCode {
case http.StatusOK:
// Request is allowed
return true
case http.StatusForbidden:
// Request is blocked by CrowdSec
if m.LogBlocked {
Crowdsec.LogWarn(r).
Str("ip", remoteIP).
Msg("request blocked by CrowdSec")
}
w.WriteHeader(http.StatusForbidden)
return false
case http.StatusInternalServerError:
// CrowdSec server error
bodyBytes, release, err := httputils.ReadAllBody(resp)
if err == nil {
defer release(bodyBytes)
Crowdsec.LogError(r).
Str("crowdsec_response", string(bodyBytes)).
Msg("CrowdSec server error")
}
w.WriteHeader(http.StatusInternalServerError)
return false
default:
// Unexpected response code
Crowdsec.LogWarn(r).
Int("status_code", resp.StatusCode).
Msg("unexpected response from CrowdSec server")
w.WriteHeader(http.StatusInternalServerError)
return false
}
}
// buildCrowdSecURL constructs the CrowdSec server URL based on route or IP configuration
func (m *crowdsecMiddleware) buildCrowdSecURL() (string, error) {
// Try to get route first
if m.Route != "" {
if route, ok := routes.HTTP.Get(m.Route); ok {
// Using route name
targetURL := *route.TargetURL()
targetURL.Path = m.Endpoint
return targetURL.String(), nil
}
// If not found in routes, assume it's an IP address
if m.Port == 0 {
return "", fmt.Errorf("port must be specified when using IP address")
}
return fmt.Sprintf("http://%s%s", net.JoinHostPort(m.Route, strconv.Itoa(m.Port)), m.Endpoint), nil
}
return "", fmt.Errorf("route or IP address must be specified")
}
func (m *crowdsecMiddleware) getHTTPVersion(r *http.Request) string {
switch {
case r.ProtoMajor == 1 && r.ProtoMinor == 0:
return "10"
case r.ProtoMajor == 1 && r.ProtoMinor == 1:
return "11"
case r.ProtoMajor == 2:
return "20"
case r.ProtoMajor == 3:
return "30"
default:
return strconv.Itoa(r.ProtoMajor*10 + r.ProtoMinor)
}
}

View File

@@ -19,6 +19,7 @@ var allMiddlewares = map[string]*Middleware{
"oidc": OIDC,
"forwardauth": ForwardAuth,
"crowdsec": Crowdsec,
"request": ModifyRequest,
"modifyrequest": ModifyRequest,

View File

@@ -10,10 +10,15 @@ The proxmox package implements Proxmox API client management, node discovery, an
- Proxmox API client management
- Node discovery and pool management
- LXC container operations (start, stop, status)
- IP address retrieval for containers
- LXC container operations (start, stop, status, stats, command execution)
- IP address retrieval for containers (online and offline)
- Container stats streaming (like `docker stats`)
- Container command execution via VNC websocket
- Journalctl streaming for LXC containers
- Reverse resource lookup by IP, hostname, or alias
- Reverse node lookup by hostname, IP, or alias
- TLS configuration options
- Token-based authentication
- Token and username/password authentication
## Architecture
@@ -29,12 +34,14 @@ graph TD
G --> I[Start Container]
G --> J[Stop Container]
G --> K[Check Status]
G --> L[Execute Command]
G --> M[Stream Stats]
subgraph Node Pool
F --> L[Nodes Map]
L --> M[Node 1]
L --> N[Node 2]
L --> O[Node 3]
F --> N[Nodes Map]
N --> O[Node 1]
N --> P[Node 2]
N --> Q[Node 3]
end
```
@@ -45,8 +52,11 @@ graph TD
```go
type Config struct {
URL string `json:"url" validate:"required,url"`
TokenID string `json:"token_id" validate:"required"`
Secret strutils.Redacted `json:"secret" validate:"required"`
Username string `json:"username" validate:"required_without_all=TokenID Secret"`
Password strutils.Redacted `json:"password" validate:"required_without_all=TokenID Secret"`
Realm string `json:"realm"`
TokenID string `json:"token_id" validate:"required_without_all=Username Password"`
Secret strutils.Redacted `json:"secret" validate:"required_without_all=Username Password"`
NoTLSVerify bool `json:"no_tls_verify"`
client *Client
@@ -58,8 +68,16 @@ type Config struct {
```go
type Client struct {
*proxmox.Client
proxmox.Cluster
*proxmox.Cluster
Version *proxmox.Version
// id -> resource; id: lxc/<vmid> or qemu/<vmid>
resources map[string]*VMResource
resourcesMu sync.RWMutex
}
type VMResource struct {
*proxmox.ClusterResource
IPs []net.IP
}
```
@@ -69,12 +87,23 @@ type Client struct {
type Node struct {
name string
id string
client *proxmox.Client
client *Client
}
var Nodes = pool.New[*Node]("proxmox_nodes")
```
### NodeConfig
```go
type NodeConfig struct {
Node string `json:"node" validate:"required"`
VMID int `json:"vmid" validate:"required"`
VMName string `json:"vmname,omitempty"`
Service string `json:"service,omitempty"`
}
```
## Public API
### Configuration
@@ -87,11 +116,45 @@ func (c *Config) Init(ctx context.Context) gperr.Error
func (c *Config) Client() *Client
```
### Client Operations
```go
// UpdateClusterInfo fetches cluster info and discovers nodes.
func (c *Client) UpdateClusterInfo(ctx context.Context) error
// UpdateResources fetches VM resources and their IP addresses.
func (c *Client) UpdateResources(ctx context.Context) error
// GetResource gets a resource by kind and id.
func (c *Client) GetResource(kind string, id int) (*VMResource, error)
// ReverseLookupResource looks up a resource by IP, hostname, or alias.
func (c *Client) ReverseLookupResource(ip net.IP, hostname string, alias string) (*VMResource, error)
// ReverseLookupNode looks up a node by hostname, IP, or alias.
func (c *Client) ReverseLookupNode(hostname string, ip net.IP, alias string) string
// NumNodes returns the number of nodes in the cluster.
func (c *Client) NumNodes() int
```
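A minimal sketch of the client lifecycle, assuming a `*Client` constructed with `NewClient` (the VMID below is a placeholder):
```go
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

// Discover cluster nodes first, then populate the resource cache.
if err := client.UpdateClusterInfo(ctx); err != nil {
	return err
}
if err := client.UpdateResources(ctx); err != nil {
	return err
}

// Look up an LXC container by VMID once resources are cached.
res, err := client.GetResource("lxc", 100)
if err != nil {
	return err // ErrResourceNotFound if the VMID is unknown
}
fmt.Println(res.Name, res.IPs)
```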
### Node Operations
```go
// AvailableNodeNames returns all available node names.
// AvailableNodeNames returns all available node names as a comma-separated string.
func AvailableNodeNames() string
// Node.Client returns the Proxmox client.
func (n *Node) Client() *Client
// Node.Get performs a GET request on the node.
func (n *Node) Get(ctx context.Context, path string, v any) error
// NodeCommand executes a command on the node and streams output.
func (n *Node) NodeCommand(ctx context.Context, command string) (io.ReadCloser, error)
// NodeJournalctl streams journalctl output from the node.
func (n *Node) NodeJournalctl(ctx context.Context, service string, limit int) (io.ReadCloser, error)
```
## Usage
@@ -136,57 +199,83 @@ fmt.Printf("Available nodes: %s\n", names)
## LXC Operations
### Container Status
```go
type LXCStatus string
const (
LXCStatusRunning LXCStatus = "running"
LXCStatusStopped LXCStatus = "stopped"
LXCStatusSuspended LXCStatus = "suspended"
)
// LXCStatus returns the current status of a container.
func (node *Node) LXCStatus(ctx context.Context, vmid int) (LXCStatus, error)
// LXCIsRunning checks if a container is running.
func (node *Node) LXCIsRunning(ctx context.Context, vmid int) (bool, error)
// LXCIsStopped checks if a container is stopped.
func (node *Node) LXCIsStopped(ctx context.Context, vmid int) (bool, error)
// LXCName returns the name of a container.
func (node *Node) LXCName(ctx context.Context, vmid int) (string, error)
```
### Container Actions
```go
type LXCAction string
const (
LXCStart LXCAction = "start"
LXCShutdown LXCAction = "shutdown"
LXCSuspend LXCAction = "suspend"
LXCResume LXCAction = "resume"
LXCReboot LXCAction = "reboot"
)
// LXCAction performs an action on a container with task tracking.
func (node *Node) LXCAction(ctx context.Context, vmid int, action LXCAction) error
// LXCSetShutdownTimeout sets the shutdown timeout for a container.
func (node *Node) LXCSetShutdownTimeout(ctx context.Context, vmid int, timeout time.Duration) error
```
### Get Container IPs
```go
func getContainerIPs(ctx context.Context, node *proxmox.Node, vmid int) ([]net.IP, error) {
var ips []net.IP
// LXCGetIPs returns IP addresses of a container.
// First tries interfaces (online), then falls back to config (offline).
func (node *Node) LXCGetIPs(ctx context.Context, vmid int) ([]net.IP, error)
err := node.Get(ctx, "/lxc/"+strconv.Itoa(vmid)+"/config", &config)
if err != nil {
return nil, err
}
// LXCGetIPsFromInterfaces returns IP addresses from network interfaces.
// Returns empty if container is stopped.
func (node *Node) LXCGetIPsFromInterfaces(ctx context.Context, vmid int) ([]net.IP, error)
// Parse IP addresses from config
for _, ip := range config {
if ipNet := net.ParseCIDR(ip); ipNet != nil {
ips = append(ips, ipNet.IP)
}
}
return ips, nil
}
// LXCGetIPsFromConfig returns IP addresses from container config.
// Works for stopped/offline containers.
func (node *Node) LXCGetIPsFromConfig(ctx context.Context, vmid int) ([]net.IP, error)
```
### Check Container Status
### Container Stats (like `docker stats`)
```go
func (node *Node) LXCIsRunning(ctx context.Context, vmid int) (bool, error) {
var status struct {
Status string `json:"status"`
}
err := node.Get(ctx, "/lxc/"+strconv.Itoa(vmid)+"/status/current", &status)
if err != nil {
return false, err
}
return status.Status == "running", nil
}
// LXCStats streams container statistics.
// Format: "STATUS|CPU%%|MEM USAGE/LIMIT|MEM%%|NET I/O|BLOCK I/O"
// Example: "running|31.1%|9.6GiB/20GiB|48.87%|4.7GiB/3.3GiB|25GiB/36GiB"
func (node *Node) LXCStats(ctx context.Context, vmid int, stream bool) (io.ReadCloser, error)
```
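A minimal sketch of consuming the stats stream, assuming `node` is a `*Node` from the pool (the VMID is a placeholder):
```go
stats, err := node.LXCStats(ctx, 100, true) // stream mode
if err != nil {
	return err
}
defer stats.Close()

scanner := bufio.NewScanner(stats)
for scanner.Scan() {
	// STATUS|CPU%|MEM USAGE/LIMIT|MEM%|NET I/O|BLOCK I/O
	fields := strings.Split(scanner.Text(), "|")
	if len(fields) != 6 {
		continue
	}
	fmt.Printf("status=%s cpu=%s mem=%s\n", fields[0], fields[1], fields[3])
}
```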
### Start Container
### Container Command Execution
```go
func (node *Node) LXCAction(ctx context.Context, vmid int, action string) error {
return node.Post(ctx,
"/lxc/"+strconv.Itoa(vmid)+"/status/"+action,
nil,
nil,
)
}
// LXCCommand executes a command inside a container and streams output.
func (node *Node) LXCCommand(ctx context.Context, vmid int, command string) (io.ReadCloser, error)
const LXCStart = "start"
// LXCJournalctl streams journalctl output for a container service.
func (node *Node) LXCJournalctl(ctx context.Context, vmid int, service string, limit int) (io.ReadCloser, error)
```
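A minimal sketch of running a command inside a container and streaming its output; this assumes username/password authentication is configured (WebSocket operations require a session), and the VMID and command are placeholders:
```go
out, err := node.LXCCommand(ctx, 100, "df -h")
if err != nil {
	return err
}
defer out.Close()

// Stream command output until the context is cancelled or the command ends.
_, err = io.Copy(os.Stdout, out)
return err
```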
## Data Flow
@@ -218,6 +307,13 @@ sequenceDiagram
Node->>ProxmoxAPI: POST /lxc/{vmid}/status/start
ProxmoxAPI-->>Node: Success
Node-->>User: Done
User->>Node: LXCCommand(vmid, "df -h")
Node->>ProxmoxAPI: WebSocket /nodes/{node}/termproxy
ProxmoxAPI-->>Node: WebSocket connection
Node->>ProxmoxAPI: Send: "pct exec {vmid} -- df -h"
ProxmoxAPI-->>Node: Command output stream
Node-->>User: Stream output
```
## Configuration
@@ -228,11 +324,38 @@ sequenceDiagram
providers:
proxmox:
- url: https://proxmox.example.com:8006
# Token-based authentication (optional)
token_id: user@pam!token-name
secret: your-api-token-secret
# Username/Password authentication (required for journalctl / service-log streaming)
# username: root
# password: your-password
# realm: pam
no_tls_verify: false
```
### Authentication Options
```go
// Token-based authentication (recommended)
opts := []proxmox.Option{
proxmox.WithAPIToken(c.TokenID, c.Secret.String()),
proxmox.WithHTTPClient(&http.Client{Transport: tr}),
}
// Username/Password authentication
opts := []proxmox.Option{
proxmox.WithCredentials(&proxmox.Credentials{
Username: c.Username,
Password: c.Password.String(),
Realm: c.Realm,
}),
proxmox.WithHTTPClient(&http.Client{Transport: tr}),
}
```
### TLS Configuration
```go
@@ -291,16 +414,16 @@ if r.Idlewatcher != nil && r.Idlewatcher.Proxmox != nil {
## Authentication
The package uses API tokens for authentication:
The package supports two authentication methods:
```go
opts := []proxmox.Option{
proxmox.WithAPIToken(c.TokenID, c.Secret.String()),
proxmox.WithHTTPClient(&http.Client{
Transport: tr,
}),
}
```
1. **API Token** (recommended): Uses `token_id` and `secret`
2. **Username/Password**: Uses `username`, `password`, and `realm`
Username/password authentication is required for:
- WebSocket connections (command execution, journalctl streaming)
Both methods support TLS verification options.
## Error Handling
@@ -312,11 +435,38 @@ if errors.Is(err, context.DeadlineExceeded) {
// Connection errors
return gperr.New("failed to fetch proxmox cluster info").With(err)
// Resource not found
return gperr.New("resource not found").With(ErrResourceNotFound)
// No session (for WebSocket operations)
return gperr.New("no session").With(ErrNoSession)
```
## Errors
```go
var (
ErrResourceNotFound = errors.New("resource not found")
ErrNoResources = errors.New("no resources")
ErrNoSession = fmt.Errorf("no session found, make sure username and password are set")
)
```
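A minimal sketch of checking for these sentinel errors, reusing `client` and `node` values from the sections above (the VMID and command are placeholders):
```go
if _, err := client.GetResource("lxc", 100); errors.Is(err, ErrResourceNotFound) {
	// VMID not discovered yet; resources refresh every ResourcePollInterval.
}

if rc, err := node.NodeCommand(ctx, "uptime"); errors.Is(err, ErrNoSession) {
	// Username/password authentication is required for WebSocket operations.
} else if err == nil {
	rc.Close()
}
```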
## Performance Considerations
- Cluster info fetched once on init
- Nodes cached in pool
- Per-operation API calls
- 3-second timeout for initial connection
- Resources updated in background loop (every 3 seconds by default)
- Concurrent IP resolution for all containers (limited to GOMAXPROCS \* 2)
- 5-second timeout for initial connection
- Per-operation API calls with 3-second timeout
- WebSocket connections properly closed to prevent goroutine leaks
## Constants
```go
const ResourcePollInterval = 3 * time.Second
```
The `ResourcePollInterval` constant controls how often resources are updated in the background loop.
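A condensed sketch of the refresh loop built around it (mirroring `updateResourcesLoop` in the provider config; `client` and `ctx` are assumed):
```go
ticker := time.NewTicker(ResourcePollInterval)
defer ticker.Stop()
for {
	select {
	case <-ctx.Done():
		return
	case <-ticker.C:
		// Bound each refresh to one poll interval so a slow API call
		// cannot overlap the next tick.
		reqCtx, cancel := context.WithTimeout(ctx, ResourcePollInterval)
		err := client.UpdateResources(reqCtx)
		cancel()
		if err != nil {
			log.Error().Err(err).Msg("failed to update resources")
		}
	}
}
```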

View File

@@ -2,37 +2,196 @@ package proxmox
import (
"context"
"errors"
"fmt"
"net"
"net/url"
"runtime"
"slices"
"strconv"
"strings"
"sync"
"github.com/bytedance/sonic"
"github.com/luthermonson/go-proxmox"
"github.com/rs/zerolog/log"
"golang.org/x/sync/errgroup"
)
type Client struct {
*proxmox.Client
proxmox.Cluster
*proxmox.Cluster
Version *proxmox.Version
BaseURL *url.URL
// id -> resource; id: lxc/<vmid> or qemu/<vmid>
resources map[string]*VMResource
resourcesMu sync.RWMutex
}
type VMResource struct {
*proxmox.ClusterResource
IPs []net.IP
}
var (
ErrResourceNotFound = errors.New("resource not found")
ErrNoResources = errors.New("no resources")
)
func NewClient(baseUrl string, opts ...proxmox.Option) *Client {
return &Client{Client: proxmox.NewClient(baseUrl, opts...)}
return &Client{
Client: proxmox.NewClient(baseUrl, opts...),
resources: make(map[string]*VMResource),
}
}
func (c *Client) UpdateClusterInfo(ctx context.Context) (err error) {
baseURL, err := url.Parse(c.Client.GetBaseURL())
if err != nil {
return err
}
c.BaseURL = baseURL
c.Version, err = c.Client.Version(ctx)
if err != nil {
return err
}
// requires (/, Sys.Audit)
if err := c.Get(ctx, "/cluster/status", &c.Cluster); err != nil {
cluster, err := c.Client.Cluster(ctx)
if err != nil {
return err
}
c.Cluster = cluster
for _, node := range c.Cluster.Nodes {
Nodes.Add(&Node{name: node.Name, id: node.ID, client: c.Client})
Nodes.Add(NewNode(c, node.Name, node.ID))
}
if cluster.Name == "" && len(c.Cluster.Nodes) == 1 {
cluster.Name = c.Cluster.Nodes[0].Name
}
return nil
}
func (c *Client) UpdateResources(ctx context.Context) error {
if c.Cluster == nil {
return errors.New("cluster not initialized, call UpdateClusterInfo first")
}
resourcesSlice, err := c.Cluster.Resources(ctx, "vm")
if err != nil {
return err
}
vmResources := make([]*VMResource, len(resourcesSlice))
for i, resource := range resourcesSlice {
vmResources[i] = &VMResource{
ClusterResource: resource,
IPs: nil,
}
}
var errs errgroup.Group
errs.SetLimit(runtime.GOMAXPROCS(0) * 2)
for i, resource := range resourcesSlice {
vmResource := vmResources[i]
errs.Go(func() error {
node, ok := Nodes.Get(resource.Node)
if !ok {
return fmt.Errorf("node %s not found", resource.Node)
}
vmid, ok := strings.CutPrefix(resource.ID, "lxc/")
if !ok {
return nil // not a lxc resource
}
vmidInt, err := strconv.Atoi(vmid)
if err != nil {
return fmt.Errorf("invalid resource id %s: %w", resource.ID, err)
}
ips, err := node.LXCGetIPs(ctx, vmidInt)
if err != nil {
return fmt.Errorf("failed to get ips for resource %s: %w", resource.ID, err)
}
vmResource.IPs = ips
return nil
})
}
if err := errs.Wait(); err != nil {
return err
}
c.resourcesMu.Lock()
clear(c.resources)
for i, resource := range resourcesSlice {
c.resources[resource.ID] = vmResources[i]
}
c.resourcesMu.Unlock()
log.Debug().Str("cluster", c.Cluster.Name).Msgf("[proxmox] updated %d resources", len(c.resources))
return nil
}
// GetResource gets a resource by kind and id.
// kind: lxc or qemu
// id: <vmid>
func (c *Client) GetResource(kind string, id int) (*VMResource, error) {
c.resourcesMu.RLock()
defer c.resourcesMu.RUnlock()
resource, ok := c.resources[kind+"/"+strconv.Itoa(id)]
if !ok {
return nil, ErrResourceNotFound
}
return resource, nil
}
// ReverseLookupResource looks up a resource by IP address, hostname, or alias (any combination).
func (c *Client) ReverseLookupResource(ip net.IP, hostname string, alias string) (*VMResource, error) {
c.resourcesMu.RLock()
defer c.resourcesMu.RUnlock()
shouldCheckIP := ip != nil && !ip.IsLoopback() && !ip.IsUnspecified()
shouldCheckHostname := hostname != ""
shouldCheckAlias := alias != ""
if shouldCheckHostname {
hostname, _, _ = strings.Cut(hostname, ".")
}
for _, resource := range c.resources {
if shouldCheckIP && slices.ContainsFunc(resource.IPs, func(a net.IP) bool { return a.Equal(ip) }) {
return resource, nil
}
if shouldCheckHostname && resource.Name == hostname {
return resource, nil
}
if shouldCheckAlias && resource.Name == alias {
return resource, nil
}
}
return nil, ErrResourceNotFound
}
// ReverseLookupNode looks up a node by name or IP address.
// Returns the node name if found.
func (c *Client) ReverseLookupNode(hostname string, ip net.IP, alias string) string {
shouldCheckHostname := hostname != ""
shouldCheckIP := ip != nil && !ip.IsLoopback() && !ip.IsUnspecified()
shouldCheckAlias := alias != ""
if shouldCheckHostname {
hostname, _, _ = strings.Cut(hostname, ".")
}
for _, node := range c.Cluster.Nodes {
if shouldCheckHostname && node.Name == hostname {
return node.Name
}
if shouldCheckIP {
nodeIP := net.ParseIP(node.IP)
if nodeIP != nil && nodeIP.Equal(ip) {
return node.Name
}
}
if shouldCheckAlias && node.Name == alias {
return node.Name
}
}
return ""
}
// Key implements pool.Object
func (c *Client) Key() string {
return c.Cluster.ID

View File

@@ -0,0 +1,60 @@
package proxmox
import (
"fmt"
"strings"
)
// checkValidInput checks if the input contains invalid characters.
//
// The characters are: & | $ ; ' " ` $( ${ < >
// A shell can use these characters to chain commands, escape the input, or expand variables,
// so the input is rejected if it contains any of them. This prevents command injection.
func checkValidInput(input string) error {
if strings.ContainsAny(input, "&|$;'\"`<>") {
return fmt.Errorf("input contains invalid characters: %q", input)
}
if strings.Contains(input, "$(") {
return fmt.Errorf("input contains $(: %q", input)
}
if strings.Contains(input, "${") {
return fmt.Errorf("input contains ${: %q", input)
}
return nil
}
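// For example (illustrative): checkValidInput(`nginx; rm -rf /`) and checkValidInput("$(id)")
// both return errors, while checkValidInput("nginx") passes.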
func formatTail(files []string, limit int) (string, error) {
for _, file := range files {
if err := checkValidInput(file); err != nil {
return "", err
}
}
var command strings.Builder
command.WriteString("tail -f -q ")
for _, file := range files {
fmt.Fprintf(&command, " %q ", file)
}
if limit > 0 {
fmt.Fprintf(&command, " -n %d", limit)
}
// try --retry first, if it fails, try the command again
return fmt.Sprintf("sh -c '%s --retry 2>/dev/null || %s'", command.String(), command.String()), nil
}
func formatJournalctl(services []string, limit int) (string, error) {
for _, service := range services {
if err := checkValidInput(service); err != nil {
return "", err
}
}
var command strings.Builder
command.WriteString("journalctl -f")
for _, service := range services {
fmt.Fprintf(&command, " -u %q ", service)
}
if limit > 0 {
fmt.Fprintf(&command, " -n %d", limit)
}
return command.String(), nil
}
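// For example (illustrative), formatJournalctl([]string{"nginx"}, 100) yields roughly:
//   journalctl -f -u "nginx" -n 100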

View File

@@ -4,11 +4,13 @@ import (
"context"
"crypto/tls"
"errors"
"math"
"net/http"
"strings"
"time"
"github.com/luthermonson/go-proxmox"
"github.com/rs/zerolog/log"
"github.com/yusing/godoxy/internal/net/gphttp"
gperr "github.com/yusing/goutils/errs"
strutils "github.com/yusing/goutils/strings"
@@ -17,14 +19,24 @@ import (
type Config struct {
URL string `json:"url" validate:"required,url"`
TokenID string `json:"token_id" validate:"required"`
Secret strutils.Redacted `json:"secret" validate:"required"`
Username string `json:"username" validate:"required_without_all=TokenID Secret"`
Password strutils.Redacted `json:"password" validate:"required_without_all=TokenID Secret"`
Realm string `json:"realm"` // default is "pam"
TokenID string `json:"token_id" validate:"required_without_all=Username Password"`
Secret strutils.Redacted `json:"secret" validate:"required_without_all=Username Password"`
NoTLSVerify bool `json:"no_tls_verify" yaml:"no_tls_verify,omitempty"`
client *Client
}
const ResourcePollInterval = 3 * time.Second
const SessionRefreshInterval = 1 * time.Minute
// NodeStatsPollInterval controls how often node stats are streamed when streaming is enabled.
const NodeStatsPollInterval = time.Second
func (c *Config) Client() *Client {
if c.client == nil {
panic("proxmox client accessed before init")
@@ -49,21 +61,105 @@ func (c *Config) Init(ctx context.Context) gperr.Error {
}
opts := []proxmox.Option{
proxmox.WithAPIToken(c.TokenID, c.Secret.String()),
proxmox.WithHTTPClient(&http.Client{
Transport: tr,
}),
}
useCredentials := false
if c.Username != "" && c.Password != "" {
if c.Realm == "" {
c.Realm = "pam"
}
opts = append(opts, proxmox.WithCredentials(&proxmox.Credentials{
Username: c.Username,
Password: c.Password.String(),
Realm: c.Realm,
}))
useCredentials = true
} else {
opts = append(opts, proxmox.WithAPIToken(c.TokenID, c.Secret.String()))
}
c.client = NewClient(c.URL, opts...)
ctx, cancel := context.WithTimeout(ctx, 3*time.Second)
defer cancel()
initCtx, initCtxCancel := context.WithTimeout(ctx, 5*time.Second)
defer initCtxCancel()
if err := c.client.UpdateClusterInfo(ctx); err != nil {
if useCredentials {
err := c.client.CreateSession(initCtx)
if err != nil {
return gperr.New("failed to create session").With(err)
}
}
if err := c.client.UpdateClusterInfo(initCtx); err != nil {
if errors.Is(err, context.DeadlineExceeded) {
return gperr.New("timeout fetching proxmox cluster info")
}
return gperr.New("failed to fetch proxmox cluster info").With(err)
}
{
reqCtx, reqCtxCancel := context.WithTimeout(ctx, ResourcePollInterval)
err := c.client.UpdateResources(reqCtx)
reqCtxCancel()
if err != nil {
log.Warn().Err(err).Str("cluster", c.client.Cluster.Name).Msg("[proxmox] failed to update resources")
}
}
go c.updateResourcesLoop(ctx)
go c.refreshSessionLoop(ctx)
return nil
}
func (c *Config) updateResourcesLoop(ctx context.Context) {
ticker := time.NewTicker(ResourcePollInterval)
defer ticker.Stop()
log.Trace().Str("cluster", c.client.Cluster.Name).Msg("[proxmox] starting resources update loop")
for {
select {
case <-ctx.Done():
log.Trace().Str("cluster", c.client.Cluster.Name).Msg("[proxmox] stopping resources update loop")
return
case <-ticker.C:
reqCtx, reqCtxCancel := context.WithTimeout(ctx, ResourcePollInterval)
err := c.client.UpdateResources(reqCtx)
reqCtxCancel()
if err != nil {
log.Error().Err(err).Str("cluster", c.client.Cluster.Name).Msg("[proxmox] failed to update resources")
}
}
}
}
func (c *Config) refreshSessionLoop(ctx context.Context) {
ticker := time.NewTicker(SessionRefreshInterval)
defer ticker.Stop()
log.Trace().Str("cluster", c.client.Cluster.Name).Msg("[proxmox] starting session refresh loop")
numRetries := 0
for {
select {
case <-ctx.Done():
log.Trace().Str("cluster", c.client.Cluster.Name).Msg("[proxmox] stopping session refresh loop")
return
case <-ticker.C:
reqCtx, reqCtxCancel := context.WithTimeout(ctx, SessionRefreshInterval)
err := c.client.RefreshSession(reqCtx)
reqCtxCancel()
if err != nil {
log.Error().Err(err).Str("cluster", c.client.Cluster.Name).Msg("[proxmox] failed to refresh session")
// exponential backoff
numRetries++
backoff := time.Duration(min(math.Pow(2, float64(numRetries)), 10)) * time.Second
ticker.Reset(backoff)
} else {
ticker.Reset(SessionRefreshInterval)
}
}
}
}

View File

@@ -47,7 +47,7 @@ func (n *Node) LXCAction(ctx context.Context, vmid int, action LXCAction) error
return err
}
task := proxmox.NewTask(upid, n.client)
task := proxmox.NewTask(upid, n.client.Client)
checkTicker := time.NewTicker(proxmoxTaskCheckInterval)
defer checkTicker.Stop()
for {
@@ -170,17 +170,17 @@ func getIPFromNet(s string) (res []net.IP) { // name:...,bridge:...,gw=..,ip=...
}
// LXCGetIPs returns the ip addresses of the container
// it first tries to get the ip addresses from the config
// if that fails, it gets the ip addresses from the interfaces
// it first tries to get the ip addresses from the interfaces
// if that fails, it gets the ip addresses from the config (offline containers)
func (n *Node) LXCGetIPs(ctx context.Context, vmid int) (res []net.IP, err error) {
ips, err := n.LXCGetIPsFromConfig(ctx, vmid)
ips, err := n.LXCGetIPsFromInterfaces(ctx, vmid)
if err != nil {
return nil, err
}
if len(ips) > 0 {
return ips, nil
}
ips, err = n.LXCGetIPsFromInterfaces(ctx, vmid)
ips, err = n.LXCGetIPsFromConfig(ctx, vmid)
if err != nil {
return nil, err
}

View File

@@ -0,0 +1,68 @@
package proxmox
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"github.com/luthermonson/go-proxmox"
)
var ErrNoSession = fmt.Errorf("no session found, make sure username and password are set")
// closeTransportConnections forces close idle HTTP connections to prevent goroutine leaks.
// This is needed because the go-proxmox library's TermWebSocket closer doesn't close
// the underlying HTTP/2 connections, leaving goroutines stuck in writeLoop/readLoop.
func closeTransportConnections(httpClient *http.Client) {
if tr, ok := httpClient.Transport.(*http.Transport); ok {
tr.CloseIdleConnections()
}
}
// LXCCommand connects to the Proxmox VNC websocket and streams command output.
// It returns an io.ReadCloser that streams the command output.
func (n *Node) LXCCommand(ctx context.Context, vmid int, command string) (io.ReadCloser, error) {
node := proxmox.NewNode(n.client.Client, n.name)
lxc, err := node.Container(ctx, vmid)
if err != nil {
return nil, fmt.Errorf("failed to get container: %w", err)
}
if lxc.Status != "running" {
return io.NopCloser(bytes.NewReader(fmt.Appendf(nil, "container %d is not running, status: %s\n", vmid, lxc.Status))), nil
}
return n.NodeCommand(ctx, fmt.Sprintf("pct exec %d -- %s", vmid, command))
}
// LXCJournalctl streams journalctl output from the given container.
//
// On non-systemd systems, it falls back to tailing /var/log/messages.
//
// If services is non-empty, output is filtered to those services.
// If limit is greater than 0, output is limited to that many lines.
func (n *Node) LXCJournalctl(ctx context.Context, vmid int, services []string, limit int) (io.ReadCloser, error) {
command, err := formatJournalctl(services, limit)
if err != nil {
return nil, err
}
if len(services) == 0 {
// add /var/log/messages fallback for non systemd systems
// in tail command, try --retry first, if it fails, try the command again
command = fmt.Sprintf("sh -c '%s 2>/dev/null || tail -f -q --retry /var/log/messages 2>/dev/null || tail -f -q /var/log/messages'", command)
}
return n.LXCCommand(ctx, vmid, command)
}
// LXCTail streams tail output for the given files.
//
// If limit is greater than 0, output is limited to that many lines.
func (n *Node) LXCTail(ctx context.Context, vmid int, files []string, limit int) (io.ReadCloser, error) {
command, err := formatTail(files, limit)
if err != nil {
return nil, err
}
return n.LXCCommand(ctx, vmid, command)
}

View File

@@ -0,0 +1,171 @@
package proxmox
import (
"bytes"
"context"
"fmt"
"io"
"strings"
"time"
)
// const statsScriptLocation = "/tmp/godoxy-stats.sh"
// const statsScript = `#!/bin/sh
// # LXCStats script, written by godoxy.
// printf "%s|%s|%s|%s|%s\n" \
// "$(top -bn1 | grep "Cpu(s)" | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | awk '{print 100 - $1"%"}')" \
// "$(free -b | awk 'NR==2{printf "%.0f\n%.0f", $3, $2}' | numfmt --to=iec-i --suffix=B | paste -sd/)" \
// "$(free | awk 'NR==2{printf "%.2f%%", $3/$2*100}')" \
// "$(awk 'NR>2{r+=$2;t+=$10}END{printf "%.0f\n%.0f", r, t}' /proc/net/dev | numfmt --to=iec-i --suffix=B | paste -sd/)" \
// "$(awk '{r+=$6;w+=$10}END{printf "%.0f\n%.0f", r*512, w*512}' /proc/diskstats | numfmt --to=iec-i --suffix=B | paste -sd/)"`
// var statsScriptBase64 = base64.StdEncoding.EncodeToString([]byte(statsScript))
// var statsInitCommand = fmt.Sprintf("sh -c 'echo %s | base64 -d > %s && chmod +x %s'", statsScriptBase64, statsScriptLocation, statsScriptLocation)
// var statsStreamScript = fmt.Sprintf("watch -t -w -p -n1 '%s'", statsScriptLocation)
// var statsNonStreamScript = statsScriptLocation
// lxcStatsScriptInit initializes the stats script for the given container.
// func (n *Node) lxcStatsScriptInit(ctx context.Context, vmid int) error {
// reader, err := n.LXCCommand(ctx, vmid, statsInitCommand)
// if err != nil {
// return fmt.Errorf("failed to execute stats init command: %w", err)
// }
// reader.Close()
// return nil
// }
// LXCStats streams container stats, like docker stats.
//
// - format: "STATUS|CPU%%|MEM USAGE/LIMIT|MEM%%|NET I/O|BLOCK I/O"
// - example: running|31.1%|9.6GiB/20GiB|48.87%|4.7GiB/3.3GiB|25GiB/36GiB
func (n *Node) LXCStats(ctx context.Context, vmid int, stream bool) (io.ReadCloser, error) {
if !stream {
resource, err := n.client.GetResource("lxc", vmid)
if err != nil {
return nil, err
}
var buf bytes.Buffer
if err := writeLXCStatsLine(resource, &buf); err != nil {
return nil, err
}
return io.NopCloser(&buf), nil
}
// Validate the resource exists before returning a stream.
_, err := n.client.GetResource("lxc", vmid)
if err != nil {
return nil, err
}
pr, pw := io.Pipe()
interval := ResourcePollInterval
if interval <= 0 {
interval = time.Second
}
go func() {
writeSample := func() error {
resource, err := n.client.GetResource("lxc", vmid)
if err != nil {
return err
}
err = writeLXCStatsLine(resource, pw)
return err
}
// Match `watch` behavior: write immediately, then on each tick.
if err := writeSample(); err != nil {
_ = pw.CloseWithError(err)
return
}
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
_ = pw.CloseWithError(ctx.Err())
return
case <-ticker.C:
if err := writeSample(); err != nil {
_ = pw.CloseWithError(err)
return
}
}
}
}()
return pr, nil
}
func writeLXCStatsLine(resource *VMResource, w io.Writer) error {
cpu := fmt.Sprintf("%.1f%%", resource.CPU*100)
memUsage := formatIECBytes(resource.Mem)
memLimit := formatIECBytes(resource.MaxMem)
memPct := "0.00%"
if resource.MaxMem > 0 {
memPct = fmt.Sprintf("%.2f%%", float64(resource.Mem)/float64(resource.MaxMem)*100)
}
netIO := formatIECBytes(resource.NetIn) + "/" + formatIECBytes(resource.NetOut)
blockIO := formatIECBytes(resource.DiskRead) + "/" + formatIECBytes(resource.DiskWrite)
// Keep the format consistent with LXCStatsAlt / `statsScript` (newline terminated).
_, err := fmt.Fprintf(w, "%s|%s|%s/%s|%s|%s|%s\n", resource.Status, cpu, memUsage, memLimit, memPct, netIO, blockIO)
return err
}
// formatIECBytes formats a byte count using IEC binary prefixes (KiB, MiB, GiB, ...),
// similar to `numfmt --to=iec-i --suffix=B`.
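// For example, formatIECBytes(10<<30) returns "10GiB".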
func formatIECBytes(b uint64) string {
const unit = 1024
if b < unit {
return fmt.Sprintf("%dB", b)
}
prefixes := []string{"B", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei"}
val := float64(b)
exp := 0
for val >= unit && exp < len(prefixes)-1 {
val /= unit
exp++
}
// One decimal, trimming trailing ".0" to keep output compact (e.g. "10GiB").
s := fmt.Sprintf("%.1f", val)
s = strings.TrimSuffix(s, ".0")
if exp == 0 {
return s + "B"
}
return s + prefixes[exp] + "B"
}
// LXCStatsAlt streams container stats, like docker stats.
//
// - format: "CPU%%|MEM USAGE/LIMIT|MEM%%|NET I/O|BLOCK I/O"
// - example: 31.1%|9.6GiB/20GiB|48.87%|4.7GiB/3.3GiB|25TiB/36TiB
// func (n *Node) LXCStatsAlt(ctx context.Context, vmid int, stream bool) (io.ReadCloser, error) {
// // Initialize the stats script if it hasn't been initialized yet.
// initScriptErr, _ := n.statsScriptInitErrs.LoadOrCompute(vmid,
// func() (newValue error, cancel bool) {
// if err := n.lxcStatsScriptInit(ctx, vmid); err != nil {
// cancel = errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)
// return err, cancel
// }
// return nil, false
// })
// if initScriptErr != nil {
// return nil, initScriptErr
// }
// if stream {
// return n.LXCCommand(ctx, vmid, statsStreamScript)
// }
// return n.LXCCommand(ctx, vmid, statsNonStreamScript)
// }

View File

@@ -6,18 +6,36 @@ import (
"strings"
"github.com/bytedance/sonic"
"github.com/luthermonson/go-proxmox"
"github.com/yusing/goutils/pool"
)
type NodeConfig struct {
Node string `json:"node"`
VMID *int `json:"vmid"` // unset: auto discover; explicit 0: node-level route; >0: lxc/qemu resource route
VMName string `json:"vmname,omitempty"`
Services []string `json:"services,omitempty" aliases:"service"`
Files []string `json:"files,omitempty" aliases:"file"`
} // @name ProxmoxNodeConfig
type Node struct {
name string
id string // likely node/<name>
client *proxmox.Client
client *Client
// statsScriptInitErrs *xsync.Map[int, error]
}
var Nodes = pool.New[*Node]("proxmox_nodes")
func NewNode(client *Client, name, id string) *Node {
return &Node{
name: name,
id: id,
client: client,
// statsScriptInitErrs: xsync.NewMap[int, error](xsync.WithGrowOnly()),
}
}
func AvailableNodeNames() string {
if Nodes.Size() == 0 {
return ""
@@ -38,6 +56,10 @@ func (n *Node) Name() string {
return n.name
}
func (n *Node) Client() *Client {
return n.client
}
func (n *Node) String() string {
return fmt.Sprintf("%s (%s)", n.name, n.id)
}

View File

@@ -0,0 +1,144 @@
package proxmox
import (
"bytes"
"context"
"fmt"
"io"
"github.com/gorilla/websocket"
"github.com/luthermonson/go-proxmox"
)
// NodeCommand connects to the Proxmox VNC websocket and streams command output.
// It returns an io.ReadCloser that streams the command output.
func (n *Node) NodeCommand(ctx context.Context, command string) (io.ReadCloser, error) {
if !n.client.HasSession() {
return nil, ErrNoSession
}
node := proxmox.NewNode(n.client.Client, n.name)
term, err := node.TermProxy(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get term proxy: %w", err)
}
send, recv, errs, closeWS, err := node.TermWebSocket(term)
if err != nil {
return nil, fmt.Errorf("failed to connect to term websocket: %w", err)
}
// Wrap the websocket closer to also close HTTP transport connections.
// This prevents goroutine leaks when streaming connections are interrupted.
httpClient := n.client.GetHTTPClient()
closeFn := func() error {
closeTransportConnections(httpClient)
return closeWS()
}
handleSend := func(data []byte) error {
select {
case <-ctx.Done():
return ctx.Err()
case send <- data:
return nil
case err := <-errs:
return fmt.Errorf("failed to send: %w", err)
}
}
// Send command
cmd := []byte(command + "\n")
if err := handleSend(cmd); err != nil {
closeFn()
return nil, err
}
// Create a pipe to stream the websocket messages
pr, pw := io.Pipe()
// Command line without trailing newline for matching in output
cmdLine := cmd[:len(cmd)-1]
// Start a goroutine to read from websocket and write to pipe
go func() {
defer closeFn()
defer pw.Close()
seenCommand := false
shouldSkip := true
for {
select {
case <-ctx.Done():
_ = pw.CloseWithError(ctx.Err())
return
case msg := <-recv:
// skip the header message like
// Linux pve 6.17.4-1-pve #1 SMP PREEMPT_DYNAMIC PMX 6.17.4-1 (2025-12-03T15:42Z) x86_64
//
// The programs included with the Debian GNU/Linux system are free software;
// the exact distribution terms for each program are described in the
// individual files in /usr/share/doc/*/copyright.
//
// Debian GNU/Linux comes with ABSOLUTELY NO WARRANTY, to the extent
// permitted by applicable law.
//
// root@pve:~# pct exec 101 -- journalctl -u "sftpgo" -f
//
// send begins after the line above
if shouldSkip {
// First, check if this message contains our command echo
if !seenCommand && bytes.Contains(msg, cmdLine) {
seenCommand = true
}
// Only stop skipping after we've seen the command AND output markers
if seenCommand {
if bytes.Contains(msg, []byte("\x1b[H")) || // watch cursor home
bytes.Contains(msg, []byte("\x1b[?2004l")) { // bracket paste OFF (command ended)
shouldSkip = false
}
}
continue
}
if _, err := pw.Write(msg); err != nil {
return
}
case err := <-errs:
if err != nil {
if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
return
}
_ = pw.CloseWithError(err)
return
}
}
}
}()
return pr, nil
}
// NodeJournalctl streams journalctl output from the node.
//
// If services is non-empty, output is filtered to those services.
// If limit is greater than 0, output is limited to that many lines.
func (n *Node) NodeJournalctl(ctx context.Context, services []string, limit int) (io.ReadCloser, error) {
command, err := formatJournalctl(services, limit)
if err != nil {
return nil, err
}
return n.NodeCommand(ctx, command)
}
// NodeTail streams tail output for the given files.
//
// If limit is greater than 0, output is limited to that many lines.
func (n *Node) NodeTail(ctx context.Context, files []string, limit int) (io.ReadCloser, error) {
command, err := formatTail(files, limit)
if err != nil {
return nil, err
}
return n.NodeCommand(ctx, command)
}

View File

@@ -0,0 +1,143 @@
package proxmox
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"strings"
"time"
)
type NodeStats struct {
KernelVersion string `json:"kernel_version"`
PVEVersion string `json:"pve_version"`
CPUUsage string `json:"cpu_usage"`
CPUModel string `json:"cpu_model"`
MemUsage string `json:"mem_usage"`
MemTotal string `json:"mem_total"`
MemPct string `json:"mem_pct"`
RootFSUsage string `json:"rootfs_usage"`
RootFSTotal string `json:"rootfs_total"`
RootFSPct string `json:"rootfs_pct"`
Uptime string `json:"uptime"`
LoadAvg1m string `json:"load_avg_1m"`
LoadAvg5m string `json:"load_avg_5m"`
LoadAvg15m string `json:"load_avg_15m"`
}
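// Samples are written to the stream as newline-delimited JSON objects using the
// field tags above, one object per NodeStatsPollInterval when streaming.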
// NodeStats streams node stats, like docker stats.
func (n *Node) NodeStats(ctx context.Context, stream bool) (io.ReadCloser, error) {
if !stream {
var buf bytes.Buffer
if err := n.writeNodeStatsLine(ctx, &buf); err != nil {
return nil, err
}
return io.NopCloser(&buf), nil
}
pr, pw := io.Pipe()
go func() {
writeSample := func() error {
return n.writeNodeStatsLine(ctx, pw)
}
// Match `watch` behavior: write immediately, then on each tick.
if err := writeSample(); err != nil {
_ = pw.CloseWithError(err)
return
}
ticker := time.NewTicker(NodeStatsPollInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
_ = pw.CloseWithError(ctx.Err())
return
case <-ticker.C:
if err := writeSample(); err != nil {
_ = pw.CloseWithError(err)
return
}
}
}
}()
return pr, nil
}
func (n *Node) writeNodeStatsLine(ctx context.Context, w io.Writer) error {
// Fetch node status for CPU and memory metrics.
node, err := n.client.Node(ctx, n.name)
if err != nil {
return err
}
cpu := fmt.Sprintf("%.1f%%", node.CPU*100)
memUsage := formatIECBytes(node.Memory.Used)
memTotal := formatIECBytes(node.Memory.Total)
memPct := "0.00%"
if node.Memory.Total > 0 {
memPct = fmt.Sprintf("%.2f%%", float64(node.Memory.Used)/float64(node.Memory.Total)*100)
}
rootFSUsage := formatIECBytes(node.RootFS.Used)
rootFSTotal := formatIECBytes(node.RootFS.Total)
rootFSPct := "0.00%"
if node.RootFS.Total > 0 {
rootFSPct = fmt.Sprintf("%.2f%%", float64(node.RootFS.Used)/float64(node.RootFS.Total)*100)
}
uptime := formatDuration(node.Uptime)
if len(node.LoadAvg) != 3 {
return fmt.Errorf("unexpected load average length: %d, expected 3 (1m, 5m, 15m)", len(node.LoadAvg))
}
// Linux 6.17.4-1-pve #1 SMP PREEMPT_DYNAMIC PMX 6.17.4-1 (2025-12-03T15:42Z)
// => 6.17.4-1-pve #1 SMP PREEMPT_DYNAMIC PMX 6.17.4-1 (2025-12-03T15:42Z)
kversion, _ := strings.CutPrefix(node.Kversion, "Linux ")
// => 6.17.4-1-pve
kversion, _, _ = strings.Cut(kversion, " ")
nodeStats := NodeStats{
KernelVersion: kversion,
PVEVersion: node.PVEVersion,
CPUUsage: cpu,
CPUModel: node.CPUInfo.Model,
MemUsage: memUsage,
MemTotal: memTotal,
MemPct: memPct,
RootFSUsage: rootFSUsage,
RootFSTotal: rootFSTotal,
RootFSPct: rootFSPct,
Uptime: uptime,
LoadAvg1m: node.LoadAvg[0],
LoadAvg5m: node.LoadAvg[1],
LoadAvg15m: node.LoadAvg[2],
}
err = json.NewEncoder(w).Encode(nodeStats)
return err
}
// formatDuration formats uptime in seconds to a human-readable string.
func formatDuration(seconds uint64) string {
if seconds < 60 {
return fmt.Sprintf("%ds", seconds)
}
days := seconds / 86400
hours := (seconds % 86400) / 3600
mins := (seconds % 3600) / 60
if days > 0 {
return fmt.Sprintf("%dd%dh%dm", days, hours, mins)
}
if hours > 0 {
return fmt.Sprintf("%dh%dm", hours, mins)
}
return fmt.Sprintf("%dm", mins)
}

View File

@@ -143,8 +143,13 @@ func (s *FileServer) RootPath() string {
// ServeHTTP implements http.Handler.
func (s *FileServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
s.handler.ServeHTTP(w, req)
if s.accessLogger != nil {
s.accessLogger.Log(req, req.Response)
rec := accesslog.GetResponseRecorder(w)
w = rec
defer func() {
s.accessLogger.LogRequest(req, rec.Response())
accesslog.PutResponseRecorder(rec)
}()
}
s.handler.ServeHTTP(w, req)
}

View File

@@ -64,6 +64,8 @@ type (
AccessLog *accesslog.RequestLoggerConfig `json:"access_log,omitempty" extensions:"x-nullable"`
Agent string `json:"agent,omitempty"`
Proxmox *proxmox.NodeConfig `json:"proxmox,omitempty" extensions:"x-nullable"`
Idlewatcher *types.IdlewatcherConfig `json:"idlewatcher,omitempty" extensions:"x-nullable"`
Metadata `deserialize:"-"`
@@ -130,6 +132,10 @@ func (r Routes) Contains(alias string) bool {
}
func (r *Route) Validate() gperr.Error {
// wait for alias to be set
if r.Alias == "" {
return nil
}
// pcs := make([]uintptr, 1)
// runtime.Callers(2, pcs)
// f := runtime.FuncForPC(pcs[0])
@@ -176,69 +182,143 @@ func (r *Route) validate() gperr.Error {
}
}, r.started)
if r.Idlewatcher != nil && r.Idlewatcher.Proxmox != nil {
node := r.Idlewatcher.Proxmox.Node
vmid := r.Idlewatcher.Proxmox.VMID
if node == "" {
return gperr.Errorf("node (proxmox node name) is required")
if r.Proxmox != nil && r.Idlewatcher != nil {
r.Idlewatcher.Proxmox = &types.ProxmoxConfig{
Node: r.Proxmox.Node,
}
if vmid <= 0 {
return gperr.Errorf("vmid (lxc id) is required")
if r.Proxmox.VMID != nil {
r.Idlewatcher.Proxmox.VMID = *r.Proxmox.VMID
}
if r.Host == DefaultHost {
containerName := r.Idlewatcher.ContainerName()
// get ip addresses of the vmid
node, ok := proxmox.Nodes.Get(node)
if !ok {
return gperr.Errorf("proxmox node %s not found in pool", node)
}
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if r.Proxmox == nil && r.Idlewatcher != nil && r.Idlewatcher.Proxmox != nil {
r.Proxmox = &proxmox.NodeConfig{
Node: r.Idlewatcher.Proxmox.Node,
VMID: &r.Idlewatcher.Proxmox.VMID,
}
}
ips, err := node.LXCGetIPs(ctx, vmid)
if err != nil {
return gperr.Errorf("failed to get ip addresses of vmid %d: %w", vmid, err)
}
if len(ips) == 0 {
return gperr.Multiline().
Addf("no ip addresses found for %s", containerName).
Adds("make sure you have set static ip address for container instead of dhcp").
Subject(containerName)
}
l := log.With().Str("container", containerName).Logger()
l.Info().Msg("checking if container is running")
running, err := node.LXCIsRunning(ctx, vmid)
if err != nil {
return gperr.New("failed to check container state").With(err)
}
if !running {
l.Info().Msg("starting container")
if err := node.LXCAction(ctx, vmid, proxmox.LXCStart); err != nil {
return gperr.New("failed to start container").With(err)
if (r.Proxmox == nil || r.Proxmox.Node == "" || r.Proxmox.VMID == nil) && r.Container == nil {
proxmoxProviders := config.WorkingState.Load().Value().Providers.Proxmox
if len(proxmoxProviders) > 0 {
// it's fine if ip is nil
hostname := r.Host
ip := net.ParseIP(hostname)
for _, p := range proxmoxProviders {
// First check if hostname, IP, or alias matches a node (node-level route)
if nodeName := p.Client().ReverseLookupNode(hostname, ip, r.Alias); nodeName != "" {
zero := 0
if r.Proxmox == nil {
r.Proxmox = &proxmox.NodeConfig{}
}
r.Proxmox.Node = nodeName
r.Proxmox.VMID = &zero
r.Proxmox.VMName = ""
log.Info().
Str("node", nodeName).
Msgf("found proxmox node for route %q", r.Alias)
break
}
}
l.Info().Msgf("finding reachable ip addresses")
errs := gperr.NewBuilder("failed to find reachable ip addresses")
for _, ip := range ips {
if err := netutils.PingTCP(ctx, ip, r.Port.Proxy); err != nil {
errs.Add(gperr.Unwrap(err).Subjectf("%s:%d", ip, r.Port.Proxy))
} else {
r.Host = ip.String()
l.Info().Msgf("using ip %s", r.Host)
// Then check if hostname, IP, or alias matches a VM resource
resource, _ := p.Client().ReverseLookupResource(ip, hostname, r.Alias)
if resource != nil {
vmid := int(resource.VMID)
if r.Proxmox == nil {
r.Proxmox = &proxmox.NodeConfig{}
}
r.Proxmox.Node = resource.Node
r.Proxmox.VMID = &vmid
r.Proxmox.VMName = resource.Name
log.Info().
Str("node", resource.Node).
Int("vmid", int(resource.VMID)).
Str("vmname", resource.Name).
Msgf("found proxmox resource for route %q", r.Alias)
break
}
}
}
}
if r.Proxmox != nil {
nodeName := r.Proxmox.Node
vmid := r.Proxmox.VMID
if nodeName == "" || vmid == nil {
return gperr.Errorf("node (proxmox node name) is required")
}
node, ok := proxmox.Nodes.Get(nodeName)
if !ok {
return gperr.Errorf("proxmox node %s not found in pool", nodeName)
}
// Node-level route (VMID = 0)
if *vmid == 0 {
r.Scheme = route.SchemeHTTPS
if r.Host == DefaultHost {
return gperr.Multiline().
Addf("no reachable ip addresses found, tried %d IPs", len(ips)).
With(errs.Error()).
Subject(containerName)
r.Host = node.Client().BaseURL.Hostname()
}
port, _ := strconv.Atoi(node.Client().BaseURL.Port())
if port == 0 {
port = 8006
}
r.Port.Proxy = port
} else {
res, err := node.Client().GetResource("lxc", *vmid)
if err != nil {
return gperr.Wrap(err) // ErrResourceNotFound
}
r.Proxmox.VMName = res.Name
if r.Host == DefaultHost {
containerName := res.Name
// get ip addresses of the vmid
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
ips := res.IPs
if len(ips) == 0 {
return gperr.Multiline().
Addf("no ip addresses found for %s", containerName).
Adds("make sure you have set static ip address for container instead of dhcp").
Subject(containerName)
}
l := log.With().Str("container", containerName).Logger()
l.Info().Msg("checking if container is running")
running, err := node.LXCIsRunning(ctx, *vmid)
if err != nil {
return gperr.New("failed to check container state").With(err)
}
if !running {
l.Info().Msg("starting container")
if err := node.LXCAction(ctx, *vmid, proxmox.LXCStart); err != nil {
return gperr.New("failed to start container").With(err)
}
}
l.Info().Msgf("finding reachable ip addresses")
errs := gperr.NewBuilder("failed to find reachable ip addresses")
for _, ip := range ips {
if err := netutils.PingTCP(ctx, ip, r.Port.Proxy); err != nil {
errs.Add(gperr.Unwrap(err).Subjectf("%s:%d", ip, r.Port.Proxy))
} else {
r.Host = ip.String()
l.Info().Msgf("using ip %s", r.Host)
break
}
}
if r.Host == DefaultHost {
return gperr.Multiline().
Addf("no reachable ip addresses found, tried %d IPs", len(ips)).
With(errs.Error()).
Subject(containerName)
}
}
}
}
@@ -485,11 +565,24 @@ func (r *Route) References() []string {
}
if r.Container != nil {
if r.Container.ContainerName != r.Alias {
if r.Container.ContainerName != aliasRef {
return []string{r.Container.ContainerName, aliasRef, r.Container.Image.Name, r.Container.Image.Author}
}
return []string{r.Container.Image.Name, aliasRef, r.Container.Image.Author}
}
if r.Proxmox != nil {
if len(r.Proxmox.Services) > 0 && r.Proxmox.Services[0] != aliasRef {
if r.Proxmox.VMName != aliasRef {
return []string{r.Proxmox.VMName, aliasRef, r.Proxmox.Services[0]}
}
return []string{r.Proxmox.Services[0], aliasRef}
} else {
if r.Proxmox.VMName != aliasRef {
return []string{r.Proxmox.VMName, aliasRef}
}
}
}
return []string{aliasRef}
}

View File

@@ -117,6 +117,43 @@ func GetHealthInfoSimple() map[string]types.HealthStatus
func ByProvider() map[string][]types.Route
```
## Proxmox Integration
Routes can be automatically linked to Proxmox nodes or LXC containers through reverse lookup during validation.
### Node-Level Routes
Routes can be linked to a Proxmox node directly (VMID = 0) when the route's hostname, IP, or alias matches a node name or IP:
```go
// Route linked to Proxmox node (no specific VM)
route.Proxmox = &proxmox.NodeConfig{
Node: "pve-node-01",
VMID: 0, // node-level, no container
VMName: "",
}
```
### Container-Level Routes
Routes are linked to LXC containers when they match a VM resource by hostname, IP, or alias:
```go
// Route linked to LXC container
route.Proxmox = &proxmox.NodeConfig{
Node: "pve-node-01",
VMID: 100,
VMName: "my-container",
}
```
### Lookup Priority
1. **Node match** - If hostname, IP, or alias matches a Proxmox node
2. **VM match** - If hostname, IP, or alias matches a VM resource
Node-level routes skip container control logic (start/check IPs) and can be used to proxy node services directly.
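A condensed sketch of that lookup order during route validation (names follow the client API; error handling omitted):
```go
if nodeName := client.ReverseLookupNode(hostname, ip, alias); nodeName != "" {
	// Node-level route: VMID = 0, proxied to the node itself.
} else if res, err := client.ReverseLookupResource(ip, hostname, alias); err == nil {
	// Container-level route: node, VMID and name come from the resource.
	_ = res
}
```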
## Architecture
### Core Components
@@ -174,10 +211,11 @@ sequenceDiagram
## Dependency and Integration Map
| Dependency | Purpose |
| -------------------------------- | --------------------------------- |
| `internal/types` | Route and health type definitions |
| `github.com/yusing/goutils/pool` | Thread-safe pool implementation |
| Dependency | Purpose |
| -------------------------------- | ---------------------------------- |
| `internal/types` | Route and health type definitions |
| `internal/proxmox` | Proxmox node/container integration |
| `github.com/yusing/goutils/pool` | Thread-safe pool implementation |
## Observability

View File

@@ -78,3 +78,14 @@ func Get(alias string) (types.Route, bool) {
}
return nil, false
}
// GetIncludeExcluded returns the route with the given alias, including excluded routes.
func GetIncludeExcluded(alias string) (types.Route, bool) {
if r, ok := HTTP.Get(alias); ok {
return r, true
}
if r, ok := Stream.Get(alias); ok {
return r, true
}
return Excluded.Get(alias)
}

View File

@@ -79,6 +79,10 @@ var commands = map[string]struct {
},
build: func(args any) CommandHandler {
return NonTerminatingCommand(func(w http.ResponseWriter, r *http.Request) error {
if authHandler == nil {
http.Error(w, "Auth handler not initialized", http.StatusInternalServerError)
return errTerminated
}
if !authHandler(w, r) {
return errTerminated
}

View File

@@ -50,7 +50,7 @@ func openFile(path string) (io.WriteCloser, gperr.Error) {
return noopWriteCloser{buf}, nil
}
f, err := accesslog.NewFileIO(path)
f, err := accesslog.OpenFile(path)
if err != nil {
return nil, ErrInvalidArguments.With(err)
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/pires/go-proxyproto"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/yusing/godoxy/internal/acl"
"github.com/yusing/godoxy/internal/agentpool"
"github.com/yusing/godoxy/internal/entrypoint"
@@ -50,12 +51,14 @@ func (s *TCPTCPStream) ListenAndServe(ctx context.Context, preDial, onRead netty
return
}
if acl, ok := ctx.Value(acl.ContextKey{}).(*acl.Config); ok {
log.Debug().Str("listener", s.listener.Addr().String()).Msg("wrapping listener with ACL")
s.listener = acl.WrapTCP(s.listener)
}
if proxyProto := entrypoint.ActiveConfig.Load().SupportProxyProtocol; proxyProto {
s.listener = &proxyproto.Listener{Listener: s.listener}
}
if acl := acl.ActiveConfig.Load(); acl != nil {
s.listener = acl.WrapTCP(s.listener)
}
s.preDial = preDial
s.onRead = onRead

View File

@@ -10,6 +10,7 @@ import (
"time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/yusing/godoxy/internal/acl"
"github.com/yusing/godoxy/internal/agentpool"
nettypes "github.com/yusing/godoxy/internal/net/types"
@@ -81,7 +82,8 @@ func (s *UDPUDPStream) ListenAndServe(ctx context.Context, preDial, onRead netty
return
}
s.listener = l
if acl := acl.ActiveConfig.Load(); acl != nil {
if acl, ok := ctx.Value(acl.ContextKey{}).(*acl.Config); ok {
log.Debug().Str("listener", s.listener.LocalAddr().String()).Msg("wrapping listener with ACL")
s.listener = acl.WrapUDP(s.listener)
}
s.preDial = preDial

View File

@@ -41,11 +41,11 @@ type (
DockerCfg DockerProviderConfig `json:"docker_cfg" validate:"required"`
ContainerID string `json:"container_id" validate:"required"`
ContainerName string `json:"container_name" validate:"required"`
} // @name DockerConfig
} // @name IdlewatcherDockerConfig
ProxmoxConfig struct {
Node string `json:"node" validate:"required"`
VMID int `json:"vmid" validate:"required"`
} // @name ProxmoxConfig
} // @name IdlewatcherProxmoxNodeConfig
)
const (

View File

@@ -8,18 +8,17 @@ import (
)
var (
configDirWatcher *DirWatcher
configDirWatcherMu sync.Mutex
configDirWatcher *DirWatcher
configDirWatcherInitOnce sync.Once
)
func initConfigDirWatcher() {
t := task.RootTask("config_dir_watcher", false)
configDirWatcher = NewDirectoryWatcher(t, common.ConfigBasePath)
}
// create a new file watcher for file under ConfigBasePath.
func NewConfigFileWatcher(filename string) Watcher {
configDirWatcherMu.Lock()
defer configDirWatcherMu.Unlock()
if configDirWatcher == nil {
t := task.RootTask("config_dir_watcher", false)
configDirWatcher = NewDirectoryWatcher(t, common.ConfigBasePath)
}
configDirWatcherInitOnce.Do(initConfigDirWatcher)
return configDirWatcher.Add(filename)
}

View File

@@ -18,7 +18,7 @@ type ImplDoc = {
const START_MARKER = "// GENERATED-IMPL-SIDEBAR-START";
const END_MARKER = "// GENERATED-IMPL-SIDEBAR-END";
const skipSubmodules = ["internal/go-oidc/", "internal/gopsutil/"];
const skipSubmodules = ["internal/go-oidc/", "internal/gopsutil/", "internal/go-proxmox/"];
function escapeRegex(s: string) {
return s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
@@ -165,9 +165,8 @@ function rewriteImplMarkdown(params: {
const repoRel = path.posix.normalize(
path.posix.join(pkgPath, filePath)
);
const githubUrl = `${repoUrl}/blob/main/${repoRel}${
line ? `#L${line}` : ""
}`;
const githubUrl = `${repoUrl}/blob/main/${repoRel}${line ? `#L${line}` : ""
}`;
const rewritten = `${githubUrl}${fragment}`;
return angleWrapped === urlRaw ? rewritten : `<${rewritten}>`;
}

View File

@@ -15,7 +15,7 @@ require (
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/puzpuzpuz/xsync/v4 v4.3.0 // indirect
github.com/puzpuzpuz/xsync/v4 v4.4.0 // indirect
github.com/rs/zerolog v1.34.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/text v0.33.0 // indirect

View File

@@ -14,8 +14,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/puzpuzpuz/xsync/v4 v4.3.0 h1:w/bWkEJdYuRNYhHn5eXnIT8LzDM1O629X1I9MJSkD7Q=
github.com/puzpuzpuz/xsync/v4 v4.3.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/puzpuzpuz/xsync/v4 v4.4.0 h1:vlSN6/CkEY0pY8KaB0yqo/pCLZvp9nhdbBdjipT4gWo=
github.com/puzpuzpuz/xsync/v4 v4.4.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=