Compare commits
210 Commits
v0.27.1...update_fla

@@ -52,7 +52,7 @@ go test ./integration -timeout 45m

**Timeout Guidelines by Test Type**:

- **Basic functionality tests**: `--timeout=900s` (15 minutes minimum)
- **Route/ACL tests**: `--timeout=1200s` (20 minutes)
- **HA/failover tests**: `--timeout=1800s` (30 minutes)
- **Long-running tests**: `--timeout=2100s` (35 minutes)
- **Full test suite**: `-timeout 45m` (45 minutes)

@@ -71,7 +71,7 @@ go run ./cmd/hi run "TestName" --timeout=60s

- **Slow tests** (5+ min): Node expiration, HA failover
- **Long-running tests** (10+ min): `TestNodeOnlineStatus` runs for 12 minutes

-**CRITICAL**: Only ONE test can run at a time due to Docker port conflicts and resource constraints.
+**CONCURRENT EXECUTION**: Multiple tests CAN run simultaneously. Each test run gets a unique Run ID for isolation. See "Concurrent Execution and Run ID Isolation" section below.

## Test Artifacts and Log Analysis

@@ -98,6 +98,97 @@ When tests fail, examine artifacts in this specific order:

4. **Client status dumps** (`*_status.json`): Network state and peer connectivity information
5. **Database snapshots** (`.db` files): For data consistency and state persistence issues

## Concurrent Execution and Run ID Isolation

### Overview

The integration test system supports running multiple tests concurrently on the same Docker daemon. Each test run is isolated through a unique Run ID that ensures containers, networks, and cleanup operations don't interfere with each other.

### Run ID Format and Usage

Each test run generates a unique Run ID in the format `YYYYMMDD-HHMMSS-{6-char-hash}` (a minimal generation sketch follows the list below).

- Example: `20260109-104215-mdjtzx`

The Run ID is used for:

- **Container naming**: `ts-{runIDShort}-{version}-{hash}` (e.g., `ts-mdjtzx-1-74-fgdyls`)
- **Docker labels**: All containers get the `hi.run-id={runID}` label
- **Log directories**: `control_logs/{runID}/`
- **Cleanup isolation**: Only containers with a matching run ID are cleaned up
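
For illustration only, here is a minimal Go sketch that produces an ID of this shape; the helper name and the random six-character suffix are assumptions, not the test runner's actual implementation:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
	"time"
)

// newRunID is a hypothetical helper: a timestamp plus a six-character
// lowercase suffix, matching the documented YYYYMMDD-HHMMSS-{6-char-hash} shape.
func newRunID() string {
	const letters = "abcdefghijklmnopqrstuvwxyz"
	suffix := make([]byte, 6)
	for i := range suffix {
		n, _ := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
		suffix[i] = letters[n.Int64()]
	}
	return fmt.Sprintf("%s-%s", time.Now().Format("20060102-150405"), suffix)
}

func main() {
	fmt.Println(newRunID()) // e.g. 20260109-104215-mdjtzx
}
```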

### Container Isolation Mechanisms

1. **Unique Container Names**: Each container includes the run ID for identification
2. **Docker Labels**: `hi.run-id` and `hi.test-type` labels on all containers
3. **Dynamic Port Allocation**: All ports use `{HostPort: "0"}` so the kernel assigns free ports
4. **Per-Run Networks**: Network names include the scenario hash for isolation
5. **Isolated Cleanup**: `killTestContainersByRunID()` only removes containers matching the run ID (see the sketch after this list)
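
As an illustration of how the `hi.run-id` label can be consumed from tooling, here is a minimal Go sketch that shells out to the same `docker ps` filter shown later in this document; it is an example, not part of the `hi` runner:

```go
package main

import (
	"fmt"
	"os/exec"
)

// listRunContainers returns the names of containers carrying the
// hi.run-id label for one specific test run.
func listRunContainers(runID string) ([]byte, error) {
	cmd := exec.Command(
		"docker", "ps",
		"--filter", "label=hi.run-id="+runID,
		"--format", "{{.Names}}",
	)
	return cmd.Output()
}

func main() {
	out, err := listRunContainers("20260109-104215-mdjtzx")
	if err != nil {
		fmt.Println("docker ps failed:", err)
		return
	}
	fmt.Printf("%s", out)
}
```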

### ⚠️ CRITICAL: Never Interfere with Other Test Runs

**FORBIDDEN OPERATIONS** when other tests may be running:

```bash
# ❌ NEVER do global container cleanup while tests are running
docker rm -f $(docker ps -q --filter "name=hs-")
docker rm -f $(docker ps -q --filter "name=ts-")

# ❌ NEVER kill all test containers
# This will destroy other agents' test sessions!

# ❌ NEVER prune all Docker resources during active tests
docker system prune -f  # Only safe when NO tests are running
```

**SAFE OPERATIONS**:

```bash
# ✅ Clean up only YOUR test run's containers (by run ID)
# The test runner does this automatically via cleanup functions

# ✅ Clean stale (stopped/exited) containers only
# Pre-test cleanup only removes stopped containers, not running ones

# ✅ Check what's running before cleanup
docker ps --filter "name=headscale-test-suite" --format "{{.Names}}"
```

### Running Concurrent Tests

```bash
# Start multiple tests in parallel - each gets unique run ID
go run ./cmd/hi run "TestPingAllByIP" &
go run ./cmd/hi run "TestACLAllowUserDst" &
go run ./cmd/hi run "TestOIDCAuthenticationPingAll" &

# Monitor running test suites
docker ps --filter "name=headscale-test-suite" --format "table {{.Names}}\t{{.Status}}"
```

### Agent Session Isolation Rules

When working as an agent:

1. **Your run ID is unique**: Each test you start gets its own run ID
2. **Never clean up globally**: Only use run ID-specific cleanup
3. **Check before cleanup**: Verify no other tests are running if you need to prune resources
4. **Respect other sessions**: Other agents may have tests running concurrently
5. **Log directories are isolated**: Your artifacts are in `control_logs/{your-run-id}/`

### Identifying Your Containers

Your test containers can be identified by:

- The run ID in the container name
- The `hi.run-id` Docker label
- The test suite container: `headscale-test-suite-{your-run-id}`

```bash
# List containers for a specific run ID
docker ps --filter "label=hi.run-id=20260109-104215-mdjtzx"

# Get your run ID from the test output
# Look for: "Run ID: 20260109-104215-mdjtzx"
```

## Common Failure Patterns and Root Cause Analysis

### CRITICAL MINDSET: Code Issues vs Infrastructure Issues

@@ -250,10 +341,10 @@ require.NotNil(t, targetNode, "should find expected node")

   - **Detection**: No progress in logs for >2 minutes during initialization
   - **Solution**: `docker system prune -f` and retry

-3. **Docker Port Conflicts**: Multiple tests trying to use the same ports
-   - **Pattern**: "bind: address already in use" errors
-   - **Detection**: Port binding failures in Docker logs
-   - **Solution**: Only run ONE test at a time
+3. **Docker Resource Exhaustion**: Too many concurrent tests overwhelming the system
+   - **Pattern**: Container creation timeouts, OOM kills, slow test execution
+   - **Detection**: System load high, Docker daemon slow to respond
+   - **Solution**: Reduce the number of concurrent tests; wait for completion before starting more

**CODE ISSUES (99% of failures)**:

1. **Route Approval Process Failures**: Routes not getting approved when they should be

@@ -273,12 +364,22 @@ require.NotNil(t, targetNode, "should find expected node")

### Critical Test Environment Setup

-**Pre-Test Cleanup (MANDATORY)**:
+**Pre-Test Cleanup**:
+
+The test runner automatically handles cleanup:
+
+- **Before test**: Removes only stale (stopped/exited) containers - does NOT affect running tests
+- **After test**: Removes only containers belonging to the specific run ID

```bash
-# ALWAYS run this before each test
+# Only clean old log directories if disk space is low
rm -rf control_logs/202507*
-docker system prune -f
df -h  # Verify sufficient disk space
+
+# SAFE: Clean only stale/stopped containers (does not affect running tests)
+# The test runner does this automatically via cleanupStaleTestContainers()
+
+# ⚠️ DANGEROUS: Only use when NO tests are running
+docker system prune -f
```

**Environment Verification**:

@@ -286,8 +387,8 @@ df -h  # Verify sufficient disk space
# Verify system readiness
go run ./cmd/hi doctor

-# Check for running containers that might conflict
-docker ps
+# Check what tests are currently running (ALWAYS check before global cleanup)
+docker ps --filter "name=headscale-test-suite" --format "{{.Names}}"
```

### Specific Test Categories and Known Issues

@@ -433,7 +534,7 @@ When you understand a test's purpose through debugging, always add comprehensive

//
// The test verifies:
// - Route announcements are received and tracked
// - ACL policies control route approval correctly
// - Only approved routes appear in peer network maps
// - Route state persists correctly in the database
func TestSubnetRoutes(t *testing.T) {

@@ -535,7 +636,7 @@ var nodeKey key.NodePublic

assert.EventuallyWithT(t, func(c *assert.CollectT) {
    nodes, err := headscale.ListNodes()
    assert.NoError(c, err)

    for _, node := range nodes {
        if node.GetName() == "router" {
            routeNode = node

@@ -550,7 +651,7 @@ assert.EventuallyWithT(t, func(c *assert.CollectT) {

assert.EventuallyWithT(t, func(c *assert.CollectT) {
    status, err := client.Status()
    assert.NoError(c, err)

    peerStatus, ok := status.Peer[nodeKey]
    assert.True(c, ok, "peer should exist in status")
    requirePeerSubnetRoutesWithCollect(c, peerStatus, expectedPrefixes)

@@ -566,7 +667,7 @@ assert.EventuallyWithT(t, func(c *assert.CollectT) {
    nodes, err := headscale.ListNodes()
    assert.NoError(c, err)
    assert.Len(c, nodes, 2)

    // Second unrelated external call - WRONG!
    status, err := client.Status()
    assert.NoError(c, err)

@@ -577,7 +678,7 @@ assert.EventuallyWithT(t, func(c *assert.CollectT) {

assert.EventuallyWithT(t, func(c *assert.CollectT) {
    nodes, err := headscale.ListNodes()
    assert.NoError(c, err)

    // NEVER do this!
    assert.EventuallyWithT(t, func(c2 *assert.CollectT) {
        status, _ := client.Status()

@@ -666,11 +767,11 @@ When working within EventuallyWithT blocks where you need to prevent panics:

assert.EventuallyWithT(t, func(c *assert.CollectT) {
    nodes, err := headscale.ListNodes()
    assert.NoError(c, err)

    // For array bounds - use require with t to prevent panic
    assert.Len(c, nodes, 6) // Test expectation
    require.GreaterOrEqual(t, len(nodes), 3, "need at least 3 nodes to avoid panic")

    // For nil pointer access - use require with t before dereferencing
    assert.NotNil(c, srs1PeerStatus.PrimaryRoutes) // Test expectation
    require.NotNil(t, srs1PeerStatus.PrimaryRoutes, "primary routes must be set to avoid panic")

@@ -681,7 +782,7 @@ assert.EventuallyWithT(t, func(c *assert.CollectT) {

}, 5*time.Second, 200*time.Millisecond, "checking route state")
```

**Key Principle**:

- Use `assert` with `c` (*assert.CollectT) for test expectations that can be retried
- Use `require` with `t` (*testing.T) for MUST conditions that prevent panics
- Within EventuallyWithT, both are available - choose based on whether failure would cause a panic

@@ -704,7 +805,7 @@ assert.EventuallyWithT(t, func(c *assert.CollectT) {

assert.EventuallyWithT(t, func(c *assert.CollectT) {
    status, err := client.Status()
    assert.NoError(c, err)

    // Check all peers have expected routes
    for _, peerKey := range status.Peers() {
        peerStatus := status.Peer[peerKey]

@@ -756,8 +857,14 @@ assert.EventuallyWithT(t, func(c *assert.CollectT) {

   - **Why security focus**: Integration tests are the last line of defense against security regressions
   - **EventuallyWithT Usage**: Proper use prevents race conditions without weakening security assertions

6. **Concurrent Execution Awareness**: Respect run ID isolation and never interfere with other agents' test sessions. Each test run has a unique run ID - only clean up YOUR containers (by run ID label), never perform global cleanup while tests may be running.
   - **Why this matters**: Multiple agents/users may run tests concurrently on the same Docker daemon
   - **Key Rule**: NEVER use global container cleanup commands - the test runner handles cleanup automatically per run ID

**CRITICAL PRINCIPLE**: Test expectations are sacred contracts that define correct system behavior. When tests fail, fix the code to match the test, never change the test to match broken code. Only timing and observability improvements are allowed - business logic expectations are immutable.

**ISOLATION PRINCIPLE**: Each test run is isolated by its unique Run ID. Never interfere with other test sessions. The system handles cleanup automatically - manual global cleanup commands are forbidden when other tests may be running.

**EventuallyWithT PRINCIPLE**: Every external call to the headscale server or a tailscale client must be wrapped in EventuallyWithT. Follow the five key rules strictly: one external call per block, proper variable scoping, no nesting, use CollectT for assertions, and provide descriptive messages. A consolidated sketch follows.
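
A minimal consolidated sketch, in the style of the fragments above, that follows all five rules at once; the `headscale` scenario handle and the chosen timeouts are taken from the excerpts above, and the surrounding test setup is omitted:

```go
assert.EventuallyWithT(t, func(c *assert.CollectT) {
	// Rule 1: exactly one external call per block.
	nodes, err := headscale.ListNodes()
	assert.NoError(c, err)

	// Rules 2-4: results handled inside the block with CollectT assertions,
	// no nested EventuallyWithT, and no second unrelated external call.
	found := false
	for _, node := range nodes {
		if node.GetName() == "router" {
			found = true
		}
	}
	assert.True(c, found, "router node should be registered")
	// Rule 5: a descriptive message on the block itself (below).
}, 10*time.Second, 500*time.Millisecond, "waiting for the router node to appear in headscale")
```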

**Remember**: Test failures are usually code issues in Headscale that need to be fixed, not infrastructure problems to be ignored. Use the specific debugging workflows and failure patterns documented above to efficiently identify root causes. Infrastructure issues have very specific signatures - everything else is code-related.

.editorconfig (new file, 16 lines)

@@ -0,0 +1,16 @@
root = true

[*]
charset = utf-8
end_of_line = lf
indent_size = 2
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
max_line_length = 120

[*.go]
indent_style = tab

[Makefile]
indent_style = tab
25
.github/workflows/build.yml
vendored
@@ -5,8 +5,6 @@ on:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
|
||||
@@ -17,7 +15,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
permissions: write-all
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- name: Get changed files
|
||||
@@ -31,13 +29,12 @@ jobs:
|
||||
- '**/*.go'
|
||||
- 'integration_test/'
|
||||
- 'config-example.yaml'
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
@@ -57,7 +54,7 @@ jobs:
|
||||
exit $BUILD_STATUS
|
||||
|
||||
- name: Nix gosum diverging
|
||||
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
||||
if: failure() && steps.build.outcome == 'failure'
|
||||
with:
|
||||
github-token: ${{secrets.GITHUB_TOKEN}}
|
||||
@@ -69,7 +66,7 @@ jobs:
|
||||
body: 'Nix build failed with wrong gosum, please update "vendorSha256" (${{ steps.build.outputs.OLD_HASH }}) for the "headscale" package in flake.nix with the new SHA: ${{ steps.build.outputs.NEW_HASH }}'
|
||||
})
|
||||
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
name: headscale-linux
|
||||
@@ -84,22 +81,20 @@ jobs:
|
||||
- "GOARCH=arm64 GOOS=darwin"
|
||||
- "GOARCH=amd64 GOOS=darwin"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
- uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
- name: Run go cross compile
|
||||
env:
|
||||
CGO_ENABLED: 0
|
||||
run:
|
||||
env ${{ matrix.env }} nix develop --command -- go build -o "headscale"
|
||||
run: env ${{ matrix.env }} nix develop --command -- go build -o "headscale"
|
||||
./cmd/headscale
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: "headscale-${{ matrix.env }}"
|
||||
path: "headscale"
|
||||
|
||||
.github/workflows/check-generated.yml (4 lines changed)

@@ -16,7 +16,7 @@ jobs:
check-generated:
runs-on: ubuntu-latest
steps:
-  - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+  - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
fetch-depth: 2
- name: Get changed files
@@ -31,7 +31,7 @@ jobs:
- '**/*.proto'
- 'buf.gen.yaml'
- 'tools/**'
-  - uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
+  - uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34
if: steps.changed-files.outputs.files == 'true'
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
if: steps.changed-files.outputs.files == 'true'
7
.github/workflows/check-tests.yaml
vendored
@@ -10,7 +10,7 @@ jobs:
|
||||
check-tests:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- name: Get changed files
|
||||
@@ -24,13 +24,12 @@ jobs:
|
||||
- '**/*.go'
|
||||
- 'integration_test/'
|
||||
- 'config-example.yaml'
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
|
||||
6
.github/workflows/docs-deploy.yml
vendored
@@ -21,15 +21,15 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install python
|
||||
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
|
||||
with:
|
||||
python-version: 3.x
|
||||
- name: Setup cache
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
uses: actions/cache@a7833574556fa59680c1b7cb190c1735db73ebf0 # v5.0.0
|
||||
with:
|
||||
key: ${{ github.ref }}
|
||||
path: .cache
|
||||
|
||||
6
.github/workflows/docs-test.yml
vendored
@@ -11,13 +11,13 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
- name: Install python
|
||||
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
|
||||
with:
|
||||
python-version: 3.x
|
||||
- name: Setup cache
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
uses: actions/cache@a7833574556fa59680c1b7cb190c1735db73ebf0 # v5.0.0
|
||||
with:
|
||||
key: ${{ github.ref }}
|
||||
path: .cache
|
||||
|
||||
@@ -10,6 +10,55 @@ import (
	"strings"
)

// testsToSplit defines tests that should be split into multiple CI jobs.
// Key is the test function name, value is a list of subtest prefixes.
// Each prefix becomes a separate CI job as "TestName/prefix".
//
// Example: TestAutoApproveMultiNetwork has subtests like:
//   - TestAutoApproveMultiNetwork/authkey-tag-advertiseduringup-false-pol-database
//   - TestAutoApproveMultiNetwork/webauth-user-advertiseduringup-true-pol-file
//
// Splitting by approver type (tag, user, group) creates 6 CI jobs with 4 tests each:
//   - TestAutoApproveMultiNetwork/authkey-tag.* (4 tests)
//   - TestAutoApproveMultiNetwork/authkey-user.* (4 tests)
//   - TestAutoApproveMultiNetwork/authkey-group.* (4 tests)
//   - TestAutoApproveMultiNetwork/webauth-tag.* (4 tests)
//   - TestAutoApproveMultiNetwork/webauth-user.* (4 tests)
//   - TestAutoApproveMultiNetwork/webauth-group.* (4 tests)
//
// This reduces load per CI job (4 tests instead of 12) to avoid infrastructure
// flakiness when running many sequential Docker-based integration tests.
var testsToSplit = map[string][]string{
	"TestAutoApproveMultiNetwork": {
		"authkey-tag",
		"authkey-user",
		"authkey-group",
		"webauth-tag",
		"webauth-user",
		"webauth-group",
	},
}

// expandTests takes a list of test names and expands any that need splitting
// into multiple subtest patterns.
func expandTests(tests []string) []string {
	var expanded []string
	for _, test := range tests {
		if prefixes, ok := testsToSplit[test]; ok {
			// This test should be split into multiple jobs.
			// We append ".*" to each prefix because the CI runner wraps patterns
			// with ^...$ anchors. Without ".*", a pattern like "authkey$" wouldn't
			// match "authkey-tag-advertiseduringup-false-pol-database".
			for _, prefix := range prefixes {
				expanded = append(expanded, fmt.Sprintf("%s/%s.*", test, prefix))
			}
		} else {
			expanded = append(expanded, test)
		}
	}
	return expanded
}

func findTests() []string {
	rgBin, err := exec.LookPath("rg")
	if err != nil {
@@ -66,8 +115,11 @@ func updateYAML(tests []string, jobName string, testPath string) {
func main() {
	tests := findTests()

-	quotedTests := make([]string, len(tests))
-	for i, test := range tests {
+	// Expand tests that should be split into multiple jobs
+	expandedTests := expandTests(tests)
+
+	quotedTests := make([]string, len(expandedTests))
+	for i, test := range expandedTests {
		quotedTests[i] = fmt.Sprintf("\"%s\"", test)
	}
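
For illustration, and assuming the `testsToSplit` map shown above, a call to `expandTests` behaves as sketched below; this is hypothetical usage from the same package, not part of the generator itself:

```go
// Split tests become one pattern per prefix; everything else passes through.
patterns := expandTests([]string{
	"TestAutoApproveMultiNetwork", // in testsToSplit: expands to six patterns
	"TestPingAllByIP",             // not in testsToSplit: kept as-is
})
// patterns now contains:
//   TestAutoApproveMultiNetwork/authkey-tag.*
//   TestAutoApproveMultiNetwork/authkey-user.*
//   TestAutoApproveMultiNetwork/authkey-group.*
//   TestAutoApproveMultiNetwork/webauth-tag.*
//   TestAutoApproveMultiNetwork/webauth-user.*
//   TestAutoApproveMultiNetwork/webauth-group.*
//   TestPingAllByIP
fmt.Println(patterns)
```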

.github/workflows/gh-actions-updater.yaml (4 lines changed)

@@ -11,13 +11,13 @@ jobs:
runs-on: ubuntu-latest

steps:
-  - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+  - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
# [Required] Access token with `workflow` scope.
token: ${{ secrets.WORKFLOW_SECRET }}

- name: Run GitHub Actions Version Updater
-  uses: saadmk11/github-actions-version-updater@64be81ba69383f81f2be476703ea6570c4c8686e # v0.8.1
+  uses: saadmk11/github-actions-version-updater@d8781caf11d11168579c8e5e94f62b068038f442 # v0.9.0
with:
# [Required] Access token with `workflow` scope.
token: ${{ secrets.WORKFLOW_SECRET }}
||||
92
.github/workflows/integration-test-template.yml
vendored
@@ -28,23 +28,12 @@ jobs:
|
||||
# that triggered the build.
|
||||
HAS_TAILSCALE_SECRET: ${{ secrets.TS_OAUTH_CLIENT_ID }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
|
||||
with:
|
||||
filters: |
|
||||
files:
|
||||
- '*.nix'
|
||||
- 'go.*'
|
||||
- '**/*.go'
|
||||
- 'integration_test/'
|
||||
- 'config-example.yaml'
|
||||
- name: Tailscale
|
||||
if: ${{ env.HAS_TAILSCALE_SECRET }}
|
||||
uses: tailscale/github-action@6986d2c82a91fbac2949fe01f5bab95cf21b5102 # v3.2.2
|
||||
uses: tailscale/github-action@a392da0a182bba0e9613b6243ebd69529b1878aa # v4.1.0
|
||||
with:
|
||||
oauth-client-id: ${{ secrets.TS_OAUTH_CLIENT_ID }}
|
||||
oauth-secret: ${{ secrets.TS_OAUTH_SECRET }}
|
||||
@@ -52,31 +41,72 @@ jobs:
|
||||
- name: Setup SSH server for Actor
|
||||
if: ${{ env.HAS_TAILSCALE_SECRET }}
|
||||
uses: alexellis/setup-sshd-actor@master
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- name: Download headscale image
|
||||
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
name: headscale-image
|
||||
path: /tmp/artifacts
|
||||
- name: Download tailscale HEAD image
|
||||
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
|
||||
with:
|
||||
name: tailscale-head-image
|
||||
path: /tmp/artifacts
|
||||
- name: Download hi binary
|
||||
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
|
||||
with:
|
||||
name: hi-binary
|
||||
path: /tmp/artifacts
|
||||
- name: Download Go cache
|
||||
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
|
||||
with:
|
||||
name: go-cache
|
||||
path: /tmp/artifacts
|
||||
- name: Download postgres image
|
||||
if: ${{ inputs.postgres_flag == '--postgres=1' }}
|
||||
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
|
||||
with:
|
||||
name: postgres-image
|
||||
path: /tmp/artifacts
|
||||
- name: Load Docker images, Go cache, and prepare binary
|
||||
run: |
|
||||
gunzip -c /tmp/artifacts/headscale-image.tar.gz | docker load
|
||||
gunzip -c /tmp/artifacts/tailscale-head-image.tar.gz | docker load
|
||||
if [ -f /tmp/artifacts/postgres-image.tar.gz ]; then
|
||||
gunzip -c /tmp/artifacts/postgres-image.tar.gz | docker load
|
||||
fi
|
||||
chmod +x /tmp/artifacts/hi
|
||||
docker images
|
||||
# Extract Go cache to host directories for bind mounting
|
||||
mkdir -p /tmp/go-cache
|
||||
tar -xzf /tmp/artifacts/go-cache.tar.gz -C /tmp/go-cache
|
||||
ls -la /tmp/go-cache/ /tmp/go-cache/.cache/
|
||||
- name: Run Integration Test
|
||||
if: always() && steps.changed-files.outputs.files == 'true'
|
||||
run:
|
||||
nix develop --command -- hi run --stats --ts-memory-limit=300 --hs-memory-limit=1500 "^${{ inputs.test }}$" \
|
||||
env:
|
||||
HEADSCALE_INTEGRATION_HEADSCALE_IMAGE: headscale:${{ github.sha }}
|
||||
HEADSCALE_INTEGRATION_TAILSCALE_IMAGE: tailscale-head:${{ github.sha }}
|
||||
HEADSCALE_INTEGRATION_POSTGRES_IMAGE: ${{ inputs.postgres_flag == '--postgres=1' && format('postgres:{0}', github.sha) || '' }}
|
||||
HEADSCALE_INTEGRATION_GO_CACHE: /tmp/go-cache/go
|
||||
HEADSCALE_INTEGRATION_GO_BUILD_CACHE: /tmp/go-cache/.cache/go-build
|
||||
run: /tmp/artifacts/hi run --stats --ts-memory-limit=300 --hs-memory-limit=1500 "^${{ inputs.test }}$" \
|
||||
--timeout=120m \
|
||||
${{ inputs.postgres_flag }}
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: always() && steps.changed-files.outputs.files == 'true'
|
||||
# Sanitize test name for artifact upload (replace invalid characters: " : < > | * ? \ / with -)
|
||||
- name: Sanitize test name for artifacts
|
||||
if: always()
|
||||
id: sanitize
|
||||
run: echo "name=${TEST_NAME//[\":<>|*?\\\/]/-}" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
TEST_NAME: ${{ inputs.test }}
|
||||
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
if: always()
|
||||
with:
|
||||
name: ${{ inputs.database_name }}-${{ inputs.test }}-logs
|
||||
name: ${{ inputs.database_name }}-${{ steps.sanitize.outputs.name }}-logs
|
||||
path: "control_logs/*/*.log"
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: always() && steps.changed-files.outputs.files == 'true'
|
||||
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
if: always()
|
||||
with:
|
||||
name: ${{ inputs.database_name }}-${{ inputs.test }}-archives
|
||||
path: "control_logs/*/*.tar"
|
||||
name: ${{ inputs.database_name }}-${{ steps.sanitize.outputs.name }}-artifacts
|
||||
path: control_logs/
|
||||
- name: Setup a blocking tmux session
|
||||
if: ${{ env.HAS_TAILSCALE_SECRET }}
|
||||
uses: alexellis/block-with-tmux-action@master
|
||||
|
||||
21
.github/workflows/lint.yml
vendored
@@ -10,7 +10,7 @@ jobs:
|
||||
golangci-lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- name: Get changed files
|
||||
@@ -24,13 +24,12 @@ jobs:
|
||||
- '**/*.go'
|
||||
- 'integration_test/'
|
||||
- 'config-example.yaml'
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
@@ -46,7 +45,7 @@ jobs:
|
||||
prettier-lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- name: Get changed files
|
||||
@@ -65,13 +64,12 @@ jobs:
|
||||
- '**/*.css'
|
||||
- '**/*.scss'
|
||||
- '**/*.html'
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
@@ -83,12 +81,11 @@ jobs:
|
||||
proto-lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
- uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
|
||||
55
.github/workflows/nix-module-test.yml
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
name: NixOS Module Tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
nix-module-check:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
|
||||
with:
|
||||
filters: |
|
||||
nix:
|
||||
- 'nix/**'
|
||||
- 'flake.nix'
|
||||
- 'flake.lock'
|
||||
go:
|
||||
- 'go.*'
|
||||
- '**/*.go'
|
||||
- 'cmd/**'
|
||||
- 'hscontrol/**'
|
||||
|
||||
- uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34
|
||||
if: steps.changed-files.outputs.nix == 'true' || steps.changed-files.outputs.go == 'true'
|
||||
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
if: steps.changed-files.outputs.nix == 'true' || steps.changed-files.outputs.go == 'true'
|
||||
with:
|
||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
- name: Run NixOS module tests
|
||||
if: steps.changed-files.outputs.nix == 'true' || steps.changed-files.outputs.go == 'true'
|
||||
run: |
|
||||
echo "Running NixOS module integration test..."
|
||||
nix build .#checks.x86_64-linux.headscale -L
|
||||
11
.github/workflows/release.yml
vendored
@@ -13,28 +13,27 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
|
||||
.github/workflows/stale.yml (8 lines changed)

@@ -12,16 +12,14 @@ jobs:
issues: write
pull-requests: write
steps:
-  - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
+  - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
with:
days-before-issue-stale: 90
days-before-issue-close: 7
stale-issue-label: "stale"
-  stale-issue-message:
-    "This issue is stale because it has been open for 90 days with no
+  stale-issue-message: "This issue is stale because it has been open for 90 days with no
activity."
-  close-issue-message:
-    "This issue was closed because it has been inactive for 14 days
+  close-issue-message: "This issue was closed because it has been inactive for 14 days
since being marked as stale."
days-before-pr-stale: -1
days-before-pr-close: -1
168
.github/workflows/test-integration.yaml
vendored
@@ -7,7 +7,117 @@ concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
jobs:
|
||||
# build: Builds binaries and Docker images once, uploads as artifacts for reuse.
|
||||
# build-postgres: Pulls postgres image separately to avoid Docker Hub rate limits.
|
||||
# sqlite: Runs all integration tests with SQLite backend.
|
||||
# postgres: Runs a subset of tests with PostgreSQL to verify database compatibility.
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
files-changed: ${{ steps.changed-files.outputs.files }}
|
||||
steps:
|
||||
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
|
||||
with:
|
||||
filters: |
|
||||
files:
|
||||
- '*.nix'
|
||||
- 'go.*'
|
||||
- '**/*.go'
|
||||
- 'integration/**'
|
||||
- 'config-example.yaml'
|
||||
- '.github/workflows/test-integration.yaml'
|
||||
- '.github/workflows/integration-test-template.yml'
|
||||
- 'Dockerfile.*'
|
||||
- uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
- name: Build binaries and warm Go cache
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
run: |
|
||||
# Build all Go binaries in one nix shell to maximize cache reuse
|
||||
nix develop --command -- bash -c '
|
||||
go build -o hi ./cmd/hi
|
||||
CGO_ENABLED=0 GOOS=linux go build -o headscale ./cmd/headscale
|
||||
# Build integration test binary to warm the cache with all dependencies
|
||||
go test -c ./integration -o /dev/null 2>/dev/null || true
|
||||
'
|
||||
- name: Upload hi binary
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: hi-binary
|
||||
path: hi
|
||||
retention-days: 10
|
||||
- name: Package Go cache
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
run: |
|
||||
# Package Go module cache and build cache
|
||||
tar -czf go-cache.tar.gz -C ~ go .cache/go-build
|
||||
- name: Upload Go cache
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: go-cache
|
||||
path: go-cache.tar.gz
|
||||
retention-days: 10
|
||||
- name: Build headscale image
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
run: |
|
||||
docker build \
|
||||
--file Dockerfile.integration-ci \
|
||||
--tag headscale:${{ github.sha }} \
|
||||
.
|
||||
docker save headscale:${{ github.sha }} | gzip > headscale-image.tar.gz
|
||||
- name: Build tailscale HEAD image
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
run: |
|
||||
docker build \
|
||||
--file Dockerfile.tailscale-HEAD \
|
||||
--tag tailscale-head:${{ github.sha }} \
|
||||
.
|
||||
docker save tailscale-head:${{ github.sha }} | gzip > tailscale-head-image.tar.gz
|
||||
- name: Upload headscale image
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: headscale-image
|
||||
path: headscale-image.tar.gz
|
||||
retention-days: 10
|
||||
- name: Upload tailscale HEAD image
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: tailscale-head-image
|
||||
path: tailscale-head-image.tar.gz
|
||||
retention-days: 10
|
||||
build-postgres:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
if: needs.build.outputs.files-changed == 'true'
|
||||
steps:
|
||||
- name: Pull and save postgres image
|
||||
run: |
|
||||
docker pull postgres:latest
|
||||
docker tag postgres:latest postgres:${{ github.sha }}
|
||||
docker save postgres:${{ github.sha }} | gzip > postgres-image.tar.gz
|
||||
- name: Upload postgres image
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: postgres-image
|
||||
path: postgres-image.tar.gz
|
||||
retention-days: 10
|
||||
sqlite:
|
||||
needs: build
|
||||
if: needs.build.outputs.files-changed == 'true'
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -25,6 +135,13 @@ jobs:
|
||||
- TestACLAutogroupTagged
|
||||
- TestACLAutogroupSelf
|
||||
- TestACLPolicyPropagationOverTime
|
||||
- TestACLTagPropagation
|
||||
- TestACLTagPropagationPortSpecific
|
||||
- TestACLGroupWithUnknownUser
|
||||
- TestACLGroupAfterUserDeletion
|
||||
- TestACLGroupDeletionExactReproduction
|
||||
- TestACLDynamicUnknownUserAddition
|
||||
- TestACLDynamicUnknownUserRemoval
|
||||
- TestAPIAuthenticationBypass
|
||||
- TestAPIAuthenticationBypassCurl
|
||||
- TestGRPCAuthenticationBypass
|
||||
@@ -32,6 +149,8 @@ jobs:
|
||||
- TestAuthKeyLogoutAndReloginSameUser
|
||||
- TestAuthKeyLogoutAndReloginNewUser
|
||||
- TestAuthKeyLogoutAndReloginSameUserExpiredKey
|
||||
- TestAuthKeyDeleteKey
|
||||
- TestAuthKeyLogoutAndReloginRoutesPreserved
|
||||
- TestOIDCAuthenticationPingAll
|
||||
- TestOIDCExpireNodesBasedOnTokenExpiry
|
||||
- TestOIDC024UserCreation
|
||||
@@ -41,6 +160,8 @@ jobs:
|
||||
- TestOIDCMultipleOpenedLoginUrls
|
||||
- TestOIDCReloginSameNodeSameUser
|
||||
- TestOIDCExpiryAfterRestart
|
||||
- TestOIDCACLPolicyOnJoin
|
||||
- TestOIDCReloginSameUserRoutesPreserved
|
||||
- TestAuthWebFlowAuthenticationPingAll
|
||||
- TestAuthWebFlowLogoutAndReloginSameUser
|
||||
- TestAuthWebFlowLogoutAndReloginNewUser
|
||||
@@ -49,13 +170,11 @@ jobs:
|
||||
- TestPreAuthKeyCommandWithoutExpiry
|
||||
- TestPreAuthKeyCommandReusableEphemeral
|
||||
- TestPreAuthKeyCorrectUserLoggedInCommand
|
||||
- TestTaggedNodesCLIOutput
|
||||
- TestApiKeyCommand
|
||||
- TestNodeTagCommand
|
||||
- TestNodeAdvertiseTagCommand
|
||||
- TestNodeCommand
|
||||
- TestNodeExpireCommand
|
||||
- TestNodeRenameCommand
|
||||
- TestNodeMoveCommand
|
||||
- TestPolicyCommand
|
||||
- TestPolicyBrokenConfigCommand
|
||||
- TestDERPVerifyEndpoint
|
||||
@@ -82,7 +201,12 @@ jobs:
|
||||
- TestEnablingExitRoutes
|
||||
- TestSubnetRouterMultiNetwork
|
||||
- TestSubnetRouterMultiNetworkExitNode
|
||||
- TestAutoApproveMultiNetwork
|
||||
- TestAutoApproveMultiNetwork/authkey-tag.*
|
||||
- TestAutoApproveMultiNetwork/authkey-user.*
|
||||
- TestAutoApproveMultiNetwork/authkey-group.*
|
||||
- TestAutoApproveMultiNetwork/webauth-tag.*
|
||||
- TestAutoApproveMultiNetwork/webauth-user.*
|
||||
- TestAutoApproveMultiNetwork/webauth-group.*
|
||||
- TestSubnetRouteACLFiltering
|
||||
- TestHeadscale
|
||||
- TestTailscaleNodesJoiningHeadcale
|
||||
@@ -92,12 +216,47 @@ jobs:
|
||||
- TestSSHIsBlockedInACL
|
||||
- TestSSHUserOnlyIsolation
|
||||
- TestSSHAutogroupSelf
|
||||
- TestTagsAuthKeyWithTagRequestDifferentTag
|
||||
- TestTagsAuthKeyWithTagNoAdvertiseFlag
|
||||
- TestTagsAuthKeyWithTagCannotAddViaCLI
|
||||
- TestTagsAuthKeyWithTagCannotChangeViaCLI
|
||||
- TestTagsAuthKeyWithTagAdminOverrideReauthPreserves
|
||||
- TestTagsAuthKeyWithTagCLICannotModifyAdminTags
|
||||
- TestTagsAuthKeyWithoutTagCannotRequestTags
|
||||
- TestTagsAuthKeyWithoutTagRegisterNoTags
|
||||
- TestTagsAuthKeyWithoutTagCannotAddViaCLI
|
||||
- TestTagsAuthKeyWithoutTagCLINoOpAfterAdminWithReset
|
||||
- TestTagsAuthKeyWithoutTagCLINoOpAfterAdminWithEmptyAdvertise
|
||||
- TestTagsAuthKeyWithoutTagCLICannotReduceAdminMultiTag
|
||||
- TestTagsUserLoginOwnedTagAtRegistration
|
||||
- TestTagsUserLoginNonExistentTagAtRegistration
|
||||
- TestTagsUserLoginUnownedTagAtRegistration
|
||||
- TestTagsUserLoginAddTagViaCLIReauth
|
||||
- TestTagsUserLoginRemoveTagViaCLIReauth
|
||||
- TestTagsUserLoginCLINoOpAfterAdminAssignment
|
||||
- TestTagsUserLoginCLICannotRemoveAdminTags
|
||||
- TestTagsAuthKeyWithTagRequestNonExistentTag
|
||||
- TestTagsAuthKeyWithTagRequestUnownedTag
|
||||
- TestTagsAuthKeyWithoutTagRequestNonExistentTag
|
||||
- TestTagsAuthKeyWithoutTagRequestUnownedTag
|
||||
- TestTagsAdminAPICannotSetNonExistentTag
|
||||
- TestTagsAdminAPICanSetUnownedTag
|
||||
- TestTagsAdminAPICannotRemoveAllTags
|
||||
- TestTagsIssue2978ReproTagReplacement
|
||||
- TestTagsAdminAPICannotSetInvalidFormat
|
||||
- TestTagsUserLoginReauthWithEmptyTagsRemovesAllTags
|
||||
- TestTagsAuthKeyWithoutUserInheritsTags
|
||||
- TestTagsAuthKeyWithoutUserRejectsAdvertisedTags
|
||||
- TestTagsAuthKeyConvertToUserViaCLIRegister
|
||||
uses: ./.github/workflows/integration-test-template.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
test: ${{ matrix.test }}
|
||||
postgres_flag: "--postgres=0"
|
||||
database_name: "sqlite"
|
||||
postgres:
|
||||
needs: [build, build-postgres]
|
||||
if: needs.build.outputs.files-changed == 'true'
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -108,6 +267,7 @@ jobs:
|
||||
- TestPingAllByIPManyUpDown
|
||||
- TestSubnetRouterMultiNetwork
|
||||
uses: ./.github/workflows/integration-test-template.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
test: ${{ matrix.test }}
|
||||
postgres_flag: "--postgres=1"
|
||||
|
||||
7
.github/workflows/test.yml
vendored
@@ -11,7 +11,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
@@ -27,13 +27,12 @@ jobs:
|
||||
- 'integration_test/'
|
||||
- 'config-example.yaml'
|
||||
|
||||
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
- uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
if: steps.changed-files.outputs.files == 'true'
|
||||
with:
|
||||
primary-key:
|
||||
nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix',
|
||||
'**/flake.lock') }}
|
||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
|
||||
.gitignore (1 line changed)

@@ -2,6 +2,7 @@ ignored/
tailscale/
.vscode/
.claude/
logs/

*.prof

@@ -7,6 +7,7 @@ linters:
- depguard
- dupl
- exhaustruct
- funcorder
- funlen
- gochecknoglobals
- gochecknoinits
@@ -17,6 +18,7 @@ linters:
- lll
- maintidx
- makezero
- mnd
- musttag
- nestif
- nolintlint
@@ -28,6 +30,22 @@ linters:
- wrapcheck
- wsl
settings:
forbidigo:
forbid:
# Forbid time.Sleep everywhere with context-appropriate alternatives
- pattern: 'time\.Sleep'
  msg: >-
    time.Sleep is forbidden.
    In tests: use assert.EventuallyWithT for polling/waiting patterns.
    In production code: use a backoff strategy (e.g., cenkalti/backoff) or proper synchronization primitives.
# Forbid inline string literals in zerolog field methods - use zf.* constants
- pattern: '\.(Str|Int|Int8|Int16|Int32|Int64|Uint|Uint8|Uint16|Uint32|Uint64|Float32|Float64|Bool|Dur|Time|TimeDiff|Strs|Ints|Uints|Floats|Bools|Any|Interface)\("[^"]+"'
  msg: >-
    Use zf.* constants for zerolog field names instead of string literals.
    Import "github.com/juanfont/headscale/hscontrol/util/zlog/zf" and use
    constants like zf.NodeID, zf.UserName, etc. Add new constants to
    hscontrol/util/zlog/zf/fields.go if needed.
analyze-types: true
gocritic:
disabled-checks:
- appendAssign
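
To illustrate the second forbidigo rule above, here is a hedged sketch of the expected zerolog call style; the exact `zf` constants are assumed from the lint message, and the values are placeholders:

```go
package main

import (
	"github.com/juanfont/headscale/hscontrol/util/zlog/zf" // path taken from the lint message above
	"github.com/rs/zerolog/log"
)

func main() {
	nodeID := uint64(42)

	// Flagged by forbidigo: an inline string literal as the field name.
	// log.Info().Uint64("node_id", nodeID).Msg("node registered")

	// Expected style: use the shared zf.* constant instead (assumed to be a string constant).
	log.Info().Uint64(zf.NodeID, nodeID).Msg("node registered")
}
```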

@@ -125,7 +125,7 @@ kos:
# bare tells KO to only use the repository
# for tagging and naming the container.
bare: true
-base_image: gcr.io/distroless/base-debian12
+base_image: gcr.io/distroless/base-debian13
build: headscale
main: ./cmd/headscale
env:
@@ -154,7 +154,7 @@ kos:
- headscale/headscale

bare: true
-base_image: gcr.io/distroless/base-debian12:debug
+base_image: gcr.io/distroless/base-debian13:debug
build: headscale
main: ./cmd/headscale
env:
24
.mcp.json
@@ -3,45 +3,31 @@
|
||||
"claude-code-mcp": {
|
||||
"type": "stdio",
|
||||
"command": "npx",
|
||||
"args": [
|
||||
"-y",
|
||||
"@steipete/claude-code-mcp@latest"
|
||||
],
|
||||
"args": ["-y", "@steipete/claude-code-mcp@latest"],
|
||||
"env": {}
|
||||
},
|
||||
"sequential-thinking": {
|
||||
"type": "stdio",
|
||||
"command": "npx",
|
||||
"args": [
|
||||
"-y",
|
||||
"@modelcontextprotocol/server-sequential-thinking"
|
||||
],
|
||||
"args": ["-y", "@modelcontextprotocol/server-sequential-thinking"],
|
||||
"env": {}
|
||||
},
|
||||
"nixos": {
|
||||
"type": "stdio",
|
||||
"command": "uvx",
|
||||
"args": [
|
||||
"mcp-nixos"
|
||||
],
|
||||
"args": ["mcp-nixos"],
|
||||
"env": {}
|
||||
},
|
||||
"context7": {
|
||||
"type": "stdio",
|
||||
"command": "npx",
|
||||
"args": [
|
||||
"-y",
|
||||
"@upstash/context7-mcp"
|
||||
],
|
||||
"args": ["-y", "@upstash/context7-mcp"],
|
||||
"env": {}
|
||||
},
|
||||
"git": {
|
||||
"type": "stdio",
|
||||
"command": "npx",
|
||||
"args": [
|
||||
"-y",
|
||||
"@cyanheads/git-mcp-server"
|
||||
],
|
||||
"args": ["-y", "@cyanheads/git-mcp-server"],
|
||||
"env": {}
|
||||
}
|
||||
}
|
||||
|
||||
68
.pre-commit-config.yaml
Normal file
@@ -0,0 +1,68 @@
|
||||
# prek/pre-commit configuration for headscale
|
||||
# See: https://prek.j178.dev/quickstart/
|
||||
# See: https://prek.j178.dev/builtin/
|
||||
|
||||
# Global exclusions - ignore generated code
|
||||
exclude: ^gen/
|
||||
|
||||
repos:
|
||||
# Built-in hooks from pre-commit/pre-commit-hooks
|
||||
# prek will use fast-path optimized versions automatically
|
||||
# See: https://prek.j178.dev/builtin/
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v6.0.0
|
||||
hooks:
|
||||
- id: check-added-large-files
|
||||
- id: check-case-conflict
|
||||
- id: check-executables-have-shebangs
|
||||
- id: check-json
|
||||
- id: check-merge-conflict
|
||||
- id: check-symlinks
|
||||
- id: check-toml
|
||||
- id: check-xml
|
||||
- id: check-yaml
|
||||
- id: detect-private-key
|
||||
- id: end-of-file-fixer
|
||||
- id: fix-byte-order-marker
|
||||
- id: mixed-line-ending
|
||||
- id: trailing-whitespace
|
||||
|
||||
# Local hooks for project-specific tooling
|
||||
- repo: local
|
||||
hooks:
|
||||
# nixpkgs-fmt for Nix files
|
||||
- id: nixpkgs-fmt
|
||||
name: nixpkgs-fmt
|
||||
entry: nixpkgs-fmt
|
||||
language: system
|
||||
files: \.nix$
|
||||
|
||||
# Prettier for formatting
|
||||
- id: prettier
|
||||
name: prettier
|
||||
entry: prettier --write --list-different
|
||||
language: system
|
||||
exclude: ^docs/
|
||||
types_or:
|
||||
[
|
||||
javascript,
|
||||
jsx,
|
||||
ts,
|
||||
tsx,
|
||||
yaml,
|
||||
json,
|
||||
toml,
|
||||
html,
|
||||
css,
|
||||
scss,
|
||||
sass,
|
||||
markdown,
|
||||
]
|
||||
|
||||
# golangci-lint for Go code quality
|
||||
- id: golangci-lint
|
||||
name: golangci-lint
|
||||
entry: nix develop --command golangci-lint run --new-from-rev=HEAD~1 --timeout=5m --fix
|
||||
language: system
|
||||
types: [go]
|
||||
pass_filenames: false
|
||||
@@ -1,5 +1,5 @@
|
||||
.github/workflows/test-integration-v2*
|
||||
docs/about/features.md
|
||||
docs/ref/api.md
|
||||
docs/ref/configuration.md
|
||||
docs/ref/oidc.md
|
||||
docs/ref/remote-cli.md
|
||||
|
||||
789
CHANGELOG.md
532
CLAUDE.md
@@ -1,531 +1 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Overview
|
||||
|
||||
Headscale is an open-source implementation of the Tailscale control server written in Go. It provides self-hosted coordination for Tailscale networks (tailnets), managing node registration, IP allocation, policy enforcement, and DERP routing.
|
||||
|
||||
## Development Commands
|
||||
|
||||
### Quick Setup
|
||||
```bash
|
||||
# Recommended: Use Nix for dependency management
|
||||
nix develop
|
||||
|
||||
# Full development workflow
|
||||
make dev # runs fmt + lint + test + build
|
||||
```
|
||||
|
||||
### Essential Commands
|
||||
```bash
|
||||
# Build headscale binary
|
||||
make build
|
||||
|
||||
# Run tests
|
||||
make test
|
||||
go test ./... # All unit tests
|
||||
go test -race ./... # With race detection
|
||||
|
||||
# Run specific integration test
|
||||
go run ./cmd/hi run "TestName" --postgres
|
||||
|
||||
# Code formatting and linting
|
||||
make fmt # Format all code (Go, docs, proto)
|
||||
make lint # Lint all code (Go, proto)
|
||||
make fmt-go # Format Go code only
|
||||
make lint-go # Lint Go code only
|
||||
|
||||
# Protocol buffer generation (after modifying proto/)
|
||||
make generate
|
||||
|
||||
# Clean build artifacts
|
||||
make clean
|
||||
```
|
||||
|
||||
### Integration Testing
|
||||
```bash
|
||||
# Use the hi (Headscale Integration) test runner
|
||||
go run ./cmd/hi doctor # Check system requirements
|
||||
go run ./cmd/hi run "TestPattern" # Run specific test
|
||||
go run ./cmd/hi run "TestPattern" --postgres # With PostgreSQL backend
|
||||
|
||||
# Test artifacts are saved to control_logs/ with logs and debug data
|
||||
```
|
||||
|
||||
## Project Structure & Architecture
|
||||
|
||||
### Top-Level Organization
|
||||
|
||||
```
|
||||
headscale/
|
||||
├── cmd/ # Command-line applications
|
||||
│ ├── headscale/ # Main headscale server binary
|
||||
│ └── hi/ # Headscale Integration test runner
|
||||
├── hscontrol/ # Core control plane logic
|
||||
├── integration/ # End-to-end Docker-based tests
|
||||
├── proto/ # Protocol buffer definitions
|
||||
├── gen/ # Generated code (protobuf)
|
||||
├── docs/ # Documentation
|
||||
└── packaging/ # Distribution packaging
|
||||
```
|
||||
|
||||
### Core Packages (`hscontrol/`)
|
||||
|
||||
**Main Server (`hscontrol/`)**
|
||||
- `app.go`: Application setup, dependency injection, server lifecycle
|
||||
- `handlers.go`: HTTP/gRPC API endpoints for management operations
|
||||
- `grpcv1.go`: gRPC service implementation for headscale API
|
||||
- `poll.go`: **Critical** - Handles Tailscale MapRequest/MapResponse protocol
|
||||
- `noise.go`: Noise protocol implementation for secure client communication
|
||||
- `auth.go`: Authentication flows (web, OIDC, command-line)
|
||||
- `oidc.go`: OpenID Connect integration for user authentication
|
||||
|
||||
**State Management (`hscontrol/state/`)**
|
||||
- `state.go`: Central coordinator for all subsystems (database, policy, IP allocation, DERP)
|
||||
- `node_store.go`: **Performance-critical** - In-memory cache with copy-on-write semantics
|
||||
- Thread-safe operations with deadlock detection
|
||||
- Coordinates between database persistence and real-time operations
|
||||
|
||||
**Database Layer (`hscontrol/db/`)**
|
||||
- `db.go`: Database abstraction, GORM setup, migration management
|
||||
- `node.go`: Node lifecycle, registration, expiration, IP assignment
|
||||
- `users.go`: User management, namespace isolation
|
||||
- `api_key.go`: API authentication tokens
|
||||
- `preauth_keys.go`: Pre-authentication keys for automated node registration
|
||||
- `ip.go`: IP address allocation and management
|
||||
- `policy.go`: Policy storage and retrieval
|
||||
- Schema migrations in `schema.sql` with extensive test data coverage
|
||||
|
||||
**Policy Engine (`hscontrol/policy/`)**
|
||||
- `policy.go`: Core ACL evaluation logic, HuJSON parsing
|
||||
- `v2/`: Next-generation policy system with improved filtering
|
||||
- `matcher/`: ACL rule matching and evaluation engine
|
||||
- Determines peer visibility, route approval, and network access rules
|
||||
- Supports both file-based and database-stored policies
|
||||
|
||||
**Network Management (`hscontrol/`)**
|
||||
- `derp/`: DERP (Designated Encrypted Relay for Packets) server implementation
|
||||
- NAT traversal when direct connections fail
|
||||
- Fallback relay for firewall-restricted environments
|
||||
- `mapper/`: Converts internal Headscale state to Tailscale's wire protocol format
|
||||
- `tail.go`: Tailscale-specific data structure generation
|
||||
- `routes/`: Subnet route management and primary route selection
|
||||
- `dns/`: DNS record management and MagicDNS implementation
|
||||
|
||||
**Utilities & Support (`hscontrol/`)**
|
||||
- `types/`: Core data structures, configuration, validation
|
||||
- `util/`: Helper functions for networking, DNS, key management
|
||||
- `templates/`: Client configuration templates (Apple, Windows, etc.)
|
||||
- `notifier/`: Event notification system for real-time updates
|
||||
- `metrics.go`: Prometheus metrics collection
|
||||
- `capver/`: Tailscale capability version management
|
||||
|
||||
### Key Subsystem Interactions
|
||||
|
||||
**Node Registration Flow**
|
||||
1. **Client Connection**: `noise.go` handles secure protocol handshake
|
||||
2. **Authentication**: `auth.go` validates credentials (web/OIDC/preauth)
|
||||
3. **State Creation**: `state.go` coordinates IP allocation via `db/ip.go`
|
||||
4. **Storage**: `db/node.go` persists node, `NodeStore` caches in memory
|
||||
5. **Network Setup**: `mapper/` generates initial Tailscale network map
|
||||
|
||||
**Ongoing Operations**
|
||||
1. **Poll Requests**: `poll.go` receives periodic client updates
|
||||
2. **State Updates**: `NodeStore` maintains real-time node information
|
||||
3. **Policy Application**: `policy/` evaluates ACL rules for peer relationships
|
||||
4. **Map Distribution**: `mapper/` sends network topology to all affected clients
|
||||
|
||||
**Route Management**
|
||||
1. **Advertisement**: Clients announce routes via `poll.go` Hostinfo updates
|
||||
2. **Storage**: `db/` persists routes, `NodeStore` caches for performance
|
||||
3. **Approval**: `policy/` auto-approves routes based on ACL rules
|
||||
4. **Distribution**: `routes/` selects primary routes, `mapper/` distributes to peers
|
||||
|
||||
### Command-Line Tools (`cmd/`)
|
||||
|
||||
**Main Server (`cmd/headscale/`)**
|
||||
- `headscale.go`: CLI parsing, configuration loading, server startup
|
||||
- Supports daemon mode, CLI operations (user/node management), database operations
|
||||
|
||||
**Integration Test Runner (`cmd/hi/`)**
|
||||
- `main.go`: Test execution framework with Docker orchestration
|
||||
- `run.go`: Individual test execution with artifact collection
|
||||
- `doctor.go`: System requirements validation
|
||||
- `docker.go`: Container lifecycle management
|
||||
- Essential for validating changes against real Tailscale clients
|
||||
|
||||
### Generated & External Code
|
||||
|
||||
**Protocol Buffers (`proto/` → `gen/`)**
|
||||
- Defines gRPC API for headscale management operations
|
||||
- Client libraries can generate from these definitions
|
||||
- Run `make generate` after modifying `.proto` files
|
||||
|
||||
**Integration Testing (`integration/`)**
|
||||
- `scenario.go`: Docker test environment setup
|
||||
- `tailscale.go`: Tailscale client container management
|
||||
- Individual test files for specific functionality areas
|
||||
- Real end-to-end validation with network isolation
|
||||
|
||||
### Critical Performance Paths
|
||||
|
||||
**High-Frequency Operations**
|
||||
1. **MapRequest Processing** (`poll.go`): Every 15-60 seconds per client
|
||||
2. **NodeStore Reads** (`node_store.go`): Every operation requiring node data
|
||||
3. **Policy Evaluation** (`policy/`): On every peer relationship calculation
|
||||
4. **Route Lookups** (`routes/`): During network map generation
|
||||
|
||||
**Database Write Patterns**
|
||||
- **Frequent**: Node heartbeats, endpoint updates, route changes
|
||||
- **Moderate**: User operations, policy updates, API key management
|
||||
- **Rare**: Schema migrations, bulk operations
|
||||
|
||||
### Configuration & Deployment
|
||||
|
||||
**Configuration (`hscontrol/types/config.go`)**
|
||||
- Database connection settings (SQLite/PostgreSQL)
|
||||
- Network configuration (IP ranges, DNS settings)
|
||||
- Policy mode (file vs database)
|
||||
- DERP relay configuration
|
||||
- OIDC provider settings
|
||||
|
||||
**Key Dependencies**
|
||||
- **GORM**: Database ORM with migration support
|
||||
- **Tailscale Libraries**: Core networking and protocol code
|
||||
- **Zerolog**: Structured logging throughout the application
|
||||
- **Buf**: Protocol buffer toolchain for code generation
|
||||
|
||||
### Development Workflow Integration
|
||||
|
||||
The architecture supports incremental development:
|
||||
- **Unit Tests**: Focus on individual packages (`*_test.go` files)
|
||||
- **Integration Tests**: Validate cross-component interactions
|
||||
- **Database Tests**: Extensive migration and data integrity validation
|
||||
- **Policy Tests**: ACL rule evaluation and edge cases
|
||||
- **Performance Tests**: NodeStore and high-frequency operation validation
|
||||
|
||||
## Integration Testing System
|
||||
|
||||
### Overview
|
||||
Headscale uses Docker-based integration tests with real Tailscale clients to validate end-to-end functionality. The integration test system is complex and requires specialized knowledge for effective execution and debugging.
|
||||
|
||||
### **MANDATORY: Use the headscale-integration-tester Agent**
|
||||
|
||||
**CRITICAL REQUIREMENT**: For ANY integration test execution, analysis, troubleshooting, or validation, you MUST use the `headscale-integration-tester` agent. This agent contains specialized knowledge about:
|
||||
|
||||
- Test execution strategies and timing requirements
|
||||
- Infrastructure vs code issue distinction (99% vs 1% failure patterns)
|
||||
- Security-critical debugging rules and forbidden practices
|
||||
- Comprehensive artifact analysis workflows
|
||||
- Real-world failure patterns from HA debugging experiences
|
||||
|
||||
### Quick Reference Commands
|
||||
|
||||
```bash
|
||||
# Check system requirements (always run first)
|
||||
go run ./cmd/hi doctor
|
||||
|
||||
# Run single test (recommended for development)
|
||||
go run ./cmd/hi run "TestName"
|
||||
|
||||
# Use PostgreSQL for database-heavy tests
|
||||
go run ./cmd/hi run "TestName" --postgres
|
||||
|
||||
# Pattern matching for related tests
|
||||
go run ./cmd/hi run "TestPattern*"
|
||||
```
|
||||
|
||||
**Critical Notes**:
|
||||
- Only ONE test can run at a time (Docker port conflicts)
|
||||
- Tests generate ~100MB of logs per run in `control_logs/`
|
||||
- Clean environment before each test: `rm -rf control_logs/202507* && docker system prune -f`
|
||||
|
||||
### Test Artifacts Location
|
||||
All test runs save comprehensive debugging artifacts to `control_logs/TIMESTAMP-ID/` including server logs, client logs, database dumps, MapResponse protocol data, and Prometheus metrics.
|
||||
|
||||
**For all integration test work, use the headscale-integration-tester agent - it contains the complete knowledge needed for effective testing and debugging.**
|
||||
|
||||
## NodeStore Implementation Details
|
||||
|
||||
**Key Insight from Recent Work**: The NodeStore is a critical performance optimization that caches node data in memory while ensuring consistency with the database. When working with route advertisements or node state changes:
|
||||
|
||||
1. **Timing Considerations**: Route advertisements need time to propagate from clients to server. Use `require.EventuallyWithT()` patterns in tests instead of immediate assertions.
|
||||
|
||||
2. **Synchronization Points**: NodeStore updates happen at specific points like `poll.go:420` after Hostinfo changes. Ensure these are maintained when modifying the polling logic.
|
||||
|
||||
3. **Peer Visibility**: The NodeStore's `peersFunc` determines which nodes are visible to each other. Policy-based filtering is separate from monitoring visibility - expired nodes should remain visible for debugging but marked as expired.
|
||||
|
||||
## Testing Guidelines
|
||||
|
||||
### Integration Test Patterns
|
||||
|
||||
#### **CRITICAL: EventuallyWithT Pattern for External Calls**
|
||||
|
||||
**All external calls in integration tests MUST be wrapped in EventuallyWithT blocks** to handle eventual consistency in distributed systems. External calls include:
|
||||
- `client.Status()` - Getting Tailscale client status
|
||||
- `client.Curl()` - Making HTTP requests through clients
|
||||
- `client.Traceroute()` - Running network diagnostics
|
||||
- `headscale.ListNodes()` - Querying headscale server state
|
||||
- Any other calls that interact with external systems or network operations
|
||||
|
||||
**Key Rules**:
|
||||
1. **Never use bare `require.NoError(t, err)` with external calls** - Always wrap in EventuallyWithT
|
||||
2. **Keep related assertions together** - If multiple assertions depend on the same external call, keep them in the same EventuallyWithT block
|
||||
3. **Split unrelated external calls** - Different external calls should be in separate EventuallyWithT blocks
|
||||
4. **Never nest EventuallyWithT calls** - Each EventuallyWithT should be at the same level
|
||||
5. **Declare shared variables at function scope** - Variables used across multiple EventuallyWithT blocks must be declared before first use
|
||||
|
||||
**Examples**:
|
||||
|
||||
```go
|
||||
// CORRECT: External call wrapped in EventuallyWithT
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
status, err := client.Status()
|
||||
assert.NoError(c, err)
|
||||
|
||||
// Related assertions using the same status call
|
||||
for _, peerKey := range status.Peers() {
|
||||
peerStatus := status.Peer[peerKey]
|
||||
assert.NotNil(c, peerStatus.PrimaryRoutes)
|
||||
requirePeerSubnetRoutesWithCollect(c, peerStatus, expectedRoutes)
|
||||
}
|
||||
}, 5*time.Second, 200*time.Millisecond, "Verifying client status and routes")
|
||||
|
||||
// INCORRECT: Bare external call without EventuallyWithT
|
||||
status, err := client.Status() // ❌ Will fail intermittently
|
||||
require.NoError(t, err)
|
||||
|
||||
// CORRECT: Separate EventuallyWithT for different external calls
|
||||
// First external call - headscale.ListNodes()
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, nodes, 2)
|
||||
requireNodeRouteCountWithCollect(c, nodes[0], 2, 2, 2)
|
||||
}, 10*time.Second, 500*time.Millisecond, "route state changes should propagate to nodes")
|
||||
|
||||
// Second external call - client.Status()
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
status, err := client.Status()
|
||||
assert.NoError(c, err)
|
||||
|
||||
for _, peerKey := range status.Peers() {
|
||||
peerStatus := status.Peer[peerKey]
|
||||
requirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()})
|
||||
}
|
||||
}, 10*time.Second, 500*time.Millisecond, "routes should be visible to client")
|
||||
|
||||
// INCORRECT: Multiple unrelated external calls in same EventuallyWithT
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes() // ❌ First external call
|
||||
assert.NoError(c, err)
|
||||
|
||||
status, err := client.Status() // ❌ Different external call - should be separate
|
||||
assert.NoError(c, err)
|
||||
}, 10*time.Second, 500*time.Millisecond, "mixed calls")
|
||||
|
||||
// CORRECT: Variable scoping for shared data
|
||||
var (
|
||||
srs1, srs2, srs3 *ipnstate.Status
|
||||
clientStatus *ipnstate.Status
|
||||
srs1PeerStatus *ipnstate.PeerStatus
|
||||
)
|
||||
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
srs1 = subRouter1.MustStatus() // = not :=
|
||||
srs2 = subRouter2.MustStatus()
|
||||
clientStatus = client.MustStatus()
|
||||
|
||||
srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]
|
||||
// assertions...
|
||||
}, 5*time.Second, 200*time.Millisecond, "checking router status")
|
||||
|
||||
// CORRECT: Wrapping client operations
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
result, err := client.Curl(weburl)
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, result, 13)
|
||||
}, 5*time.Second, 200*time.Millisecond, "Verifying HTTP connectivity")
|
||||
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
tr, err := client.Traceroute(webip)
|
||||
assert.NoError(c, err)
|
||||
assertTracerouteViaIPWithCollect(c, tr, expectedRouter.MustIPv4())
|
||||
}, 5*time.Second, 200*time.Millisecond, "Verifying network path")
|
||||
```
|
||||
|
||||
**Helper Functions**:
|
||||
- Use `requirePeerSubnetRoutesWithCollect` instead of `requirePeerSubnetRoutes` inside EventuallyWithT
|
||||
- Use `requireNodeRouteCountWithCollect` instead of `requireNodeRouteCount` inside EventuallyWithT
|
||||
- Use `assertTracerouteViaIPWithCollect` instead of `assertTracerouteViaIP` inside EventuallyWithT
|
||||
|
||||
```go
|
||||
// Node route checking by actual node properties, not array position
|
||||
var routeNode *v1.Node
|
||||
for _, node := range nodes {
|
||||
if nodeIDStr := fmt.Sprintf("%d", node.GetId()); expectedRoutes[nodeIDStr] != "" {
|
||||
routeNode = node
|
||||
break
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Running Problematic Tests
|
||||
- Some tests require significant time (e.g., `TestNodeOnlineStatus` runs for 12 minutes)
|
||||
- Infrastructure issues like disk space can cause test failures unrelated to code changes
|
||||
- Use `--postgres` flag when testing database-heavy scenarios
|
||||
|
||||
## Quality Assurance and Testing Requirements
|
||||
|
||||
### **MANDATORY: Always Use Specialized Testing Agents**
|
||||
|
||||
**CRITICAL REQUIREMENT**: For ANY task involving testing, quality assurance, review, or validation, you MUST use the appropriate specialized agent at the END of your task list. This ensures comprehensive quality validation and prevents regressions.
|
||||
|
||||
**Required Agents for Different Task Types**:
|
||||
|
||||
1. **Integration Testing**: Use `headscale-integration-tester` agent for:
|
||||
- Running integration tests with `cmd/hi`
|
||||
- Analyzing test failures and artifacts
|
||||
- Troubleshooting Docker-based test infrastructure
|
||||
- Validating end-to-end functionality changes
|
||||
|
||||
2. **Quality Control**: Use `quality-control-enforcer` agent for:
|
||||
- Code review and validation
|
||||
- Ensuring best practices compliance
|
||||
- Preventing common pitfalls and anti-patterns
|
||||
- Validating architectural decisions
|
||||
|
||||
**Agent Usage Pattern**: Always add the appropriate agent as the FINAL step in any task list to ensure quality validation occurs after all work is complete.
|
||||
|
||||
### Integration Test Debugging Reference
|
||||
|
||||
Test artifacts are preserved in `control_logs/TIMESTAMP-ID/` including:
|
||||
- Headscale server logs (stderr/stdout)
|
||||
- Tailscale client logs and status
|
||||
- Database dumps and network captures
|
||||
- MapResponse JSON files for protocol debugging
|
||||
|
||||
**For integration test issues, ALWAYS use the headscale-integration-tester agent - do not attempt manual debugging.**
|
||||
|
||||
## EventuallyWithT Pattern for Integration Tests
|
||||
|
||||
### Overview
|
||||
EventuallyWithT is a testing pattern used to handle eventual consistency in distributed systems. In Headscale integration tests, many operations are asynchronous - clients advertise routes, the server processes them, updates propagate through the network. EventuallyWithT allows tests to wait for these operations to complete while making assertions.
|
||||
|
||||
### External Calls That Must Be Wrapped
|
||||
The following operations are **external calls** that interact with the headscale server or tailscale clients and MUST be wrapped in EventuallyWithT:
|
||||
- `headscale.ListNodes()` - Queries server state
|
||||
- `client.Status()` - Gets client network status
|
||||
- `client.Curl()` - Makes HTTP requests through the network
|
||||
- `client.Traceroute()` - Performs network diagnostics
|
||||
- `client.Execute()` when running commands that query state
|
||||
- Any operation that reads from the headscale server or tailscale client
|
||||
|
||||
### Operations That Must NOT Be Wrapped
|
||||
The following are **blocking operations** that modify state and should NOT be wrapped in EventuallyWithT:
|
||||
- `tailscale set` commands (e.g., `--advertise-routes`, `--exit-node`)
|
||||
- Any command that changes configuration or state
|
||||
- Use `client.MustStatus()` instead of `client.Status()` when you just need the ID for a blocking operation
|
||||
|
||||
### Five Key Rules for EventuallyWithT
|
||||
|
||||
1. **One External Call Per EventuallyWithT Block**
|
||||
- Each EventuallyWithT should make ONE external call (e.g., ListNodes OR Status)
|
||||
- Related assertions based on that single call can be grouped together
|
||||
- Unrelated external calls must be in separate EventuallyWithT blocks
|
||||
|
||||
2. **Variable Scoping**
|
||||
- Declare variables that need to be shared across EventuallyWithT blocks at function scope
|
||||
- Use `=` for assignment inside EventuallyWithT, not `:=` (unless the variable is only used within that block)
|
||||
- Variables declared with `:=` inside EventuallyWithT are not accessible outside
|
||||
|
||||
3. **No Nested EventuallyWithT**
|
||||
- NEVER put an EventuallyWithT inside another EventuallyWithT
|
||||
- This is a critical anti-pattern that must be avoided
|
||||
|
||||
4. **Use CollectT for Assertions**
|
||||
- Inside EventuallyWithT, use `assert` methods with the CollectT parameter
|
||||
- Helper functions called within EventuallyWithT must accept `*assert.CollectT` (see the sketch after this list)
|
||||
|
||||
5. **Descriptive Messages**
|
||||
- Always provide a descriptive message as the last parameter
|
||||
- Message should explain what condition is being waited for
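
Rule 4 in practice — a minimal sketch of what a `*WithCollect` helper can look like. The helper name and the node getters are taken from the examples in this document, but the exact signature and the argument order (available, approved, subnet) are assumptions, not the repository's actual code:

```go
// Sketch of a CollectT-compatible helper for use inside EventuallyWithT.
package integration

import (
	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/stretchr/testify/assert"
)

func requireNodeRouteCountWithCollect(
	c *assert.CollectT,
	node *v1.Node,
	available, approved, subnet int,
) {
	// Use assert (not require) against the CollectT so a miss only fails this
	// attempt and EventuallyWithT can retry until its timeout.
	assert.Len(c, node.GetAvailableRoutes(), available, "available routes")
	assert.Len(c, node.GetApprovedRoutes(), approved, "approved routes")
	assert.Len(c, node.GetSubnetRoutes(), subnet, "subnet routes")
}
```

Because every check goes through the `*assert.CollectT`, a failed assertion marks the attempt as unsuccessful and lets `EventuallyWithT` keep polling instead of aborting the test immediately.
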
### Correct Pattern Examples
|
||||
|
||||
```go
|
||||
// CORRECT: Blocking operation NOT wrapped
|
||||
for _, client := range allClients {
|
||||
status := client.MustStatus()
|
||||
command := []string{
|
||||
"tailscale",
|
||||
"set",
|
||||
"--advertise-routes=" + expectedRoutes[string(status.Self.ID)],
|
||||
}
|
||||
_, _, err = client.Execute(command)
|
||||
require.NoErrorf(t, err, "failed to advertise route: %s", err)
|
||||
}
|
||||
|
||||
// CORRECT: Single external call with related assertions
|
||||
var nodes []*v1.Node
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodes, err = headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, nodes, 2)
|
||||
requireNodeRouteCountWithCollect(c, nodes[0], 2, 2, 2)
|
||||
}, 10*time.Second, 500*time.Millisecond, "nodes should have expected route counts")
|
||||
|
||||
// CORRECT: Separate EventuallyWithT for different external call
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
status, err := client.Status()
|
||||
assert.NoError(c, err)
|
||||
for _, peerKey := range status.Peers() {
|
||||
peerStatus := status.Peer[peerKey]
|
||||
requirePeerSubnetRoutesWithCollect(c, peerStatus, expectedPrefixes)
|
||||
}
|
||||
}, 10*time.Second, 500*time.Millisecond, "client should see expected routes")
|
||||
```
|
||||
|
||||
### Incorrect Patterns to Avoid
|
||||
|
||||
```go
|
||||
// INCORRECT: Blocking operation wrapped in EventuallyWithT
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
status, err := client.Status()
|
||||
assert.NoError(c, err)
|
||||
|
||||
// This is a blocking operation - should NOT be in EventuallyWithT!
|
||||
command := []string{
|
||||
"tailscale",
|
||||
"set",
|
||||
"--advertise-routes=" + expectedRoutes[string(status.Self.ID)],
|
||||
}
|
||||
_, _, err = client.Execute(command)
|
||||
assert.NoError(c, err)
|
||||
}, 5*time.Second, 200*time.Millisecond, "wrong pattern")
|
||||
|
||||
// INCORRECT: Multiple unrelated external calls in same EventuallyWithT
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
// First external call
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, nodes, 2)
|
||||
|
||||
// Second unrelated external call - WRONG!
|
||||
status, err := client.Status()
|
||||
assert.NoError(c, err)
|
||||
assert.NotNil(c, status)
|
||||
}, 10*time.Second, 500*time.Millisecond, "mixed operations")
|
||||
```
|
||||
|
||||
## Important Notes
|
||||
|
||||
- **Dependencies**: Use `nix develop` for consistent toolchain (Go, buf, protobuf tools, linting)
|
||||
- **Protocol Buffers**: Changes to `proto/` require `make generate` and should be committed separately
|
||||
- **Code Style**: Enforced via golangci-lint with golines (width 88) and gofumpt formatting
|
||||
- **Database**: Supports both SQLite (development) and PostgreSQL (production/testing)
|
||||
- **Integration Tests**: Require Docker and can consume significant disk space - use headscale-integration-tester agent
|
||||
- **Performance**: NodeStore optimizations are critical for scale - be careful with changes to state management
|
||||
- **Quality Assurance**: Always use appropriate specialized agents for testing and validation tasks
|
||||
- **NEVER create gists in the user's name**: Do not use the `create_gist` tool - present information directly in the response instead
|
||||
@AGENTS.md
|
||||
|
||||
@@ -2,28 +2,43 @@
|
||||
# and are in no way endorsed by Headscale's maintainers as an
|
||||
# official nor supported release or distribution.
|
||||
|
||||
FROM docker.io/golang:1.25-trixie
|
||||
FROM docker.io/golang:1.25-trixie AS builder
|
||||
ARG VERSION=dev
|
||||
ENV GOPATH /go
|
||||
WORKDIR /go/src/headscale
|
||||
|
||||
RUN apt-get --update install --no-install-recommends --yes less jq sqlite3 dnsutils \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& apt-get clean
|
||||
RUN mkdir -p /var/run/headscale
|
||||
|
||||
# Install delve debugger
|
||||
# Install delve debugger first - rarely changes, good cache candidate
|
||||
RUN go install github.com/go-delve/delve/cmd/dlv@latest
|
||||
|
||||
# Download dependencies - only invalidated when go.mod/go.sum change
|
||||
COPY go.mod go.sum /go/src/headscale/
|
||||
RUN go mod download
|
||||
|
||||
# Copy source and build - invalidated on any source change
|
||||
COPY . .
|
||||
|
||||
# Build debug binary with debug symbols for delve
|
||||
RUN CGO_ENABLED=0 GOOS=linux go build -gcflags="all=-N -l" -o /go/bin/headscale ./cmd/headscale
|
||||
|
||||
# Runtime stage
|
||||
FROM debian:trixie-slim
|
||||
|
||||
RUN apt-get --update install --no-install-recommends --yes \
|
||||
bash ca-certificates curl dnsutils findutils iproute2 jq less procps python3 sqlite3 \
|
||||
&& apt-get dist-clean
|
||||
|
||||
RUN mkdir -p /var/run/headscale
|
||||
|
||||
# Copy binaries from builder
|
||||
COPY --from=builder /go/bin/headscale /usr/local/bin/headscale
|
||||
COPY --from=builder /go/bin/dlv /usr/local/bin/dlv
|
||||
|
||||
# Copy source code for delve source-level debugging
|
||||
COPY --from=builder /go/src/headscale /go/src/headscale
|
||||
|
||||
WORKDIR /go/src/headscale
|
||||
|
||||
# Need to reset the entrypoint or everything will run as a busybox script
|
||||
ENTRYPOINT []
|
||||
EXPOSE 8080/tcp 40000/tcp
|
||||
CMD ["/go/bin/dlv", "--listen=0.0.0.0:40000", "--headless=true", "--api-version=2", "--accept-multiclient", "exec", "/go/bin/headscale", "--"]
|
||||
CMD ["dlv", "--listen=0.0.0.0:40000", "--headless=true", "--api-version=2", "--accept-multiclient", "exec", "/usr/local/bin/headscale", "--"]
|
||||
|
||||
17
Dockerfile.integration-ci
Normal file
@@ -0,0 +1,17 @@
|
||||
# Minimal CI image - expects pre-built headscale binary in build context
|
||||
# For local development with delve debugging, use Dockerfile.integration instead
|
||||
|
||||
FROM debian:trixie-slim
|
||||
|
||||
RUN apt-get --update install --no-install-recommends --yes \
|
||||
bash ca-certificates curl dnsutils findutils iproute2 jq less procps python3 sqlite3 \
|
||||
&& apt-get dist-clean
|
||||
|
||||
RUN mkdir -p /var/run/headscale
|
||||
|
||||
# Copy pre-built headscale binary from build context
|
||||
COPY headscale /usr/local/bin/headscale
|
||||
|
||||
ENTRYPOINT []
|
||||
EXPOSE 8080/tcp
|
||||
CMD ["/usr/local/bin/headscale"]
|
||||
@@ -37,7 +37,9 @@ RUN GOARCH=$TARGETARCH go install -tags="${BUILD_TAGS}" -ldflags="\
|
||||
-v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot
|
||||
|
||||
FROM alpine:3.22
|
||||
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables curl
|
||||
# Upstream: ca-certificates ip6tables iptables iproute2
|
||||
# Tests: curl python3 (traceroute via BusyBox)
|
||||
RUN apk add --no-cache ca-certificates curl ip6tables iptables iproute2 python3
|
||||
|
||||
COPY --from=build-env /go/bin/* /usr/local/bin/
|
||||
# For compat with the previous run.sh, although ideally you should be
|
||||
|
||||
5
Makefile
@@ -64,7 +64,6 @@ fmt-go: check-deps $(GO_SOURCES)
|
||||
fmt-prettier: check-deps $(DOC_SOURCES)
|
||||
@echo "Formatting documentation and config files..."
|
||||
prettier --write '**/*.{ts,js,md,yaml,yml,sass,css,scss,html}'
|
||||
prettier --write --print-width 80 --prose-wrap always CHANGELOG.md
|
||||
|
||||
.PHONY: fmt-proto
|
||||
fmt-proto: check-deps $(PROTO_SOURCES)
|
||||
@@ -117,7 +116,7 @@ help:
|
||||
@echo ""
|
||||
@echo "Specific targets:"
|
||||
@echo " fmt-go - Format Go code only"
|
||||
@echo " fmt-prettier - Format documentation only"
|
||||
@echo " fmt-prettier - Format documentation only"
|
||||
@echo " fmt-proto - Format Protocol Buffer files only"
|
||||
@echo " lint-go - Lint Go code only"
|
||||
@echo " lint-proto - Lint Protocol Buffer files only"
|
||||
@@ -126,4 +125,4 @@ help:
|
||||
@echo " check-deps - Verify required tools are available"
|
||||
@echo ""
|
||||
@echo "Note: If not running in a nix shell, ensure dependencies are available:"
|
||||
@echo " nix develop"
|
||||
@echo " nix develop"
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||

|
||||

|
||||
|
||||

|
||||
|
||||
@@ -63,8 +63,12 @@ and container to run Headscale.**
|
||||
|
||||
Please have a look at the [`documentation`](https://headscale.net/stable/).
|
||||
|
||||
For NixOS users, a module is available in [`nix/`](./nix/).
|
||||
|
||||
## Talks
|
||||
|
||||
- Fosdem 2026 (video): [Headscale & Tailscale: The complementary open source clone](https://fosdem.org/2026/schedule/event/KYQ3LL-headscale-the-complementary-open-source-clone/)
|
||||
- presented by Kristoffer Dalby
|
||||
- Fosdem 2023 (video): [Headscale: How we are using integration testing to reimplement Tailscale](https://fosdem.org/2023/schedule/event/goheadscale/)
|
||||
- presented by Juan Font Alonso and Kristoffer Dalby
|
||||
|
||||
@@ -147,6 +151,7 @@ make build
|
||||
We recommend using Nix for dependency management to ensure you have all required tools. If you prefer to manage dependencies yourself, you can use Make directly:
|
||||
|
||||
**With Nix (recommended):**
|
||||
|
||||
```shell
|
||||
nix develop
|
||||
make test
|
||||
@@ -154,6 +159,7 @@ make build
|
||||
```
|
||||
|
||||
**With your own dependencies:**
|
||||
|
||||
```shell
|
||||
make test
|
||||
make build
|
||||
|
||||
@@ -9,13 +9,12 @@ import (
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
const (
|
||||
// 90 days.
|
||||
// DefaultAPIKeyExpiry is 90 days.
|
||||
DefaultAPIKeyExpiry = "90d"
|
||||
)
|
||||
|
||||
@@ -29,15 +28,11 @@ func init() {
|
||||
apiKeysCmd.AddCommand(createAPIKeyCmd)
|
||||
|
||||
expireAPIKeyCmd.Flags().StringP("prefix", "p", "", "ApiKey prefix")
|
||||
if err := expireAPIKeyCmd.MarkFlagRequired("prefix"); err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
expireAPIKeyCmd.Flags().Uint64P("id", "i", 0, "ApiKey ID")
|
||||
apiKeysCmd.AddCommand(expireAPIKeyCmd)
|
||||
|
||||
deleteAPIKeyCmd.Flags().StringP("prefix", "p", "", "ApiKey prefix")
|
||||
if err := deleteAPIKeyCmd.MarkFlagRequired("prefix"); err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
deleteAPIKeyCmd.Flags().Uint64P("id", "i", 0, "ApiKey ID")
|
||||
apiKeysCmd.AddCommand(deleteAPIKeyCmd)
|
||||
}
|
||||
|
||||
@@ -154,11 +149,20 @@ var expireAPIKeyCmd = &cobra.Command{
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
prefix, err := cmd.Flags().GetString("prefix")
|
||||
if err != nil {
|
||||
id, _ := cmd.Flags().GetUint64("id")
|
||||
prefix, _ := cmd.Flags().GetString("prefix")
|
||||
|
||||
switch {
|
||||
case id == 0 && prefix == "":
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting prefix from CLI flag: %s", err),
|
||||
errMissingParameter,
|
||||
"Either --id or --prefix must be provided",
|
||||
output,
|
||||
)
|
||||
case id != 0 && prefix != "":
|
||||
ErrorOutput(
|
||||
errMissingParameter,
|
||||
"Only one of --id or --prefix can be provided",
|
||||
output,
|
||||
)
|
||||
}
|
||||
@@ -167,8 +171,11 @@ var expireAPIKeyCmd = &cobra.Command{
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ExpireApiKeyRequest{
|
||||
Prefix: prefix,
|
||||
request := &v1.ExpireApiKeyRequest{}
|
||||
if id != 0 {
|
||||
request.Id = id
|
||||
} else {
|
||||
request.Prefix = prefix
|
||||
}
|
||||
|
||||
response, err := client.ExpireApiKey(ctx, request)
|
||||
@@ -191,11 +198,20 @@ var deleteAPIKeyCmd = &cobra.Command{
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
prefix, err := cmd.Flags().GetString("prefix")
|
||||
if err != nil {
|
||||
id, _ := cmd.Flags().GetUint64("id")
|
||||
prefix, _ := cmd.Flags().GetString("prefix")
|
||||
|
||||
switch {
|
||||
case id == 0 && prefix == "":
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting prefix from CLI flag: %s", err),
|
||||
errMissingParameter,
|
||||
"Either --id or --prefix must be provided",
|
||||
output,
|
||||
)
|
||||
case id != 0 && prefix != "":
|
||||
ErrorOutput(
|
||||
errMissingParameter,
|
||||
"Only one of --id or --prefix can be provided",
|
||||
output,
|
||||
)
|
||||
}
|
||||
@@ -204,8 +220,11 @@ var deleteAPIKeyCmd = &cobra.Command{
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.DeleteApiKeyRequest{
|
||||
Prefix: prefix,
|
||||
request := &v1.DeleteApiKeyRequest{}
|
||||
if id != 0 {
|
||||
request.Id = id
|
||||
} else {
|
||||
request.Prefix = prefix
|
||||
}
|
||||
|
||||
response, err := client.DeleteApiKey(ctx, request)
|
||||
|
||||
@@ -16,7 +16,7 @@ var configTestCmd = &cobra.Command{
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
_, err := newHeadscaleServerWithConfig()
|
||||
if err != nil {
|
||||
log.Fatal().Caller().Err(err).Msg("Error initializing")
|
||||
log.Fatal().Caller().Err(err).Msg("error initializing")
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
@@ -10,10 +10,6 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
errPreAuthKeyMalformed = Error("key is malformed. expected 64 hex characters with `nodekey` prefix")
|
||||
)
|
||||
|
||||
// Error is used to compare errors as per https://dave.cheney.net/2016/04/07/constant-errors
|
||||
type Error string
|
||||
|
||||
@@ -23,10 +19,12 @@ func init() {
|
||||
rootCmd.AddCommand(debugCmd)
|
||||
|
||||
createNodeCmd.Flags().StringP("name", "", "", "Name")
|
||||
|
||||
err := createNodeCmd.MarkFlagRequired("name")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
|
||||
createNodeCmd.Flags().StringP("user", "u", "", "User")
|
||||
|
||||
createNodeCmd.Flags().StringP("namespace", "n", "", "User")
|
||||
@@ -38,11 +36,14 @@ func init() {
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
|
||||
createNodeCmd.Flags().StringP("key", "k", "", "Key")
|
||||
|
||||
err = createNodeCmd.MarkFlagRequired("key")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
|
||||
createNodeCmd.Flags().
|
||||
StringSliceP("route", "r", []string{}, "List (or repeated flags) of routes to advertise")
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/util/zlog/zf"
|
||||
"github.com/oauth2-proxy/mockoidc"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -19,6 +20,7 @@ const (
|
||||
errMockOidcClientIDNotDefined = Error("MOCKOIDC_CLIENT_ID not defined")
|
||||
errMockOidcClientSecretNotDefined = Error("MOCKOIDC_CLIENT_SECRET not defined")
|
||||
errMockOidcPortNotDefined = Error("MOCKOIDC_PORT not defined")
|
||||
errMockOidcUsersNotDefined = Error("MOCKOIDC_USERS not defined")
|
||||
refreshTTL = 60 * time.Minute
|
||||
)
|
||||
|
||||
@@ -35,7 +37,7 @@ var mockOidcCmd = &cobra.Command{
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
err := mockOIDC()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("Error running mock OIDC server")
|
||||
log.Error().Err(err).Msgf("error running mock OIDC server")
|
||||
os.Exit(1)
|
||||
}
|
||||
},
|
||||
@@ -46,41 +48,47 @@ func mockOIDC() error {
|
||||
if clientID == "" {
|
||||
return errMockOidcClientIDNotDefined
|
||||
}
|
||||
|
||||
clientSecret := os.Getenv("MOCKOIDC_CLIENT_SECRET")
|
||||
if clientSecret == "" {
|
||||
return errMockOidcClientSecretNotDefined
|
||||
}
|
||||
|
||||
addrStr := os.Getenv("MOCKOIDC_ADDR")
|
||||
if addrStr == "" {
|
||||
return errMockOidcPortNotDefined
|
||||
}
|
||||
|
||||
portStr := os.Getenv("MOCKOIDC_PORT")
|
||||
if portStr == "" {
|
||||
return errMockOidcPortNotDefined
|
||||
}
|
||||
|
||||
accessTTLOverride := os.Getenv("MOCKOIDC_ACCESS_TTL")
|
||||
if accessTTLOverride != "" {
|
||||
newTTL, err := time.ParseDuration(accessTTLOverride)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
accessTTL = newTTL
|
||||
}
|
||||
|
||||
userStr := os.Getenv("MOCKOIDC_USERS")
|
||||
if userStr == "" {
|
||||
return errors.New("MOCKOIDC_USERS not defined")
|
||||
return errMockOidcUsersNotDefined
|
||||
}
|
||||
|
||||
var users []mockoidc.MockUser
|
||||
|
||||
err := json.Unmarshal([]byte(userStr), &users)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unmarshalling users: %w", err)
|
||||
}
|
||||
|
||||
log.Info().Interface("users", users).Msg("loading users from JSON")
|
||||
log.Info().Interface(zf.Users, users).Msg("loading users from JSON")
|
||||
|
||||
log.Info().Msgf("Access token TTL: %s", accessTTL)
|
||||
log.Info().Msgf("access token TTL: %s", accessTTL)
|
||||
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
@@ -92,7 +100,7 @@ func mockOIDC() error {
|
||||
return err
|
||||
}
|
||||
|
||||
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", addrStr, port))
|
||||
listener, err := new(net.ListenConfig).Listen(context.Background(), "tcp", fmt.Sprintf("%s:%d", addrStr, port))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -101,8 +109,10 @@ func mockOIDC() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info().Msgf("Mock OIDC server listening on %s", listener.Addr().String())
|
||||
log.Info().Msgf("Issuer: %s", mock.Issuer())
|
||||
|
||||
log.Info().Msgf("mock OIDC server listening on %s", listener.Addr().String())
|
||||
log.Info().Msgf("issuer: %s", mock.Issuer())
|
||||
|
||||
c := make(chan struct{})
|
||||
<-c
|
||||
|
||||
@@ -133,12 +143,13 @@ func getMockOIDC(clientID string, clientSecret string, users []mockoidc.MockUser
|
||||
ErrorQueue: &mockoidc.ErrorQueue{},
|
||||
}
|
||||
|
||||
mock.AddMiddleware(func(h http.Handler) http.Handler {
|
||||
_ = mock.AddMiddleware(func(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
log.Info().Msgf("Request: %+v", r)
|
||||
log.Info().Msgf("request: %+v", r)
|
||||
h.ServeHTTP(w, r)
|
||||
|
||||
if r.Response != nil {
|
||||
log.Info().Msgf("Response: %+v", r.Response)
|
||||
log.Info().Msgf("response: %+v", r.Response)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/netip"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -22,12 +21,12 @@ import (
|
||||
func init() {
|
||||
rootCmd.AddCommand(nodeCmd)
|
||||
listNodesCmd.Flags().StringP("user", "u", "", "Filter by user")
|
||||
listNodesCmd.Flags().BoolP("tags", "t", false, "Show tags")
|
||||
|
||||
listNodesCmd.Flags().StringP("namespace", "n", "", "User")
|
||||
listNodesNamespaceFlag := listNodesCmd.Flags().Lookup("namespace")
|
||||
listNodesNamespaceFlag.Deprecated = deprecateNamespaceMessage
|
||||
listNodesNamespaceFlag.Hidden = true
|
||||
|
||||
nodeCmd.AddCommand(listNodesCmd)
|
||||
|
||||
listNodeRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
@@ -44,62 +43,51 @@ func init() {
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
|
||||
registerNodeCmd.Flags().StringP("key", "k", "", "Key")
|
||||
|
||||
err = registerNodeCmd.MarkFlagRequired("key")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
|
||||
nodeCmd.AddCommand(registerNodeCmd)
|
||||
|
||||
expireNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
expireNodeCmd.Flags().StringP("expiry", "e", "", "Set expire to (RFC3339 format, e.g. 2025-08-27T10:00:00Z), or leave empty to expire immediately.")
|
||||
|
||||
err = expireNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
|
||||
nodeCmd.AddCommand(expireNodeCmd)
|
||||
|
||||
renameNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
|
||||
err = renameNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
|
||||
nodeCmd.AddCommand(renameNodeCmd)
|
||||
|
||||
deleteNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
|
||||
err = deleteNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
|
||||
nodeCmd.AddCommand(deleteNodeCmd)
|
||||
|
||||
moveNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
|
||||
err = moveNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
|
||||
moveNodeCmd.Flags().Uint64P("user", "u", 0, "New user")
|
||||
|
||||
moveNodeCmd.Flags().StringP("namespace", "n", "", "User")
|
||||
moveNodeNamespaceFlag := moveNodeCmd.Flags().Lookup("namespace")
|
||||
moveNodeNamespaceFlag.Deprecated = deprecateNamespaceMessage
|
||||
moveNodeNamespaceFlag.Hidden = true
|
||||
|
||||
err = moveNodeCmd.MarkFlagRequired("user")
|
||||
if err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(moveNodeCmd)
|
||||
|
||||
tagCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
tagCmd.MarkFlagRequired("identifier")
|
||||
_ = tagCmd.MarkFlagRequired("identifier")
|
||||
tagCmd.Flags().StringSliceP("tags", "t", []string{}, "List of tags to add to the node")
|
||||
nodeCmd.AddCommand(tagCmd)
|
||||
|
||||
approveRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
approveRoutesCmd.MarkFlagRequired("identifier")
|
||||
_ = approveRoutesCmd.MarkFlagRequired("identifier")
|
||||
approveRoutesCmd.Flags().StringSliceP("routes", "r", []string{}, `List of routes that will be approved (comma-separated, e.g. "10.0.0.0/8,192.168.0.0/24" or empty string to remove all approved routes)`)
|
||||
nodeCmd.AddCommand(approveRoutesCmd)
|
||||
|
||||
@@ -168,10 +156,6 @@ var listNodesCmd = &cobra.Command{
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
|
||||
}
|
||||
showTags, err := cmd.Flags().GetBool("tags")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting tags flag: %s", err), output)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
@@ -194,7 +178,7 @@ var listNodesCmd = &cobra.Command{
|
||||
SuccessOutput(response.GetNodes(), "", output)
|
||||
}
|
||||
|
||||
tableData, err := nodesToPtables(user, showTags, response.GetNodes())
|
||||
tableData, err := nodesToPtables(user, response.GetNodes())
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
}
|
||||
@@ -240,10 +224,6 @@ var listNodeRoutesCmd = &cobra.Command{
|
||||
)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.GetNodes(), "", output)
|
||||
}
|
||||
|
||||
nodes := response.GetNodes()
|
||||
if identifier != 0 {
|
||||
for _, node := range response.GetNodes() {
|
||||
@@ -258,11 +238,13 @@ var listNodeRoutesCmd = &cobra.Command{
|
||||
return (n.GetSubnetRoutes() != nil && len(n.GetSubnetRoutes()) > 0) || (n.GetApprovedRoutes() != nil && len(n.GetApprovedRoutes()) > 0) || (n.GetAvailableRoutes() != nil && len(n.GetAvailableRoutes()) > 0)
|
||||
})
|
||||
|
||||
tableData, err := nodeRoutesToPtables(nodes)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
if output != "" {
|
||||
SuccessOutput(nodes, "", output)
|
||||
return
|
||||
}
|
||||
|
||||
tableData := nodeRoutesToPtables(nodes)
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
@@ -301,7 +283,8 @@ var expireNodeCmd = &cobra.Command{
|
||||
|
||||
return
|
||||
}
|
||||
expiryTime := time.Now()
|
||||
now := time.Now()
|
||||
expiryTime := now
|
||||
if expiry != "" {
|
||||
expiryTime, err = time.Parse(time.RFC3339, expiry)
|
||||
if err != nil {
|
||||
@@ -336,7 +319,11 @@ var expireNodeCmd = &cobra.Command{
|
||||
)
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetNode(), "Node expired", output)
|
||||
if now.Equal(expiryTime) || now.After(expiryTime) {
|
||||
SuccessOutput(response.GetNode(), "Node expired", output)
|
||||
} else {
|
||||
SuccessOutput(response.GetNode(), "Node expiration updated", output)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
@@ -455,66 +442,6 @@ var deleteNodeCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var moveNodeCmd = &cobra.Command{
|
||||
Use: "move",
|
||||
Short: "Move node to another user",
|
||||
Aliases: []string{"mv"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
user, err := cmd.Flags().GetUint64("user")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting user: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
getRequest := &v1.GetNodeRequest{
|
||||
NodeId: identifier,
|
||||
}
|
||||
|
||||
_, err = client.GetNode(ctx, getRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error getting node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
moveRequest := &v1.MoveNodeRequest{
|
||||
NodeId: identifier,
|
||||
User: user,
|
||||
}
|
||||
|
||||
moveResponse, err := client.MoveNode(ctx, moveRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error moving node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
SuccessOutput(moveResponse.GetNode(), "Node moved to another user", output)
|
||||
},
|
||||
}
|
||||
|
||||
var backfillNodeIPsCmd = &cobra.Command{
|
||||
Use: "backfillips",
|
||||
Short: "Backfill IPs missing from nodes",
|
||||
@@ -561,7 +488,6 @@ be assigned to nodes.`,
|
||||
|
||||
func nodesToPtables(
|
||||
currentUser string,
|
||||
showTags bool,
|
||||
nodes []*v1.Node,
|
||||
) (pterm.TableData, error) {
|
||||
tableHeader := []string{
|
||||
@@ -571,6 +497,7 @@ func nodesToPtables(
|
||||
"MachineKey",
|
||||
"NodeKey",
|
||||
"User",
|
||||
"Tags",
|
||||
"IP addresses",
|
||||
"Ephemeral",
|
||||
"Last seen",
|
||||
@@ -578,13 +505,6 @@ func nodesToPtables(
|
||||
"Connected",
|
||||
"Expired",
|
||||
}
|
||||
if showTags {
|
||||
tableHeader = append(tableHeader, []string{
|
||||
"ForcedTags",
|
||||
"InvalidTags",
|
||||
"ValidTags",
|
||||
}...)
|
||||
}
|
||||
tableData := pterm.TableData{tableHeader}
|
||||
|
||||
for _, node := range nodes {
|
||||
@@ -593,15 +513,21 @@ func nodesToPtables(
|
||||
ephemeral = true
|
||||
}
|
||||
|
||||
var lastSeen time.Time
|
||||
var lastSeenTime string
|
||||
var (
|
||||
lastSeen time.Time
|
||||
lastSeenTime string
|
||||
)
|
||||
|
||||
if node.GetLastSeen() != nil {
|
||||
lastSeen = node.GetLastSeen().AsTime()
|
||||
lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
|
||||
}
|
||||
|
||||
var expiry time.Time
|
||||
var expiryTime string
|
||||
var (
|
||||
expiry time.Time
|
||||
expiryTime string
|
||||
)
|
||||
|
||||
if node.GetExpiry() != nil {
|
||||
expiry = node.GetExpiry().AsTime()
|
||||
expiryTime = expiry.Format("2006-01-02 15:04:05")
|
||||
@@ -610,6 +536,7 @@ func nodesToPtables(
|
||||
}
|
||||
|
||||
var machineKey key.MachinePublic
|
||||
|
||||
err := machineKey.UnmarshalText(
|
||||
[]byte(node.GetMachineKey()),
|
||||
)
|
||||
@@ -618,6 +545,7 @@ func nodesToPtables(
|
||||
}
|
||||
|
||||
var nodeKey key.NodePublic
|
||||
|
||||
err = nodeKey.UnmarshalText(
|
||||
[]byte(node.GetNodeKey()),
|
||||
)
|
||||
@@ -639,25 +567,17 @@ func nodesToPtables(
|
||||
expired = pterm.LightRed("yes")
|
||||
}
|
||||
|
||||
var forcedTags string
|
||||
for _, tag := range node.GetForcedTags() {
|
||||
forcedTags += "," + tag
|
||||
// TODO(kradalby): as part of CLI rework, we should add the possibility to show "unusable" tags as mentioned in
|
||||
// https://github.com/juanfont/headscale/issues/2981
|
||||
var tagsBuilder strings.Builder
|
||||
|
||||
for _, tag := range node.GetTags() {
|
||||
tagsBuilder.WriteString("\n" + tag)
|
||||
}
|
||||
forcedTags = strings.TrimLeft(forcedTags, ",")
|
||||
var invalidTags string
|
||||
for _, tag := range node.GetInvalidTags() {
|
||||
if !slices.Contains(node.GetForcedTags(), tag) {
|
||||
invalidTags += "," + pterm.LightRed(tag)
|
||||
}
|
||||
}
|
||||
invalidTags = strings.TrimLeft(invalidTags, ",")
|
||||
var validTags string
|
||||
for _, tag := range node.GetValidTags() {
|
||||
if !slices.Contains(node.GetForcedTags(), tag) {
|
||||
validTags += "," + pterm.LightGreen(tag)
|
||||
}
|
||||
}
|
||||
validTags = strings.TrimLeft(validTags, ",")
|
||||
|
||||
tags := tagsBuilder.String()
|
||||
|
||||
tags = strings.TrimLeft(tags, "\n")
|
||||
|
||||
var user string
|
||||
if currentUser == "" || (currentUser == node.GetUser().GetName()) {
|
||||
@@ -667,8 +587,11 @@ func nodesToPtables(
|
||||
user = pterm.LightYellow(node.GetUser().GetName())
|
||||
}
|
||||
|
||||
var IPV4Address string
|
||||
var IPV6Address string
|
||||
var (
|
||||
IPV4Address string
|
||||
IPV6Address string
|
||||
)
|
||||
|
||||
for _, addr := range node.GetIpAddresses() {
|
||||
if netip.MustParseAddr(addr).Is4() {
|
||||
IPV4Address = addr
|
||||
@@ -684,6 +607,7 @@ func nodesToPtables(
|
||||
machineKey.ShortString(),
|
||||
nodeKey.ShortString(),
|
||||
user,
|
||||
tags,
|
||||
strings.Join([]string{IPV4Address, IPV6Address}, ", "),
|
||||
strconv.FormatBool(ephemeral),
|
||||
lastSeenTime,
|
||||
@@ -691,9 +615,6 @@ func nodesToPtables(
|
||||
online,
|
||||
expired,
|
||||
}
|
||||
if showTags {
|
||||
nodeData = append(nodeData, []string{forcedTags, invalidTags, validTags}...)
|
||||
}
|
||||
tableData = append(
|
||||
tableData,
|
||||
nodeData,
|
||||
@@ -705,7 +626,7 @@ func nodesToPtables(
|
||||
|
||||
func nodeRoutesToPtables(
|
||||
nodes []*v1.Node,
|
||||
) (pterm.TableData, error) {
|
||||
) pterm.TableData {
|
||||
tableHeader := []string{
|
||||
"ID",
|
||||
"Hostname",
|
||||
@@ -719,9 +640,9 @@ func nodeRoutesToPtables(
|
||||
nodeData := []string{
|
||||
strconv.FormatUint(node.GetId(), util.Base10),
|
||||
node.GetGivenName(),
|
||||
strings.Join(node.GetApprovedRoutes(), ", "),
|
||||
strings.Join(node.GetAvailableRoutes(), ", "),
|
||||
strings.Join(node.GetSubnetRoutes(), ", "),
|
||||
strings.Join(node.GetApprovedRoutes(), "\n"),
|
||||
strings.Join(node.GetAvailableRoutes(), "\n"),
|
||||
strings.Join(node.GetSubnetRoutes(), "\n"),
|
||||
}
|
||||
tableData = append(
|
||||
tableData,
|
||||
@@ -729,7 +650,7 @@ func nodeRoutesToPtables(
|
||||
)
|
||||
}
|
||||
|
||||
return tableData, nil
|
||||
return tableData
|
||||
}
var tagCmd = &cobra.Command{

@@ -16,7 +16,7 @@ import (
)

const (
bypassFlag = "bypass-grpc-and-access-database-directly"
bypassFlag = "bypass-grpc-and-access-database-directly" //nolint:gosec // not a credential
)

func init() {

@@ -26,16 +26,22 @@ func init() {
policyCmd.AddCommand(getPolicy)

setPolicy.Flags().StringP("file", "f", "", "Path to a policy file in HuJSON format")
if err := setPolicy.MarkFlagRequired("file"); err != nil {
err := setPolicy.MarkFlagRequired("file")
if err != nil {
log.Fatal().Err(err).Msg("")
}

setPolicy.Flags().BoolP(bypassFlag, "", false, "Uses the headscale config to directly access the database, bypassing gRPC and does not require the server to be running")
policyCmd.AddCommand(setPolicy)

checkPolicy.Flags().StringP("file", "f", "", "Path to a policy file in HuJSON format")
if err := checkPolicy.MarkFlagRequired("file"); err != nil {
err = checkPolicy.MarkFlagRequired("file")
if err != nil {
log.Fatal().Err(err).Msg("")
}

policyCmd.AddCommand(checkPolicy)
}

@@ -69,8 +75,7 @@ var getPolicy = &cobra.Command{
}

d, err := db.NewHeadscaleDatabase(
cfg.Database,
cfg.BaseDomain,
cfg,
nil,
)
if err != nil {

@@ -145,8 +150,7 @@ var setPolicy = &cobra.Command{
}

d, err := db.NewHeadscaleDatabase(
cfg.Database,
cfg.BaseDomain,
cfg,
nil,
)
if err != nil {

@@ -175,7 +179,7 @@ var setPolicy = &cobra.Command{
defer cancel()
defer conn.Close()

if _, err := client.SetPolicy(ctx, request); err != nil {
if _, err := client.SetPolicy(ctx, request); err != nil { //nolint:noinlineerr
ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)
}
}
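The new bypass flag lets `policy set` work against the database directly when the server is not running. The Run body is only partially visible in this hunk, so the following is a minimal sketch of how such a branch is typically wired; the flag name comes from the diff above, the rest is assumed and the database-opening details are elided rather than guessed:

```go
// Sketch only: choose between the direct-database path and the gRPC path.
bypass, _ := cmd.Flags().GetBool(bypassFlag)
if bypass {
	// Load the headscale config and open the database directly
	// (see the db.NewHeadscaleDatabase call in the hunk above).
} else {
	ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
	defer cancel()
	defer conn.Close()

	if _, err := client.SetPolicy(ctx, request); err != nil {
		ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)
	}
}
```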
@@ -9,7 +9,6 @@ import (
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/prometheus/common/model"
"github.com/pterm/pterm"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"google.golang.org/protobuf/types/known/timestamppb"
)

@@ -20,20 +19,10 @@ const (

func init() {
rootCmd.AddCommand(preauthkeysCmd)
preauthkeysCmd.PersistentFlags().Uint64P("user", "u", 0, "User identifier (ID)")

preauthkeysCmd.PersistentFlags().StringP("namespace", "n", "", "User")
pakNamespaceFlag := preauthkeysCmd.PersistentFlags().Lookup("namespace")
pakNamespaceFlag.Deprecated = deprecateNamespaceMessage
pakNamespaceFlag.Hidden = true

err := preauthkeysCmd.MarkPersistentFlagRequired("user")
if err != nil {
log.Fatal().Err(err).Msg("")
}
preauthkeysCmd.AddCommand(listPreAuthKeys)
preauthkeysCmd.AddCommand(createPreAuthKeyCmd)
preauthkeysCmd.AddCommand(expirePreAuthKeyCmd)
preauthkeysCmd.AddCommand(deletePreAuthKeyCmd)
createPreAuthKeyCmd.PersistentFlags().
Bool("reusable", false, "Make the preauthkey reusable")
createPreAuthKeyCmd.PersistentFlags().

@@ -42,6 +31,9 @@ func init() {
StringP("expiration", "e", DefaultPreAuthKeyExpiry, "Human-readable expiration of the key (e.g. 30m, 24h)")
createPreAuthKeyCmd.Flags().
StringSlice("tags", []string{}, "Tags to automatically assign to node")
createPreAuthKeyCmd.PersistentFlags().Uint64P("user", "u", 0, "User identifier (ID)")
expirePreAuthKeyCmd.PersistentFlags().Uint64P("id", "i", 0, "Authkey ID")
deletePreAuthKeyCmd.PersistentFlags().Uint64P("id", "i", 0, "Authkey ID")
}

var preauthkeysCmd = &cobra.Command{

@@ -52,25 +44,16 @@ var preauthkeysCmd = &cobra.Command{

var listPreAuthKeys = &cobra.Command{
Use: "list",
Short: "List the preauthkeys for this user",
Short: "List all preauthkeys",
Aliases: []string{"ls", "show"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")

user, err := cmd.Flags().GetUint64("user")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
}

ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()

request := &v1.ListPreAuthKeysRequest{
User: user,
}

response, err := client.ListPreAuthKeys(ctx, request)
response, err := client.ListPreAuthKeys(ctx, &v1.ListPreAuthKeysRequest{})
if err != nil {
ErrorOutput(
err,

@@ -88,13 +71,13 @@ var listPreAuthKeys = &cobra.Command{
tableData := pterm.TableData{
{
"ID",
"Key",
"Key/Prefix",
"Reusable",
"Ephemeral",
"Used",
"Expiration",
"Created",
"Tags",
"Owner",
},
}
for _, key := range response.GetPreAuthKeys() {

@@ -103,14 +86,15 @@ var listPreAuthKeys = &cobra.Command{
expiration = ColourTime(key.GetExpiration().AsTime())
}

aclTags := ""

for _, tag := range key.GetAclTags() {
aclTags += "," + tag
var owner string
if len(key.GetAclTags()) > 0 {
owner = strings.Join(key.GetAclTags(), "\n")
} else if key.GetUser() != nil {
owner = key.GetUser().GetName()
} else {
owner = "-"
}

aclTags = strings.TrimLeft(aclTags, ",")

tableData = append(tableData, []string{
strconv.FormatUint(key.GetId(), 10),
key.GetKey(),

@@ -119,7 +103,7 @@ var listPreAuthKeys = &cobra.Command{
strconv.FormatBool(key.GetUsed()),
expiration,
key.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
aclTags,
owner,
})

}

@@ -136,16 +120,12 @@ var listPreAuthKeys = &cobra.Command{

var createPreAuthKeyCmd = &cobra.Command{
Use: "create",
Short: "Creates a new preauthkey in the specified user",
Short: "Creates a new preauthkey",
Aliases: []string{"c", "new"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")

user, err := cmd.Flags().GetUint64("user")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
}

user, _ := cmd.Flags().GetUint64("user")
reusable, _ := cmd.Flags().GetBool("reusable")
ephemeral, _ := cmd.Flags().GetBool("ephemeral")
tags, _ := cmd.Flags().GetStringSlice("tags")

@@ -170,10 +150,6 @@ var createPreAuthKeyCmd = &cobra.Command{

expiration := time.Now().UTC().Add(time.Duration(duration))

log.Trace().
Dur("expiration", time.Duration(duration)).
Msg("expiration has been set")

request.Expiration = timestamppb.New(expiration)

ctx, client, conn, cancel := newHeadscaleCLIWithConfig()

@@ -194,21 +170,21 @@ var createPreAuthKeyCmd = &cobra.Command{
}

var expirePreAuthKeyCmd = &cobra.Command{
Use: "expire KEY",
Use: "expire",
Short: "Expire a preauthkey",
Aliases: []string{"revoke", "exp", "e"},
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return errMissingParameter
}

return nil
},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetUint64("user")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
id, _ := cmd.Flags().GetUint64("id")

if id == 0 {
ErrorOutput(
errMissingParameter,
"Error: missing --id parameter",
output,
)

return
}

ctx, client, conn, cancel := newHeadscaleCLIWithConfig()

@@ -216,8 +192,7 @@ var expirePreAuthKeyCmd = &cobra.Command{
defer conn.Close()

request := &v1.ExpirePreAuthKeyRequest{
User: user,
Key: args[0],
Id: id,
}

response, err := client.ExpirePreAuthKey(ctx, request)

@@ -232,3 +207,42 @@ var expirePreAuthKeyCmd = &cobra.Command{
SuccessOutput(response, "Key expired", output)
},
}

var deletePreAuthKeyCmd = &cobra.Command{
Use: "delete",
Short: "Delete a preauthkey",
Aliases: []string{"del", "rm", "d"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
id, _ := cmd.Flags().GetUint64("id")

if id == 0 {
ErrorOutput(
errMissingParameter,
"Error: missing --id parameter",
output,
)

return
}

ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()

request := &v1.DeletePreAuthKeyRequest{
Id: id,
}

response, err := client.DeletePreAuthKey(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot delete Pre Auth Key: %s\n", err),
output,
)
}

SuccessOutput(response, "Key deleted", output)
},
}
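The expire and delete commands now address keys by numeric ID instead of by user and key string. A hedged sketch of the same operations made directly against the generated gRPC client — the request shapes are taken from the hunks above, and the client setup mirrors `newHeadscaleCLIWithConfig`:

```go
// List every pre-auth key, then expire and delete the first one by ID.
keys, err := client.ListPreAuthKeys(ctx, &v1.ListPreAuthKeysRequest{})
if err != nil {
	log.Fatal().Err(err).Msg("listing pre-auth keys")
}

if len(keys.GetPreAuthKeys()) > 0 {
	id := keys.GetPreAuthKeys()[0].GetId()

	if _, err := client.ExpirePreAuthKey(ctx, &v1.ExpirePreAuthKeyRequest{Id: id}); err != nil {
		log.Fatal().Err(err).Msg("expiring pre-auth key")
	}

	if _, err := client.DeletePreAuthKey(ctx, &v1.DeletePreAuthKeyRequest{Id: id}); err != nil {
		log.Fatal().Err(err).Msg("deleting pre-auth key")
	}
}
```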
@@ -45,15 +45,16 @@ func initConfig() {
if cfgFile == "" {
cfgFile = os.Getenv("HEADSCALE_CONFIG")
}

if cfgFile != "" {
err := types.LoadConfig(cfgFile, true)
if err != nil {
log.Fatal().Caller().Err(err).Msgf("Error loading config file %s", cfgFile)
log.Fatal().Caller().Err(err).Msgf("error loading config file %s", cfgFile)
}
} else {
err := types.LoadConfig("", false)
if err != nil {
log.Fatal().Caller().Err(err).Msgf("Error loading config")
log.Fatal().Caller().Err(err).Msgf("error loading config")
}
}

@@ -80,6 +81,7 @@ func initConfig() {
Repository: "headscale",
TagFilterFunc: filterPreReleasesIfStable(func() string { return versionInfo.Version }),
}

res, err := latest.Check(githubTag, versionInfo.Version)
if err == nil && res.Outdated {
//nolint

@@ -101,6 +103,7 @@ func isPreReleaseVersion(version string) bool {
return true
}
}

return false
}

@@ -140,7 +143,8 @@ https://github.com/juanfont/headscale`,
}

func Execute() {
if err := rootCmd.Execute(); err != nil {
err := rootCmd.Execute()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}

@@ -29,12 +29,12 @@ var serveCmd = &cobra.Command{
fmt.Println(squibbleErr.Diff)
}

log.Fatal().Caller().Err(err).Msg("Error initializing")
log.Fatal().Caller().Err(err).Msg("error initializing")
}

err = app.Serve()
if err != nil && !errors.Is(err, http.ErrServerClosed) {
log.Fatal().Caller().Err(err).Msg("Headscale ran into an error and had to shut down.")
log.Fatal().Caller().Err(err).Msg("headscale ran into an error and had to shut down")
}
},
}

@@ -8,12 +8,19 @@ import (

v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/hscontrol/util/zlog/zf"
"github.com/pterm/pterm"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
)

// CLI user errors.
var (
errFlagRequired = errors.New("--name or --identifier flag is required")
errMultipleUsersMatch = errors.New("multiple users match query, specify an ID")
)

func usernameAndIDFlag(cmd *cobra.Command) {
cmd.Flags().Int64P("identifier", "i", -1, "User identifier (ID)")
cmd.Flags().StringP("name", "n", "", "Username")

@@ -23,12 +30,12 @@ func usernameAndIDFlag(cmd *cobra.Command) {
// If both are empty, it will exit the program with an error.
func usernameAndIDFromFlag(cmd *cobra.Command) (uint64, string) {
username, _ := cmd.Flags().GetString("name")

identifier, _ := cmd.Flags().GetInt64("identifier")
if username == "" && identifier < 0 {
err := errors.New("--name or --identifier flag is required")
ErrorOutput(
err,
"Cannot rename user: "+status.Convert(err).Message(),
errFlagRequired,
"Cannot rename user: "+status.Convert(errFlagRequired).Message(),
"",
)
}

@@ -50,7 +57,8 @@ func init() {
userCmd.AddCommand(renameUserCmd)
usernameAndIDFlag(renameUserCmd)
renameUserCmd.Flags().StringP("new-name", "r", "", "New username")
renameNodeCmd.MarkFlagRequired("new-name")

_ = renameNodeCmd.MarkFlagRequired("new-name")
}
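Worth flagging: the `--new-name` flag is registered on `renameUserCmd`, but both the old and the new line call `MarkFlagRequired` on `renameNodeCmd`, which looks like a copy-paste slip that this change preserves rather than fixes. If the intent was to require the flag on the user command, the presumably intended wiring would be:

```go
// Presumed intent (not what the diff does): mark the flag required on the
// same command it was registered on.
renameUserCmd.Flags().StringP("new-name", "r", "", "New username")
_ = renameUserCmd.MarkFlagRequired("new-name")
```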

var errMissingParameter = errors.New("missing parameters")

@@ -81,7 +89,7 @@ var createUserCmd = &cobra.Command{
defer cancel()
defer conn.Close()

log.Trace().Interface("client", client).Msg("Obtained gRPC client")
log.Trace().Interface(zf.Client, client).Msg("obtained gRPC client")

request := &v1.CreateUserRequest{Name: userName}

@@ -94,7 +102,7 @@ var createUserCmd = &cobra.Command{
}

if pictureURL, _ := cmd.Flags().GetString("picture-url"); pictureURL != "" {
if _, err := url.Parse(pictureURL); err != nil {
if _, err := url.Parse(pictureURL); err != nil { //nolint:noinlineerr
ErrorOutput(
err,
fmt.Sprintf(

@@ -107,7 +115,7 @@ var createUserCmd = &cobra.Command{
request.PictureUrl = pictureURL
}

log.Trace().Interface("request", request).Msg("Sending CreateUser request")
log.Trace().Interface(zf.Request, request).Msg("sending CreateUser request")
response, err := client.CreateUser(ctx, request)
if err != nil {
ErrorOutput(

@@ -148,7 +156,7 @@ var destroyUserCmd = &cobra.Command{
}

if len(users.GetUsers()) != 1 {
err := errors.New("Unable to determine user to delete, query returned multiple users, use ID")
err := errMultipleUsersMatch
ErrorOutput(
err,
"Error: "+status.Convert(err).Message(),

@@ -276,7 +284,7 @@ var renameUserCmd = &cobra.Command{
}

if len(users.GetUsers()) != 1 {
err := errors.New("Unable to determine user to delete, query returned multiple users, use ID")
err := errMultipleUsersMatch
ErrorOutput(
err,
"Error: "+status.Convert(err).Message(),

@@ -11,6 +11,7 @@ import (
"github.com/juanfont/headscale/hscontrol"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/hscontrol/util/zlog/zf"
"github.com/rs/zerolog/log"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"

@@ -57,7 +58,7 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
ctx, cancel := context.WithTimeout(context.Background(), cfg.CLI.Timeout)

grpcOptions := []grpc.DialOption{
grpc.WithBlock(),
grpc.WithBlock(), //nolint:staticcheck // SA1019: deprecated but supported in 1.x
}

address := cfg.CLI.Address

@@ -81,6 +82,7 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
Msgf("Unable to read/write to headscale socket, do you have the correct permissions?")
}
}

socket.Close()

grpcOptions = append(

@@ -92,8 +94,9 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
// If we are not connecting to a local server, require an API key for authentication
apiKey := cfg.CLI.APIKey
if apiKey == "" {
log.Fatal().Caller().Msgf("HEADSCALE_CLI_API_KEY environment variable needs to be set.")
log.Fatal().Caller().Msgf("HEADSCALE_CLI_API_KEY environment variable needs to be set")
}

grpcOptions = append(grpcOptions,
grpc.WithPerRPCCredentials(tokenAuth{
token: apiKey,

@@ -118,10 +121,11 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
}
}

log.Trace().Caller().Str("address", address).Msg("Connecting via gRPC")
conn, err := grpc.DialContext(ctx, address, grpcOptions...)
log.Trace().Caller().Str(zf.Address, address).Msg("connecting via gRPC")

conn, err := grpc.DialContext(ctx, address, grpcOptions...) //nolint:staticcheck // SA1019: deprecated but supported in 1.x
if err != nil {
log.Fatal().Caller().Err(err).Msgf("Could not connect: %v", err)
log.Fatal().Caller().Err(err).Msgf("could not connect: %v", err)
os.Exit(-1) // we get here if logging is suppressed (i.e., json output)
}

@@ -130,24 +134,27 @@ func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *g
return ctx, client, conn, cancel
}
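The nolint markers keep the deprecated `grpc.WithBlock`/`grpc.DialContext` path, which grpc-go still supports in 1.x. For reference, a minimal sketch of the same connection built with the newer `grpc.NewClient` API — this assumes grpc-go ≥ 1.63 and that the option list is built without the blocking options, since `NewClient` connects lazily:

```go
// Sketch under stated assumptions: grpcOptions here is the same slice as
// above but without grpc.WithBlock.
conn, err := grpc.NewClient(address, grpcOptions...)
if err != nil {
	log.Fatal().Caller().Err(err).Msgf("could not create gRPC client: %v", err)
}

client := v1.NewHeadscaleServiceClient(conn) // assumed generated constructor name
```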

func output(result interface{}, override string, outputFormat string) string {
var jsonBytes []byte
var err error
func output(result any, override string, outputFormat string) string {
var (
jsonBytes []byte
err error
)

switch outputFormat {
case "json":
jsonBytes, err = json.MarshalIndent(result, "", "\t")
if err != nil {
log.Fatal().Err(err).Msg("failed to unmarshal output")
log.Fatal().Err(err).Msg("unmarshalling output")
}
case "json-line":
jsonBytes, err = json.Marshal(result)
if err != nil {
log.Fatal().Err(err).Msg("failed to unmarshal output")
log.Fatal().Err(err).Msg("unmarshalling output")
}
case "yaml":
jsonBytes, err = yaml.Marshal(result)
if err != nil {
log.Fatal().Err(err).Msg("failed to unmarshal output")
log.Fatal().Err(err).Msg("unmarshalling output")
}
default:
// nolint

@@ -158,7 +165,7 @@ func output(result interface{}, override string, outputFormat string) string {
}

// SuccessOutput prints the result to stdout and exits with status code 0.
func SuccessOutput(result interface{}, override string, outputFormat string) {
func SuccessOutput(result any, override string, outputFormat string) {
fmt.Println(output(result, override, outputFormat))
os.Exit(0)
}
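For context, the three `--output` formats differ only in how the result is marshalled. A hedged sketch of what each branch produces for a small value; the exact field casing of real responses depends on their protobuf/JSON tags, so the type below is illustrative:

```go
type example struct {
	Name string `json:"name" yaml:"name"`
	ID   uint64 `json:"id"   yaml:"id"`
}

v := example{Name: "node-1", ID: 42}

// "json":      tab-indented, multi-line JSON
// "json-line": one compact line, e.g. {"name":"node-1","id":42}
// "yaml":      name: node-1 / id: 42 on separate lines
fmt.Println(output(v, "", "json-line"))
```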

@@ -12,6 +12,7 @@ import (

func main() {
var colors bool

switch l := termcolor.SupportLevel(os.Stderr); l {
case termcolor.Level16M:
colors = true

@@ -9,34 +9,15 @@ import (
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/spf13/viper"
"gopkg.in/check.v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func Test(t *testing.T) {
check.TestingT(t)
}

var _ = check.Suite(&Suite{})

type Suite struct{}

func (s *Suite) SetUpSuite(c *check.C) {
}

func (s *Suite) TearDownSuite(c *check.C) {
}

func (*Suite) TestConfigFileLoading(c *check.C) {
tmpDir, err := os.MkdirTemp("", "headscale")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
func TestConfigFileLoading(t *testing.T) {
tmpDir := t.TempDir()

path, err := os.Getwd()
if err != nil {
c.Fatal(err)
}
require.NoError(t, err)

cfgFile := filepath.Join(tmpDir, "config.yaml")

@@ -45,70 +26,52 @@ func (*Suite) TestConfigFileLoading(c *check.C) {
filepath.Clean(path+"/../../config-example.yaml"),
cfgFile,
)
if err != nil {
c.Fatal(err)
}
require.NoError(t, err)

// Load example config, it should load without validation errors
err = types.LoadConfig(cfgFile, true)
c.Assert(err, check.IsNil)
require.NoError(t, err)

// Test that config file was interpreted correctly
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
c.Assert(viper.GetString("database.type"), check.Equals, "sqlite")
c.Assert(viper.GetString("database.sqlite.path"), check.Equals, "/var/lib/headscale/db.sqlite")
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
c.Assert(
util.GetFileMode("unix_socket_permission"),
check.Equals,
fs.FileMode(0o770),
)
c.Assert(viper.GetBool("logtail.enabled"), check.Equals, false)
assert.Equal(t, "http://127.0.0.1:8080", viper.GetString("server_url"))
assert.Equal(t, "127.0.0.1:8080", viper.GetString("listen_addr"))
assert.Equal(t, "127.0.0.1:9090", viper.GetString("metrics_listen_addr"))
assert.Equal(t, "sqlite", viper.GetString("database.type"))
assert.Equal(t, "/var/lib/headscale/db.sqlite", viper.GetString("database.sqlite.path"))
assert.Empty(t, viper.GetString("tls_letsencrypt_hostname"))
assert.Equal(t, ":http", viper.GetString("tls_letsencrypt_listen"))
assert.Equal(t, "HTTP-01", viper.GetString("tls_letsencrypt_challenge_type"))
assert.Equal(t, fs.FileMode(0o770), util.GetFileMode("unix_socket_permission"))
assert.False(t, viper.GetBool("logtail.enabled"))
}

func (*Suite) TestConfigLoading(c *check.C) {
tmpDir, err := os.MkdirTemp("", "headscale")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
func TestConfigLoading(t *testing.T) {
tmpDir := t.TempDir()

path, err := os.Getwd()
if err != nil {
c.Fatal(err)
}
require.NoError(t, err)

// Symlink the example config file
err = os.Symlink(
filepath.Clean(path+"/../../config-example.yaml"),
filepath.Join(tmpDir, "config.yaml"),
)
if err != nil {
c.Fatal(err)
}
require.NoError(t, err)

// Load example config, it should load without validation errors
err = types.LoadConfig(tmpDir, false)
c.Assert(err, check.IsNil)
require.NoError(t, err)

// Test that config file was interpreted correctly
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
c.Assert(viper.GetString("database.type"), check.Equals, "sqlite")
c.Assert(viper.GetString("database.sqlite.path"), check.Equals, "/var/lib/headscale/db.sqlite")
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
c.Assert(
util.GetFileMode("unix_socket_permission"),
check.Equals,
fs.FileMode(0o770),
)
c.Assert(viper.GetBool("logtail.enabled"), check.Equals, false)
c.Assert(viper.GetBool("randomize_client_port"), check.Equals, false)
assert.Equal(t, "http://127.0.0.1:8080", viper.GetString("server_url"))
assert.Equal(t, "127.0.0.1:8080", viper.GetString("listen_addr"))
assert.Equal(t, "127.0.0.1:9090", viper.GetString("metrics_listen_addr"))
assert.Equal(t, "sqlite", viper.GetString("database.type"))
assert.Equal(t, "/var/lib/headscale/db.sqlite", viper.GetString("database.sqlite.path"))
assert.Empty(t, viper.GetString("tls_letsencrypt_hostname"))
assert.Equal(t, ":http", viper.GetString("tls_letsencrypt_listen"))
assert.Equal(t, "HTTP-01", viper.GetString("tls_letsencrypt_challenge_type"))
assert.Equal(t, fs.FileMode(0o770), util.GetFileMode("unix_socket_permission"))
assert.False(t, viper.GetBool("logtail.enabled"))
assert.False(t, viper.GetBool("randomize_client_port"))
}
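The migration from gopkg.in/check.v1 to testify uses `require` for setup steps and `assert` for value checks; the difference is whether a failure aborts the test immediately. A minimal illustration:

```go
func TestRequireVersusAssert(t *testing.T) {
	var err error

	// require.* stops the test on failure - suitable for setup that the
	// remaining checks depend on (symlinking the config, loading it, etc.).
	require.NoError(t, err)

	// assert.* records the failure and keeps going, so every config value
	// that is wrong shows up in a single test run.
	assert.Equal(t, "sqlite", "sqlite")
	assert.False(t, false)
}
```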
cmd/hi/README.md (new file, +6)

@@ -0,0 +1,6 @@
# hi

hi (headscale integration runner) is an entirely "vibe coded" wrapper around our
[integration test suite](../integration). It essentially runs the Docker
commands for you, with the added benefit of extracting resources such as logs
and databases.
@@ -3,9 +3,13 @@ package main
import (
"context"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"time"

"github.com/cenkalti/backoff/v5"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image"

@@ -14,30 +18,46 @@ import (
)

// cleanupBeforeTest performs cleanup operations before running tests.
// Only removes stale (stopped/exited) test containers to avoid interfering with concurrent test runs.
func cleanupBeforeTest(ctx context.Context) error {
if err := killTestContainers(ctx); err != nil {
return fmt.Errorf("failed to kill test containers: %w", err)
err := cleanupStaleTestContainers(ctx)
if err != nil {
return fmt.Errorf("cleaning stale test containers: %w", err)
}

if err := pruneDockerNetworks(ctx); err != nil {
return fmt.Errorf("failed to prune networks: %w", err)
if err := pruneDockerNetworks(ctx); err != nil { //nolint:noinlineerr
return fmt.Errorf("pruning networks: %w", err)
}

return nil
}

// cleanupAfterTest removes the test container after completion.
func cleanupAfterTest(ctx context.Context, cli *client.Client, containerID string) error {
return cli.ContainerRemove(ctx, containerID, container.RemoveOptions{
// cleanupAfterTest removes the test container and all associated integration test containers for the run.
func cleanupAfterTest(ctx context.Context, cli *client.Client, containerID, runID string) error {
// Remove the main test container
err := cli.ContainerRemove(ctx, containerID, container.RemoveOptions{
Force: true,
})
if err != nil {
return fmt.Errorf("removing test container: %w", err)
}

// Clean up integration test containers for this run only
if runID != "" {
err := killTestContainersByRunID(ctx, runID)
if err != nil {
return fmt.Errorf("cleaning up containers for run %s: %w", runID, err)
}
}

return nil
}

// killTestContainers terminates and removes all test containers.
func killTestContainers(ctx context.Context) error {
cli, err := createDockerClient()
cli, err := createDockerClient(ctx)
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
return fmt.Errorf("creating Docker client: %w", err)
}
defer cli.Close()

@@ -45,12 +65,14 @@ func killTestContainers(ctx context.Context) error {
All: true,
})
if err != nil {
return fmt.Errorf("failed to list containers: %w", err)
return fmt.Errorf("listing containers: %w", err)
}

removed := 0

for _, cont := range containers {
shouldRemove := false

for _, name := range cont.Names {
if strings.Contains(name, "headscale-test-suite") ||
strings.Contains(name, "hs-") ||

@@ -83,43 +105,135 @@ func killTestContainers(ctx context.Context) error {
return nil
}

// killTestContainersByRunID terminates and removes all test containers for a specific run ID.
// This function filters containers by the hi.run-id label to only affect containers
// belonging to the specified test run, leaving other concurrent test runs untouched.
func killTestContainersByRunID(ctx context.Context, runID string) error {
cli, err := createDockerClient(ctx)
if err != nil {
return fmt.Errorf("creating Docker client: %w", err)
}
defer cli.Close()

// Filter containers by hi.run-id label
containers, err := cli.ContainerList(ctx, container.ListOptions{
All: true,
Filters: filters.NewArgs(
filters.Arg("label", "hi.run-id="+runID),
),
})
if err != nil {
return fmt.Errorf("listing containers for run %s: %w", runID, err)
}

removed := 0

for _, cont := range containers {
// Kill the container if it's running
if cont.State == "running" {
_ = cli.ContainerKill(ctx, cont.ID, "KILL")
}

// Remove the container with retry logic
if removeContainerWithRetry(ctx, cli, cont.ID) {
removed++
}
}

if removed > 0 {
fmt.Printf("Removed %d containers for run ID %s\n", removed, runID)
}

return nil
}
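Filtering on the `hi.run-id` label only works if the integration containers are created with that label; where exactly the label is applied is not part of this diff, so the following is an assumed sketch of the Docker SDK call involved:

```go
// Hypothetical: label an integration container at creation time so that
// killTestContainersByRunID can later find it via the hi.run-id label.
resp, err := cli.ContainerCreate(ctx,
	&container.Config{
		Image:  "golang:1.24", // illustrative image
		Labels: map[string]string{"hi.run-id": runID},
	},
	nil, nil, nil, containerName)
if err != nil {
	log.Printf("Warning: creating labelled container: %v", err)
} else {
	log.Printf("Created %s for run %s", resp.ID, runID)
}
```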

// cleanupStaleTestContainers removes stopped/exited test containers without affecting running tests.
// This is useful for cleaning up leftover containers from previous crashed or interrupted test runs
// without interfering with currently running concurrent tests.
func cleanupStaleTestContainers(ctx context.Context) error {
cli, err := createDockerClient(ctx)
if err != nil {
return fmt.Errorf("creating Docker client: %w", err)
}
defer cli.Close()

// Only get stopped/exited containers
containers, err := cli.ContainerList(ctx, container.ListOptions{
All: true,
Filters: filters.NewArgs(
filters.Arg("status", "exited"),
filters.Arg("status", "dead"),
),
})
if err != nil {
return fmt.Errorf("listing stopped containers: %w", err)
}

removed := 0

for _, cont := range containers {
// Only remove containers that look like test containers
shouldRemove := false

for _, name := range cont.Names {
if strings.Contains(name, "headscale-test-suite") ||
strings.Contains(name, "hs-") ||
strings.Contains(name, "ts-") ||
strings.Contains(name, "derp-") {
shouldRemove = true
break
}
}

if shouldRemove {
if removeContainerWithRetry(ctx, cli, cont.ID) {
removed++
}
}
}

if removed > 0 {
fmt.Printf("Removed %d stale test containers\n", removed)
}

return nil
}

const (
containerRemoveInitialInterval = 100 * time.Millisecond
containerRemoveMaxElapsedTime = 2 * time.Second
)

// removeContainerWithRetry attempts to remove a container with exponential backoff retry logic.
func removeContainerWithRetry(ctx context.Context, cli *client.Client, containerID string) bool {
maxRetries := 3
baseDelay := 100 * time.Millisecond
expBackoff := backoff.NewExponentialBackOff()
expBackoff.InitialInterval = containerRemoveInitialInterval

for attempt := range maxRetries {
_, err := backoff.Retry(ctx, func() (struct{}, error) {
err := cli.ContainerRemove(ctx, containerID, container.RemoveOptions{
Force: true,
})
if err == nil {
return true
if err != nil {
return struct{}{}, err
}

// If this is the last attempt, don't wait
if attempt == maxRetries-1 {
break
}
return struct{}{}, nil
}, backoff.WithBackOff(expBackoff), backoff.WithMaxElapsedTime(containerRemoveMaxElapsedTime))

// Wait with exponential backoff
delay := baseDelay * time.Duration(1<<attempt)
time.Sleep(delay)
}

return false
return err == nil
}
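The retry now goes through cenkalti/backoff/v5, whose generic `Retry` takes a context, an operation returning `(T, error)`, and options, replacing the hand-rolled attempt loop being removed above. A stripped-down sketch of the same pattern in isolation, mirroring the options used in the diff:

```go
// Minimal backoff/v5 usage: keep retrying the removal until it succeeds or
// the maximum elapsed time is reached.
expBackoff := backoff.NewExponentialBackOff()
expBackoff.InitialInterval = 100 * time.Millisecond

_, err := backoff.Retry(ctx, func() (struct{}, error) {
	// Any error returned here schedules another attempt.
	return struct{}{}, cli.ContainerRemove(ctx, containerID, container.RemoveOptions{Force: true})
}, backoff.WithBackOff(expBackoff), backoff.WithMaxElapsedTime(2*time.Second))

removed := err == nil // true once the container is gone
_ = removed
```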

// pruneDockerNetworks removes unused Docker networks.
func pruneDockerNetworks(ctx context.Context) error {
cli, err := createDockerClient()
cli, err := createDockerClient(ctx)
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
return fmt.Errorf("creating Docker client: %w", err)
}
defer cli.Close()

report, err := cli.NetworksPrune(ctx, filters.Args{})
if err != nil {
return fmt.Errorf("failed to prune networks: %w", err)
return fmt.Errorf("pruning networks: %w", err)
}

if len(report.NetworksDeleted) > 0 {

@@ -133,9 +247,9 @@ func pruneDockerNetworks(ctx context.Context) error {

// cleanOldImages removes test-related and old dangling Docker images.
func cleanOldImages(ctx context.Context) error {
cli, err := createDockerClient()
cli, err := createDockerClient(ctx)
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
return fmt.Errorf("creating Docker client: %w", err)
}
defer cli.Close()

@@ -143,12 +257,14 @@ func cleanOldImages(ctx context.Context) error {
All: true,
})
if err != nil {
return fmt.Errorf("failed to list images: %w", err)
return fmt.Errorf("listing images: %w", err)
}

removed := 0

for _, img := range images {
shouldRemove := false

for _, tag := range img.RepoTags {
if strings.Contains(tag, "hs-") ||
strings.Contains(tag, "headscale-integration") ||

@@ -183,18 +299,19 @@ func cleanOldImages(ctx context.Context) error {

// cleanCacheVolume removes the Docker volume used for Go module cache.
func cleanCacheVolume(ctx context.Context) error {
cli, err := createDockerClient()
cli, err := createDockerClient(ctx)
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
return fmt.Errorf("creating Docker client: %w", err)
}
defer cli.Close()

volumeName := "hs-integration-go-cache"

err = cli.VolumeRemove(ctx, volumeName, true)
if err != nil {
if errdefs.IsNotFound(err) {
if errdefs.IsNotFound(err) { //nolint:staticcheck // SA1019: deprecated but functional
fmt.Printf("Go module cache volume not found: %s\n", volumeName)
} else if errdefs.IsConflict(err) {
} else if errdefs.IsConflict(err) { //nolint:staticcheck // SA1019: deprecated but functional
fmt.Printf("Go module cache volume is in use and cannot be removed: %s\n", volumeName)
} else {
fmt.Printf("Failed to remove Go module cache volume %s: %v\n", volumeName, err)

@@ -205,3 +322,110 @@ func cleanCacheVolume(ctx context.Context) error {

return nil
}

// cleanupSuccessfulTestArtifacts removes artifacts from successful test runs to save disk space.
// This function removes large artifacts that are mainly useful for debugging failures:
// - Database dumps (.db files)
// - Profile data (pprof directories)
// - MapResponse data (mapresponses directories)
// - Prometheus metrics files
//
// It preserves:
// - Log files (.log) which are small and useful for verification.
func cleanupSuccessfulTestArtifacts(logsDir string, verbose bool) error {
entries, err := os.ReadDir(logsDir)
if err != nil {
return fmt.Errorf("reading logs directory: %w", err)
}

var (
removedFiles, removedDirs int
totalSize int64
)

for _, entry := range entries {
name := entry.Name()
fullPath := filepath.Join(logsDir, name)

if entry.IsDir() {
// Remove pprof and mapresponses directories (typically large)
// These directories contain artifacts from all containers in the test run
if name == "pprof" || name == "mapresponses" {
size, sizeErr := getDirSize(fullPath)
if sizeErr == nil {
totalSize += size
}

err := os.RemoveAll(fullPath)
if err != nil {
if verbose {
log.Printf("Warning: failed to remove directory %s: %v", name, err)
}
} else {
removedDirs++

if verbose {
log.Printf("Removed directory: %s/", name)
}
}
}
} else {
// Only process test-related files (headscale and tailscale)
if !strings.HasPrefix(name, "hs-") && !strings.HasPrefix(name, "ts-") {
continue
}

// Remove database, metrics, and status files, but keep logs
shouldRemove := strings.HasSuffix(name, ".db") ||
strings.HasSuffix(name, "_metrics.txt") ||
strings.HasSuffix(name, "_status.json")

if shouldRemove {
info, infoErr := entry.Info()
if infoErr == nil {
totalSize += info.Size()
}

err := os.Remove(fullPath)
if err != nil {
if verbose {
log.Printf("Warning: failed to remove file %s: %v", name, err)
}
} else {
removedFiles++

if verbose {
log.Printf("Removed file: %s", name)
}
}
}
}
}

if removedFiles > 0 || removedDirs > 0 {
const bytesPerMB = 1024 * 1024
log.Printf("Cleaned up %d files and %d directories (freed ~%.2f MB)",
removedFiles, removedDirs, float64(totalSize)/bytesPerMB)
}

return nil
}

// getDirSize calculates the total size of a directory.
func getDirSize(path string) (int64, error) {
var size int64

err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
if err != nil {
return err
}

if !info.IsDir() {
size += info.Size()
}

return nil
})

return size, err
}
cmd/hi/docker.go (310 changed lines)

@@ -22,17 +22,22 @@ import (
"github.com/juanfont/headscale/integration/dockertestutil"
)

const defaultDirPerm = 0o755

var (
ErrTestFailed = errors.New("test failed")
ErrUnexpectedContainerWait = errors.New("unexpected end of container wait")
ErrNoDockerContext = errors.New("no docker context found")
ErrMemoryLimitViolations = errors.New("container(s) exceeded memory limits")
)

// runTestContainer executes integration tests in a Docker container.
//
//nolint:gocyclo // complex test orchestration function
func runTestContainer(ctx context.Context, config *RunConfig) error {
cli, err := createDockerClient()
cli, err := createDockerClient(ctx)
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
return fmt.Errorf("creating Docker client: %w", err)
}
defer cli.Close()

@@ -48,19 +53,21 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {

absLogsDir, err := filepath.Abs(logsDir)
if err != nil {
return fmt.Errorf("failed to get absolute path for logs directory: %w", err)
return fmt.Errorf("getting absolute path for logs directory: %w", err)
}

const dirPerm = 0o755
if err := os.MkdirAll(absLogsDir, dirPerm); err != nil {
return fmt.Errorf("failed to create logs directory: %w", err)
if err := os.MkdirAll(absLogsDir, dirPerm); err != nil { //nolint:noinlineerr
return fmt.Errorf("creating logs directory: %w", err)
}

if config.CleanBefore {
if config.Verbose {
log.Printf("Running pre-test cleanup...")
}
if err := cleanupBeforeTest(ctx); err != nil && config.Verbose {

err := cleanupBeforeTest(ctx)
if err != nil && config.Verbose {
log.Printf("Warning: pre-test cleanup failed: %v", err)
}
}

@@ -71,34 +78,40 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
}

imageName := "golang:" + config.GoVersion
if err := ensureImageAvailable(ctx, cli, imageName, config.Verbose); err != nil {
return fmt.Errorf("failed to ensure image availability: %w", err)
if err := ensureImageAvailable(ctx, cli, imageName, config.Verbose); err != nil { //nolint:noinlineerr
return fmt.Errorf("ensuring image availability: %w", err)
}

resp, err := createGoTestContainer(ctx, cli, config, containerName, absLogsDir, goTestCmd)
if err != nil {
return fmt.Errorf("failed to create container: %w", err)
return fmt.Errorf("creating container: %w", err)
}

if config.Verbose {
log.Printf("Created container: %s", resp.ID)
}

if err := cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
return fmt.Errorf("failed to start container: %w", err)
if err := cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil { //nolint:noinlineerr
return fmt.Errorf("starting container: %w", err)
}

log.Printf("Starting test: %s", config.TestPattern)
log.Printf("Run ID: %s", runID)
log.Printf("Monitor with: docker logs -f %s", containerName)
log.Printf("Logs directory: %s", logsDir)

// Start stats collection for container resource monitoring (if enabled)
var statsCollector *StatsCollector

if config.Stats {
var err error
statsCollector, err = NewStatsCollector()

statsCollector, err = NewStatsCollector(ctx)
if err != nil {
if config.Verbose {
log.Printf("Warning: failed to create stats collector: %v", err)
}

statsCollector = nil
}

@@ -107,7 +120,8 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {

// Start stats collection immediately - no need for complex retry logic
// The new implementation monitors Docker events and will catch containers as they start
if err := statsCollector.StartCollection(ctx, runID, config.Verbose); err != nil {
err := statsCollector.StartCollection(ctx, runID, config.Verbose)
if err != nil {
if config.Verbose {
log.Printf("Warning: failed to start stats collection: %v", err)
}

@@ -119,12 +133,13 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
exitCode, err := streamAndWait(ctx, cli, resp.ID)

// Ensure all containers have finished and logs are flushed before extracting artifacts
if waitErr := waitForContainerFinalization(ctx, cli, resp.ID, config.Verbose); waitErr != nil && config.Verbose {
waitErr := waitForContainerFinalization(ctx, cli, resp.ID, config.Verbose)
if waitErr != nil && config.Verbose {
log.Printf("Warning: failed to wait for container finalization: %v", waitErr)
}

// Extract artifacts from test containers before cleanup
if err := extractArtifactsFromContainers(ctx, resp.ID, logsDir, config.Verbose); err != nil && config.Verbose {
if err := extractArtifactsFromContainers(ctx, resp.ID, logsDir, config.Verbose); err != nil && config.Verbose { //nolint:noinlineerr
log.Printf("Warning: failed to extract artifacts from containers: %v", err)
}

@@ -137,27 +152,44 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
if len(violations) > 0 {
log.Printf("MEMORY LIMIT VIOLATIONS DETECTED:")
log.Printf("=================================")

for _, violation := range violations {
log.Printf("Container %s exceeded memory limit: %.1f MB > %.1f MB",
violation.ContainerName, violation.MaxMemoryMB, violation.LimitMB)
}

return fmt.Errorf("test failed: %d container(s) exceeded memory limits", len(violations))
return fmt.Errorf("test failed: %d %w", len(violations), ErrMemoryLimitViolations)
}
}

shouldCleanup := config.CleanAfter && (!config.KeepOnFailure || exitCode == 0)
if shouldCleanup {
if config.Verbose {
log.Printf("Running post-test cleanup...")
log.Printf("Running post-test cleanup for run %s...", runID)
}
if cleanErr := cleanupAfterTest(ctx, cli, resp.ID); cleanErr != nil && config.Verbose {

cleanErr := cleanupAfterTest(ctx, cli, resp.ID, runID)

if cleanErr != nil && config.Verbose {
log.Printf("Warning: post-test cleanup failed: %v", cleanErr)
}

// Clean up artifacts from successful tests to save disk space in CI
if exitCode == 0 {
if config.Verbose {
log.Printf("Test succeeded, cleaning up artifacts to save disk space...")
}

cleanErr := cleanupSuccessfulTestArtifacts(logsDir, config.Verbose)

if cleanErr != nil && config.Verbose {
log.Printf("Warning: artifact cleanup failed: %v", cleanErr)
}
}
}

if err != nil {
return fmt.Errorf("test execution failed: %w", err)
return fmt.Errorf("executing test: %w", err)
}

if exitCode != 0 {

@@ -191,7 +223,7 @@ func buildGoTestCommand(config *RunConfig) []string {
func createGoTestContainer(ctx context.Context, cli *client.Client, config *RunConfig, containerName, logsDir string, goTestCmd []string) (container.CreateResponse, error) {
pwd, err := os.Getwd()
if err != nil {
return container.CreateResponse{}, fmt.Errorf("failed to get working directory: %w", err)
return container.CreateResponse{}, fmt.Errorf("getting working directory: %w", err)
}

projectRoot := findProjectRoot(pwd)

@@ -202,6 +234,28 @@ func createGoTestContainer(ctx context.Context, cli *client.Client, config *RunC
fmt.Sprintf("HEADSCALE_INTEGRATION_POSTGRES=%d", boolToInt(config.UsePostgres)),
"HEADSCALE_INTEGRATION_RUN_ID=" + runID,
}

// Pass through CI environment variable for CI detection
if ci := os.Getenv("CI"); ci != "" {
env = append(env, "CI="+ci)
}

// Pass through all HEADSCALE_INTEGRATION_* environment variables
for _, e := range os.Environ() {
if strings.HasPrefix(e, "HEADSCALE_INTEGRATION_") {
// Skip the ones we already set explicitly
if strings.HasPrefix(e, "HEADSCALE_INTEGRATION_POSTGRES=") ||
strings.HasPrefix(e, "HEADSCALE_INTEGRATION_RUN_ID=") {
continue
}

env = append(env, e)
}
}

// Set GOCACHE to a known location (used by both bind mount and volume cases)
env = append(env, "GOCACHE=/cache/go-build")

containerConfig := &container.Config{
Image: "golang:" + config.GoVersion,
Cmd: goTestCmd,

@@ -221,20 +275,43 @@ func createGoTestContainer(ctx context.Context, cli *client.Client, config *RunC
log.Printf("Using Docker socket: %s", dockerSocketPath)
}

binds := []string{
fmt.Sprintf("%s:%s", projectRoot, projectRoot),
dockerSocketPath + ":/var/run/docker.sock",
logsDir + ":/tmp/control",
}

// Use bind mounts for Go cache if provided via environment variables,
// otherwise fall back to Docker volumes for local development
var mounts []mount.Mount

goCache := os.Getenv("HEADSCALE_INTEGRATION_GO_CACHE")
goBuildCache := os.Getenv("HEADSCALE_INTEGRATION_GO_BUILD_CACHE")

if goCache != "" {
binds = append(binds, goCache+":/go")
} else {
mounts = append(mounts, mount.Mount{
Type: mount.TypeVolume,
Source: "hs-integration-go-cache",
Target: "/go",
})
}

if goBuildCache != "" {
binds = append(binds, goBuildCache+":/cache/go-build")
} else {
mounts = append(mounts, mount.Mount{
Type: mount.TypeVolume,
Source: "hs-integration-go-build-cache",
Target: "/cache/go-build",
})
}

hostConfig := &container.HostConfig{
AutoRemove: false, // We'll remove manually for better control
Binds: []string{
fmt.Sprintf("%s:%s", projectRoot, projectRoot),
dockerSocketPath + ":/var/run/docker.sock",
logsDir + ":/tmp/control",
},
Mounts: []mount.Mount{
{
Type: mount.TypeVolume,
Source: "hs-integration-go-cache",
Target: "/go",
},
},
Binds: binds,
Mounts: mounts,
}

return cli.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, containerName)
@@ -248,7 +325,7 @@ func streamAndWait(ctx context.Context, cli *client.Client, containerID string)
Follow: true,
})
if err != nil {
return -1, fmt.Errorf("failed to get container logs: %w", err)
return -1, fmt.Errorf("getting container logs: %w", err)
}
defer out.Close()

@@ -260,7 +337,7 @@ func streamAndWait(ctx context.Context, cli *client.Client, containerID string)
select {
case err := <-errCh:
if err != nil {
return -1, fmt.Errorf("error waiting for container: %w", err)
return -1, fmt.Errorf("waiting for container: %w", err)
}
case status := <-statusCh:
return int(status.StatusCode), nil

@@ -274,7 +351,7 @@ func waitForContainerFinalization(ctx context.Context, cli *client.Client, testC
// First, get all related test containers
containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
if err != nil {
return fmt.Errorf("failed to list containers: %w", err)
return fmt.Errorf("listing containers: %w", err)
}

testContainers := getCurrentTestContainers(containers, testContainerID, verbose)

@@ -283,6 +360,7 @@ func waitForContainerFinalization(ctx context.Context, cli *client.Client, testC
maxWaitTime := 10 * time.Second
checkInterval := 500 * time.Millisecond
timeout := time.After(maxWaitTime)

ticker := time.NewTicker(checkInterval)
defer ticker.Stop()

@@ -292,6 +370,7 @@ func waitForContainerFinalization(ctx context.Context, cli *client.Client, testC
if verbose {
log.Printf("Timeout waiting for container finalization, proceeding with artifact extraction")
}

return nil
case <-ticker.C:
allFinalized := true

@@ -302,12 +381,14 @@ func waitForContainerFinalization(ctx context.Context, cli *client.Client, testC
if verbose {
log.Printf("Warning: failed to inspect container %s: %v", testCont.name, err)
}

continue
}

// Check if container is in a final state
if !isContainerFinalized(inspect.State) {
allFinalized = false

if verbose {
log.Printf("Container %s still finalizing (state: %s)", testCont.name, inspect.State.Status)
}

@@ -320,6 +401,7 @@ func waitForContainerFinalization(ctx context.Context, cli *client.Client, testC
if verbose {
log.Printf("All test containers finalized, ready for artifact extraction")
}

return nil
}
}

@@ -336,13 +418,15 @@ func isContainerFinalized(state *container.State) bool {
func findProjectRoot(startPath string) string {
current := startPath
for {
if _, err := os.Stat(filepath.Join(current, "go.mod")); err == nil {
if _, err := os.Stat(filepath.Join(current, "go.mod")); err == nil { //nolint:noinlineerr
return current
}

parent := filepath.Dir(current)
if parent == current {
return startPath
}

current = parent
}
}

@@ -352,34 +436,37 @@ func boolToInt(b bool) int {
if b {
return 1
}

return 0
}

// DockerContext represents Docker context information.
type DockerContext struct {
Name string `json:"Name"`
Metadata map[string]interface{} `json:"Metadata"`
Endpoints map[string]interface{} `json:"Endpoints"`
Current bool `json:"Current"`
Name string `json:"Name"`
Metadata map[string]any `json:"Metadata"`
Endpoints map[string]any `json:"Endpoints"`
Current bool `json:"Current"`
}
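`DockerContext` mirrors the JSON emitted by `docker context inspect`. The sample document below is illustrative (the field values are assumptions), but it shows the shape the struct tags expect and how the host endpoint is later pulled out of `Endpoints["docker"]`:

```go
sample := []byte(`[
  {
    "Name": "default",
    "Metadata": {},
    "Endpoints": {"docker": {"Host": "unix:///var/run/docker.sock"}},
    "Current": true
  }
]`)

var contexts []DockerContext
if err := json.Unmarshal(sample, &contexts); err != nil {
	log.Fatalf("parsing docker context: %v", err)
}

if ep, ok := contexts[0].Endpoints["docker"].(map[string]any); ok {
	fmt.Println(ep["Host"]) // unix:///var/run/docker.sock
}
```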

// createDockerClient creates a Docker client with context detection.
func createDockerClient() (*client.Client, error) {
contextInfo, err := getCurrentDockerContext()
func createDockerClient(ctx context.Context) (*client.Client, error) {
contextInfo, err := getCurrentDockerContext(ctx)
if err != nil {
return client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
}

var clientOpts []client.Opt

clientOpts = append(clientOpts, client.WithAPIVersionNegotiation())

if contextInfo != nil {
if endpoints, ok := contextInfo.Endpoints["docker"]; ok {
if endpointMap, ok := endpoints.(map[string]interface{}); ok {
if endpointMap, ok := endpoints.(map[string]any); ok {
if host, ok := endpointMap["Host"].(string); ok {
if runConfig.Verbose {
log.Printf("Using Docker host from context '%s': %s", contextInfo.Name, host)
}

clientOpts = append(clientOpts, client.WithHost(host))
}
}

@@ -394,16 +481,17 @@ func createDockerClient() (*client.Client, error) {
}

// getCurrentDockerContext retrieves the current Docker context information.
func getCurrentDockerContext() (*DockerContext, error) {
cmd := exec.Command("docker", "context", "inspect")
func getCurrentDockerContext(ctx context.Context) (*DockerContext, error) {
cmd := exec.CommandContext(ctx, "docker", "context", "inspect")

output, err := cmd.Output()
if err != nil {
return nil, fmt.Errorf("failed to get docker context: %w", err)
return nil, fmt.Errorf("getting docker context: %w", err)
}

var contexts []DockerContext
if err := json.Unmarshal(output, &contexts); err != nil {
return nil, fmt.Errorf("failed to parse docker context: %w", err)
if err := json.Unmarshal(output, &contexts); err != nil { //nolint:noinlineerr
return nil, fmt.Errorf("parsing docker context: %w", err)
}

if len(contexts) > 0 {

@@ -422,12 +510,13 @@ func getDockerSocketPath() string {

// checkImageAvailableLocally checks if the specified Docker image is available locally.
func checkImageAvailableLocally(ctx context.Context, cli *client.Client, imageName string) (bool, error) {
_, _, err := cli.ImageInspectWithRaw(ctx, imageName)
_, _, err := cli.ImageInspectWithRaw(ctx, imageName) //nolint:staticcheck // SA1019: deprecated but functional
if err != nil {
if client.IsErrNotFound(err) {
if client.IsErrNotFound(err) { //nolint:staticcheck // SA1019: deprecated but functional
return false, nil
}
return false, fmt.Errorf("failed to inspect image %s: %w", imageName, err)

return false, fmt.Errorf("inspecting image %s: %w", imageName, err)
}

return true, nil

@@ -438,13 +527,14 @@ func ensureImageAvailable(ctx context.Context, cli *client.Client, imageName str
// First check if image is available locally
available, err := checkImageAvailableLocally(ctx, cli, imageName)
if err != nil {
return fmt.Errorf("failed to check local image availability: %w", err)
return fmt.Errorf("checking local image availability: %w", err)
}

if available {
if verbose {
log.Printf("Image %s is available locally", imageName)
}

return nil
}

@@ -455,20 +545,21 @@ func ensureImageAvailable(ctx context.Context, cli *client.Client, imageName str

reader, err := cli.ImagePull(ctx, imageName, image.PullOptions{})
if err != nil {
return fmt.Errorf("failed to pull image %s: %w", imageName, err)
return fmt.Errorf("pulling image %s: %w", imageName, err)
}
defer reader.Close()

if verbose {
_, err = io.Copy(os.Stdout, reader)
if err != nil {
return fmt.Errorf("failed to read pull output: %w", err)
return fmt.Errorf("reading pull output: %w", err)
}
} else {
_, err = io.Copy(io.Discard, reader)
if err != nil {
return fmt.Errorf("failed to read pull output: %w", err)
return fmt.Errorf("reading pull output: %w", err)
}

log.Printf("Image %s pulled successfully", imageName)
}

@@ -483,9 +574,11 @@ func listControlFiles(logsDir string) {
return
}

var logFiles []string
var dataFiles []string
var dataDirs []string
var (
logFiles []string
dataFiles []string
dataDirs []string
)

for _, entry := range entries {
name := entry.Name()

@@ -514,6 +607,7 @@ func listControlFiles(logsDir string) {

if len(logFiles) > 0 {
log.Printf("Headscale logs:")

for _, file := range logFiles {
log.Printf(" %s", file)
}

@@ -521,9 +615,11 @@ func listControlFiles(logsDir string) {

if len(dataFiles) > 0 || len(dataDirs) > 0 {
log.Printf("Headscale data:")

for _, file := range dataFiles {
log.Printf(" %s", file)
}

for _, dir := range dataDirs {
log.Printf(" %s/", dir)
}

@@ -532,25 +628,27 @@ func listControlFiles(logsDir string) {

// extractArtifactsFromContainers collects container logs and files from the specific test run.
func extractArtifactsFromContainers(ctx context.Context, testContainerID, logsDir string, verbose bool) error {
cli, err := createDockerClient()
cli, err := createDockerClient(ctx)
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
return fmt.Errorf("creating Docker client: %w", err)
}
defer cli.Close()

// List all containers
containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
if err != nil {
return fmt.Errorf("failed to list containers: %w", err)
return fmt.Errorf("listing containers: %w", err)
}

// Get containers from the specific test run
currentTestContainers := getCurrentTestContainers(containers, testContainerID, verbose)

extractedCount := 0

for _, cont := range currentTestContainers {
// Extract container logs and tar files
if err := extractContainerArtifacts(ctx, cli, cont.ID, cont.name, logsDir, verbose); err != nil {
err := extractContainerArtifacts(ctx, cli, cont.ID, cont.name, logsDir, verbose)
if err != nil {
if verbose {
log.Printf("Warning: failed to extract artifacts from container %s (%s): %v", cont.name, cont.ID[:12], err)
}

@@ -558,6 +656,7 @@ func extractArtifactsFromContainers(ctx context.Context, testContainerID, logsDi
if verbose {
log.Printf("Extracted artifacts from container %s (%s)", cont.name, cont.ID[:12])
}

extractedCount++
}
}

@@ -581,11 +680,13 @@ func getCurrentTestContainers(containers []container.Summary, testContainerID st

// Find the test container to get its run ID label
var runID string

for _, cont := range containers {
if cont.ID == testContainerID {
if cont.Labels != nil {
runID = cont.Labels["hi.run-id"]
}

break
}
}

@@ -626,18 +727,21 @@ func getCurrentTestContainers(containers []container.Summary, testContainerID st
// extractContainerArtifacts saves logs and tar files from a container.
|
||||
func extractContainerArtifacts(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
|
||||
// Ensure the logs directory exists
|
||||
if err := os.MkdirAll(logsDir, 0o755); err != nil {
|
||||
return fmt.Errorf("failed to create logs directory: %w", err)
|
||||
err := os.MkdirAll(logsDir, defaultDirPerm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating logs directory: %w", err)
|
||||
}
|
||||
|
||||
// Extract container logs
|
||||
if err := extractContainerLogs(ctx, cli, containerID, containerName, logsDir, verbose); err != nil {
|
||||
return fmt.Errorf("failed to extract logs: %w", err)
|
||||
err = extractContainerLogs(ctx, cli, containerID, containerName, logsDir, verbose)
|
||||
if err != nil {
|
||||
return fmt.Errorf("extracting logs: %w", err)
|
||||
}
|
||||
|
||||
// Extract tar files for headscale containers only
|
||||
if strings.HasPrefix(containerName, "hs-") {
|
||||
if err := extractContainerFiles(ctx, cli, containerID, containerName, logsDir, verbose); err != nil {
|
||||
err := extractContainerFiles(ctx, cli, containerID, containerName, logsDir, verbose)
|
||||
if err != nil {
|
||||
if verbose {
|
||||
log.Printf("Warning: failed to extract files from %s: %v", containerName, err)
|
||||
}
|
||||
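The `defaultDirPerm` constant referenced above is defined elsewhere in the tool and is not part of this hunk; given that it replaces the literal `0o755`, its assumed definition is simply:

```go
// Assumed definition (not shown in this diff): permission bits for directories
// created when extracting artifacts, matching the previous 0o755 literal.
const defaultDirPerm = 0o755
```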
@@ -659,7 +763,7 @@ func extractContainerLogs(ctx context.Context, cli *client.Client, containerID,
|
||||
Tail: "all",
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get container logs: %w", err)
|
||||
return fmt.Errorf("getting container logs: %w", err)
|
||||
}
|
||||
defer logReader.Close()
|
||||
|
||||
@@ -673,17 +777,17 @@ func extractContainerLogs(ctx context.Context, cli *client.Client, containerID,
|
||||
// Demultiplex the Docker logs stream to separate stdout and stderr
|
||||
_, err = stdcopy.StdCopy(&stdoutBuf, &stderrBuf, logReader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to demultiplex container logs: %w", err)
|
||||
return fmt.Errorf("demultiplexing container logs: %w", err)
|
||||
}
|
||||
|
||||
// Write stdout logs
|
||||
if err := os.WriteFile(stdoutPath, stdoutBuf.Bytes(), 0o644); err != nil {
|
||||
return fmt.Errorf("failed to write stdout log: %w", err)
|
||||
if err := os.WriteFile(stdoutPath, stdoutBuf.Bytes(), 0o644); err != nil { //nolint:gosec,noinlineerr // log files should be readable
|
||||
return fmt.Errorf("writing stdout log: %w", err)
|
||||
}
|
||||
|
||||
// Write stderr logs
|
||||
if err := os.WriteFile(stderrPath, stderrBuf.Bytes(), 0o644); err != nil {
|
||||
return fmt.Errorf("failed to write stderr log: %w", err)
|
||||
if err := os.WriteFile(stderrPath, stderrBuf.Bytes(), 0o644); err != nil { //nolint:gosec,noinlineerr // log files should be readable
|
||||
return fmt.Errorf("writing stderr log: %w", err)
|
||||
}
|
||||
|
||||
if verbose {
|
||||
@@ -701,63 +805,3 @@ func extractContainerFiles(ctx context.Context, cli *client.Client, containerID,
|
||||
// This function is kept for potential future use or other file types
|
||||
return nil
|
||||
}
|
||||
|
||||
// logExtractionError logs extraction errors with appropriate level based on error type.
|
||||
func logExtractionError(artifactType, containerName string, err error, verbose bool) {
|
||||
if errors.Is(err, ErrFileNotFoundInTar) {
|
||||
// File not found is expected and only logged in verbose mode
|
||||
if verbose {
|
||||
log.Printf("No %s found in container %s", artifactType, containerName)
|
||||
}
|
||||
} else {
|
||||
// Other errors are actual failures and should be logged as warnings
|
||||
log.Printf("Warning: failed to extract %s from %s: %v", artifactType, containerName, err)
|
||||
}
|
||||
}
|
||||
|
||||
// extractSingleFile copies a single file from a container.
|
||||
func extractSingleFile(ctx context.Context, cli *client.Client, containerID, sourcePath, fileName, logsDir string, verbose bool) error {
|
||||
tarReader, _, err := cli.CopyFromContainer(ctx, containerID, sourcePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy %s from container: %w", sourcePath, err)
|
||||
}
|
||||
defer tarReader.Close()
|
||||
|
||||
// Extract the single file from the tar
|
||||
filePath := filepath.Join(logsDir, fileName)
|
||||
if err := extractFileFromTar(tarReader, filepath.Base(sourcePath), filePath); err != nil {
|
||||
return fmt.Errorf("failed to extract file from tar: %w", err)
|
||||
}
|
||||
|
||||
if verbose {
|
||||
log.Printf("Extracted %s from %s", fileName, containerID[:12])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractDirectory copies a directory from a container and extracts its contents.
|
||||
func extractDirectory(ctx context.Context, cli *client.Client, containerID, sourcePath, dirName, logsDir string, verbose bool) error {
|
||||
tarReader, _, err := cli.CopyFromContainer(ctx, containerID, sourcePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy %s from container: %w", sourcePath, err)
|
||||
}
|
||||
defer tarReader.Close()
|
||||
|
||||
// Create target directory
|
||||
targetDir := filepath.Join(logsDir, dirName)
|
||||
if err := os.MkdirAll(targetDir, 0o755); err != nil {
|
||||
return fmt.Errorf("failed to create directory %s: %w", targetDir, err)
|
||||
}
|
||||
|
||||
// Extract the directory from the tar
|
||||
if err := extractDirectoryFromTar(tarReader, targetDir); err != nil {
|
||||
return fmt.Errorf("failed to extract directory from tar: %w", err)
|
||||
}
|
||||
|
||||
if verbose {
|
||||
log.Printf("Extracted %s/ from %s", dirName, containerID[:12])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -38,13 +38,13 @@ func runDoctorCheck(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// Check 3: Go installation
|
||||
results = append(results, checkGoInstallation())
|
||||
results = append(results, checkGoInstallation(ctx))
|
||||
|
||||
// Check 4: Git repository
|
||||
results = append(results, checkGitRepository())
|
||||
results = append(results, checkGitRepository(ctx))
|
||||
|
||||
// Check 5: Required files
|
||||
results = append(results, checkRequiredFiles())
|
||||
results = append(results, checkRequiredFiles(ctx))
|
||||
|
||||
// Display results
|
||||
displayDoctorResults(results)
|
||||
@@ -86,7 +86,7 @@ func checkDockerBinary() DoctorResult {
|
||||
|
||||
// checkDockerDaemon verifies Docker daemon is running and accessible.
|
||||
func checkDockerDaemon(ctx context.Context) DoctorResult {
|
||||
cli, err := createDockerClient()
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Daemon",
|
||||
@@ -124,8 +124,8 @@ func checkDockerDaemon(ctx context.Context) DoctorResult {
|
||||
}
|
||||
|
||||
// checkDockerContext verifies Docker context configuration.
|
||||
func checkDockerContext(_ context.Context) DoctorResult {
|
||||
contextInfo, err := getCurrentDockerContext()
|
||||
func checkDockerContext(ctx context.Context) DoctorResult {
|
||||
contextInfo, err := getCurrentDockerContext(ctx)
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Context",
|
||||
@@ -155,7 +155,7 @@ func checkDockerContext(_ context.Context) DoctorResult {
|
||||
|
||||
// checkDockerSocket verifies Docker socket accessibility.
|
||||
func checkDockerSocket(ctx context.Context) DoctorResult {
|
||||
cli, err := createDockerClient()
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Docker Socket",
|
||||
@@ -192,7 +192,7 @@ func checkDockerSocket(ctx context.Context) DoctorResult {
|
||||
|
||||
// checkGolangImage verifies the golang Docker image is available locally or can be pulled.
|
||||
func checkGolangImage(ctx context.Context) DoctorResult {
|
||||
cli, err := createDockerClient()
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
Name: "Golang Image",
|
||||
@@ -251,7 +251,7 @@ func checkGolangImage(ctx context.Context) DoctorResult {
|
||||
}
|
||||
|
||||
// checkGoInstallation verifies Go is installed and working.
|
||||
func checkGoInstallation() DoctorResult {
|
||||
func checkGoInstallation(ctx context.Context) DoctorResult {
|
||||
_, err := exec.LookPath("go")
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
@@ -265,7 +265,8 @@ func checkGoInstallation() DoctorResult {
|
||||
}
|
||||
}
|
||||
|
||||
cmd := exec.Command("go", "version")
|
||||
cmd := exec.CommandContext(ctx, "go", "version")
|
||||
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
@@ -285,8 +286,9 @@ func checkGoInstallation() DoctorResult {
|
||||
}
|
||||
|
||||
// checkGitRepository verifies we're in a git repository.
|
||||
func checkGitRepository() DoctorResult {
|
||||
cmd := exec.Command("git", "rev-parse", "--git-dir")
|
||||
func checkGitRepository(ctx context.Context) DoctorResult {
|
||||
cmd := exec.CommandContext(ctx, "git", "rev-parse", "--git-dir")
|
||||
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return DoctorResult{
|
||||
@@ -308,7 +310,7 @@ func checkGitRepository() DoctorResult {
|
||||
}
|
||||
|
||||
// checkRequiredFiles verifies required files exist.
|
||||
func checkRequiredFiles() DoctorResult {
|
||||
func checkRequiredFiles(ctx context.Context) DoctorResult {
|
||||
requiredFiles := []string{
|
||||
"go.mod",
|
||||
"integration/",
|
||||
@@ -316,9 +318,12 @@ func checkRequiredFiles() DoctorResult {
|
||||
}
|
||||
|
||||
var missingFiles []string
|
||||
|
||||
for _, file := range requiredFiles {
|
||||
cmd := exec.Command("test", "-e", file)
|
||||
if err := cmd.Run(); err != nil {
|
||||
cmd := exec.CommandContext(ctx, "test", "-e", file)
|
||||
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
missingFiles = append(missingFiles, file)
|
||||
}
|
||||
}
|
||||
@@ -350,6 +355,7 @@ func displayDoctorResults(results []DoctorResult) {
|
||||
|
||||
for _, result := range results {
|
||||
var icon string
|
||||
|
||||
switch result.Status {
|
||||
case "PASS":
|
||||
icon = "✅"
|
||||
|
||||
@@ -79,13 +79,18 @@ func main() {
|
||||
}
|
||||
|
||||
func cleanAll(ctx context.Context) error {
|
||||
if err := killTestContainers(ctx); err != nil {
|
||||
err := killTestContainers(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pruneDockerNetworks(ctx); err != nil {
|
||||
|
||||
err = pruneDockerNetworks(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cleanOldImages(ctx); err != nil {
|
||||
|
||||
err = cleanOldImages(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ type RunConfig struct {
|
||||
FailFast bool `flag:"failfast,default=true,Stop on first test failure"`
|
||||
UsePostgres bool `flag:"postgres,default=false,Use PostgreSQL instead of SQLite"`
|
||||
GoVersion string `flag:"go-version,Go version to use (auto-detected from go.mod)"`
|
||||
CleanBefore bool `flag:"clean-before,default=true,Clean resources before test"`
|
||||
CleanBefore bool `flag:"clean-before,default=true,Clean stale resources before test"`
|
||||
CleanAfter bool `flag:"clean-after,default=true,Clean resources after test"`
|
||||
KeepOnFailure bool `flag:"keep-on-failure,default=false,Keep containers on test failure"`
|
||||
LogsDir string `flag:"logs-dir,default=control_logs,Control logs directory"`
|
||||
@@ -48,7 +48,9 @@ func runIntegrationTest(env *command.Env) error {
|
||||
if runConfig.Verbose {
|
||||
log.Printf("Running pre-flight system checks...")
|
||||
}
|
||||
if err := runDoctorCheck(env.Context()); err != nil {
|
||||
|
||||
err := runDoctorCheck(env.Context())
|
||||
if err != nil {
|
||||
return fmt.Errorf("pre-flight checks failed: %w", err)
|
||||
}
|
||||
|
||||
@@ -66,9 +68,9 @@ func runIntegrationTest(env *command.Env) error {
|
||||
func detectGoVersion() string {
|
||||
goModPath := filepath.Join("..", "..", "go.mod")
|
||||
|
||||
if _, err := os.Stat("go.mod"); err == nil {
|
||||
if _, err := os.Stat("go.mod"); err == nil { //nolint:noinlineerr
|
||||
goModPath = "go.mod"
|
||||
} else if _, err := os.Stat("../../go.mod"); err == nil {
|
||||
} else if _, err := os.Stat("../../go.mod"); err == nil { //nolint:noinlineerr
|
||||
goModPath = "../../go.mod"
|
||||
}
|
||||
|
||||
@@ -94,8 +96,10 @@ func detectGoVersion() string {
|
||||
|
||||
// splitLines splits a string into lines without using strings.Split.
|
||||
func splitLines(s string) []string {
|
||||
var lines []string
|
||||
var current string
|
||||
var (
|
||||
lines []string
|
||||
current string
|
||||
)
|
||||
|
||||
for _, char := range s {
|
||||
if char == '\n' {
|
||||
|
||||
@@ -18,6 +18,9 @@ import (
|
||||
"github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
// ErrStatsCollectionAlreadyStarted is returned when trying to start stats collection that is already running.
|
||||
var ErrStatsCollectionAlreadyStarted = errors.New("stats collection already started")
|
||||
|
||||
// ContainerStats represents statistics for a single container.
|
||||
type ContainerStats struct {
|
||||
ContainerID string
|
||||
@@ -44,10 +47,10 @@ type StatsCollector struct {
|
||||
}
|
||||
|
||||
// NewStatsCollector creates a new stats collector instance.
|
||||
func NewStatsCollector() (*StatsCollector, error) {
|
||||
cli, err := createDockerClient()
|
||||
func NewStatsCollector(ctx context.Context) (*StatsCollector, error) {
|
||||
cli, err := createDockerClient(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Docker client: %w", err)
|
||||
return nil, fmt.Errorf("creating Docker client: %w", err)
|
||||
}
|
||||
|
||||
return &StatsCollector{
|
||||
@@ -63,17 +66,19 @@ func (sc *StatsCollector) StartCollection(ctx context.Context, runID string, ver
|
||||
defer sc.mutex.Unlock()
|
||||
|
||||
if sc.collectionStarted {
|
||||
return errors.New("stats collection already started")
|
||||
return ErrStatsCollectionAlreadyStarted
|
||||
}
|
||||
|
||||
sc.collectionStarted = true
|
||||
|
||||
// Start monitoring existing containers
|
||||
sc.wg.Add(1)
|
||||
|
||||
go sc.monitorExistingContainers(ctx, runID, verbose)
|
||||
|
||||
// Start Docker events monitoring for new containers
|
||||
sc.wg.Add(1)
|
||||
|
||||
go sc.monitorDockerEvents(ctx, runID, verbose)
|
||||
|
||||
if verbose {
|
||||
@@ -87,10 +92,12 @@ func (sc *StatsCollector) StartCollection(ctx context.Context, runID string, ver
|
||||
func (sc *StatsCollector) StopCollection() {
|
||||
// Check if already stopped without holding lock
|
||||
sc.mutex.RLock()
|
||||
|
||||
if !sc.collectionStarted {
|
||||
sc.mutex.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
sc.mutex.RUnlock()
|
||||
|
||||
// Signal stop to all goroutines
|
||||
@@ -114,6 +121,7 @@ func (sc *StatsCollector) monitorExistingContainers(ctx context.Context, runID s
|
||||
if verbose {
|
||||
log.Printf("Failed to list existing containers: %v", err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -147,13 +155,13 @@ func (sc *StatsCollector) monitorDockerEvents(ctx context.Context, runID string,
|
||||
case event := <-events:
|
||||
if event.Type == "container" && event.Action == "start" {
|
||||
// Get container details
|
||||
containerInfo, err := sc.client.ContainerInspect(ctx, event.ID)
|
||||
containerInfo, err := sc.client.ContainerInspect(ctx, event.ID) //nolint:staticcheck // SA1019: use Actor.ID
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Convert to types.Container format for consistency
|
||||
cont := types.Container{
|
||||
cont := types.Container{ //nolint:staticcheck // SA1019: use container.Summary
|
||||
ID: containerInfo.ID,
|
||||
Names: []string{containerInfo.Name},
|
||||
Labels: containerInfo.Config.Labels,
|
||||
@@ -167,13 +175,14 @@ func (sc *StatsCollector) monitorDockerEvents(ctx context.Context, runID string,
|
||||
if verbose {
|
||||
log.Printf("Error in Docker events stream: %v", err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// shouldMonitorContainer determines if a container should be monitored.
|
||||
func (sc *StatsCollector) shouldMonitorContainer(cont types.Container, runID string) bool {
|
||||
func (sc *StatsCollector) shouldMonitorContainer(cont types.Container, runID string) bool { //nolint:staticcheck // SA1019: use container.Summary
|
||||
// Check if it has the correct run ID label
|
||||
if cont.Labels == nil || cont.Labels["hi.run-id"] != runID {
|
||||
return false
|
||||
@@ -213,6 +222,7 @@ func (sc *StatsCollector) startStatsForContainer(ctx context.Context, containerI
|
||||
}
|
||||
|
||||
sc.wg.Add(1)
|
||||
|
||||
go sc.collectStatsForContainer(ctx, containerID, verbose)
|
||||
}
|
||||
|
||||
@@ -226,12 +236,14 @@ func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containe
|
||||
if verbose {
|
||||
log.Printf("Failed to get stats stream for container %s: %v", containerID[:12], err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
defer statsResponse.Body.Close()
|
||||
|
||||
decoder := json.NewDecoder(statsResponse.Body)
|
||||
var prevStats *container.Stats
|
||||
|
||||
var prevStats *container.Stats //nolint:staticcheck // SA1019: use StatsResponse
|
||||
|
||||
for {
|
||||
select {
|
||||
@@ -240,12 +252,15 @@ func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containe
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
var stats container.Stats
|
||||
if err := decoder.Decode(&stats); err != nil {
|
||||
var stats container.Stats //nolint:staticcheck // SA1019: use StatsResponse
|
||||
|
||||
err := decoder.Decode(&stats)
|
||||
if err != nil {
|
||||
// EOF is expected when container stops or stream ends
|
||||
if err.Error() != "EOF" && verbose {
|
||||
log.Printf("Failed to decode stats for container %s: %v", containerID[:12], err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -261,8 +276,10 @@ func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containe
|
||||
// Store the sample (skip first sample since CPU calculation needs previous stats)
|
||||
if prevStats != nil {
|
||||
// Get container stats reference without holding the main mutex
|
||||
var containerStats *ContainerStats
|
||||
var exists bool
|
||||
var (
|
||||
containerStats *ContainerStats
|
||||
exists bool
|
||||
)
|
||||
|
||||
sc.mutex.RLock()
|
||||
containerStats, exists = sc.containers[containerID]
|
||||
@@ -286,7 +303,7 @@ func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containe
|
||||
}
|
||||
|
||||
// calculateCPUPercent calculates CPU usage percentage from Docker stats.
|
||||
func calculateCPUPercent(prevStats, stats *container.Stats) float64 {
|
||||
func calculateCPUPercent(prevStats, stats *container.Stats) float64 { //nolint:staticcheck // SA1019: use StatsResponse
|
||||
// CPU calculation based on Docker's implementation
|
||||
cpuDelta := float64(stats.CPUStats.CPUUsage.TotalUsage) - float64(prevStats.CPUStats.CPUUsage.TotalUsage)
|
||||
systemDelta := float64(stats.CPUStats.SystemUsage) - float64(prevStats.CPUStats.SystemUsage)
|
||||
@@ -331,10 +348,12 @@ type StatsSummary struct {
|
||||
func (sc *StatsCollector) GetSummary() []ContainerStatsSummary {
|
||||
// Take snapshot of container references without holding main lock long
|
||||
sc.mutex.RLock()
|
||||
|
||||
containerRefs := make([]*ContainerStats, 0, len(sc.containers))
|
||||
for _, containerStats := range sc.containers {
|
||||
containerRefs = append(containerRefs, containerStats)
|
||||
}
|
||||
|
||||
sc.mutex.RUnlock()
|
||||
|
||||
summaries := make([]ContainerStatsSummary, 0, len(containerRefs))
|
||||
@@ -384,23 +403,25 @@ func calculateStatsSummary(values []float64) StatsSummary {
|
||||
return StatsSummary{}
|
||||
}
|
||||
|
||||
min := values[0]
|
||||
max := values[0]
|
||||
minVal := values[0]
|
||||
maxVal := values[0]
|
||||
sum := 0.0
|
||||
|
||||
for _, value := range values {
|
||||
if value < min {
|
||||
min = value
|
||||
if value < minVal {
|
||||
minVal = value
|
||||
}
|
||||
if value > max {
|
||||
max = value
|
||||
|
||||
if value > maxVal {
|
||||
maxVal = value
|
||||
}
|
||||
|
||||
sum += value
|
||||
}
|
||||
|
||||
return StatsSummary{
|
||||
Min: min,
|
||||
Max: max,
|
||||
Min: minVal,
|
||||
Max: maxVal,
|
||||
Average: sum / float64(len(values)),
|
||||
}
|
||||
}
|
||||
@@ -434,6 +455,7 @@ func (sc *StatsCollector) CheckMemoryLimits(hsLimitMB, tsLimitMB float64) []Memo
|
||||
}
|
||||
|
||||
summaries := sc.GetSummary()
|
||||
|
||||
var violations []MemoryViolation
|
||||
|
||||
for _, summary := range summaries {
|
||||
|
||||
@@ -1,105 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ErrFileNotFoundInTar indicates a file was not found in the tar archive.
|
||||
var ErrFileNotFoundInTar = errors.New("file not found in tar")
|
||||
|
||||
// extractFileFromTar extracts a single file from a tar reader.
|
||||
func extractFileFromTar(tarReader io.Reader, fileName, outputPath string) error {
|
||||
tr := tar.NewReader(tarReader)
|
||||
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read tar header: %w", err)
|
||||
}
|
||||
|
||||
// Check if this is the file we're looking for
|
||||
if filepath.Base(header.Name) == fileName {
|
||||
if header.Typeflag == tar.TypeReg {
|
||||
// Create the output file
|
||||
outFile, err := os.Create(outputPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create output file: %w", err)
|
||||
}
|
||||
defer outFile.Close()
|
||||
|
||||
// Copy file contents
|
||||
if _, err := io.Copy(outFile, tr); err != nil {
|
||||
return fmt.Errorf("failed to copy file contents: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("%w: %s", ErrFileNotFoundInTar, fileName)
|
||||
}
|
||||
|
||||
// extractDirectoryFromTar extracts all files from a tar reader to a target directory.
|
||||
func extractDirectoryFromTar(tarReader io.Reader, targetDir string) error {
|
||||
tr := tar.NewReader(tarReader)
|
||||
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read tar header: %w", err)
|
||||
}
|
||||
|
||||
// Clean the path to prevent directory traversal
|
||||
cleanName := filepath.Clean(header.Name)
|
||||
if strings.Contains(cleanName, "..") {
|
||||
continue // Skip potentially dangerous paths
|
||||
}
|
||||
|
||||
targetPath := filepath.Join(targetDir, cleanName)
|
||||
|
||||
switch header.Typeflag {
|
||||
case tar.TypeDir:
|
||||
// Create directory
|
||||
if err := os.MkdirAll(targetPath, os.FileMode(header.Mode)); err != nil {
|
||||
return fmt.Errorf("failed to create directory %s: %w", targetPath, err)
|
||||
}
|
||||
case tar.TypeReg:
|
||||
// Ensure parent directories exist
|
||||
if err := os.MkdirAll(filepath.Dir(targetPath), 0o755); err != nil {
|
||||
return fmt.Errorf("failed to create parent directories for %s: %w", targetPath, err)
|
||||
}
|
||||
|
||||
// Create file
|
||||
outFile, err := os.Create(targetPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create file %s: %w", targetPath, err)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(outFile, tr); err != nil {
|
||||
outFile.Close()
|
||||
return fmt.Errorf("failed to copy file contents: %w", err)
|
||||
}
|
||||
outFile.Close()
|
||||
|
||||
// Set file permissions
|
||||
if err := os.Chmod(targetPath, os.FileMode(header.Mode)); err != nil {
|
||||
return fmt.Errorf("failed to set file permissions: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
@@ -15,7 +16,10 @@ type MapConfig struct {
|
||||
Directory string `flag:"directory,Directory to read map responses from"`
|
||||
}
|
||||
|
||||
var mapConfig MapConfig
|
||||
var (
|
||||
mapConfig MapConfig
|
||||
errDirectoryRequired = errors.New("directory is required")
|
||||
)
|
||||
|
||||
func main() {
|
||||
root := command.C{
|
||||
@@ -40,7 +44,7 @@ func main() {
|
||||
// runIntegrationTest executes the integration test workflow.
|
||||
func runOnline(env *command.Env) error {
|
||||
if mapConfig.Directory == "" {
|
||||
return fmt.Errorf("directory is required")
|
||||
return errDirectoryRequired
|
||||
}
|
||||
|
||||
resps, err := mapper.ReadMapResponsesFromDirectory(mapConfig.Directory)
|
||||
@@ -57,5 +61,6 @@ func runOnline(env *command.Env) error {
|
||||
|
||||
os.Stderr.Write(out)
|
||||
os.Stderr.Write([]byte("\n"))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ listen_addr: 127.0.0.1:8080
|
||||
|
||||
# Address to listen to /metrics and /debug, you may want
|
||||
# to keep this endpoint private to your internal network
|
||||
# Use an empty value to disable the metrics listener.
|
||||
metrics_listen_addr: 127.0.0.1:9090
|
||||
|
||||
# Address to listen for gRPC.
|
||||
@@ -361,6 +362,12 @@ unix_socket_permission: "0770"
|
||||
# # required "openid" scope.
|
||||
# scope: ["openid", "profile", "email"]
|
||||
#
|
||||
# # Only verified email addresses are synchronized to the user profile by
|
||||
# # default. Unverified emails may be allowed in case an identity provider
|
||||
# # does not send the "email_verified: true" claim or email verification is
|
||||
# # not required.
|
||||
# email_verified_required: true
|
||||
#
|
||||
# # Provide custom key/value pairs which get sent to the identity provider's
|
||||
# # authorization endpoint.
|
||||
# extra_params:
|
||||
@@ -407,3 +414,23 @@ logtail:
|
||||
# default static port 41641. This option is intended as a workaround for some buggy
|
||||
# firewall devices. See https://tailscale.com/kb/1181/firewalls/ for more information.
|
||||
randomize_client_port: false
|
||||
|
||||
# Taildrop configuration
|
||||
# Taildrop is the file sharing feature of Tailscale, allowing nodes to send files to each other.
|
||||
# https://tailscale.com/kb/1106/taildrop/
|
||||
taildrop:
|
||||
# Enable or disable Taildrop for all nodes.
|
||||
# When enabled, nodes can send files to other nodes owned by the same user.
|
||||
# Tagged devices and cross-user transfers are not permitted by Tailscale clients.
|
||||
enabled: true
|
||||
# Advanced performance tuning parameters.
|
||||
# The defaults are carefully chosen and should rarely need adjustment.
|
||||
# Only modify these if you have identified a specific performance issue.
|
||||
#
|
||||
# tuning:
|
||||
# # NodeStore write batching configuration.
|
||||
# # The NodeStore batches write operations before rebuilding peer relationships,
|
||||
# # which is computationally expensive. Batching reduces rebuild frequency.
|
||||
# #
|
||||
# # node_store_batch_size: 100
|
||||
# # node_store_batch_timeout: 500ms
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# If you plan to somehow use headscale, please deploy your own DERP infra: https://tailscale.com/kb/1118/custom-derp-servers/
|
||||
regions:
|
||||
1: null # Disable DERP region with ID 1
|
||||
1: null # Disable DERP region with ID 1
|
||||
900:
|
||||
regionid: 900
|
||||
regioncode: custom
|
||||
|
||||
@@ -157,7 +157,7 @@ indicates which part of the policy is invalid. Follow these steps to fix your po
|
||||
!!! warning "Full server configuration required"
|
||||
|
||||
The above commands to get/set the policy require a complete server configuration file including database settings. A
|
||||
minimal config to [control Headscale via remote CLI](../ref/remote-cli.md) is not sufficient. You may use `headscale
|
||||
minimal config to [control Headscale via remote CLI](../ref/api.md#grpc) is not sufficient. You may use `headscale
|
||||
-c /path/to/config.yaml` to specify the path to an alternative configuration file.
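For example, a sketch of fetching the policy with an explicit configuration file (the path is a placeholder for your own setup):

```console
headscale -c /path/to/config.yaml policy get
```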
|
||||
|
||||
## How can I avoid sending logs to Tailscale Inc?
|
||||
|
||||
@@ -5,15 +5,16 @@ to provide self-hosters and hobbyists with an open-source server they can use fo
|
||||
provides an overview of Headscale's features and compatibility with the Tailscale control server:
|
||||
|
||||
- [x] Full "base" support of Tailscale's features
|
||||
- [x] Node registration
|
||||
- [x] Interactive
|
||||
- [x] Pre authenticated key
|
||||
- [x] [Node registration](../ref/registration.md)
|
||||
- [x] [Web authentication](../ref/registration.md#web-authentication)
|
||||
- [x] [Pre authenticated key](../ref/registration.md#pre-authenticated-key)
|
||||
- [x] [DNS](../ref/dns.md)
|
||||
- [x] [MagicDNS](https://tailscale.com/kb/1081/magicdns)
|
||||
- [x] [Global and restricted nameservers (split DNS)](https://tailscale.com/kb/1054/dns#nameservers)
|
||||
- [x] [search domains](https://tailscale.com/kb/1054/dns#search-domains)
|
||||
- [x] [Extra DNS records (Headscale only)](../ref/dns.md#setting-extra-dns-records)
|
||||
- [x] [Taildrop (File Sharing)](https://tailscale.com/kb/1106/taildrop)
|
||||
- [x] [Tags](../ref/tags.md)
|
||||
- [x] [Routes](../ref/routes.md)
|
||||
- [x] [Subnet routers](../ref/routes.md#subnet-router)
|
||||
- [x] [Exit nodes](../ref/routes.md#exit-node)
|
||||
|
||||
BIN docs/assets/favicon.png (new file, 22 KiB)
(other binary image files changed; sizes unchanged at 56 KiB and 34 KiB)
@@ -1 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" xml:space="preserve" style="fill-rule:evenodd;clip-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:2" viewBox="0 0 1280 640"><circle cx="141.023" cy="338.36" r="117.472" style="fill:#f8b5cb" transform="matrix(.997276 0 0 1.00556 10.0024 -14.823)"/><circle cx="352.014" cy="268.302" r="33.095" style="fill:#a2a2a2" transform="matrix(1.01749 0 0 1 -3.15847 0)"/><circle cx="352.014" cy="268.302" r="33.095" style="fill:#a2a2a2" transform="matrix(1.01749 0 0 1 -3.15847 115.914)"/><circle cx="352.014" cy="268.302" r="33.095" style="fill:#a2a2a2" transform="matrix(1.01749 0 0 1 148.43 115.914)"/><circle cx="352.014" cy="268.302" r="33.095" style="fill:#a2a2a2" transform="matrix(1.01749 0 0 1 148.851 0)"/><circle cx="805.557" cy="336.915" r="118.199" style="fill:#8d8d8d" transform="matrix(.99196 0 0 1 3.36978 -10.2458)"/><circle cx="805.557" cy="336.915" r="118.199" style="fill:#8d8d8d" transform="matrix(.99196 0 0 1 255.633 -10.2458)"/><path d="M680.282 124.808h-68.093v390.325h68.081v-28.23H640V153.228h40.282v-28.42Z" style="fill:#303030"/><path d="M680.282 124.808h-68.093v390.325h68.081v-28.23H640V153.228h40.282v-28.42Z" style="fill:#303030" transform="matrix(-1 0 0 1 1857.19 0)"/></svg>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" xml:space="preserve" style="fill-rule:evenodd;clip-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:2" viewBox="0 0 1280 640"><circle cx="141.023" cy="338.36" r="117.472" style="fill:#f8b5cb" transform="matrix(.997276 0 0 1.00556 10.0024 -14.823)"/><circle cx="352.014" cy="268.302" r="33.095" style="fill:#a2a2a2" transform="matrix(1.01749 0 0 1 -3.15847 0)"/><circle cx="352.014" cy="268.302" r="33.095" style="fill:#a2a2a2" transform="matrix(1.01749 0 0 1 -3.15847 115.914)"/><circle cx="352.014" cy="268.302" r="33.095" style="fill:#a2a2a2" transform="matrix(1.01749 0 0 1 148.43 115.914)"/><circle cx="352.014" cy="268.302" r="33.095" style="fill:#a2a2a2" transform="matrix(1.01749 0 0 1 148.851 0)"/><circle cx="805.557" cy="336.915" r="118.199" style="fill:#8d8d8d" transform="matrix(.99196 0 0 1 3.36978 -10.2458)"/><circle cx="805.557" cy="336.915" r="118.199" style="fill:#8d8d8d" transform="matrix(.99196 0 0 1 255.633 -10.2458)"/><path d="M680.282 124.808h-68.093v390.325h68.081v-28.23H640V153.228h40.282v-28.42Z" style="fill:#303030"/><path d="M680.282 124.808h-68.093v390.325h68.081v-28.23H640V153.228h40.282v-28.42Z" style="fill:#303030" transform="matrix(-1 0 0 1 1857.19 0)"/></svg>
|
||||
|
(additional binary image files changed: 1.2 KiB, 49 KiB and 7.8 KiB)
@@ -65,7 +65,7 @@ servers.
|
||||
- billing.internal
|
||||
- router.internal
|
||||
|
||||

|
||||

|
||||
|
||||
When [registering the servers](../usage/getting-started.md#register-a-node) we
|
||||
will need to add the flag `--advertise-tags=tag:<tag1>,tag:<tag2>`, and the user
|
||||
@@ -222,7 +222,7 @@ Allows access to the internet through [exit nodes](routes.md#exit-node). Can onl
|
||||
|
||||
### `autogroup:member`
|
||||
|
||||
Includes all users who are direct members of the tailnet. Does not include users from shared devices.
|
||||
Includes all [personal (untagged) devices](registration.md/#identity-model).
|
||||
|
||||
```json
|
||||
{
|
||||
@@ -234,7 +234,7 @@ Includes all users who are direct members of the tailnet. Does not include users
|
||||
|
||||
### `autogroup:tagged`
|
||||
|
||||
Includes all devices that have at least one tag.
|
||||
Includes all devices that [have at least one tag](registration.md/#identity-model).
|
||||
|
||||
```json
|
||||
{
|
||||
|
||||
docs/ref/api.md (new file, 129 lines)
@@ -0,0 +1,129 @@
|
||||
# API
|
||||
Headscale provides an [HTTP REST API](#rest-api) and a [gRPC interface](#grpc) which may be used to integrate a [web
|
||||
interface](integration/web-ui.md), to [remotely control Headscale](#setup-remote-control) or to provide a base for custom
|
||||
integration and tooling.
|
||||
|
||||
Both interfaces require a valid API key before use. To create an API key, log into your Headscale server and generate
|
||||
one with the default expiration of 90 days:
|
||||
|
||||
```shell
|
||||
headscale apikeys create
|
||||
```
|
||||
|
||||
Copy the output of the command and save it for later. Please note that you cannot retrieve an API key again. If the API
|
||||
key is lost, expire the old one, and create a new one.
|
||||
|
||||
To list the API keys currently associated with the server:
|
||||
|
||||
```shell
|
||||
headscale apikeys list
|
||||
```
|
||||
|
||||
and to expire an API key:
|
||||
|
||||
```shell
|
||||
headscale apikeys expire --prefix <PREFIX>
|
||||
```
|
||||
|
||||
## REST API
|
||||
|
||||
- API endpoint: `/api/v1`, e.g. `https://headscale.example.com/api/v1`
|
||||
- Documentation: `/swagger`, e.g. `https://headscale.example.com/swagger`
|
||||
- Headscale Version: `/version`, e.g. `https://headscale.example.com/version`
|
||||
- Authenticate using HTTP Bearer authentication by sending the [API key](#api) with the HTTP `Authorization: Bearer
|
||||
<API_KEY>` header.
|
||||
|
||||
Start by [creating an API key](#api) and test it with the examples below. Read the API documentation provided by your
|
||||
Headscale server at `/swagger` for details.
|
||||
|
||||
=== "Get details for all users"
|
||||
|
||||
```console
|
||||
curl -H "Authorization: Bearer <API_KEY>" \
|
||||
https://headscale.example.com/api/v1/user
|
||||
```
|
||||
|
||||
=== "Get details for user 'bob'"
|
||||
|
||||
```console
|
||||
curl -H "Authorization: Bearer <API_KEY>" \
|
||||
https://headscale.example.com/api/v1/user?name=bob
|
||||
```
|
||||
|
||||
=== "Register a node"
|
||||
|
||||
```console
|
||||
curl -H "Authorization: Bearer <API_KEY>" \
|
||||
-d user=<USER> -d key=<REGISTRATION_KEY> \
|
||||
https://headscale.example.com/api/v1/node/register
|
||||
```
|
||||
|
||||
## gRPC
|
||||
|
||||
The gRPC interface can be used to control a Headscale instance from a remote machine with the `headscale` binary.
|
||||
|
||||
### Prerequisite
|
||||
|
||||
- A workstation to run `headscale` (any supported platform, e.g. Linux).
|
||||
- A Headscale server with gRPC enabled.
|
||||
- Connections to the gRPC port (default: `50443`) are allowed.
|
||||
- Remote access requires an encrypted connection via TLS.
|
||||
- An [API key](#api) to authenticate with the Headscale server.
|
||||
|
||||
### Setup remote control
|
||||
|
||||
1. Download the [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases). Make
|
||||
sure to use the same version as on the server.
|
||||
|
||||
1. Put the binary somewhere in your `PATH`, e.g. `/usr/local/bin/headscale`
|
||||
|
||||
1. Make `headscale` executable: `chmod +x /usr/local/bin/headscale`
|
||||
|
||||
1. [Create an API key](#api) on the Headscale server.
|
||||
|
||||
1. Provide the connection parameters for the remote Headscale server either via a minimal YAML configuration file or
|
||||
via environment variables:
|
||||
|
||||
=== "Minimal YAML configuration file"
|
||||
|
||||
```yaml title="config.yaml"
|
||||
cli:
|
||||
address: <HEADSCALE_ADDRESS>:<PORT>
|
||||
api_key: <API_KEY>
|
||||
```
|
||||
|
||||
=== "Environment variables"
|
||||
|
||||
```shell
|
||||
export HEADSCALE_CLI_ADDRESS="<HEADSCALE_ADDRESS>:<PORT>"
|
||||
export HEADSCALE_CLI_API_KEY="<API_KEY>"
|
||||
```
|
||||
|
||||
This instructs the `headscale` binary to connect to a remote instance at `<HEADSCALE_ADDRESS>:<PORT>`, instead of
|
||||
connecting to the local instance.
|
||||
|
||||
1. Test the connection by listing all nodes:
|
||||
|
||||
```shell
|
||||
headscale nodes list
|
||||
```
|
||||
|
||||
You should now be able to see a list of your nodes from your workstation, and you can
|
||||
now control the Headscale server from your workstation.
|
||||
|
||||
### Behind a proxy
|
||||
|
||||
It's possible to run the gRPC remote endpoint behind a reverse proxy, like Nginx, and have it run on the _same_ port as Headscale.
|
||||
|
||||
While this is _not a supported_ feature, an example of how this can be set up on
|
||||
[NixOS is shown here](https://github.com/kradalby/dotfiles/blob/4489cdbb19cddfbfae82cd70448a38fde5a76711/machines/headscale.oracldn/headscale.nix#L61-L91).
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
- Make sure you have the _same_ Headscale version on your server and workstation.
|
||||
- Ensure that connections to the gRPC port are allowed.
|
||||
- Verify that your TLS certificate is valid and trusted.
|
||||
- If you don't have access to a trusted certificate (e.g. from Let's Encrypt), either:
|
||||
- Add your self-signed certificate to the trust store of your OS _or_
|
||||
- Disable certificate verification by either setting `cli.insecure: true` in the configuration file or by setting
|
||||
`HEADSCALE_CLI_INSECURE=1` via an environment variable. We do **not** recommend disabling certificate validation.
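As a sketch for testing only, combining the connection parameters from the setup step with the `cli.insecure` setting named above:

```yaml title="config.yaml"
cli:
  address: <HEADSCALE_ADDRESS>:<PORT>
  api_key: <API_KEY>
  # Skips TLS certificate verification; not recommended outside testing.
  insecure: true
```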
|
||||
@@ -64,6 +64,9 @@ Headscale provides a metrics and debug endpoint. It allows to introspect differe
|
||||
|
||||
Keep the metrics and debug endpoint private to your internal network and don't expose it to the Internet.
|
||||
|
||||
The metrics and debug interface can be disabled completely by setting `metrics_listen_addr: null` in the
|
||||
[configuration file](./configuration.md).
|
||||
|
||||
Query metrics via <http://localhost:9090/metrics> and get an overview of available debug information via
|
||||
<http://localhost:9090/debug/>. Metrics may be queried from outside localhost but the debug interface is subject to
|
||||
additional protection despite listening on all interfaces.
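For a quick check, assuming the default `metrics_listen_addr: 127.0.0.1:9090` shown in the configuration file, the metrics endpoint can be queried directly:

```shell
curl http://localhost:9090/metrics
```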
|
||||
|
||||
@@ -7,10 +7,16 @@
|
||||
|
||||
This page collects third-party tools, client libraries, and scripts related to headscale.
|
||||
|
||||
| Name | Repository Link | Description |
|
||||
| --------------------- | --------------------------------------------------------------- | -------------------------------------------------------------------- |
|
||||
| tailscale-manager | [Github](https://github.com/singlestore-labs/tailscale-manager) | Dynamically manage Tailscale route advertisements |
|
||||
| headscalebacktosqlite | [Github](https://github.com/bigbozza/headscalebacktosqlite) | Migrate headscale from PostgreSQL back to SQLite |
|
||||
| headscale-pf | [Github](https://github.com/YouSysAdmin/headscale-pf) | Populates user groups based on user groups in Jumpcloud or Authentik |
|
||||
| headscale-client-go | [Github](https://github.com/hibare/headscale-client-go) | A Go client implementation for the Headscale HTTP API. |
|
||||
| headscale-zabbix | [Github](https://github.com/dblanque/headscale-zabbix) | A Zabbix Monitoring Template for the Headscale Service. |
|
||||
- [headscale-operator](https://github.com/infradohq/headscale-operator) - Headscale Kubernetes Operator
|
||||
- [tailscale-manager](https://github.com/singlestore-labs/tailscale-manager) - Dynamically manage Tailscale route
|
||||
advertisements
|
||||
- [headscalebacktosqlite](https://github.com/bigbozza/headscalebacktosqlite) - Migrate headscale from PostgreSQL back to
|
||||
SQLite
|
||||
- [headscale-pf](https://github.com/YouSysAdmin/headscale-pf) - Populates user groups based on user groups in Jumpcloud
|
||||
or Authentik
|
||||
- [headscale-client-go](https://github.com/hibare/headscale-client-go) - A Go client implementation for the Headscale
|
||||
HTTP API.
|
||||
- [headscale-zabbix](https://github.com/dblanque/headscale-zabbix) - A Zabbix Monitoring Template for the Headscale
|
||||
Service.
|
||||
- [tailscale-exporter](https://github.com/adinhodovic/tailscale-exporter) - A Prometheus exporter for Headscale that
|
||||
provides network-level metrics using the Headscale API.
|
||||
|
||||
@@ -7,14 +7,18 @@
|
||||
|
||||
Headscale doesn't provide a built-in web interface but users may pick one from the available options.
|
||||
|
||||
| Name | Repository Link | Description |
|
||||
| ---------------------- | ----------------------------------------------------------- | -------------------------------------------------------------------------------------------- |
|
||||
| headscale-ui | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server |
|
||||
| HeadscaleUi | [GitHub](https://github.com/simcu/headscale-ui) | A static headscale admin ui, no backend environment required |
|
||||
| Headplane | [GitHub](https://github.com/tale/headplane) | An advanced Tailscale inspired frontend for headscale |
|
||||
| headscale-admin | [Github](https://github.com/GoodiesHQ/headscale-admin) | Headscale-Admin is meant to be a simple, modern web interface for headscale |
|
||||
| ouroboros | [Github](https://github.com/yellowsink/ouroboros) | Ouroboros is designed for users to manage their own devices, rather than for admins |
|
||||
| unraid-headscale-admin | [Github](https://github.com/ich777/unraid-headscale-admin) | A simple headscale admin UI for Unraid, it offers Local (`docker exec`) and API Mode |
|
||||
| headscale-console | [Github](https://github.com/rickli-cloud/headscale-console) | WebAssembly-based client supporting SSH, VNC and RDP with optional self-service capabilities |
|
||||
- [headscale-ui](https://github.com/gurucomputing/headscale-ui) - A web frontend for the headscale Tailscale-compatible
|
||||
coordination server
|
||||
- [HeadscaleUi](https://github.com/simcu/headscale-ui) - A static headscale admin ui, no backend environment required
|
||||
- [Headplane](https://github.com/tale/headplane) - An advanced Tailscale inspired frontend for headscale
|
||||
- [headscale-admin](https://github.com/GoodiesHQ/headscale-admin) - Headscale-Admin is meant to be a simple, modern web
|
||||
interface for headscale
|
||||
- [ouroboros](https://github.com/yellowsink/ouroboros) - Ouroboros is designed for users to manage their own devices,
|
||||
rather than for admins
|
||||
- [unraid-headscale-admin](https://github.com/ich777/unraid-headscale-admin) - A simple headscale admin UI for Unraid,
|
||||
it offers Local (`docker exec`) and API Mode
|
||||
- [headscale-console](https://github.com/rickli-cloud/headscale-console) - WebAssembly-based client supporting SSH, VNC
|
||||
and RDP with optional self-service capabilities
|
||||
- [headscale-piying](https://github.com/wszgrcy/headscale-piying) - A headscale web UI with support for visual ACL configuration
|
||||
|
||||
You can ask for support on our [Discord server](https://discord.gg/c84AZQhmpx) in the "web-interfaces" channel.
|
||||
|
||||
@@ -77,6 +77,7 @@ are configured, a user needs to pass all of them.
|
||||
|
||||
* Check the email domain of each authenticating user against the list of allowed domains and only authorize users
|
||||
whose email domain matches `example.com`.
|
||||
* A verified email address is required [unless email verification is disabled](#control-email-verification).
|
||||
* Access allowed: `alice@example.com`
|
||||
* Access denied: `bob@example.net`
|
||||
|
||||
@@ -93,6 +94,7 @@ are configured, a user needs to pass all of them.
|
||||
|
||||
* Check the email address of each authenticating user against the list of allowed email addresses and only authorize
|
||||
users whose email is part of the `allowed_users` list.
|
||||
* A verified email address is required [unless email verification is disabled](#control-email-verification).
|
||||
* Access allowed: `alice@example.com`, `bob@example.net`
|
||||
* Access denied: `mallory@example.net`
|
||||
|
||||
@@ -123,6 +125,23 @@ are configured, a user needs to pass all of them.
|
||||
- "headscale_users"
|
||||
```
|
||||
|
||||
### Control email verification
|
||||
|
||||
Headscale uses the `email` claim from the identity provider to synchronize the email address to its user profile. By
|
||||
default, a user's email address is only synchronized when the identity provider reports the email address as verified
|
||||
via the `email_verified: true` claim.
|
||||
|
||||
Unverified emails may be allowed in case an identity provider does not send the `email_verified` claim or email
|
||||
verification is not required. In that case, a user's email address is always synchronized to the user profile.
|
||||
|
||||
```yaml hl_lines="5"
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
email_verified_required: false
|
||||
```
|
||||
|
||||
### Customize node expiration
|
||||
|
||||
The node expiration is the amount of time a node is authenticated with OpenID Connect until it expires and needs to
|
||||
@@ -189,7 +208,7 @@ endpoint.
|
||||
|
||||
| Headscale profile | OIDC claim | Notes / examples |
|
||||
| ------------------- | -------------------- | ------------------------------------------------------------------------------------------------- |
|
||||
| email address | `email` | Only used when `email_verified: true` |
|
||||
| email address | `email` | Only verified emails are synchronized, unless `email_verified_required: false` is configured |
|
||||
| display name | `name` | eg: `Sam Smith` |
|
||||
| username | `preferred_username` | Depends on identity provider, eg: `ssmith`, `ssmith@idp.example.com`, `\\example.com\ssmith` |
|
||||
| profile picture | `picture` | URL to a profile picture or avatar |
|
||||
@@ -205,8 +224,6 @@ endpoint.
|
||||
- The username must be at least two characters long.
|
||||
- It must only contain letters, digits, hyphens, dots, underscores, and up to a single `@`.
|
||||
- The username must start with a letter.
|
||||
- A user's email address is only synchronized to the local user profile when the identity provider marks the email
|
||||
address as verified (`email_verified: true`).
|
||||
|
||||
Please see the [GitHub label "OIDC"](https://github.com/juanfont/headscale/labels/OIDC) for OIDC related issues.
|
||||
|
||||
@@ -233,7 +250,7 @@ Authelia is fully supported by Headscale.
|
||||
### Authentik
|
||||
|
||||
- Authentik is fully supported by Headscale.
|
||||
- [Headscale does not JSON Web Encryption](https://github.com/juanfont/headscale/issues/2446). Leave the field
|
||||
- [Headscale does not support JSON Web Encryption](https://github.com/juanfont/headscale/issues/2446). Leave the field
|
||||
`Encryption Key` in the providers section unset.
|
||||
|
||||
### Google OAuth
|
||||
@@ -305,5 +322,13 @@ Entra ID is: `https://login.microsoftonline.com/<tenant-UUID>/v2.0`. The followi
|
||||
- `domain_hint: example.com` to use your own domain
|
||||
- `prompt: select_account` to force an account picker during login
|
||||
|
||||
Groups for the [allowed groups filter](#authorize-users-with-filters) need to be specified with their group ID instead
|
||||
When using Microsoft Entra ID together with the [allowed groups filter](#authorize-users-with-filters), configure the
|
||||
Headscale OIDC scope without the `groups` claim, for example:
|
||||
|
||||
```yaml
|
||||
oidc:
|
||||
scope: ["openid", "profile", "email"]
|
||||
```
|
||||
|
||||
Groups for the [allowed groups filter](#authorize-users-with-filters) need to be specified with their group ID (UUID) instead
|
||||
of the group name.
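A minimal sketch, assuming the `allowed_groups` list used by the allowed groups filter and a made-up UUID as a placeholder:

```yaml
oidc:
  allowed_groups:
    # Entra ID group ID (UUID), not the display name; replace with your own.
    - "a1b2c3d4-5e6f-7a8b-9c0d-1e2f3a4b5c6d"
```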
|
||||
|
||||
docs/ref/registration.md (new file, 141 lines)
@@ -0,0 +1,141 @@
|
||||
# Registration methods
|
||||
|
||||
Headscale supports multiple ways to register a node. The preferred registration method depends on the identity of a node
|
||||
and your use case.
|
||||
|
||||
## Identity model
|
||||
|
||||
Tailscale's identity model distinguishes between personal and tagged nodes:
|
||||
|
||||
- A personal node (or user-owned node) is owned by a human and typically refers to end-user devices such as laptops,
|
||||
workstations or mobile phones. End-user devices are managed by a single user.
|
||||
- A tagged node (or service-based node or non-human node) provides services to the network. Common examples include web-
|
||||
and database servers. Those nodes are typically managed by a team of users. Some additional restrictions apply for
|
||||
tagged nodes, e.g. a tagged node is not allowed to [Tailscale SSH](https://tailscale.com/kb/1193/tailscale-ssh) into a
|
||||
personal node.
|
||||
|
||||
Headscale implements Tailscale's identity model and distinguishes between personal and tagged nodes where a personal
|
||||
node is owned by a Headscale user and a tagged node is owned by a tag. Tagged devices are grouped under the special user
|
||||
`tagged-devices`.
|
||||
|
||||
## Registration methods
|
||||
|
||||
There are two main ways to register new nodes, [web authentication](#web-authentication) and [registration with a pre
|
||||
authenticated key](#pre-authenticated-key). Both methods can be used to register personal and tagged nodes.
|
||||
|
||||
### Web authentication
|
||||
|
||||
Web authentication is the default method to register a new node. It's interactive, where the client initiates the
|
||||
registration and the Headscale administrator needs to approve the new node before it is allowed to join the network. A
|
||||
node can be approved with:
|
||||
|
||||
- Headscale CLI (described in this documentation)
|
||||
- [Headscale API](api.md)
|
||||
- Or delegated to an identity provider via [OpenID Connect](oidc.md)
|
||||
|
||||
Web authentication relies on the presence of a Headscale user. Use the `headscale users` command to create a new user:
|
||||
|
||||
```console
|
||||
headscale users create <USER>
|
||||
```
|
||||
|
||||
=== "Personal devices"
|
||||
|
||||
Run `tailscale up` to login your personal device:
|
||||
|
||||
```console
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL>
|
||||
```
|
||||
|
||||
Usually, a browser window with further instructions is opened. This page explains how to complete the registration
|
||||
on your Headscale server and it also prints the registration key required to approve the node:
|
||||
|
||||
```console
|
||||
headscale nodes register --user <USER> --key <REGISTRATION_KEY>
|
||||
```
|
||||
|
||||
Congratulations, the registration of your personal node is complete and it should be listed as "online" in the output of
|
||||
`headscale nodes list`. The "User" column displays `<USER>` as the owner of the node.
|
||||
|
||||
=== "Tagged devices"
|
||||
|
||||
Your Headscale user needs to be authorized to register tagged devices. This authorization is specified in the
|
||||
[`tagOwners`](https://tailscale.com/kb/1337/policy-syntax#tag-owners) section of the [ACL](acls.md). A simple
|
||||
example looks like this:
|
||||
|
||||
```json title="The user alice can register nodes tagged with tag:server"
|
||||
{
|
||||
"tagOwners": {
|
||||
"tag:server": ["alice@"]
|
||||
},
|
||||
// more rules
|
||||
}
|
||||
```
|
||||
|
||||
Run `tailscale up` and provide at least one tag to login a tagged device:
|
||||
|
||||
```console
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags tag:<TAG>
|
||||
```
|
||||
|
||||
Usually, a browser window with further instructions is opened. This page explains how to complete the registration
|
||||
on your Headscale server and it also prints the registration key required to approve the node:
|
||||
|
||||
```console
|
||||
headscale nodes register --user <USER> --key <REGISTRATION_KEY>
|
||||
```
|
||||
|
||||
Headscale checks that `<USER>` is allowed to register a node with the specified tag(s) and then transfers ownership
|
||||
of the new node to the special user `tagged-devices`. The registration of a tagged node is complete and it should be
|
||||
listed as "online" in the output of `headscale nodes list`. The "User" column displays `tagged-devices` as the owner
|
||||
of the node. See the "Tags" column for the list of assigned tags.
|
||||
|
||||
### Pre authenticated key
|
||||
|
||||
Registration with a pre authenticated key (or auth key) is a non-interactive way to register a new node. The Headscale
|
||||
administrator creates the key upfront, which can then be used to register a node without interactive approval. It is
best suited for automation.
|
||||
|
||||
=== "Personal devices"
|
||||
|
||||
A personal node is always assigned to a Headscale user. Use the `headscale users` command to create a new user:
|
||||
|
||||
```console
|
||||
headscale users create <USER>
|
||||
```
|
||||
|
||||
Use the `headscale users list` command to look up its `<USER_ID>`, then create a new pre authenticated key for your user:
|
||||
|
||||
```console
|
||||
headscale preauthkeys create --user <USER_ID>
|
||||
```
|
||||
|
||||
The above prints a pre authenticated key with the default settings (can be used once and is valid for one hour). Use
|
||||
this auth key to register a node non-interactively:
|
||||
|
||||
```console
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
```
|
||||
|
||||
Congratulations, the registration of your personal node is complete and it should be listed as "online" in the output of
`headscale nodes list`. The "User" column displays `<USER>` as the owner of the node.
|
||||
|
||||
=== "Tagged devices"
|
||||
|
||||
Create a new pre authenticated key and provide at least one tag:
|
||||
|
||||
```console
|
||||
headscale preauthkeys create --tags tag:<TAG>
|
||||
```
|
||||
|
||||
The above prints a pre authenticated key with the default settings (can be used once and is valid for one hour). Use
|
||||
this auth key to register a node non-interactively. You don't need to provide the `--advertise-tags` parameter as
|
||||
the tags are automatically read from the pre authenticated key:
|
||||
|
||||
```console
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
```
|
||||
|
||||
The registration of a tagged node is complete and it should be listed as "online" in the output of `headscale nodes
|
||||
list`. The "User" column displays `tagged-devices` as the owner of the node. See the "Tags" column for the list of
|
||||
assigned tags.
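
The examples above create pre authenticated keys with the default settings. Depending on your automation, a reusable
key or a longer validity may be more convenient; a minimal sketch, combining flags that also appear in the container
documentation (`--reusable`, `--expiration`) — adjust the user ID and duration to your needs, and add `--tags tag:<TAG>`
as shown above for a tagged key:

```console
headscale preauthkeys create --user <USER_ID> --reusable --expiration 24h
```

Run `headscale preauthkeys create --help` to see all available options.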
|
||||
@@ -1,99 +0,0 @@
|
||||
# Controlling headscale with remote CLI
|
||||
|
||||
This documentation has the goal of showing a user how-to control a headscale instance
|
||||
from a remote machine with the `headscale` command line binary.
|
||||
|
||||
## Prerequisite
|
||||
|
||||
- A workstation to run `headscale` (any supported platform, e.g. Linux).
|
||||
- A headscale server with gRPC enabled.
|
||||
- Connections to the gRPC port (default: `50443`) are allowed.
|
||||
- Remote access requires an encrypted connection via TLS.
|
||||
- An API key to authenticate with the headscale server.
|
||||
|
||||
## Create an API key
|
||||
|
||||
We need to create an API key to authenticate with the remote headscale server when using it from our workstation.
|
||||
|
||||
To create an API key, log into your headscale server and generate a key:
|
||||
|
||||
```shell
|
||||
headscale apikeys create --expiration 90d
|
||||
```
|
||||
|
||||
Copy the output of the command and save it for later. Please note that you can not retrieve a key again,
|
||||
if the key is lost, expire the old one, and create a new key.
|
||||
|
||||
To list the keys currently associated with the server:
|
||||
|
||||
```shell
|
||||
headscale apikeys list
|
||||
```
|
||||
|
||||
and to expire a key:
|
||||
|
||||
```shell
|
||||
headscale apikeys expire --prefix "<PREFIX>"
|
||||
```
|
||||
|
||||
## Download and configure headscale
|
||||
|
||||
1. Download the [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases). Make
|
||||
sure to use the same version as on the server.
|
||||
|
||||
1. Put the binary somewhere in your `PATH`, e.g. `/usr/local/bin/headscale`
|
||||
|
||||
1. Make `headscale` executable:
|
||||
|
||||
```shell
|
||||
chmod +x /usr/local/bin/headscale
|
||||
```
|
||||
|
||||
1. Provide the connection parameters for the remote headscale server either via a minimal YAML configuration file or via
|
||||
environment variables:
|
||||
|
||||
=== "Minimal YAML configuration file"
|
||||
|
||||
```yaml title="config.yaml"
|
||||
cli:
|
||||
address: <HEADSCALE_ADDRESS>:<PORT>
|
||||
api_key: <API_KEY_FROM_PREVIOUS_STEP>
|
||||
```
|
||||
|
||||
=== "Environment variables"
|
||||
|
||||
```shell
|
||||
export HEADSCALE_CLI_ADDRESS="<HEADSCALE_ADDRESS>:<PORT>"
|
||||
export HEADSCALE_CLI_API_KEY="<API_KEY_FROM_PREVIOUS_STEP>"
|
||||
```
|
||||
|
||||
This instructs the `headscale` binary to connect to a remote instance at `<HEADSCALE_ADDRESS>:<PORT>`, instead of
|
||||
connecting to the local instance.
|
||||
|
||||
1. Test the connection
|
||||
|
||||
Let us run the headscale command to verify that we can connect by listing our nodes:
|
||||
|
||||
```shell
|
||||
headscale nodes list
|
||||
```
|
||||
|
||||
You should now be able to see a list of your nodes from your workstation, and you can
|
||||
now control the headscale server from your workstation.
|
||||
|
||||
## Behind a proxy
|
||||
|
||||
It is possible to run the gRPC remote endpoint behind a reverse proxy, like Nginx, and have it run on the _same_ port as headscale.
|
||||
|
||||
While this is _not a supported_ feature, an example on how this can be set up on
|
||||
[NixOS is shown here](https://github.com/kradalby/dotfiles/blob/4489cdbb19cddfbfae82cd70448a38fde5a76711/machines/headscale.oracldn/headscale.nix#L61-L91).
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- Make sure you have the _same_ headscale version on your server and workstation.
|
||||
- Ensure that connections to the gRPC port are allowed.
|
||||
- Verify that your TLS certificate is valid and trusted.
|
||||
- If you don't have access to a trusted certificate (e.g. from Let's Encrypt), either:
|
||||
- Add your self-signed certificate to the trust store of your OS _or_
|
||||
- Disable certificate verification by either setting `cli.insecure: true` in the configuration file or by setting
|
||||
`HEADSCALE_CLI_INSECURE=1` via an environment variable. We do **not** recommend to disable certificate validation.
|
||||
@@ -42,8 +42,9 @@ can be used.
|
||||
|
||||
```console
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myrouter | | 10.0.0.0/8, 192.168.0.0/24 |
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myrouter | | 10.0.0.0/8 |
|
||||
| | | 192.168.0.0/24 |
|
||||
```
|
||||
|
||||
Approve all desired routes of a subnet router by specifying them as comma separated list:
|
||||
@@ -57,8 +58,9 @@ The node `myrouter` can now route the IPv4 networks `10.0.0.0/8` and `192.168.0.
|
||||
|
||||
```console
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myrouter | 10.0.0.0/8, 192.168.0.0/24 | 10.0.0.0/8, 192.168.0.0/24 | 10.0.0.0/8, 192.168.0.0/24
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myrouter | 10.0.0.0/8 | 10.0.0.0/8 | 10.0.0.0/8
|
||||
| | 192.168.0.0/24 | 192.168.0.0/24 | 192.168.0.0/24
|
||||
```
|
||||
|
||||
#### Use the subnet router
|
||||
@@ -109,9 +111,9 @@ approval of routes served with a subnet router.
|
||||
|
||||
The ACL snippet below defines the tag `tag:router` owned by the user `alice`. This tag is used for `routes` in the
|
||||
`autoApprovers` section. The IPv4 route `192.168.0.0/24` is automatically approved once announced by a subnet router
|
||||
owned by the user `alice` and that also advertises the tag `tag:router`.
|
||||
that advertises the tag `tag:router`.
|
||||
|
||||
```json title="Subnet routers owned by alice and tagged with tag:router are automatically approved"
|
||||
```json title="Subnet routers tagged with tag:router are automatically approved"
|
||||
{
|
||||
"tagOwners": {
|
||||
"tag:router": ["alice@"]
|
||||
@@ -168,8 +170,9 @@ available, but needs to be approved:
|
||||
|
||||
```console
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myexit | | 0.0.0.0/0, ::/0 |
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myexit | | 0.0.0.0/0 |
|
||||
| | | ::/0 |
|
||||
```
|
||||
|
||||
For exit nodes, it is sufficient to approve either the IPv4 or IPv6 route. The other will be approved automatically.
|
||||
@@ -183,8 +186,9 @@ The node `myexit` is now approved as exit node for the tailnet:
|
||||
|
||||
```console
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myexit | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myexit | 0.0.0.0/0 | 0.0.0.0/0 | 0.0.0.0/0
|
||||
| | ::/0 | ::/0 | ::/0
|
||||
```
|
||||
|
||||
#### Use the exit node
|
||||
@@ -256,10 +260,9 @@ in a tailnet. Headscale supports the `autoApprovers` section of an ACL to automa
|
||||
soon as it joins the tailnet.
|
||||
|
||||
The ACL snippet below defines the tag `tag:exit` owned by the user `alice`. This tag is used for `exitNode` in the
|
||||
`autoApprovers` section. A new exit node which is owned by the user `alice` and that also advertises the tag `tag:exit`
|
||||
is automatically approved:
|
||||
`autoApprovers` section. A new exit node that advertises the tag `tag:exit` is automatically approved:
|
||||
|
||||
```json title="Exit nodes owned by alice and tagged with tag:exit are automatically approved"
|
||||
```json title="Exit nodes tagged with tag:exit are automatically approved"
|
||||
{
|
||||
"tagOwners": {
|
||||
"tag:exit": ["alice@"]
|
||||
|
||||
54
docs/ref/tags.md
Normal file
@@ -0,0 +1,54 @@
|
||||
# Tags
|
||||
|
||||
Headscale supports Tailscale tags. Please read [Tailscale's tag documentation](https://tailscale.com/kb/1068/tags) to
|
||||
learn how tags work and how to use them.
|
||||
|
||||
Tags can be applied during [node registration](registration.md):
|
||||
|
||||
- using the `--advertise-tags` flag, see [web authentication for tagged devices](registration.md#__tabbed_1_2)
|
||||
- using a tagged pre authenticated key, see [how to create and use it](registration.md#__tabbed_2_2)
|
||||
|
||||
Administrators can manage tags with:
|
||||
|
||||
- Headscale CLI
|
||||
- [Headscale API](api.md)
|
||||
|
||||
## Common operations
|
||||
|
||||
### Manage tags for a node
|
||||
|
||||
Run `headscale nodes list` to list the tags for a node.
|
||||
|
||||
Use the `headscale nodes tag` command to modify the tags for a node. At least one tag is required and multiple tags can
|
||||
be provided as a comma-separated list. The following command sets the tags `tag:server` and `tag:prod` on the node with ID 1:
|
||||
|
||||
```console
|
||||
headscale nodes tag -i 1 -t tag:server,tag:prod
|
||||
```
|
||||
|
||||
### Convert from personal to tagged node
|
||||
|
||||
Use the `headscale nodes tag` command to convert a personal (user-owned) node to a tagged node:
|
||||
|
||||
```console
|
||||
headscale nodes tag -i <NODE_ID> -t <TAG>
|
||||
```
|
||||
|
||||
The node is now owned by the special user `tagged-devices` and has the specified tags assigned to it.
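
To double-check the result, list the nodes again and inspect the "User" and "Tags" columns; a brief usage sketch of the
command already referenced at the top of this page:

```console
headscale nodes list
```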
|
||||
|
||||
### Convert from tagged to personal node
|
||||
|
||||
A tagged node can be converted back to a personal (user-owned) node by re-authenticating with:
|
||||
|
||||
```console
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags= --force-reauth
|
||||
```
|
||||
|
||||
Usually, a browser window with further instructions is opened. This page explains how to complete the registration on
|
||||
your Headscale server and it also prints the registration key required to approve the node:
|
||||
|
||||
```console
|
||||
headscale nodes register --user <USER> --key <REGISTRATION_KEY>
|
||||
```
|
||||
|
||||
All previously assigned tags are removed and the node is now owned by the user specified in the command above.
|
||||
@@ -18,10 +18,10 @@ Registry](https://github.com/juanfont/headscale/pkgs/container/headscale). The c
|
||||
|
||||
## Configure and run headscale
|
||||
|
||||
1. Create a directory on the Docker host to store headscale's [configuration](../../ref/configuration.md) and the [SQLite](https://www.sqlite.org/) database:
|
||||
1. Create a directory on the container host to store headscale's [configuration](../../ref/configuration.md) and the [SQLite](https://www.sqlite.org/) database:
|
||||
|
||||
```shell
|
||||
mkdir -p ./headscale/{config,lib,run}
|
||||
mkdir -p ./headscale/{config,lib}
|
||||
cd ./headscale
|
||||
```
|
||||
|
||||
@@ -34,9 +34,10 @@ Registry](https://github.com/juanfont/headscale/pkgs/container/headscale). The c
|
||||
docker run \
|
||||
--name headscale \
|
||||
--detach \
|
||||
--volume "$(pwd)/config:/etc/headscale" \
|
||||
--read-only \
|
||||
--tmpfs /var/run/headscale \
|
||||
--volume "$(pwd)/config:/etc/headscale:ro" \
|
||||
--volume "$(pwd)/lib:/var/lib/headscale" \
|
||||
--volume "$(pwd)/run:/var/run/headscale" \
|
||||
--publish 127.0.0.1:8080:8080 \
|
||||
--publish 127.0.0.1:9090:9090 \
|
||||
--health-cmd "CMD headscale health" \
|
||||
@@ -57,15 +58,17 @@ Registry](https://github.com/juanfont/headscale/pkgs/container/headscale). The c
|
||||
image: docker.io/headscale/headscale:<VERSION>
|
||||
restart: unless-stopped
|
||||
container_name: headscale
|
||||
read_only: true
|
||||
tmpfs:
|
||||
- /var/run/headscale
|
||||
ports:
|
||||
- "127.0.0.1:8080:8080"
|
||||
- "127.0.0.1:9090:9090"
|
||||
volumes:
|
||||
# Please set <HEADSCALE_PATH> to the absolute path
|
||||
# of the previously created headscale directory.
|
||||
- <HEADSCALE_PATH>/config:/etc/headscale
|
||||
- <HEADSCALE_PATH>/config:/etc/headscale:ro
|
||||
- <HEADSCALE_PATH>/lib:/var/lib/headscale
|
||||
- <HEADSCALE_PATH>/run:/var/run/headscale
|
||||
command: serve
|
||||
healthcheck:
|
||||
test: ["CMD", "headscale", "health"]
|
||||
@@ -88,45 +91,10 @@ Registry](https://github.com/juanfont/headscale/pkgs/container/headscale). The c
|
||||
Verify headscale is available:
|
||||
|
||||
```shell
|
||||
curl http://127.0.0.1:9090/metrics
|
||||
curl http://127.0.0.1:8080/health
|
||||
```
|
||||
|
||||
1. Create a headscale user:
|
||||
|
||||
```shell
|
||||
docker exec -it headscale \
|
||||
headscale users create myfirstuser
|
||||
```
|
||||
|
||||
### Register a machine (normal login)
|
||||
|
||||
On a client machine, execute the `tailscale up` command to login:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server YOUR_HEADSCALE_URL
|
||||
```
|
||||
|
||||
To register a machine when running headscale in a container, take the headscale command and pass it to the container:
|
||||
|
||||
```shell
|
||||
docker exec -it headscale \
|
||||
headscale nodes register --user myfirstuser --key <YOUR_MACHINE_KEY>
|
||||
```
|
||||
|
||||
### Register a machine using a pre authenticated key
|
||||
|
||||
Generate a key using the command line for the user with ID 1:
|
||||
|
||||
```shell
|
||||
docker exec -it headscale \
|
||||
headscale preauthkeys create --user 1 --reusable --expiration 24h
|
||||
```
|
||||
|
||||
This will return a pre-authenticated key that can be used to connect a node to headscale with the `tailscale up` command:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
```
|
||||
Continue on the [getting started page](../../usage/getting-started.md) to register your first machine.
|
||||
|
||||
## Debugging headscale running in Docker
|
||||
|
||||
|
||||
@@ -42,6 +42,8 @@ distributions are Ubuntu 22.04 or newer, Debian 12 or newer.
|
||||
sudo systemctl status headscale
|
||||
```
|
||||
|
||||
Continue on the [getting started page](../../usage/getting-started.md) to register your first machine.
|
||||
|
||||
## Using standalone binaries (advanced)
|
||||
|
||||
!!! warning "Advanced"
|
||||
@@ -115,3 +117,5 @@ managed by systemd.
|
||||
```shell
|
||||
systemctl status headscale
|
||||
```
|
||||
|
||||
Continue on the [getting started page](../../usage/getting-started.md) to register your first machine.
|
||||
|
||||
@@ -28,7 +28,7 @@ The ports in use vary with the intended scenario and enabled features. Some of t
|
||||
- STUN, required if the [embedded DERP server](../ref/derp.md) is enabled
|
||||
- tcp/50443
|
||||
- Expose publicly: yes
|
||||
- Only required if the gRPC interface is used to [remote-control Headscale](../ref/remote-cli.md).
|
||||
- Only required if the gRPC interface is used to [remote-control Headscale](../ref/api.md#grpc).
|
||||
- tcp/9090
|
||||
- Expose publicly: no
|
||||
- [Metrics and debug endpoint](../ref/debug.md#metrics-and-debug-endpoint)
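
The metrics endpoint should not be exposed publicly, but it can be checked locally; a minimal sketch, assuming headscale
listens on the default metrics address and reusing the curl call that also appears in the container documentation:

```shell
curl http://127.0.0.1:9090/metrics
```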
|
||||
|
||||
@@ -6,7 +6,7 @@ This documentation has the goal of showing how a user can use the official Andro
|
||||
|
||||
Install the official Tailscale Android client from the [Google Play Store](https://play.google.com/store/apps/details?id=com.tailscale.ipn) or [F-Droid](https://f-droid.org/packages/com.tailscale.ipn/).
|
||||
|
||||
## Connect via normal, interactive login
|
||||
## Connect via web authentication
|
||||
|
||||
- Open the app and select the settings menu in the upper-right corner
|
||||
- Tap on `Accounts`
|
||||
@@ -15,7 +15,7 @@ Install the official Tailscale Android client from the [Google Play Store](https
|
||||
- The client connects automatically as soon as the node registration is complete on headscale. Until then, nothing is
|
||||
visible in the server logs.
|
||||
|
||||
## Connect using a preauthkey
|
||||
## Connect using a pre authenticated key
|
||||
|
||||
- Open the app and select the settings menu in the upper-right corner
|
||||
- Tap on `Accounts`
|
||||
@@ -24,5 +24,5 @@ Install the official Tailscale Android client from the [Google Play Store](https
|
||||
- Open the settings menu in the upper-right corner
|
||||
- Tap on `Accounts`
|
||||
- In the kebab menu icon (three dots) in the upper-right corner select `Use an auth key`
|
||||
- Enter your [preauthkey generated from headscale](../getting-started.md#using-a-preauthkey)
|
||||
- Enter your [preauthkey generated from headscale](../../ref/registration.md#pre-authenticated-key)
|
||||
- If needed, tap `Log in` on the main screen. You should now be connected to your headscale.
|
||||
|
||||
@@ -9,8 +9,8 @@ This page helps you get started with headscale and provides a few usage examples
|
||||
installation instructions.
|
||||
* The configuration file exists and is adjusted to suit your environment, see
|
||||
[Configuration](../ref/configuration.md) for details.
|
||||
* Headscale is reachable from the Internet. Verify this by opening client specific setup instructions in your
|
||||
browser, e.g. https://headscale.example.com/windows
|
||||
* Headscale is reachable from the Internet. Verify this by visiting the health endpoint:
|
||||
https://headscale.example.com/health
|
||||
* The Tailscale client is installed, see [Client and operating system support](../about/clients.md) for more
|
||||
information.
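
A quick way to verify that headscale is reachable is to request the health endpoint mentioned in the prerequisites
above; a minimal sketch, assuming `headscale.example.com` is replaced with your own domain:

```shell
curl https://headscale.example.com/health
```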
|
||||
|
||||
@@ -41,12 +41,28 @@ options, run:
|
||||
headscale <COMMAND> --help
|
||||
```
|
||||
|
||||
!!! note "Manage headscale from another local user"
|
||||
|
||||
By default only the user `headscale` or `root` will have the necessary permissions to access the unix socket
|
||||
(`/var/run/headscale/headscale.sock`) that is used to communicate with the service. In order to be able to
|
||||
communicate with the headscale service, you have to make sure the unix socket is accessible by the user that runs
the commands. In general, you can achieve this with any of the following methods:
|
||||
|
||||
* using `sudo`
|
||||
* run the commands as user `headscale`
|
||||
* add your user to the `headscale` group (see the sketch after the verification command below)
|
||||
|
||||
To verify you can run the following command using your preferred method:
|
||||
|
||||
```shell
|
||||
headscale users list
|
||||
```
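
As a sketch of the last method, adding your account to the `headscale` group is usually done with `usermod`. This
assumes the group exists on your system (e.g. created by the package installation); log out and back in for the
change to take effect:

```shell
sudo usermod -aG headscale "$USER"
```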
|
||||
|
||||
## Manage headscale users
|
||||
|
||||
In headscale, a node (also known as machine or device) is always assigned to a
|
||||
headscale user. Such a headscale user may have many nodes assigned to them and
|
||||
can be managed with the `headscale users` command. Invoke the built-in help for
|
||||
more information: `headscale users --help`.
|
||||
In headscale, a node (also known as machine or device) is [typically assigned to a headscale
|
||||
user](../ref/registration.md#identity-model). Such a headscale user may have many nodes assigned to them and can be
|
||||
managed with the `headscale users` command. Invoke the built-in help for more information: `headscale users --help`.
|
||||
|
||||
### Create a headscale user
|
||||
|
||||
@@ -80,11 +96,12 @@ more information: `headscale users --help`.
|
||||
|
||||
## Register a node
|
||||
|
||||
One has to register a node first to use headscale as coordination with Tailscale. The following examples work for the
|
||||
Tailscale client on Linux/BSD operating systems. Alternatively, follow the instructions to connect
|
||||
[Android](connect/android.md), [Apple](connect/apple.md) or [Windows](connect/windows.md) devices.
|
||||
One has to [register a node](../ref/registration.md) first to use headscale as a coordination server with Tailscale. The
|
||||
following examples work for the Tailscale client on Linux/BSD operating systems. Alternatively, follow the instructions
|
||||
to connect [Android](connect/android.md), [Apple](connect/apple.md) or [Windows](connect/windows.md) devices. Read
|
||||
[registration methods](../ref/registration.md) for an overview of the available methods.
|
||||
|
||||
### Normal, interactive login
|
||||
### [Web authentication](../ref/registration.md#web-authentication)
|
||||
|
||||
On a client machine, run the `tailscale up` command and provide the FQDN of your headscale instance as argument:
|
||||
|
||||
@@ -92,23 +109,23 @@ On a client machine, run the `tailscale up` command and provide the FQDN of your
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL>
|
||||
```
|
||||
|
||||
Usually, a browser window with further instructions is opened and contains the value for `<YOUR_MACHINE_KEY>`. Approve
|
||||
and register the node on your headscale server:
|
||||
Usually, a browser window with further instructions is opened. This page explains how to complete the registration on
|
||||
your headscale server and it also prints the registration key required to approve the node:
|
||||
|
||||
=== "Native"
|
||||
|
||||
```shell
|
||||
headscale nodes register --user <USER> --key <YOUR_MACHINE_KEY>
|
||||
headscale nodes register --user <USER> --key <REGISTRATION_KEY>
|
||||
```
|
||||
|
||||
=== "Container"
|
||||
|
||||
```shell
|
||||
docker exec -it headscale \
|
||||
headscale nodes register --user <USER> --key <YOUR_MACHINE_KEY>
|
||||
headscale nodes register --user <USER> --key <REGISTRATION_KEY>
|
||||
```
|
||||
|
||||
### Using a preauthkey
|
||||
### [Pre authenticated key](../ref/registration.md#pre-authenticated-key)
|
||||
|
||||
It is also possible to generate a preauthkey and register a node non-interactively. First, generate a preauthkey on the
|
||||
headscale instance. By default, the key is valid for one hour and can only be used once (see `headscale preauthkeys
|
||||
|
||||
6
flake.lock
generated
@@ -20,11 +20,11 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1760533177,
|
||||
"narHash": "sha256-OwM1sFustLHx+xmTymhucZuNhtq98fHIbfO8Swm5L8A=",
|
||||
"lastModified": 1770380644,
|
||||
"narHash": "sha256-P7dWMHRUWG5m4G+06jDyThXO7kwSk46C1kgjEWcybkE=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "35f590344ff791e6b1d6d6b8f3523467c9217caf",
|
||||
"rev": "ae67888ff7ef9dff69b3cf0cc0fbfbcd3a722abe",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
437
flake.nix
@@ -6,239 +6,232 @@
|
||||
flake-utils.url = "github:numtide/flake-utils";
|
||||
};
|
||||
|
||||
outputs = {
|
||||
self,
|
||||
nixpkgs,
|
||||
flake-utils,
|
||||
...
|
||||
}: let
|
||||
headscaleVersion = self.shortRev or self.dirtyShortRev;
|
||||
commitHash = self.rev or self.dirtyRev;
|
||||
in
|
||||
outputs =
|
||||
{ self
|
||||
, nixpkgs
|
||||
, flake-utils
|
||||
, ...
|
||||
}:
|
||||
let
|
||||
headscaleVersion = self.shortRev or self.dirtyShortRev;
|
||||
commitHash = self.rev or self.dirtyRev;
|
||||
in
|
||||
{
|
||||
overlay = _: prev: let
|
||||
pkgs = nixpkgs.legacyPackages.${prev.system};
|
||||
buildGo = pkgs.buildGo125Module;
|
||||
vendorHash = "sha256-VOi4PGZ8I+2MiwtzxpKc/4smsL5KcH/pHVkjJfAFPJ0=";
|
||||
in {
|
||||
headscale = buildGo {
|
||||
pname = "headscale";
|
||||
version = headscaleVersion;
|
||||
src = pkgs.lib.cleanSource self;
|
||||
|
||||
# Only run unit tests when testing a build
|
||||
checkFlags = ["-short"];
|
||||
|
||||
# When updating go.mod or go.sum, a new sha will need to be calculated,
|
||||
# update this if you have a mismatch after doing a change to those files.
|
||||
inherit vendorHash;
|
||||
|
||||
subPackages = ["cmd/headscale"];
|
||||
|
||||
ldflags = [
|
||||
"-s"
|
||||
"-w"
|
||||
"-X github.com/juanfont/headscale/hscontrol/types.Version=${headscaleVersion}"
|
||||
"-X github.com/juanfont/headscale/hscontrol/types.GitCommitHash=${commitHash}"
|
||||
];
|
||||
};
|
||||
|
||||
hi = buildGo {
|
||||
pname = "hi";
|
||||
version = headscaleVersion;
|
||||
src = pkgs.lib.cleanSource self;
|
||||
|
||||
checkFlags = ["-short"];
|
||||
inherit vendorHash;
|
||||
|
||||
subPackages = ["cmd/hi"];
|
||||
};
|
||||
|
||||
protoc-gen-grpc-gateway = buildGo rec {
|
||||
pname = "grpc-gateway";
|
||||
version = "2.24.0";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "grpc-ecosystem";
|
||||
repo = "grpc-gateway";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-lUEoqXJF1k4/il9bdDTinkUV5L869njZNYqObG/mHyA=";
|
||||
};
|
||||
|
||||
vendorHash = "sha256-Ttt7bPKU+TMKRg5550BS6fsPwYp0QJqcZ7NLrhttSdw=";
|
||||
|
||||
nativeBuildInputs = [pkgs.installShellFiles];
|
||||
|
||||
subPackages = ["protoc-gen-grpc-gateway" "protoc-gen-openapiv2"];
|
||||
};
|
||||
|
||||
protobuf-language-server = buildGo rec {
|
||||
pname = "protobuf-language-server";
|
||||
version = "2546944";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "lasorda";
|
||||
repo = "protobuf-language-server";
|
||||
rev = "${version}";
|
||||
sha256 = "sha256-Cbr3ktT86RnwUntOiDKRpNTClhdyrKLTQG2ZEd6fKDc=";
|
||||
};
|
||||
|
||||
vendorHash = "sha256-PfT90dhfzJZabzLTb1D69JCO+kOh2khrlpF5mCDeypk=";
|
||||
|
||||
subPackages = ["."];
|
||||
};
|
||||
|
||||
# Upstream does not override buildGoModule properly,
|
||||
# importing a specific module, so comment out for now.
|
||||
# golangci-lint = prev.golangci-lint.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
# golangci-lint-langserver = prev.golangci-lint.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
|
||||
# The package uses buildGo125Module, not the convention.
|
||||
# goreleaser = prev.goreleaser.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
|
||||
gotestsum = prev.gotestsum.override {
|
||||
buildGoModule = buildGo;
|
||||
};
|
||||
|
||||
gotests = prev.gotests.override {
|
||||
buildGoModule = buildGo;
|
||||
};
|
||||
|
||||
gofumpt = prev.gofumpt.override {
|
||||
buildGoModule = buildGo;
|
||||
};
|
||||
|
||||
# gopls = prev.gopls.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
# NixOS module
|
||||
nixosModules = rec {
|
||||
headscale = import ./nix/module.nix;
|
||||
default = headscale;
|
||||
};
|
||||
|
||||
overlays.default = _: prev:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${prev.stdenv.hostPlatform.system};
|
||||
buildGo = pkgs.buildGo125Module;
|
||||
vendorHash = "sha256-9BvphYDAxzwooyVokI3l+q1wRuRsWn/qM+NpWUgqJH0=";
|
||||
in
|
||||
{
|
||||
headscale = buildGo {
|
||||
pname = "headscale";
|
||||
version = headscaleVersion;
|
||||
src = pkgs.lib.cleanSource self;
|
||||
|
||||
# Only run unit tests when testing a build
|
||||
checkFlags = [ "-short" ];
|
||||
|
||||
# When updating go.mod or go.sum, a new sha will need to be calculated,
|
||||
# update this if you have a mismatch after doing a change to those files.
|
||||
inherit vendorHash;
|
||||
|
||||
subPackages = [ "cmd/headscale" ];
|
||||
|
||||
meta = {
|
||||
mainProgram = "headscale";
|
||||
};
|
||||
};
|
||||
|
||||
hi = buildGo {
|
||||
pname = "hi";
|
||||
version = headscaleVersion;
|
||||
src = pkgs.lib.cleanSource self;
|
||||
|
||||
checkFlags = [ "-short" ];
|
||||
inherit vendorHash;
|
||||
|
||||
subPackages = [ "cmd/hi" ];
|
||||
};
|
||||
|
||||
protoc-gen-grpc-gateway = buildGo rec {
|
||||
pname = "grpc-gateway";
|
||||
version = "2.27.7";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "grpc-ecosystem";
|
||||
repo = "grpc-gateway";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-6R0EhNnOBEISJddjkbVTcBvUuU5U3r9Hu2UPfAZDep4=";
|
||||
};
|
||||
|
||||
vendorHash = "sha256-SOAbRrzMf2rbKaG9PGSnPSLY/qZVgbHcNjOLmVonycY=";
|
||||
|
||||
nativeBuildInputs = [ pkgs.installShellFiles ];
|
||||
|
||||
subPackages = [ "protoc-gen-grpc-gateway" "protoc-gen-openapiv2" ];
|
||||
};
|
||||
|
||||
protobuf-language-server = buildGo rec {
|
||||
pname = "protobuf-language-server";
|
||||
version = "1cf777d";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "lasorda";
|
||||
repo = "protobuf-language-server";
|
||||
rev = "1cf777de4d35a6e493a689e3ca1a6183ce3206b6";
|
||||
sha256 = "sha256-9MkBQPxr/TDr/sNz/Sk7eoZwZwzdVbE5u6RugXXk5iY=";
|
||||
};
|
||||
|
||||
vendorHash = "sha256-4nTpKBe7ekJsfQf+P6edT/9Vp2SBYbKz1ITawD3bhkI=";
|
||||
|
||||
subPackages = [ "." ];
|
||||
};
|
||||
|
||||
# Upstream does not override buildGoModule properly,
|
||||
# importing a specific module, so comment out for now.
|
||||
# golangci-lint = prev.golangci-lint.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
# golangci-lint-langserver = prev.golangci-lint.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
|
||||
# The package uses buildGo125Module, not the convention.
|
||||
# goreleaser = prev.goreleaser.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
|
||||
gotestsum = prev.gotestsum.override {
|
||||
buildGoModule = buildGo;
|
||||
};
|
||||
|
||||
gotests = prev.gotests.override {
|
||||
buildGoModule = buildGo;
|
||||
};
|
||||
|
||||
gofumpt = prev.gofumpt.override {
|
||||
buildGoModule = buildGo;
|
||||
};
|
||||
|
||||
# gopls = prev.gopls.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
};
|
||||
}
|
||||
// flake-utils.lib.eachDefaultSystem
|
||||
(system: let
|
||||
pkgs = import nixpkgs {
|
||||
overlays = [self.overlay];
|
||||
inherit system;
|
||||
};
|
||||
buildDeps = with pkgs; [git go_1_25 gnumake];
|
||||
devDeps = with pkgs;
|
||||
buildDeps
|
||||
++ [
|
||||
golangci-lint
|
||||
golangci-lint-langserver
|
||||
golines
|
||||
nodePackages.prettier
|
||||
goreleaser
|
||||
nfpm
|
||||
gotestsum
|
||||
gotests
|
||||
gofumpt
|
||||
gopls
|
||||
ksh
|
||||
ko
|
||||
yq-go
|
||||
ripgrep
|
||||
postgresql
|
||||
|
||||
# 'dot' is needed for pprof graphs
|
||||
# go tool pprof -http=: <source>
|
||||
graphviz
|
||||
|
||||
# Protobuf dependencies
|
||||
protobuf
|
||||
protoc-gen-go
|
||||
protoc-gen-go-grpc
|
||||
protoc-gen-grpc-gateway
|
||||
buf
|
||||
clang-tools # clang-format
|
||||
protobuf-language-server
|
||||
|
||||
# Add hi to make it even easier to use ci runner.
|
||||
hi
|
||||
]
|
||||
++ lib.optional pkgs.stdenv.isLinux [traceroute];
|
||||
|
||||
# Add entry to build a docker image with headscale
|
||||
# caveat: only works on Linux
|
||||
#
|
||||
# Usage:
|
||||
# nix build .#headscale-docker
|
||||
# docker load < result
|
||||
headscale-docker = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "headscale";
|
||||
tag = headscaleVersion;
|
||||
contents = [pkgs.headscale];
|
||||
config.Entrypoint = [(pkgs.headscale + "/bin/headscale")];
|
||||
};
|
||||
in rec {
|
||||
# `nix develop`
|
||||
devShell = pkgs.mkShell {
|
||||
buildInputs =
|
||||
devDeps
|
||||
(system:
|
||||
let
|
||||
pkgs = import nixpkgs {
|
||||
overlays = [ self.overlays.default ];
|
||||
inherit system;
|
||||
};
|
||||
buildDeps = with pkgs; [ git go_1_25 gnumake ];
|
||||
devDeps = with pkgs;
|
||||
buildDeps
|
||||
++ [
|
||||
(pkgs.writeShellScriptBin
|
||||
"nix-vendor-sri"
|
||||
''
|
||||
set -eu
|
||||
golangci-lint
|
||||
golangci-lint-langserver
|
||||
golines
|
||||
nodePackages.prettier
|
||||
nixpkgs-fmt
|
||||
goreleaser
|
||||
nfpm
|
||||
gotestsum
|
||||
gotests
|
||||
gofumpt
|
||||
gopls
|
||||
ksh
|
||||
ko
|
||||
yq-go
|
||||
ripgrep
|
||||
postgresql
|
||||
prek
|
||||
|
||||
OUT=$(mktemp -d -t nar-hash-XXXXXX)
|
||||
rm -rf "$OUT"
|
||||
# 'dot' is needed for pprof graphs
|
||||
# go tool pprof -http=: <source>
|
||||
graphviz
|
||||
|
||||
go mod vendor -o "$OUT"
|
||||
go run tailscale.com/cmd/nardump --sri "$OUT"
|
||||
rm -rf "$OUT"
|
||||
'')
|
||||
# Protobuf dependencies
|
||||
protobuf
|
||||
protoc-gen-go
|
||||
protoc-gen-go-grpc
|
||||
protoc-gen-grpc-gateway
|
||||
buf
|
||||
clang-tools # clang-format
|
||||
protobuf-language-server
|
||||
]
|
||||
++ lib.optional pkgs.stdenv.isLinux [ traceroute ];
|
||||
|
||||
(pkgs.writeShellScriptBin
|
||||
"go-mod-update-all"
|
||||
''
|
||||
cat go.mod | ${pkgs.silver-searcher}/bin/ag "\t" | ${pkgs.silver-searcher}/bin/ag -v indirect | ${pkgs.gawk}/bin/awk '{print $1}' | ${pkgs.findutils}/bin/xargs go get -u
|
||||
go mod tidy
|
||||
'')
|
||||
];
|
||||
# Add entry to build a docker image with headscale
|
||||
# caveat: only works on Linux
|
||||
#
|
||||
# Usage:
|
||||
# nix build .#headscale-docker
|
||||
# docker load < result
|
||||
headscale-docker = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "headscale";
|
||||
tag = headscaleVersion;
|
||||
contents = [ pkgs.headscale ];
|
||||
config.Entrypoint = [ (pkgs.headscale + "/bin/headscale") ];
|
||||
};
|
||||
in
|
||||
{
|
||||
# `nix develop`
|
||||
devShells.default = pkgs.mkShell {
|
||||
buildInputs =
|
||||
devDeps
|
||||
++ [
|
||||
(pkgs.writeShellScriptBin
|
||||
"nix-vendor-sri"
|
||||
''
|
||||
set -eu
|
||||
|
||||
shellHook = ''
|
||||
export PATH="$PWD/result/bin:$PATH"
|
||||
'';
|
||||
};
|
||||
OUT=$(mktemp -d -t nar-hash-XXXXXX)
|
||||
rm -rf "$OUT"
|
||||
|
||||
# `nix build`
|
||||
packages = with pkgs; {
|
||||
inherit headscale;
|
||||
inherit headscale-docker;
|
||||
};
|
||||
defaultPackage = pkgs.headscale;
|
||||
go mod vendor -o "$OUT"
|
||||
go run tailscale.com/cmd/nardump --sri "$OUT"
|
||||
rm -rf "$OUT"
|
||||
'')
|
||||
|
||||
# `nix run`
|
||||
apps.headscale = flake-utils.lib.mkApp {
|
||||
drv = packages.headscale;
|
||||
};
|
||||
apps.default = apps.headscale;
|
||||
|
||||
checks = {
|
||||
format =
|
||||
pkgs.runCommand "check-format"
|
||||
{
|
||||
buildInputs = with pkgs; [
|
||||
gnumake
|
||||
nixpkgs-fmt
|
||||
golangci-lint
|
||||
nodePackages.prettier
|
||||
golines
|
||||
clang-tools
|
||||
(pkgs.writeShellScriptBin
|
||||
"go-mod-update-all"
|
||||
''
|
||||
cat go.mod | ${pkgs.silver-searcher}/bin/ag "\t" | ${pkgs.silver-searcher}/bin/ag -v indirect | ${pkgs.gawk}/bin/awk '{print $1}' | ${pkgs.findutils}/bin/xargs go get -u
|
||||
go mod tidy
|
||||
'')
|
||||
];
|
||||
} ''
|
||||
${pkgs.nixpkgs-fmt}/bin/nixpkgs-fmt ${./.}
|
||||
${pkgs.golangci-lint}/bin/golangci-lint run --fix --timeout 10m
|
||||
${pkgs.nodePackages.prettier}/bin/prettier --write '**/**.{ts,js,md,yaml,yml,sass,css,scss,html}'
|
||||
${pkgs.golines}/bin/golines --max-len=88 --base-formatter=gofumpt -w ${./.}
|
||||
${pkgs.clang-tools}/bin/clang-format -i ${./.}
|
||||
|
||||
shellHook = ''
|
||||
export PATH="$PWD/result/bin:$PATH"
|
||||
export CGO_ENABLED=0
|
||||
'';
|
||||
};
|
||||
});
|
||||
};
|
||||
|
||||
# `nix build`
|
||||
packages = with pkgs; {
|
||||
inherit headscale;
|
||||
inherit headscale-docker;
|
||||
default = headscale;
|
||||
};
|
||||
|
||||
# `nix run`
|
||||
apps.headscale = flake-utils.lib.mkApp {
|
||||
drv = pkgs.headscale;
|
||||
};
|
||||
apps.default = flake-utils.lib.mkApp {
|
||||
drv = pkgs.headscale;
|
||||
};
|
||||
|
||||
checks = {
|
||||
headscale = pkgs.testers.nixosTest (import ./nix/tests/headscale.nix);
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.10
|
||||
// protoc-gen-go v1.36.11
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/apikey.proto
|
||||
|
||||
@@ -189,6 +189,7 @@ func (x *CreateApiKeyResponse) GetApiKey() string {
|
||||
type ExpireApiKeyRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
|
||||
Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
@@ -230,6 +231,13 @@ func (x *ExpireApiKeyRequest) GetPrefix() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ExpireApiKeyRequest) GetId() uint64 {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type ExpireApiKeyResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
@@ -349,6 +357,7 @@ func (x *ListApiKeysResponse) GetApiKeys() []*ApiKey {
|
||||
type DeleteApiKeyRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
|
||||
Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
@@ -390,6 +399,13 @@ func (x *DeleteApiKeyRequest) GetPrefix() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *DeleteApiKeyRequest) GetId() uint64 {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type DeleteApiKeyResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
@@ -445,15 +461,17 @@ const file_headscale_v1_apikey_proto_rawDesc = "" +
|
||||
"expiration\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\n" +
|
||||
"expiration\"/\n" +
|
||||
"\x14CreateApiKeyResponse\x12\x17\n" +
|
||||
"\aapi_key\x18\x01 \x01(\tR\x06apiKey\"-\n" +
|
||||
"\aapi_key\x18\x01 \x01(\tR\x06apiKey\"=\n" +
|
||||
"\x13ExpireApiKeyRequest\x12\x16\n" +
|
||||
"\x06prefix\x18\x01 \x01(\tR\x06prefix\"\x16\n" +
|
||||
"\x06prefix\x18\x01 \x01(\tR\x06prefix\x12\x0e\n" +
|
||||
"\x02id\x18\x02 \x01(\x04R\x02id\"\x16\n" +
|
||||
"\x14ExpireApiKeyResponse\"\x14\n" +
|
||||
"\x12ListApiKeysRequest\"F\n" +
|
||||
"\x13ListApiKeysResponse\x12/\n" +
|
||||
"\bapi_keys\x18\x01 \x03(\v2\x14.headscale.v1.ApiKeyR\aapiKeys\"-\n" +
|
||||
"\bapi_keys\x18\x01 \x03(\v2\x14.headscale.v1.ApiKeyR\aapiKeys\"=\n" +
|
||||
"\x13DeleteApiKeyRequest\x12\x16\n" +
|
||||
"\x06prefix\x18\x01 \x01(\tR\x06prefix\"\x16\n" +
|
||||
"\x06prefix\x18\x01 \x01(\tR\x06prefix\x12\x0e\n" +
|
||||
"\x02id\x18\x02 \x01(\x04R\x02id\"\x16\n" +
|
||||
"\x14DeleteApiKeyResponseB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
|
||||
var (
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.10
|
||||
// protoc-gen-go v1.36.11
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/device.proto
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.10
|
||||
// protoc-gen-go v1.36.11
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/headscale.proto
|
||||
|
||||
@@ -109,7 +109,7 @@ const file_headscale_v1_headscale_proto_rawDesc = "" +
|
||||
"\x1cheadscale/v1/headscale.proto\x12\fheadscale.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17headscale/v1/user.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/node.proto\x1a\x19headscale/v1/apikey.proto\x1a\x19headscale/v1/policy.proto\"\x0f\n" +
|
||||
"\rHealthRequest\"E\n" +
|
||||
"\x0eHealthResponse\x123\n" +
|
||||
"\x15database_connectivity\x18\x01 \x01(\bR\x14databaseConnectivity2\x80\x17\n" +
|
||||
"\x15database_connectivity\x18\x01 \x01(\bR\x14databaseConnectivity2\x8c\x17\n" +
|
||||
"\x10HeadscaleService\x12h\n" +
|
||||
"\n" +
|
||||
"CreateUser\x12\x1f.headscale.v1.CreateUserRequest\x1a .headscale.v1.CreateUserResponse\"\x17\x82\xd3\xe4\x93\x02\x11:\x01*\"\f/api/v1/user\x12\x80\x01\n" +
|
||||
@@ -119,7 +119,8 @@ const file_headscale_v1_headscale_proto_rawDesc = "" +
|
||||
"DeleteUser\x12\x1f.headscale.v1.DeleteUserRequest\x1a .headscale.v1.DeleteUserResponse\"\x19\x82\xd3\xe4\x93\x02\x13*\x11/api/v1/user/{id}\x12b\n" +
|
||||
"\tListUsers\x12\x1e.headscale.v1.ListUsersRequest\x1a\x1f.headscale.v1.ListUsersResponse\"\x14\x82\xd3\xe4\x93\x02\x0e\x12\f/api/v1/user\x12\x80\x01\n" +
|
||||
"\x10CreatePreAuthKey\x12%.headscale.v1.CreatePreAuthKeyRequest\x1a&.headscale.v1.CreatePreAuthKeyResponse\"\x1d\x82\xd3\xe4\x93\x02\x17:\x01*\"\x12/api/v1/preauthkey\x12\x87\x01\n" +
|
||||
"\x10ExpirePreAuthKey\x12%.headscale.v1.ExpirePreAuthKeyRequest\x1a&.headscale.v1.ExpirePreAuthKeyResponse\"$\x82\xd3\xe4\x93\x02\x1e:\x01*\"\x19/api/v1/preauthkey/expire\x12z\n" +
|
||||
"\x10ExpirePreAuthKey\x12%.headscale.v1.ExpirePreAuthKeyRequest\x1a&.headscale.v1.ExpirePreAuthKeyResponse\"$\x82\xd3\xe4\x93\x02\x1e:\x01*\"\x19/api/v1/preauthkey/expire\x12}\n" +
|
||||
"\x10DeletePreAuthKey\x12%.headscale.v1.DeletePreAuthKeyRequest\x1a&.headscale.v1.DeletePreAuthKeyResponse\"\x1a\x82\xd3\xe4\x93\x02\x14*\x12/api/v1/preauthkey\x12z\n" +
|
||||
"\x0fListPreAuthKeys\x12$.headscale.v1.ListPreAuthKeysRequest\x1a%.headscale.v1.ListPreAuthKeysResponse\"\x1a\x82\xd3\xe4\x93\x02\x14\x12\x12/api/v1/preauthkey\x12}\n" +
|
||||
"\x0fDebugCreateNode\x12$.headscale.v1.DebugCreateNodeRequest\x1a%.headscale.v1.DebugCreateNodeResponse\"\x1d\x82\xd3\xe4\x93\x02\x17:\x01*\"\x12/api/v1/debug/node\x12f\n" +
|
||||
"\aGetNode\x12\x1c.headscale.v1.GetNodeRequest\x1a\x1d.headscale.v1.GetNodeResponse\"\x1e\x82\xd3\xe4\x93\x02\x18\x12\x16/api/v1/node/{node_id}\x12n\n" +
|
||||
@@ -132,8 +133,7 @@ const file_headscale_v1_headscale_proto_rawDesc = "" +
|
||||
"ExpireNode\x12\x1f.headscale.v1.ExpireNodeRequest\x1a .headscale.v1.ExpireNodeResponse\"%\x82\xd3\xe4\x93\x02\x1f\"\x1d/api/v1/node/{node_id}/expire\x12\x81\x01\n" +
|
||||
"\n" +
|
||||
"RenameNode\x12\x1f.headscale.v1.RenameNodeRequest\x1a .headscale.v1.RenameNodeResponse\"0\x82\xd3\xe4\x93\x02*\"(/api/v1/node/{node_id}/rename/{new_name}\x12b\n" +
|
||||
"\tListNodes\x12\x1e.headscale.v1.ListNodesRequest\x1a\x1f.headscale.v1.ListNodesResponse\"\x14\x82\xd3\xe4\x93\x02\x0e\x12\f/api/v1/node\x12q\n" +
|
||||
"\bMoveNode\x12\x1d.headscale.v1.MoveNodeRequest\x1a\x1e.headscale.v1.MoveNodeResponse\"&\x82\xd3\xe4\x93\x02 :\x01*\"\x1b/api/v1/node/{node_id}/user\x12\x80\x01\n" +
|
||||
"\tListNodes\x12\x1e.headscale.v1.ListNodesRequest\x1a\x1f.headscale.v1.ListNodesResponse\"\x14\x82\xd3\xe4\x93\x02\x0e\x12\f/api/v1/node\x12\x80\x01\n" +
|
||||
"\x0fBackfillNodeIPs\x12$.headscale.v1.BackfillNodeIPsRequest\x1a%.headscale.v1.BackfillNodeIPsResponse\" \x82\xd3\xe4\x93\x02\x1a\"\x18/api/v1/node/backfillips\x12p\n" +
|
||||
"\fCreateApiKey\x12!.headscale.v1.CreateApiKeyRequest\x1a\".headscale.v1.CreateApiKeyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\"\x0e/api/v1/apikey\x12w\n" +
|
||||
"\fExpireApiKey\x12!.headscale.v1.ExpireApiKeyRequest\x1a\".headscale.v1.ExpireApiKeyResponse\" \x82\xd3\xe4\x93\x02\x1a:\x01*\"\x15/api/v1/apikey/expire\x12j\n" +
|
||||
@@ -165,17 +165,17 @@ var file_headscale_v1_headscale_proto_goTypes = []any{
|
||||
(*ListUsersRequest)(nil), // 5: headscale.v1.ListUsersRequest
|
||||
(*CreatePreAuthKeyRequest)(nil), // 6: headscale.v1.CreatePreAuthKeyRequest
|
||||
(*ExpirePreAuthKeyRequest)(nil), // 7: headscale.v1.ExpirePreAuthKeyRequest
|
||||
(*ListPreAuthKeysRequest)(nil), // 8: headscale.v1.ListPreAuthKeysRequest
|
||||
(*DebugCreateNodeRequest)(nil), // 9: headscale.v1.DebugCreateNodeRequest
|
||||
(*GetNodeRequest)(nil), // 10: headscale.v1.GetNodeRequest
|
||||
(*SetTagsRequest)(nil), // 11: headscale.v1.SetTagsRequest
|
||||
(*SetApprovedRoutesRequest)(nil), // 12: headscale.v1.SetApprovedRoutesRequest
|
||||
(*RegisterNodeRequest)(nil), // 13: headscale.v1.RegisterNodeRequest
|
||||
(*DeleteNodeRequest)(nil), // 14: headscale.v1.DeleteNodeRequest
|
||||
(*ExpireNodeRequest)(nil), // 15: headscale.v1.ExpireNodeRequest
|
||||
(*RenameNodeRequest)(nil), // 16: headscale.v1.RenameNodeRequest
|
||||
(*ListNodesRequest)(nil), // 17: headscale.v1.ListNodesRequest
|
||||
(*MoveNodeRequest)(nil), // 18: headscale.v1.MoveNodeRequest
|
||||
(*DeletePreAuthKeyRequest)(nil), // 8: headscale.v1.DeletePreAuthKeyRequest
|
||||
(*ListPreAuthKeysRequest)(nil), // 9: headscale.v1.ListPreAuthKeysRequest
|
||||
(*DebugCreateNodeRequest)(nil), // 10: headscale.v1.DebugCreateNodeRequest
|
||||
(*GetNodeRequest)(nil), // 11: headscale.v1.GetNodeRequest
|
||||
(*SetTagsRequest)(nil), // 12: headscale.v1.SetTagsRequest
|
||||
(*SetApprovedRoutesRequest)(nil), // 13: headscale.v1.SetApprovedRoutesRequest
|
||||
(*RegisterNodeRequest)(nil), // 14: headscale.v1.RegisterNodeRequest
|
||||
(*DeleteNodeRequest)(nil), // 15: headscale.v1.DeleteNodeRequest
|
||||
(*ExpireNodeRequest)(nil), // 16: headscale.v1.ExpireNodeRequest
|
||||
(*RenameNodeRequest)(nil), // 17: headscale.v1.RenameNodeRequest
|
||||
(*ListNodesRequest)(nil), // 18: headscale.v1.ListNodesRequest
|
||||
(*BackfillNodeIPsRequest)(nil), // 19: headscale.v1.BackfillNodeIPsRequest
|
||||
(*CreateApiKeyRequest)(nil), // 20: headscale.v1.CreateApiKeyRequest
|
||||
(*ExpireApiKeyRequest)(nil), // 21: headscale.v1.ExpireApiKeyRequest
|
||||
@@ -189,17 +189,17 @@ var file_headscale_v1_headscale_proto_goTypes = []any{
|
||||
(*ListUsersResponse)(nil), // 29: headscale.v1.ListUsersResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 30: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 31: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 32: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateNodeResponse)(nil), // 33: headscale.v1.DebugCreateNodeResponse
|
||||
(*GetNodeResponse)(nil), // 34: headscale.v1.GetNodeResponse
|
||||
(*SetTagsResponse)(nil), // 35: headscale.v1.SetTagsResponse
|
||||
(*SetApprovedRoutesResponse)(nil), // 36: headscale.v1.SetApprovedRoutesResponse
|
||||
(*RegisterNodeResponse)(nil), // 37: headscale.v1.RegisterNodeResponse
|
||||
(*DeleteNodeResponse)(nil), // 38: headscale.v1.DeleteNodeResponse
|
||||
(*ExpireNodeResponse)(nil), // 39: headscale.v1.ExpireNodeResponse
|
||||
(*RenameNodeResponse)(nil), // 40: headscale.v1.RenameNodeResponse
|
||||
(*ListNodesResponse)(nil), // 41: headscale.v1.ListNodesResponse
|
||||
(*MoveNodeResponse)(nil), // 42: headscale.v1.MoveNodeResponse
|
||||
(*DeletePreAuthKeyResponse)(nil), // 32: headscale.v1.DeletePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 33: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateNodeResponse)(nil), // 34: headscale.v1.DebugCreateNodeResponse
|
||||
(*GetNodeResponse)(nil), // 35: headscale.v1.GetNodeResponse
|
||||
(*SetTagsResponse)(nil), // 36: headscale.v1.SetTagsResponse
|
||||
(*SetApprovedRoutesResponse)(nil), // 37: headscale.v1.SetApprovedRoutesResponse
|
||||
(*RegisterNodeResponse)(nil), // 38: headscale.v1.RegisterNodeResponse
|
||||
(*DeleteNodeResponse)(nil), // 39: headscale.v1.DeleteNodeResponse
|
||||
(*ExpireNodeResponse)(nil), // 40: headscale.v1.ExpireNodeResponse
|
||||
(*RenameNodeResponse)(nil), // 41: headscale.v1.RenameNodeResponse
|
||||
(*ListNodesResponse)(nil), // 42: headscale.v1.ListNodesResponse
|
||||
(*BackfillNodeIPsResponse)(nil), // 43: headscale.v1.BackfillNodeIPsResponse
|
||||
(*CreateApiKeyResponse)(nil), // 44: headscale.v1.CreateApiKeyResponse
|
||||
(*ExpireApiKeyResponse)(nil), // 45: headscale.v1.ExpireApiKeyResponse
|
||||
@@ -215,17 +215,17 @@ var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
||||
5, // 3: headscale.v1.HeadscaleService.ListUsers:input_type -> headscale.v1.ListUsersRequest
|
||||
6, // 4: headscale.v1.HeadscaleService.CreatePreAuthKey:input_type -> headscale.v1.CreatePreAuthKeyRequest
|
||||
7, // 5: headscale.v1.HeadscaleService.ExpirePreAuthKey:input_type -> headscale.v1.ExpirePreAuthKeyRequest
|
||||
8, // 6: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest
|
||||
9, // 7: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest
|
||||
10, // 8: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest
|
||||
11, // 9: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
|
||||
12, // 10: headscale.v1.HeadscaleService.SetApprovedRoutes:input_type -> headscale.v1.SetApprovedRoutesRequest
|
||||
13, // 11: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
|
||||
14, // 12: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
|
||||
15, // 13: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
|
||||
16, // 14: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
|
||||
17, // 15: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
|
||||
18, // 16: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
|
||||
8, // 6: headscale.v1.HeadscaleService.DeletePreAuthKey:input_type -> headscale.v1.DeletePreAuthKeyRequest
|
||||
9, // 7: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest
|
||||
10, // 8: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest
|
||||
11, // 9: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest
|
||||
12, // 10: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
|
||||
13, // 11: headscale.v1.HeadscaleService.SetApprovedRoutes:input_type -> headscale.v1.SetApprovedRoutesRequest
|
||||
14, // 12: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
|
||||
15, // 13: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
|
||||
16, // 14: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
|
||||
17, // 15: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
|
||||
18, // 16: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
|
||||
19, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
|
||||
20, // 18: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
|
||||
21, // 19: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
|
||||
@@ -240,17 +240,17 @@ var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
||||
29, // 28: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
|
||||
30, // 29: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
31, // 30: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
32, // 31: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
33, // 32: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
|
||||
34, // 33: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
|
||||
35, // 34: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
|
||||
36, // 35: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
|
||||
37, // 36: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
|
||||
38, // 37: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
|
||||
39, // 38: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
|
||||
40, // 39: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
|
||||
41, // 40: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
|
||||
42, // 41: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
|
||||
32, // 31: headscale.v1.HeadscaleService.DeletePreAuthKey:output_type -> headscale.v1.DeletePreAuthKeyResponse
|
||||
33, // 32: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
34, // 33: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
|
||||
35, // 34: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
|
||||
36, // 35: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
|
||||
37, // 36: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
|
||||
38, // 37: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
|
||||
39, // 38: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
|
||||
40, // 39: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
|
||||
41, // 40: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
|
||||
42, // 41: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
|
||||
43, // 42: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
|
||||
44, // 43: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
|
||||
45, // 44: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
|
||||
|
||||
@@ -43,6 +43,9 @@ func request_HeadscaleService_CreateUser_0(ctx context.Context, marshaler runtim
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if req.Body != nil {
_, _ = io.Copy(io.Discard, req.Body)
}
msg, err := client.CreateUser(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
@@ -65,6 +68,9 @@ func request_HeadscaleService_RenameUser_0(ctx context.Context, marshaler runtim
metadata runtime.ServerMetadata
err error
)
if req.Body != nil {
_, _ = io.Copy(io.Discard, req.Body)
}
val, ok := pathParams["old_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "old_id")
@@ -117,6 +123,9 @@ func request_HeadscaleService_DeleteUser_0(ctx context.Context, marshaler runtim
metadata runtime.ServerMetadata
err error
)
if req.Body != nil {
_, _ = io.Copy(io.Discard, req.Body)
}
val, ok := pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
@@ -154,6 +163,9 @@ func request_HeadscaleService_ListUsers_0(ctx context.Context, marshaler runtime
protoReq ListUsersRequest
metadata runtime.ServerMetadata
)
if req.Body != nil {
_, _ = io.Copy(io.Discard, req.Body)
}
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
@@ -187,6 +199,9 @@ func request_HeadscaleService_CreatePreAuthKey_0(ctx context.Context, marshaler
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if req.Body != nil {
_, _ = io.Copy(io.Discard, req.Body)
}
msg, err := client.CreatePreAuthKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
@@ -211,6 +226,9 @@ func request_HeadscaleService_ExpirePreAuthKey_0(ctx context.Context, marshaler
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if req.Body != nil {
_, _ = io.Copy(io.Discard, req.Body)
}
msg, err := client.ExpirePreAuthKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
@@ -227,18 +245,48 @@ func local_request_HeadscaleService_ExpirePreAuthKey_0(ctx context.Context, mars
return msg, metadata, err
}

var filter_HeadscaleService_ListPreAuthKeys_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
var filter_HeadscaleService_DeletePreAuthKey_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}

func request_HeadscaleService_DeletePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DeletePreAuthKeyRequest
metadata runtime.ServerMetadata
)
if req.Body != nil {
_, _ = io.Copy(io.Discard, req.Body)
}
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_DeletePreAuthKey_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.DeletePreAuthKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}

func local_request_HeadscaleService_DeletePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DeletePreAuthKeyRequest
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_DeletePreAuthKey_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.DeletePreAuthKey(ctx, &protoReq)
return msg, metadata, err
}

func request_HeadscaleService_ListPreAuthKeys_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ListPreAuthKeysRequest
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ListPreAuthKeys_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
if req.Body != nil {
_, _ = io.Copy(io.Discard, req.Body)
}
msg, err := client.ListPreAuthKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
@@ -249,12 +297,6 @@ func local_request_HeadscaleService_ListPreAuthKeys_0(ctx context.Context, marsh
protoReq ListPreAuthKeysRequest
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ListPreAuthKeys_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ListPreAuthKeys(ctx, &protoReq)
return msg, metadata, err
}
@@ -267,6 +309,9 @@ func request_HeadscaleService_DebugCreateNode_0(ctx context.Context, marshaler r
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.DebugCreateNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -289,6 +334,9 @@ func request_HeadscaleService_GetNode_0(ctx context.Context, marshaler runtime.M
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
@@ -328,6 +376,9 @@ func request_HeadscaleService_SetTags_0(ctx context.Context, marshaler runtime.M
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
@@ -370,6 +421,9 @@ func request_HeadscaleService_SetApprovedRoutes_0(ctx context.Context, marshaler
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
@@ -410,6 +464,9 @@ func request_HeadscaleService_RegisterNode_0(ctx context.Context, marshaler runt
|
||||
protoReq RegisterNodeRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
@@ -441,6 +498,9 @@ func request_HeadscaleService_DeleteNode_0(ctx context.Context, marshaler runtim
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
@@ -479,6 +539,9 @@ func request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler runtim
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
@@ -527,6 +590,9 @@ func request_HeadscaleService_RenameNode_0(ctx context.Context, marshaler runtim
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
@@ -580,6 +646,9 @@ func request_HeadscaleService_ListNodes_0(ctx context.Context, marshaler runtime
|
||||
protoReq ListNodesRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
@@ -605,48 +674,6 @@ func local_request_HeadscaleService_ListNodes_0(ctx context.Context, marshaler r
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_MoveNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq MoveNodeRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
}
|
||||
protoReq.NodeId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
|
||||
}
|
||||
msg, err := client.MoveNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_MoveNode_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq MoveNodeRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
}
|
||||
protoReq.NodeId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
|
||||
}
|
||||
msg, err := server.MoveNode(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
var filter_HeadscaleService_BackfillNodeIPs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
|
||||
|
||||
func request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
@@ -654,6 +681,9 @@ func request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marshaler r
|
||||
protoReq BackfillNodeIPsRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
@@ -687,6 +717,9 @@ func request_HeadscaleService_CreateApiKey_0(ctx context.Context, marshaler runt
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.CreateApiKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -711,6 +744,9 @@ func request_HeadscaleService_ExpireApiKey_0(ctx context.Context, marshaler runt
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.ExpireApiKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -732,6 +768,9 @@ func request_HeadscaleService_ListApiKeys_0(ctx context.Context, marshaler runti
|
||||
protoReq ListApiKeysRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.ListApiKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -745,12 +784,17 @@ func local_request_HeadscaleService_ListApiKeys_0(ctx context.Context, marshaler
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
var filter_HeadscaleService_DeleteApiKey_0 = &utilities.DoubleArray{Encoding: map[string]int{"prefix": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
|
||||
|
||||
func request_HeadscaleService_DeleteApiKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq DeleteApiKeyRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["prefix"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "prefix")
|
||||
@@ -759,6 +803,12 @@ func request_HeadscaleService_DeleteApiKey_0(ctx context.Context, marshaler runt
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "prefix", err)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_DeleteApiKey_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := client.DeleteApiKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -777,6 +827,12 @@ func local_request_HeadscaleService_DeleteApiKey_0(ctx context.Context, marshale
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "prefix", err)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_DeleteApiKey_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := server.DeleteApiKey(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -786,6 +842,9 @@ func request_HeadscaleService_GetPolicy_0(ctx context.Context, marshaler runtime
|
||||
protoReq GetPolicyRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.GetPolicy(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -807,6 +866,9 @@ func request_HeadscaleService_SetPolicy_0(ctx context.Context, marshaler runtime
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.SetPolicy(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -828,6 +890,9 @@ func request_HeadscaleService_Health_0(ctx context.Context, marshaler runtime.Ma
|
||||
protoReq HealthRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.Health(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -967,6 +1032,26 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_ExpirePreAuthKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodDelete, pattern_HeadscaleService_DeletePreAuthKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DeletePreAuthKey", runtime.WithHTTPPathPattern("/api/v1/preauthkey"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_DeletePreAuthKey_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_DeletePreAuthKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_HeadscaleService_ListPreAuthKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -1167,26 +1252,6 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_ListNodes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_MoveNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/MoveNode", runtime.WithHTTPPathPattern("/api/v1/node/{node_id}/user"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_MoveNode_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_MoveNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_BackfillNodeIPs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -1489,6 +1554,23 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_ExpirePreAuthKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodDelete, pattern_HeadscaleService_DeletePreAuthKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DeletePreAuthKey", runtime.WithHTTPPathPattern("/api/v1/preauthkey"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_DeletePreAuthKey_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_DeletePreAuthKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_HeadscaleService_ListPreAuthKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -1659,23 +1741,6 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_ListNodes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_MoveNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/MoveNode", runtime.WithHTTPPathPattern("/api/v1/node/{node_id}/user"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_MoveNode_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_MoveNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodPost, pattern_HeadscaleService_BackfillNodeIPs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -1822,6 +1887,7 @@ var (
pattern_HeadscaleService_ListUsers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "user"}, ""))
pattern_HeadscaleService_CreatePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
pattern_HeadscaleService_ExpirePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "preauthkey", "expire"}, ""))
pattern_HeadscaleService_DeletePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
pattern_HeadscaleService_ListPreAuthKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
pattern_HeadscaleService_DebugCreateNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "debug", "node"}, ""))
pattern_HeadscaleService_GetNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "node", "node_id"}, ""))
@@ -1832,7 +1898,6 @@ var (
pattern_HeadscaleService_ExpireNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "expire"}, ""))
pattern_HeadscaleService_RenameNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "node", "node_id", "rename", "new_name"}, ""))
pattern_HeadscaleService_ListNodes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "node"}, ""))
pattern_HeadscaleService_MoveNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "user"}, ""))
pattern_HeadscaleService_BackfillNodeIPs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "backfillips"}, ""))
pattern_HeadscaleService_CreateApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, ""))
pattern_HeadscaleService_ExpireApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "apikey", "expire"}, ""))
@@ -1850,6 +1915,7 @@ var (
forward_HeadscaleService_ListUsers_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_CreatePreAuthKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ExpirePreAuthKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DeletePreAuthKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListPreAuthKeys_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DebugCreateNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetNode_0 = runtime.ForwardResponseMessage
@@ -1860,7 +1926,6 @@ var (
forward_HeadscaleService_ExpireNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_RenameNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListNodes_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_MoveNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_BackfillNodeIPs_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_CreateApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ExpireApiKey_0 = runtime.ForwardResponseMessage

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc-gen-go-grpc v1.6.0
// - protoc (unknown)
// source: headscale/v1/headscale.proto
@@ -25,6 +25,7 @@ const (
|
||||
HeadscaleService_ListUsers_FullMethodName = "/headscale.v1.HeadscaleService/ListUsers"
|
||||
HeadscaleService_CreatePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/CreatePreAuthKey"
|
||||
HeadscaleService_ExpirePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpirePreAuthKey"
|
||||
HeadscaleService_DeletePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/DeletePreAuthKey"
|
||||
HeadscaleService_ListPreAuthKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListPreAuthKeys"
|
||||
HeadscaleService_DebugCreateNode_FullMethodName = "/headscale.v1.HeadscaleService/DebugCreateNode"
|
||||
HeadscaleService_GetNode_FullMethodName = "/headscale.v1.HeadscaleService/GetNode"
|
||||
@@ -35,7 +36,6 @@ const (
|
||||
HeadscaleService_ExpireNode_FullMethodName = "/headscale.v1.HeadscaleService/ExpireNode"
|
||||
HeadscaleService_RenameNode_FullMethodName = "/headscale.v1.HeadscaleService/RenameNode"
|
||||
HeadscaleService_ListNodes_FullMethodName = "/headscale.v1.HeadscaleService/ListNodes"
|
||||
HeadscaleService_MoveNode_FullMethodName = "/headscale.v1.HeadscaleService/MoveNode"
|
||||
HeadscaleService_BackfillNodeIPs_FullMethodName = "/headscale.v1.HeadscaleService/BackfillNodeIPs"
|
||||
HeadscaleService_CreateApiKey_FullMethodName = "/headscale.v1.HeadscaleService/CreateApiKey"
|
||||
HeadscaleService_ExpireApiKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpireApiKey"
|
||||
@@ -58,6 +58,7 @@ type HeadscaleServiceClient interface {
|
||||
// --- PreAuthKeys start ---
|
||||
CreatePreAuthKey(ctx context.Context, in *CreatePreAuthKeyRequest, opts ...grpc.CallOption) (*CreatePreAuthKeyResponse, error)
|
||||
ExpirePreAuthKey(ctx context.Context, in *ExpirePreAuthKeyRequest, opts ...grpc.CallOption) (*ExpirePreAuthKeyResponse, error)
|
||||
DeletePreAuthKey(ctx context.Context, in *DeletePreAuthKeyRequest, opts ...grpc.CallOption) (*DeletePreAuthKeyResponse, error)
|
||||
ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error)
|
||||
// --- Node start ---
|
||||
DebugCreateNode(ctx context.Context, in *DebugCreateNodeRequest, opts ...grpc.CallOption) (*DebugCreateNodeResponse, error)
|
||||
@@ -69,7 +70,6 @@ type HeadscaleServiceClient interface {
|
||||
ExpireNode(ctx context.Context, in *ExpireNodeRequest, opts ...grpc.CallOption) (*ExpireNodeResponse, error)
|
||||
RenameNode(ctx context.Context, in *RenameNodeRequest, opts ...grpc.CallOption) (*RenameNodeResponse, error)
|
||||
ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error)
|
||||
MoveNode(ctx context.Context, in *MoveNodeRequest, opts ...grpc.CallOption) (*MoveNodeResponse, error)
|
||||
BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error)
|
||||
// --- ApiKeys start ---
|
||||
CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error)
|
||||
@@ -151,6 +151,16 @@ func (c *headscaleServiceClient) ExpirePreAuthKey(ctx context.Context, in *Expir
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeletePreAuthKey(ctx context.Context, in *DeletePreAuthKeyRequest, opts ...grpc.CallOption) (*DeletePreAuthKeyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(DeletePreAuthKeyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeletePreAuthKey_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ListPreAuthKeysResponse)
|
||||
@@ -251,16 +261,6 @@ func (c *headscaleServiceClient) ListNodes(ctx context.Context, in *ListNodesReq
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) MoveNode(ctx context.Context, in *MoveNodeRequest, opts ...grpc.CallOption) (*MoveNodeResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(MoveNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_MoveNode_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(BackfillNodeIPsResponse)
|
||||
@@ -353,6 +353,7 @@ type HeadscaleServiceServer interface {
|
||||
// --- PreAuthKeys start ---
|
||||
CreatePreAuthKey(context.Context, *CreatePreAuthKeyRequest) (*CreatePreAuthKeyResponse, error)
|
||||
ExpirePreAuthKey(context.Context, *ExpirePreAuthKeyRequest) (*ExpirePreAuthKeyResponse, error)
|
||||
DeletePreAuthKey(context.Context, *DeletePreAuthKeyRequest) (*DeletePreAuthKeyResponse, error)
|
||||
ListPreAuthKeys(context.Context, *ListPreAuthKeysRequest) (*ListPreAuthKeysResponse, error)
|
||||
// --- Node start ---
|
||||
DebugCreateNode(context.Context, *DebugCreateNodeRequest) (*DebugCreateNodeResponse, error)
|
||||
@@ -364,7 +365,6 @@ type HeadscaleServiceServer interface {
|
||||
ExpireNode(context.Context, *ExpireNodeRequest) (*ExpireNodeResponse, error)
|
||||
RenameNode(context.Context, *RenameNodeRequest) (*RenameNodeResponse, error)
|
||||
ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error)
|
||||
MoveNode(context.Context, *MoveNodeRequest) (*MoveNodeResponse, error)
|
||||
BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error)
|
||||
// --- ApiKeys start ---
|
||||
CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error)
|
||||
@@ -387,79 +387,79 @@ type HeadscaleServiceServer interface {
|
||||
type UnimplementedHeadscaleServiceServer struct{}
|
||||
|
||||
func (UnimplementedHeadscaleServiceServer) CreateUser(context.Context, *CreateUserRequest) (*CreateUserResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateUser not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method CreateUser not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) RenameUser(context.Context, *RenameUserRequest) (*RenameUserResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RenameUser not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method RenameUser not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DeleteUser(context.Context, *DeleteUserRequest) (*DeleteUserResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteUser not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method DeleteUser not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ListUsers(context.Context, *ListUsersRequest) (*ListUsersResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListUsers not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method ListUsers not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) CreatePreAuthKey(context.Context, *CreatePreAuthKeyRequest) (*CreatePreAuthKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreatePreAuthKey not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method CreatePreAuthKey not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ExpirePreAuthKey(context.Context, *ExpirePreAuthKeyRequest) (*ExpirePreAuthKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ExpirePreAuthKey not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method ExpirePreAuthKey not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DeletePreAuthKey(context.Context, *DeletePreAuthKeyRequest) (*DeletePreAuthKeyResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method DeletePreAuthKey not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ListPreAuthKeys(context.Context, *ListPreAuthKeysRequest) (*ListPreAuthKeysResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListPreAuthKeys not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method ListPreAuthKeys not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DebugCreateNode(context.Context, *DebugCreateNodeRequest) (*DebugCreateNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DebugCreateNode not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method DebugCreateNode not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetNode not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method GetNode not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) SetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method SetTags not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method SetTags not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) SetApprovedRoutes(context.Context, *SetApprovedRoutesRequest) (*SetApprovedRoutesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method SetApprovedRoutes not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method SetApprovedRoutes not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) RegisterNode(context.Context, *RegisterNodeRequest) (*RegisterNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RegisterNode not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method RegisterNode not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DeleteNode(context.Context, *DeleteNodeRequest) (*DeleteNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteNode not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method DeleteNode not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ExpireNode(context.Context, *ExpireNodeRequest) (*ExpireNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ExpireNode not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method ExpireNode not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) RenameNode(context.Context, *RenameNodeRequest) (*RenameNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RenameNode not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method RenameNode not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListNodes not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) MoveNode(context.Context, *MoveNodeRequest) (*MoveNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method MoveNode not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method ListNodes not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method BackfillNodeIPs not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method BackfillNodeIPs not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateApiKey not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method CreateApiKey not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ExpireApiKey(context.Context, *ExpireApiKeyRequest) (*ExpireApiKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ExpireApiKey not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method ExpireApiKey not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ListApiKeys(context.Context, *ListApiKeysRequest) (*ListApiKeysResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListApiKeys not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method ListApiKeys not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DeleteApiKey(context.Context, *DeleteApiKeyRequest) (*DeleteApiKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteApiKey not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method DeleteApiKey not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) GetPolicy(context.Context, *GetPolicyRequest) (*GetPolicyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetPolicy not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method GetPolicy not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) SetPolicy(context.Context, *SetPolicyRequest) (*SetPolicyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method SetPolicy not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method SetPolicy not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) Health(context.Context, *HealthRequest) (*HealthResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Health not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method Health not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
|
||||
func (UnimplementedHeadscaleServiceServer) testEmbeddedByValue() {}
|
||||
@@ -472,7 +472,7 @@ type UnsafeHeadscaleServiceServer interface {
}

func RegisterHeadscaleServiceServer(s grpc.ServiceRegistrar, srv HeadscaleServiceServer) {
// If the following call pancis, it indicates UnimplementedHeadscaleServiceServer was
// If the following call panics, it indicates UnimplementedHeadscaleServiceServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
@@ -590,6 +590,24 @@ func _HeadscaleService_ExpirePreAuthKey_Handler(srv interface{}, ctx context.Con
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_DeletePreAuthKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeletePreAuthKeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).DeletePreAuthKey(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_DeletePreAuthKey_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DeletePreAuthKey(ctx, req.(*DeletePreAuthKeyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ListPreAuthKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListPreAuthKeysRequest)
|
||||
if err := dec(in); err != nil {
|
||||
@@ -770,24 +788,6 @@ func _HeadscaleService_ListNodes_Handler(srv interface{}, ctx context.Context, d
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_MoveNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(MoveNodeRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).MoveNode(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_MoveNode_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).MoveNode(ctx, req.(*MoveNodeRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_BackfillNodeIPs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(BackfillNodeIPsRequest)
|
||||
if err := dec(in); err != nil {
|
||||
@@ -963,6 +963,10 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
MethodName: "ExpirePreAuthKey",
Handler: _HeadscaleService_ExpirePreAuthKey_Handler,
},
{
MethodName: "DeletePreAuthKey",
Handler: _HeadscaleService_DeletePreAuthKey_Handler,
},
{
MethodName: "ListPreAuthKeys",
Handler: _HeadscaleService_ListPreAuthKeys_Handler,
@@ -1003,10 +1007,6 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
MethodName: "ListNodes",
Handler: _HeadscaleService_ListNodes_Handler,
},
{
MethodName: "MoveNode",
Handler: _HeadscaleService_MoveNode_Handler,
},
{
MethodName: "BackfillNodeIPs",
Handler: _HeadscaleService_BackfillNodeIPs_Handler,

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.11
// protoc (unknown)
// source: headscale/v1/node.proto
@@ -75,27 +75,29 @@ func (RegisterMethod) EnumDescriptor() ([]byte, []int) {
|
||||
}
|
||||
|
||||
type Node struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
MachineKey string `protobuf:"bytes,2,opt,name=machine_key,json=machineKey,proto3" json:"machine_key,omitempty"`
|
||||
NodeKey string `protobuf:"bytes,3,opt,name=node_key,json=nodeKey,proto3" json:"node_key,omitempty"`
|
||||
DiscoKey string `protobuf:"bytes,4,opt,name=disco_key,json=discoKey,proto3" json:"disco_key,omitempty"`
|
||||
IpAddresses []string `protobuf:"bytes,5,rep,name=ip_addresses,json=ipAddresses,proto3" json:"ip_addresses,omitempty"`
|
||||
Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
|
||||
User *User `protobuf:"bytes,7,opt,name=user,proto3" json:"user,omitempty"`
|
||||
LastSeen *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"`
|
||||
Expiry *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=expiry,proto3" json:"expiry,omitempty"`
|
||||
PreAuthKey *PreAuthKey `protobuf:"bytes,11,opt,name=pre_auth_key,json=preAuthKey,proto3" json:"pre_auth_key,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
RegisterMethod RegisterMethod `protobuf:"varint,13,opt,name=register_method,json=registerMethod,proto3,enum=headscale.v1.RegisterMethod" json:"register_method,omitempty"`
|
||||
ForcedTags []string `protobuf:"bytes,18,rep,name=forced_tags,json=forcedTags,proto3" json:"forced_tags,omitempty"`
|
||||
InvalidTags []string `protobuf:"bytes,19,rep,name=invalid_tags,json=invalidTags,proto3" json:"invalid_tags,omitempty"`
|
||||
ValidTags []string `protobuf:"bytes,20,rep,name=valid_tags,json=validTags,proto3" json:"valid_tags,omitempty"`
|
||||
GivenName string `protobuf:"bytes,21,opt,name=given_name,json=givenName,proto3" json:"given_name,omitempty"`
|
||||
Online bool `protobuf:"varint,22,opt,name=online,proto3" json:"online,omitempty"`
|
||||
ApprovedRoutes []string `protobuf:"bytes,23,rep,name=approved_routes,json=approvedRoutes,proto3" json:"approved_routes,omitempty"`
|
||||
AvailableRoutes []string `protobuf:"bytes,24,rep,name=available_routes,json=availableRoutes,proto3" json:"available_routes,omitempty"`
|
||||
SubnetRoutes []string `protobuf:"bytes,25,rep,name=subnet_routes,json=subnetRoutes,proto3" json:"subnet_routes,omitempty"`
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
MachineKey string `protobuf:"bytes,2,opt,name=machine_key,json=machineKey,proto3" json:"machine_key,omitempty"`
|
||||
NodeKey string `protobuf:"bytes,3,opt,name=node_key,json=nodeKey,proto3" json:"node_key,omitempty"`
|
||||
DiscoKey string `protobuf:"bytes,4,opt,name=disco_key,json=discoKey,proto3" json:"disco_key,omitempty"`
|
||||
IpAddresses []string `protobuf:"bytes,5,rep,name=ip_addresses,json=ipAddresses,proto3" json:"ip_addresses,omitempty"`
|
||||
Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
|
||||
User *User `protobuf:"bytes,7,opt,name=user,proto3" json:"user,omitempty"`
|
||||
LastSeen *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"`
|
||||
Expiry *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=expiry,proto3" json:"expiry,omitempty"`
|
||||
PreAuthKey *PreAuthKey `protobuf:"bytes,11,opt,name=pre_auth_key,json=preAuthKey,proto3" json:"pre_auth_key,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
RegisterMethod RegisterMethod `protobuf:"varint,13,opt,name=register_method,json=registerMethod,proto3,enum=headscale.v1.RegisterMethod" json:"register_method,omitempty"`
|
||||
// Deprecated
|
||||
// repeated string forced_tags = 18;
|
||||
// repeated string invalid_tags = 19;
|
||||
// repeated string valid_tags = 20;
|
||||
GivenName string `protobuf:"bytes,21,opt,name=given_name,json=givenName,proto3" json:"given_name,omitempty"`
|
||||
Online bool `protobuf:"varint,22,opt,name=online,proto3" json:"online,omitempty"`
|
||||
ApprovedRoutes []string `protobuf:"bytes,23,rep,name=approved_routes,json=approvedRoutes,proto3" json:"approved_routes,omitempty"`
|
||||
AvailableRoutes []string `protobuf:"bytes,24,rep,name=available_routes,json=availableRoutes,proto3" json:"available_routes,omitempty"`
|
||||
SubnetRoutes []string `protobuf:"bytes,25,rep,name=subnet_routes,json=subnetRoutes,proto3" json:"subnet_routes,omitempty"`
|
||||
Tags []string `protobuf:"bytes,26,rep,name=tags,proto3" json:"tags,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
@@ -214,27 +216,6 @@ func (x *Node) GetRegisterMethod() RegisterMethod {
|
||||
return RegisterMethod_REGISTER_METHOD_UNSPECIFIED
|
||||
}
|
||||
|
||||
func (x *Node) GetForcedTags() []string {
|
||||
if x != nil {
|
||||
return x.ForcedTags
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Node) GetInvalidTags() []string {
|
||||
if x != nil {
|
||||
return x.InvalidTags
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Node) GetValidTags() []string {
|
||||
if x != nil {
|
||||
return x.ValidTags
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Node) GetGivenName() string {
|
||||
if x != nil {
|
||||
return x.GivenName
|
||||
@@ -270,6 +251,13 @@ func (x *Node) GetSubnetRoutes() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Node) GetTags() []string {
|
||||
if x != nil {
|
||||
return x.Tags
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type RegisterNodeRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
@@ -1006,102 +994,6 @@ func (x *ListNodesResponse) GetNodes() []*Node {
|
||||
return nil
|
||||
}
|
||||
|
||||
type MoveNodeRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
|
||||
User uint64 `protobuf:"varint,2,opt,name=user,proto3" json:"user,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *MoveNodeRequest) Reset() {
|
||||
*x = MoveNodeRequest{}
|
||||
mi := &file_headscale_v1_node_proto_msgTypes[17]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *MoveNodeRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
}

func (*MoveNodeRequest) ProtoMessage() {}

func (x *MoveNodeRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use MoveNodeRequest.ProtoReflect.Descriptor instead.
func (*MoveNodeRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{17}
}

func (x *MoveNodeRequest) GetNodeId() uint64 {
if x != nil {
return x.NodeId
}
return 0
}

func (x *MoveNodeRequest) GetUser() uint64 {
if x != nil {
return x.User
}
return 0
}

type MoveNodeResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

func (x *MoveNodeResponse) Reset() {
*x = MoveNodeResponse{}
mi := &file_headscale_v1_node_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

func (x *MoveNodeResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*MoveNodeResponse) ProtoMessage() {}

func (x *MoveNodeResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use MoveNodeResponse.ProtoReflect.Descriptor instead.
func (*MoveNodeResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{18}
}

func (x *MoveNodeResponse) GetNode() *Node {
if x != nil {
return x.Node
}
return nil
}

type DebugCreateNodeRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
@@ -1114,7 +1006,7 @@ type DebugCreateNodeRequest struct {

func (x *DebugCreateNodeRequest) Reset() {
*x = DebugCreateNodeRequest{}
mi := &file_headscale_v1_node_proto_msgTypes[19]
mi := &file_headscale_v1_node_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1126,7 +1018,7 @@ func (x *DebugCreateNodeRequest) String() string {
func (*DebugCreateNodeRequest) ProtoMessage() {}

func (x *DebugCreateNodeRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[19]
mi := &file_headscale_v1_node_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1139,7 +1031,7 @@ func (x *DebugCreateNodeRequest) ProtoReflect() protoreflect.Message {

// Deprecated: Use DebugCreateNodeRequest.ProtoReflect.Descriptor instead.
func (*DebugCreateNodeRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{19}
return file_headscale_v1_node_proto_rawDescGZIP(), []int{17}
}

func (x *DebugCreateNodeRequest) GetUser() string {
@@ -1179,7 +1071,7 @@ type DebugCreateNodeResponse struct {

func (x *DebugCreateNodeResponse) Reset() {
*x = DebugCreateNodeResponse{}
mi := &file_headscale_v1_node_proto_msgTypes[20]
mi := &file_headscale_v1_node_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1191,7 +1083,7 @@ func (x *DebugCreateNodeResponse) String() string {
func (*DebugCreateNodeResponse) ProtoMessage() {}

func (x *DebugCreateNodeResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[20]
mi := &file_headscale_v1_node_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1204,7 +1096,7 @@ func (x *DebugCreateNodeResponse) ProtoReflect() protoreflect.Message {

// Deprecated: Use DebugCreateNodeResponse.ProtoReflect.Descriptor instead.
func (*DebugCreateNodeResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{20}
return file_headscale_v1_node_proto_rawDescGZIP(), []int{18}
}

func (x *DebugCreateNodeResponse) GetNode() *Node {
@@ -1223,7 +1115,7 @@ type BackfillNodeIPsRequest struct {

func (x *BackfillNodeIPsRequest) Reset() {
*x = BackfillNodeIPsRequest{}
mi := &file_headscale_v1_node_proto_msgTypes[21]
mi := &file_headscale_v1_node_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1235,7 +1127,7 @@ func (x *BackfillNodeIPsRequest) String() string {
func (*BackfillNodeIPsRequest) ProtoMessage() {}

func (x *BackfillNodeIPsRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[21]
mi := &file_headscale_v1_node_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1248,7 +1140,7 @@ func (x *BackfillNodeIPsRequest) ProtoReflect() protoreflect.Message {

// Deprecated: Use BackfillNodeIPsRequest.ProtoReflect.Descriptor instead.
func (*BackfillNodeIPsRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{21}
return file_headscale_v1_node_proto_rawDescGZIP(), []int{19}
}

func (x *BackfillNodeIPsRequest) GetConfirmed() bool {
@@ -1267,7 +1159,7 @@ type BackfillNodeIPsResponse struct {

func (x *BackfillNodeIPsResponse) Reset() {
*x = BackfillNodeIPsResponse{}
mi := &file_headscale_v1_node_proto_msgTypes[22]
mi := &file_headscale_v1_node_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1279,7 +1171,7 @@ func (x *BackfillNodeIPsResponse) String() string {
func (*BackfillNodeIPsResponse) ProtoMessage() {}

func (x *BackfillNodeIPsResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[22]
mi := &file_headscale_v1_node_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1292,7 +1184,7 @@ func (x *BackfillNodeIPsResponse) ProtoReflect() protoreflect.Message {

// Deprecated: Use BackfillNodeIPsResponse.ProtoReflect.Descriptor instead.
func (*BackfillNodeIPsResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{22}
return file_headscale_v1_node_proto_rawDescGZIP(), []int{20}
}

func (x *BackfillNodeIPsResponse) GetChanges() []string {
@@ -1306,7 +1198,7 @@ var File_headscale_v1_node_proto protoreflect.FileDescriptor

const file_headscale_v1_node_proto_rawDesc = "" +
"\n" +
"\x17headscale/v1/node.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/user.proto\"\x98\x06\n" +
"\x17headscale/v1/node.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/user.proto\"\xc9\x05\n" +
"\x04Node\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x1f\n" +
"\vmachine_key\x18\x02 \x01(\tR\n" +
@@ -1323,19 +1215,15 @@ const file_headscale_v1_node_proto_rawDesc = "" +
"preAuthKey\x129\n" +
"\n" +
"created_at\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x12E\n" +
"\x0fregister_method\x18\r \x01(\x0e2\x1c.headscale.v1.RegisterMethodR\x0eregisterMethod\x12\x1f\n" +
"\vforced_tags\x18\x12 \x03(\tR\n" +
"forcedTags\x12!\n" +
"\finvalid_tags\x18\x13 \x03(\tR\vinvalidTags\x12\x1d\n" +
"\n" +
"valid_tags\x18\x14 \x03(\tR\tvalidTags\x12\x1d\n" +
"\x0fregister_method\x18\r \x01(\x0e2\x1c.headscale.v1.RegisterMethodR\x0eregisterMethod\x12\x1d\n" +
"\n" +
"given_name\x18\x15 \x01(\tR\tgivenName\x12\x16\n" +
"\x06online\x18\x16 \x01(\bR\x06online\x12'\n" +
"\x0fapproved_routes\x18\x17 \x03(\tR\x0eapprovedRoutes\x12)\n" +
"\x10available_routes\x18\x18 \x03(\tR\x0favailableRoutes\x12#\n" +
"\rsubnet_routes\x18\x19 \x03(\tR\fsubnetRoutesJ\x04\b\t\x10\n" +
"J\x04\b\x0e\x10\x12\";\n" +
"\rsubnet_routes\x18\x19 \x03(\tR\fsubnetRoutes\x12\x12\n" +
"\x04tags\x18\x1a \x03(\tR\x04tagsJ\x04\b\t\x10\n" +
"J\x04\b\x0e\x10\x15\";\n" +
"\x13RegisterNodeRequest\x12\x12\n" +
"\x04user\x18\x01 \x01(\tR\x04user\x12\x10\n" +
"\x03key\x18\x02 \x01(\tR\x03key\">\n" +
@@ -1371,12 +1259,7 @@ const file_headscale_v1_node_proto_rawDesc = "" +
"\x10ListNodesRequest\x12\x12\n" +
"\x04user\x18\x01 \x01(\tR\x04user\"=\n" +
"\x11ListNodesResponse\x12(\n" +
"\x05nodes\x18\x01 \x03(\v2\x12.headscale.v1.NodeR\x05nodes\">\n" +
"\x0fMoveNodeRequest\x12\x17\n" +
"\anode_id\x18\x01 \x01(\x04R\x06nodeId\x12\x12\n" +
"\x04user\x18\x02 \x01(\x04R\x04user\":\n" +
"\x10MoveNodeResponse\x12&\n" +
"\x04node\x18\x01 \x01(\v2\x12.headscale.v1.NodeR\x04node\"j\n" +
"\x05nodes\x18\x01 \x03(\v2\x12.headscale.v1.NodeR\x05nodes\"j\n" +
"\x16DebugCreateNodeRequest\x12\x12\n" +
"\x04user\x18\x01 \x01(\tR\x04user\x12\x10\n" +
"\x03key\x18\x02 \x01(\tR\x03key\x12\x12\n" +
@@ -1407,7 +1290,7 @@ func file_headscale_v1_node_proto_rawDescGZIP() []byte {
}

var file_headscale_v1_node_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_headscale_v1_node_proto_msgTypes = make([]protoimpl.MessageInfo, 23)
var file_headscale_v1_node_proto_msgTypes = make([]protoimpl.MessageInfo, 21)
var file_headscale_v1_node_proto_goTypes = []any{
(RegisterMethod)(0), // 0: headscale.v1.RegisterMethod
(*Node)(nil), // 1: headscale.v1.Node
@@ -1427,38 +1310,35 @@ var file_headscale_v1_node_proto_goTypes = []any{
(*RenameNodeResponse)(nil), // 15: headscale.v1.RenameNodeResponse
(*ListNodesRequest)(nil), // 16: headscale.v1.ListNodesRequest
(*ListNodesResponse)(nil), // 17: headscale.v1.ListNodesResponse
(*MoveNodeRequest)(nil), // 18: headscale.v1.MoveNodeRequest
(*MoveNodeResponse)(nil), // 19: headscale.v1.MoveNodeResponse
(*DebugCreateNodeRequest)(nil), // 20: headscale.v1.DebugCreateNodeRequest
(*DebugCreateNodeResponse)(nil), // 21: headscale.v1.DebugCreateNodeResponse
(*BackfillNodeIPsRequest)(nil), // 22: headscale.v1.BackfillNodeIPsRequest
(*BackfillNodeIPsResponse)(nil), // 23: headscale.v1.BackfillNodeIPsResponse
(*User)(nil), // 24: headscale.v1.User
(*timestamppb.Timestamp)(nil), // 25: google.protobuf.Timestamp
(*PreAuthKey)(nil), // 26: headscale.v1.PreAuthKey
(*DebugCreateNodeRequest)(nil), // 18: headscale.v1.DebugCreateNodeRequest
(*DebugCreateNodeResponse)(nil), // 19: headscale.v1.DebugCreateNodeResponse
(*BackfillNodeIPsRequest)(nil), // 20: headscale.v1.BackfillNodeIPsRequest
(*BackfillNodeIPsResponse)(nil), // 21: headscale.v1.BackfillNodeIPsResponse
(*User)(nil), // 22: headscale.v1.User
(*timestamppb.Timestamp)(nil), // 23: google.protobuf.Timestamp
(*PreAuthKey)(nil), // 24: headscale.v1.PreAuthKey
}
var file_headscale_v1_node_proto_depIdxs = []int32{
24, // 0: headscale.v1.Node.user:type_name -> headscale.v1.User
25, // 1: headscale.v1.Node.last_seen:type_name -> google.protobuf.Timestamp
25, // 2: headscale.v1.Node.expiry:type_name -> google.protobuf.Timestamp
26, // 3: headscale.v1.Node.pre_auth_key:type_name -> headscale.v1.PreAuthKey
25, // 4: headscale.v1.Node.created_at:type_name -> google.protobuf.Timestamp
22, // 0: headscale.v1.Node.user:type_name -> headscale.v1.User
23, // 1: headscale.v1.Node.last_seen:type_name -> google.protobuf.Timestamp
23, // 2: headscale.v1.Node.expiry:type_name -> google.protobuf.Timestamp
24, // 3: headscale.v1.Node.pre_auth_key:type_name -> headscale.v1.PreAuthKey
23, // 4: headscale.v1.Node.created_at:type_name -> google.protobuf.Timestamp
0, // 5: headscale.v1.Node.register_method:type_name -> headscale.v1.RegisterMethod
1, // 6: headscale.v1.RegisterNodeResponse.node:type_name -> headscale.v1.Node
1, // 7: headscale.v1.GetNodeResponse.node:type_name -> headscale.v1.Node
1, // 8: headscale.v1.SetTagsResponse.node:type_name -> headscale.v1.Node
1, // 9: headscale.v1.SetApprovedRoutesResponse.node:type_name -> headscale.v1.Node
25, // 10: headscale.v1.ExpireNodeRequest.expiry:type_name -> google.protobuf.Timestamp
23, // 10: headscale.v1.ExpireNodeRequest.expiry:type_name -> google.protobuf.Timestamp
1, // 11: headscale.v1.ExpireNodeResponse.node:type_name -> headscale.v1.Node
1, // 12: headscale.v1.RenameNodeResponse.node:type_name -> headscale.v1.Node
1, // 13: headscale.v1.ListNodesResponse.nodes:type_name -> headscale.v1.Node
1, // 14: headscale.v1.MoveNodeResponse.node:type_name -> headscale.v1.Node
1, // 15: headscale.v1.DebugCreateNodeResponse.node:type_name -> headscale.v1.Node
16, // [16:16] is the sub-list for method output_type
16, // [16:16] is the sub-list for method input_type
16, // [16:16] is the sub-list for extension type_name
16, // [16:16] is the sub-list for extension extendee
0, // [0:16] is the sub-list for field type_name
1, // 14: headscale.v1.DebugCreateNodeResponse.node:type_name -> headscale.v1.Node
15, // [15:15] is the sub-list for method output_type
15, // [15:15] is the sub-list for method input_type
15, // [15:15] is the sub-list for extension type_name
15, // [15:15] is the sub-list for extension extendee
0, // [0:15] is the sub-list for field type_name
}

func init() { file_headscale_v1_node_proto_init() }
@@ -1474,7 +1354,7 @@ func file_headscale_v1_node_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_node_proto_rawDesc), len(file_headscale_v1_node_proto_rawDesc)),
NumEnums: 1,
NumMessages: 23,
NumMessages: 21,
NumExtensions: 0,
NumServices: 0,
},

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.11
// protoc (unknown)
// source: headscale/v1/policy.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.11
// protoc (unknown)
// source: headscale/v1/preauthkey.proto

@@ -252,8 +252,7 @@ func (x *CreatePreAuthKeyResponse) GetPreAuthKey() *PreAuthKey {

type ExpirePreAuthKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -288,20 +287,13 @@ func (*ExpirePreAuthKeyRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{3}
}

func (x *ExpirePreAuthKeyRequest) GetUser() uint64 {
func (x *ExpirePreAuthKeyRequest) GetId() uint64 {
if x != nil {
return x.User
return x.Id
}
return 0
}

func (x *ExpirePreAuthKeyRequest) GetKey() string {
if x != nil {
return x.Key
}
return ""
}

type ExpirePreAuthKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
@@ -338,16 +330,95 @@ func (*ExpirePreAuthKeyResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{4}
}

type DeletePreAuthKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

func (x *DeletePreAuthKeyRequest) Reset() {
*x = DeletePreAuthKeyRequest{}
mi := &file_headscale_v1_preauthkey_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

func (x *DeletePreAuthKeyRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*DeletePreAuthKeyRequest) ProtoMessage() {}

func (x *DeletePreAuthKeyRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_preauthkey_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use DeletePreAuthKeyRequest.ProtoReflect.Descriptor instead.
func (*DeletePreAuthKeyRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{5}
}

func (x *DeletePreAuthKeyRequest) GetId() uint64 {
if x != nil {
return x.Id
}
return 0
}

type DeletePreAuthKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

func (x *DeletePreAuthKeyResponse) Reset() {
*x = DeletePreAuthKeyResponse{}
mi := &file_headscale_v1_preauthkey_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

func (x *DeletePreAuthKeyResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*DeletePreAuthKeyResponse) ProtoMessage() {}

func (x *DeletePreAuthKeyResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_preauthkey_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use DeletePreAuthKeyResponse.ProtoReflect.Descriptor instead.
func (*DeletePreAuthKeyResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{6}
}

type ListPreAuthKeysRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

func (x *ListPreAuthKeysRequest) Reset() {
*x = ListPreAuthKeysRequest{}
mi := &file_headscale_v1_preauthkey_proto_msgTypes[5]
mi := &file_headscale_v1_preauthkey_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -359,7 +430,7 @@ func (x *ListPreAuthKeysRequest) String() string {
func (*ListPreAuthKeysRequest) ProtoMessage() {}

func (x *ListPreAuthKeysRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_preauthkey_proto_msgTypes[5]
mi := &file_headscale_v1_preauthkey_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -372,14 +443,7 @@ func (x *ListPreAuthKeysRequest) ProtoReflect() protoreflect.Message {

// Deprecated: Use ListPreAuthKeysRequest.ProtoReflect.Descriptor instead.
func (*ListPreAuthKeysRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{5}
}

func (x *ListPreAuthKeysRequest) GetUser() uint64 {
if x != nil {
return x.User
}
return 0
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{7}
}

type ListPreAuthKeysResponse struct {
@@ -391,7 +455,7 @@ type ListPreAuthKeysResponse struct {

func (x *ListPreAuthKeysResponse) Reset() {
*x = ListPreAuthKeysResponse{}
mi := &file_headscale_v1_preauthkey_proto_msgTypes[6]
mi := &file_headscale_v1_preauthkey_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -403,7 +467,7 @@ func (x *ListPreAuthKeysResponse) String() string {
func (*ListPreAuthKeysResponse) ProtoMessage() {}

func (x *ListPreAuthKeysResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_preauthkey_proto_msgTypes[6]
mi := &file_headscale_v1_preauthkey_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -416,7 +480,7 @@ func (x *ListPreAuthKeysResponse) ProtoReflect() protoreflect.Message {

// Deprecated: Use ListPreAuthKeysResponse.ProtoReflect.Descriptor instead.
func (*ListPreAuthKeysResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{6}
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{8}
}

func (x *ListPreAuthKeysResponse) GetPreAuthKeys() []*PreAuthKey {
@@ -455,13 +519,14 @@ const file_headscale_v1_preauthkey_proto_rawDesc = "" +
"\bacl_tags\x18\x05 \x03(\tR\aaclTags\"V\n" +
"\x18CreatePreAuthKeyResponse\x12:\n" +
"\fpre_auth_key\x18\x01 \x01(\v2\x18.headscale.v1.PreAuthKeyR\n" +
"preAuthKey\"?\n" +
"\x17ExpirePreAuthKeyRequest\x12\x12\n" +
"\x04user\x18\x01 \x01(\x04R\x04user\x12\x10\n" +
"\x03key\x18\x02 \x01(\tR\x03key\"\x1a\n" +
"\x18ExpirePreAuthKeyResponse\",\n" +
"\x16ListPreAuthKeysRequest\x12\x12\n" +
"\x04user\x18\x01 \x01(\x04R\x04user\"W\n" +
"preAuthKey\")\n" +
"\x17ExpirePreAuthKeyRequest\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x04R\x02id\"\x1a\n" +
"\x18ExpirePreAuthKeyResponse\")\n" +
"\x17DeletePreAuthKeyRequest\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x04R\x02id\"\x1a\n" +
"\x18DeletePreAuthKeyResponse\"\x18\n" +
"\x16ListPreAuthKeysRequest\"W\n" +
"\x17ListPreAuthKeysResponse\x12<\n" +
"\rpre_auth_keys\x18\x01 \x03(\v2\x18.headscale.v1.PreAuthKeyR\vpreAuthKeysB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"

@@ -477,30 +542,32 @@ func file_headscale_v1_preauthkey_proto_rawDescGZIP() []byte {
return file_headscale_v1_preauthkey_proto_rawDescData
}

var file_headscale_v1_preauthkey_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_headscale_v1_preauthkey_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
var file_headscale_v1_preauthkey_proto_goTypes = []any{
(*PreAuthKey)(nil), // 0: headscale.v1.PreAuthKey
(*CreatePreAuthKeyRequest)(nil), // 1: headscale.v1.CreatePreAuthKeyRequest
(*CreatePreAuthKeyResponse)(nil), // 2: headscale.v1.CreatePreAuthKeyResponse
(*ExpirePreAuthKeyRequest)(nil), // 3: headscale.v1.ExpirePreAuthKeyRequest
(*ExpirePreAuthKeyResponse)(nil), // 4: headscale.v1.ExpirePreAuthKeyResponse
(*ListPreAuthKeysRequest)(nil), // 5: headscale.v1.ListPreAuthKeysRequest
(*ListPreAuthKeysResponse)(nil), // 6: headscale.v1.ListPreAuthKeysResponse
(*User)(nil), // 7: headscale.v1.User
(*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp
(*DeletePreAuthKeyRequest)(nil), // 5: headscale.v1.DeletePreAuthKeyRequest
(*DeletePreAuthKeyResponse)(nil), // 6: headscale.v1.DeletePreAuthKeyResponse
(*ListPreAuthKeysRequest)(nil), // 7: headscale.v1.ListPreAuthKeysRequest
(*ListPreAuthKeysResponse)(nil), // 8: headscale.v1.ListPreAuthKeysResponse
(*User)(nil), // 9: headscale.v1.User
(*timestamppb.Timestamp)(nil), // 10: google.protobuf.Timestamp
}
var file_headscale_v1_preauthkey_proto_depIdxs = []int32{
7, // 0: headscale.v1.PreAuthKey.user:type_name -> headscale.v1.User
8, // 1: headscale.v1.PreAuthKey.expiration:type_name -> google.protobuf.Timestamp
8, // 2: headscale.v1.PreAuthKey.created_at:type_name -> google.protobuf.Timestamp
8, // 3: headscale.v1.CreatePreAuthKeyRequest.expiration:type_name -> google.protobuf.Timestamp
0, // 4: headscale.v1.CreatePreAuthKeyResponse.pre_auth_key:type_name -> headscale.v1.PreAuthKey
0, // 5: headscale.v1.ListPreAuthKeysResponse.pre_auth_keys:type_name -> headscale.v1.PreAuthKey
6, // [6:6] is the sub-list for method output_type
6, // [6:6] is the sub-list for method input_type
6, // [6:6] is the sub-list for extension type_name
6, // [6:6] is the sub-list for extension extendee
0, // [0:6] is the sub-list for field type_name
9, // 0: headscale.v1.PreAuthKey.user:type_name -> headscale.v1.User
10, // 1: headscale.v1.PreAuthKey.expiration:type_name -> google.protobuf.Timestamp
10, // 2: headscale.v1.PreAuthKey.created_at:type_name -> google.protobuf.Timestamp
10, // 3: headscale.v1.CreatePreAuthKeyRequest.expiration:type_name -> google.protobuf.Timestamp
0, // 4: headscale.v1.CreatePreAuthKeyResponse.pre_auth_key:type_name -> headscale.v1.PreAuthKey
0, // 5: headscale.v1.ListPreAuthKeysResponse.pre_auth_keys:type_name -> headscale.v1.PreAuthKey
6, // [6:6] is the sub-list for method output_type
6, // [6:6] is the sub-list for method input_type
6, // [6:6] is the sub-list for extension type_name
6, // [6:6] is the sub-list for extension extendee
0, // [0:6] is the sub-list for field type_name
}

func init() { file_headscale_v1_preauthkey_proto_init() }
@@ -515,7 +582,7 @@ func file_headscale_v1_preauthkey_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_preauthkey_proto_rawDesc), len(file_headscale_v1_preauthkey_proto_rawDesc)),
NumEnums: 0,
NumMessages: 7,
NumMessages: 9,
NumExtensions: 0,
NumServices: 0,
},

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.11
// protoc (unknown)
// source: headscale/v1/user.proto

@@ -124,6 +124,13 @@
"in": "path",
"required": true,
"type": "string"
},
{
"name": "id",
"in": "query",
"required": false,
"type": "string",
"format": "uint64"
}
],
"tags": [
@@ -496,45 +503,6 @@
]
}
},
"/api/v1/node/{nodeId}/user": {
"post": {
"operationId": "HeadscaleService_MoveNode",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1MoveNodeResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "nodeId",
"in": "path",
"required": true,
"type": "string",
"format": "uint64"
},
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/HeadscaleServiceMoveNodeBody"
}
}
],
"tags": [
"HeadscaleService"
]
}
},
"/api/v1/policy": {
"get": {
"summary": "--- Policy start ---",
@@ -605,9 +573,29 @@
}
}
},
"tags": [
"HeadscaleService"
]
},
"delete": {
"operationId": "HeadscaleService_DeletePreAuthKey",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1DeletePreAuthKeyResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "user",
"name": "id",
"in": "query",
"required": false,
"type": "string",
@@ -826,15 +814,6 @@
}
},
"definitions": {
"HeadscaleServiceMoveNodeBody": {
"type": "object",
"properties": {
"user": {
"type": "string",
"format": "uint64"
}
}
},
"HeadscaleServiceSetApprovedRoutesBody": {
"type": "object",
"properties": {
@@ -1029,6 +1008,9 @@
"v1DeleteNodeResponse": {
"type": "object"
},
"v1DeletePreAuthKeyResponse": {
"type": "object"
},
"v1DeleteUserResponse": {
"type": "object"
},
@@ -1037,6 +1019,10 @@
"properties": {
"prefix": {
"type": "string"
},
"id": {
"type": "string",
"format": "uint64"
}
}
},
@@ -1054,12 +1040,9 @@
"v1ExpirePreAuthKeyRequest": {
"type": "object",
"properties": {
"user": {
"id": {
"type": "string",
"format": "uint64"
},
"key": {
"type": "string"
}
}
},
@@ -1142,14 +1125,6 @@
}
}
},
"v1MoveNodeResponse": {
"type": "object",
"properties": {
"node": {
"$ref": "#/definitions/v1Node"
}
}
},
"v1Node": {
"type": "object",
"properties": {
@@ -1196,26 +1171,9 @@
"registerMethod": {
"$ref": "#/definitions/v1RegisterMethod"
},
"forcedTags": {
"type": "array",
"items": {
"type": "string"
}
},
"invalidTags": {
"type": "array",
"items": {
"type": "string"
}
},
"validTags": {
"type": "array",
"items": {
"type": "string"
}
},
"givenName": {
"type": "string"
"type": "string",
"title": "Deprecated\nrepeated string forced_tags = 18;\nrepeated string invalid_tags = 19;\nrepeated string valid_tags = 20;"
},
"online": {
"type": "boolean"
@@ -1237,6 +1195,12 @@
"items": {
"type": "string"
}
},
"tags": {
"type": "array",
"items": {
"type": "string"
}
}
}
},

go.mod (202 changes)
@@ -1,58 +1,57 @@
module github.com/juanfont/headscale

go 1.25
go 1.25.5

require (
github.com/arl/statsviz v0.7.2
github.com/arl/statsviz v0.8.0
github.com/cenkalti/backoff/v5 v5.0.3
github.com/chasefleming/elem-go v0.31.0
github.com/coder/websocket v1.8.14
github.com/coreos/go-oidc/v3 v3.16.0
github.com/coreos/go-oidc/v3 v3.17.0
github.com/creachadair/command v0.2.0
github.com/creachadair/flax v0.0.5
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/docker/docker v28.5.1+incompatible
github.com/docker/docker v28.5.2+incompatible
github.com/fsnotify/fsnotify v1.9.0
github.com/glebarez/sqlite v1.11.0
github.com/go-gormigrate/gormigrate/v2 v2.1.5
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced
github.com/gofrs/uuid/v5 v5.3.2
github.com/go-json-experiment/json v0.0.0-20251027170946-4849db3c2f7e
github.com/gofrs/uuid/v5 v5.4.0
github.com/google/go-cmp v0.7.0
github.com/gorilla/mux v1.8.1
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7
github.com/jagottsicher/termcolor v1.0.2
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25
github.com/ory/dockertest/v3 v3.12.0
github.com/philip-bui/grpc-zerolog v1.0.1
github.com/pkg/profile v1.7.0
github.com/prometheus/client_golang v1.23.2
github.com/prometheus/common v0.66.1
github.com/prometheus/common v0.67.5
github.com/pterm/pterm v0.12.82
github.com/puzpuzpuz/xsync/v4 v4.2.0
github.com/puzpuzpuz/xsync/v4 v4.4.0
github.com/rs/zerolog v1.34.0
github.com/samber/lo v1.52.0
github.com/sasha-s/go-deadlock v0.3.6
github.com/spf13/cobra v1.10.1
github.com/spf13/cobra v1.10.2
github.com/spf13/viper v1.21.0
github.com/stretchr/testify v1.11.1
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97
github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a
github.com/tailscale/squibble v0.0.0-20251104223530-a961feffb67f
github.com/tailscale/tailsql v0.0.0-20260105194658-001575c3ca09
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
golang.org/x/crypto v0.43.0
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b
golang.org/x/net v0.46.0
golang.org/x/oauth2 v0.32.0
golang.org/x/sync v0.17.0
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4
google.golang.org/grpc v1.75.1
google.golang.org/protobuf v1.36.10
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
golang.org/x/crypto v0.47.0
golang.org/x/exp v0.0.0-20260112195511-716be5621a96
golang.org/x/net v0.49.0
golang.org/x/oauth2 v0.34.0
golang.org/x/sync v0.19.0
google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20
google.golang.org/grpc v1.78.0
google.golang.org/protobuf v1.36.11
gopkg.in/yaml.v3 v3.0.1
gorm.io/driver/postgres v1.6.0
gorm.io/gorm v1.31.0
tailscale.com v1.86.5
gorm.io/gorm v1.31.1
tailscale.com v1.94.1
zgo.at/zcache/v2 v2.4.1
zombiezen.com/go/postgrestest v1.0.1
)
@@ -75,12 +74,20 @@ require (
// together, e.g:
// go get modernc.org/libc@v1.55.3 modernc.org/sqlite@v1.33.1
require (
modernc.org/libc v1.66.10 // indirect
modernc.org/libc v1.67.6 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.11.0 // indirect
modernc.org/sqlite v1.39.1
modernc.org/sqlite v1.44.3
)

// NOTE: gvisor must be updated in lockstep with
// tailscale.com. The version used here should match
// the version required by the tailscale.com dependency.
// To find the correct version, check tailscale.com's
// go.mod file for the gvisor.dev/gvisor version:
// https://github.com/tailscale/tailscale/blob/main/go.mod
require gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 // indirect

require (
|
||||
atomicgo.dev/cursor v0.2.0 // indirect
|
||||
atomicgo.dev/keyboard v0.2.9 // indirect
|
||||
@@ -91,147 +98,140 @@ require (
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
|
||||
github.com/akutz/memconn v0.1.0 // indirect
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.58 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 // indirect
|
||||
github.com/aws/smithy-go v1.22.2 // indirect
|
||||
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
|
||||
github.com/aws/smithy-go v1.24.0 // indirect
|
||||
github.com/axiomhq/hyperloglog v0.2.6 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/clipperhouse/uax29/v2 v2.2.0 // indirect
|
||||
github.com/clipperhouse/stringish v0.1.1 // indirect
|
||||
github.com/clipperhouse/uax29/v2 v2.5.0 // indirect
|
||||
github.com/containerd/console v1.0.5 // indirect
|
||||
github.com/containerd/continuity v0.4.5 // indirect
|
||||
github.com/containerd/errdefs v0.3.0 // indirect
|
||||
github.com/containerd/errdefs v1.0.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
|
||||
github.com/creachadair/mds v0.25.10 // indirect
|
||||
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 // indirect
|
||||
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect
|
||||
github.com/creachadair/mds v0.25.15 // indirect
|
||||
github.com/creachadair/msync v0.8.2 // indirect
|
||||
github.com/dblohm7/wingoes v0.0.0-20250822163801-6d8e6105c62d // indirect
|
||||
github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/cli v28.5.1+incompatible // indirect
|
||||
github.com/docker/cli v29.2.1+incompatible // indirect
|
||||
github.com/docker/go-connections v0.6.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/felixge/fgprof v0.9.5 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/gaissmai/bart v0.18.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/gaissmai/bart v0.26.1 // indirect
|
||||
github.com/glebarez/go-sqlite v1.22.0 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
|
||||
github.com/godbus/dbus/v5 v5.2.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/btree v1.1.2 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/go-github v17.0.0+incompatible // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect
|
||||
github.com/google/go-querystring v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gookit/color v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
github.com/hashicorp/go-version v1.7.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/hashicorp/go-version v1.8.0 // indirect
|
||||
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
|
||||
github.com/illarion/gonotify/v3 v3.0.2 // indirect
|
||||
github.com/huin/goupnp v1.3.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||
github.com/jackc/pgx/v5 v5.7.6 // indirect
|
||||
github.com/jackc/pgx/v5 v5.8.0 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.2 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jsimonetti/rtnetlink v1.4.1 // indirect
|
||||
github.com/klauspost/compress v1.18.1 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/jsimonetti/rtnetlink v1.4.2 // indirect
|
||||
github.com/kamstrup/intmap v0.5.2 // indirect
|
||||
github.com/klauspost/compress v1.18.3 // indirect
|
||||
github.com/lib/pq v1.11.1 // indirect
|
||||
github.com/lithammer/fuzzysearch v1.1.8 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.19 // indirect
|
||||
github.com/mdlayher/genetlink v1.3.2 // indirect
|
||||
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 // indirect
|
||||
github.com/mdlayher/sdnotify v1.0.0 // indirect
|
||||
github.com/mdlayher/socket v0.5.0 // indirect
|
||||
github.com/miekg/dns v1.1.58 // indirect
|
||||
github.com/mdlayher/netlink v1.8.0 // indirect
|
||||
github.com/mdlayher/socket v0.5.1 // indirect
|
||||
github.com/mitchellh/go-ps v1.0.0 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/moby/api v1.53.0 // indirect
|
||||
github.com/moby/moby/client v0.2.2 // indirect
|
||||
github.com/moby/sys/atomicwriter v0.1.0 // indirect
|
||||
github.com/moby/sys/user v0.4.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/morikuni/aec v1.1.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/ncruces/go-strftime v1.0.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/opencontainers/runc v1.3.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 // indirect
|
||||
github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741 // indirect
|
||||
github.com/pires/go-proxyproto v0.9.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus-community/pro-bing v0.4.0 // indirect
|
||||
github.com/prometheus-community/pro-bing v0.7.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/prometheus/procfs v0.19.2 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
||||
github.com/safchain/ethtool v0.3.0 // indirect
|
||||
github.com/safchain/ethtool v0.7.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.12.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/sirupsen/logrus v1.9.4 // indirect
|
||||
github.com/spf13/afero v1.15.0 // indirect
|
||||
github.com/spf13/cast v1.10.0 // indirect
|
||||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect
|
||||
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect
|
||||
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 // indirect
|
||||
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 // indirect
|
||||
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc // indirect
|
||||
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d // indirect
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 // indirect
|
||||
github.com/tailscale/setec v0.0.0-20260115174028-19d190c5556d // indirect
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20251127225136-f19339b67368 // indirect
|
||||
github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da // indirect
|
||||
github.com/vishvananda/netns v0.0.5 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
|
||||
go.opentelemetry.io/otel v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect
|
||||
go.opentelemetry.io/otel v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.40.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 // indirect
|
||||
golang.org/x/mod v0.29.0 // indirect
|
||||
golang.org/x/sys v0.37.0 // indirect
|
||||
golang.org/x/term v0.36.0 // indirect
|
||||
golang.org/x/text v0.30.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
golang.org/x/mod v0.32.0 // indirect
|
||||
golang.org/x/sys v0.40.0 // indirect
|
||||
golang.org/x/term v0.39.0 // indirect
|
||||
golang.org/x/text v0.33.0 // indirect
|
||||
golang.org/x/time v0.14.0 // indirect
|
||||
golang.org/x/tools v0.41.0 // indirect
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 // indirect
|
||||
)
|
||||
|
||||
tool (
|
||||
|
||||
go.sum (444 changes)
@@ -16,8 +16,8 @@ filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc=
|
||||
filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
|
||||
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
|
||||
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/MarvinJWendt/testza v0.1.0/go.mod h1:7AxNvlfeHP7Z/hDQ5JtE3OKYT3XFUeLCDE2DQninSqs=
|
||||
github.com/MarvinJWendt/testza v0.2.1/go.mod h1:God7bhG8n6uQxwdScay+gjm9/LnO4D3kkcZX4hv9Rp8=
|
||||
github.com/MarvinJWendt/testza v0.2.8/go.mod h1:nwIcjmr0Zz+Rcwfh3/4UhBp7ePKVhuBExvZqnKYWlII=
|
||||
@@ -33,51 +33,55 @@ github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEV
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A=
|
||||
github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw=
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI=
|
||||
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/arl/statsviz v0.7.2 h1:xnuIfRiXE4kvxEcfGL+IE3mKH1BXNHuE+eJELIh7oOA=
|
||||
github.com/arl/statsviz v0.7.2/go.mod h1:XlrbiT7xYT03xaW9JMMfD8KFUhBOESJwfyNJu83PbB0=
|
||||
github.com/arl/statsviz v0.8.0 h1:O6GjjVxEDxcByAucOSl29HaGYLXsuwA3ujJw8H9E7/U=
|
||||
github.com/arl/statsviz v0.8.0/go.mod h1:XlrbiT7xYT03xaW9JMMfD8KFUhBOESJwfyNJu83PbB0=
|
||||
github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 h1:8IwBjuLdqIO1dGB+dZ9zJEl8wzY3bVYxcs0Xyu/Lsc0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31/go.mod h1:8tMBcuVjL4kP/ECEIWTCWtwV2kj6+ouEKl4cqR4iWLw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 h1:siiQ+jummya9OLPDEyHVb2dLW4aOMe22FGDd0sAfuSw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5/go.mod h1:iHVx2J9pWzITdP5MJY6qWfG34TfD9EA+Qi3eV6qQCXw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 h1:tkVNm99nkJnFo1H9IIQb5QkCiPcvCDn3Pos+IeTbGRA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12/go.mod h1:dIVlquSPUMqEJtx2/W17SM2SuESRaVEhEV9alcMqxjw=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 h1:JBod0SnNqcWQ0+uAyzeRFG1zCHotW8DukumYYyNy0zo=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3/go.mod h1:FHSHmyEUkzRbaFFqqm6bkLAOQHgqhsLmfCahvCBMiyA=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 h1:CjMzUs78RDDv4ROu3JnJn/Ig1r6ZD7/T2DXLLRpejic=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16/go.mod h1:uVW4OLBqbJXSHJYA9svT9BluSvvwbzLQ2Crf6UPzR3c=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 h1:DIBqIrJ7hv+e4CmIk2z3pyKT+3B6qVMgRsawHiR3qso=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7/go.mod h1:vLm00xmBke75UmpNvOcZQ/Q30ZFjbczeLFqGx5urmGo=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 h1:NSbvS17MlI2lurYgXnCOLvCFX38sBW4eiVER7+kkgsU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16/go.mod h1:SwT8Tmqd4sA6G1qaGdzWCJN99bUmPGHfRwwq3G5Qb+A=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.93.2 h1:U3ygWUhCpiSPYSHOrRhb3gOl9T5Y3kB8k5Vjs//57bE=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.93.2/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8=
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0 h1:IOdss+igJDFdic9w3WKwxGCmHqUxydvIhJOm9LJ32Dk=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w=
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/axiomhq/hyperloglog v0.2.6 h1:sRhvvF3RIXWQgAXaTphLp4yJiX4S0IN3MWTaAgZoRJw=
github.com/axiomhq/hyperloglog v0.2.6/go.mod h1:YjX/dQqCR/7QYX0g8mu8UZAjpIenz1FKM71UEsjFoTo=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
@@ -99,8 +103,10 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/cilium/ebpf v0.17.3 h1:FnP4r16PWYSE4ux6zN+//jMcW4nMVRvuTLVTvCjyyjg=
github.com/cilium/ebpf v0.17.3/go.mod h1:G5EDHij8yiLzaqn0WjyfJHvRa+3aDlReIaLVRMvOyJk=
github.com/clipperhouse/uax29/v2 v2.2.0 h1:ChwIKnQN3kcZteTXMgb1wztSgaU+ZemkgWdohwgs8tY=
github.com/clipperhouse/uax29/v2 v2.2.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM=
github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=
github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
github.com/clipperhouse/uax29/v2 v2.5.0 h1:x7T0T4eTHDONxFJsL94uKNKPHrclyFI0lm7+w94cO8U=
github.com/clipperhouse/uax29/v2 v2.5.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g=
github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
@@ -108,47 +114,48 @@ github.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/q
github.com/containerd/console v1.0.5/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=
github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow=
github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creachadair/command v0.2.0 h1:qTA9cMMhZePAxFoNdnk6F6nn94s1qPndIg9hJbqI9cA=
github.com/creachadair/command v0.2.0/go.mod h1:j+Ar+uYnFsHpkMeV9kGj6lJ45y9u2xqtg8FYy6cm+0o=
github.com/creachadair/flax v0.0.5 h1:zt+CRuXQASxwQ68e9GHAOnEgAU29nF0zYMHOCrL5wzE=
github.com/creachadair/flax v0.0.5/go.mod h1:F1PML0JZLXSNDMNiRGK2yjm5f+L9QCHchyHBldFymj8=
github.com/creachadair/mds v0.25.2 h1:xc0S0AfDq5GX9KUR5sLvi5XjA61/P6S5e0xFs1vA18Q=
github.com/creachadair/mds v0.25.2/go.mod h1:+s4CFteFRj4eq2KcGHW8Wei3u9NyzSPzNV32EvjyK/Q=
github.com/creachadair/mds v0.25.10 h1:9k9JB35D1xhOCFl0liBhagBBp8fWWkKZrA7UXsfoHtA=
github.com/creachadair/mds v0.25.10/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs=
github.com/creachadair/mds v0.25.15 h1:i8CUqtfgbCqbvZ++L7lm8No3cOeic9YKF4vHEvEoj+Y=
github.com/creachadair/mds v0.25.15/go.mod h1:XtMfRW15sjd1iOi1Z1k+dq0pRsR5xPbulpoTrpyhk8w=
github.com/creachadair/msync v0.8.2 h1:ujvc/SVJPn+bFwmjUHucXNTTn3opVe2YbQ46mBCnP08=
github.com/creachadair/msync v0.8.2/go.mod h1:LzxqD9kfIl/O3DczkwOgJplLPqwrTbIhINlf9bHIsEY=
github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc=
github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0=
github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 h1:vrC07UZcgPzu/OjWsmQKMGg3LoPSz9jh/pQXIrHjUj4=
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0/go.mod h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ=
github.com/dblohm7/wingoes v0.0.0-20250822163801-6d8e6105c62d h1:QRKpU+9ZBDs62LyBfwhZkJdB5DJX2Sm3p4kUh7l1aA0=
github.com/dblohm7/wingoes v0.0.0-20250822163801-6d8e6105c62d/go.mod h1:SUxUaAK/0UG5lYyZR1L1nC4AaYYvSSYTWQSH3FPcxKU=
github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 h1:ucRHb6/lvW/+mTEIGbvhcYU3S8+uSNkuMjx/qZFfhtM=
github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q=
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
github.com/docker/cli v28.5.1+incompatible h1:ESutzBALAD6qyCLqbQSEf1a/U8Ybms5agw59yGVc+yY=
github.com/docker/cli v28.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM=
github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/cli v29.2.1+incompatible h1:n3Jt0QVCN65eiVBoUTZQM9mcQICCJt3akW4pKAbKdJg=
github.com/docker/cli v29.2.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -164,10 +171,10 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/gaissmai/bart v0.18.0 h1:jQLBT/RduJu0pv/tLwXE+xKPgtWJejbxuXAR+wLJafo=
github.com/gaissmai/bart v0.18.0/go.mod h1:JJzMAhNF5Rjo4SF4jWBrANuJfqY+FvsFhW7t1UZJ+XY=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/gaissmai/bart v0.26.1 h1:+w4rnLGNlA2GDVn382Tfe3jOsK5vOr5n4KmigJ9lbTo=
github.com/gaissmai/bart v0.26.1/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c=
github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I=
github.com/github/fakeca v0.1.0/go.mod h1:+bormgoGMMuamOscx7N91aOuUST7wdaJ2rNjeohylyo=
github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=
@@ -180,8 +187,8 @@ github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFS
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced h1:Q311OHjMh/u5E2TITc++WlTP5We0xNseRMkHDyvhW7I=
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
github.com/go-json-experiment/json v0.0.0-20251027170946-4849db3c2f7e h1:Lf/gRkoycfOBPa42vU2bbgPurFong6zXeFtPoxholzU=
github.com/go-json-experiment/json v0.0.0-20251027170946-4849db3c2f7e/go.mod h1:uNVvRXArCGbZ508SxYYTC5v1JWoz2voff5pm25jU1Ok=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -191,42 +198,42 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 h1:cf60tHxREO3g1nroKr2osU3JWZsJzkfi7rEg+oAB0Lo=
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737/go.mod h1:MIS0jDzbU/vuM9MC4YnBITCv+RYuTRq8dJzmCrFsK9g=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg=
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU=
github.com/gofrs/uuid/v5 v5.3.2 h1:2jfO8j3XgSwlz/wHqemAEugfnTlikAYHhnqQ8Xh4fE0=
github.com/gofrs/uuid/v5 v5.3.2/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/godbus/dbus/v5 v5.2.2 h1:TUR3TgtSVDmjiXOgAAyaZbYmIeP3DPkld3jgKGV8mXQ=
github.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c=
github.com/gofrs/uuid/v5 v5.4.0 h1:EfbpCTjqMuGyq5ZJwxqzn3Cbr2d0rUZU7v5ycAk/e/0=
github.com/gofrs/uuid/v5 v5.4.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=
github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0=
github.com/google/go-querystring v1.2.0/go.mod h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU=
github.com/google/go-tpm v0.9.4 h1:awZRf9FwOeTunQmHoDYSHJps3ie6f1UlhS1fOdPEt1I=
github.com/google/go-tpm v0.9.4/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI=
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d h1:KJIErDwbSHjnp/SGzE5ed8Aol7JsKiI5X7yWKAtzhM0=
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef h1:xpF9fUHpoIrrjX24DURVKiwHcFpw19ndIs+FwTSMbno=
github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -239,14 +246,19 @@ github.com/gookit/color v1.6.0 h1:JjJXBTk1ETNyqyilJhkTXJYYigHG24TM9Xa2M1xAhRA=
github.com/gookit/color v1.6.0/go.mod h1:9ACFc7/1IpHGBW8RwuDm/0YEnhg3dwwXpoMsmtyHfjs=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU=
github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/illarion/gonotify/v3 v3.0.2 h1:O7S6vcopHexutmpObkeWsnzMJt/r1hONIEogeVNmJMk=
@@ -259,8 +271,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo=
github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jagottsicher/termcolor v1.0.2 h1:fo0c51pQSuLBN1+yVX2ZE+hE+P7ULb/TY8eRowJnrsM=
@@ -273,15 +285,13 @@ github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jsimonetti/rtnetlink v1.4.1 h1:JfD4jthWBqZMEffc5RjgmlzpYttAVw1sdnmiNaPO3hE=
github.com/jsimonetti/rtnetlink v1.4.1/go.mod h1:xJjT7t59UIZ62GLZbv6PLLo8VFrostJMPBAheR6OM8w=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
github.com/jsimonetti/rtnetlink v1.4.2 h1:Df9w9TZ3npHTyDn0Ev9e1uzmN2odmXd0QX+J5GTEn90=
github.com/jsimonetti/rtnetlink v1.4.2/go.mod h1:92s6LJdE+1iOrw+F2/RO7LYI2Qd8pPpFNNUYW06gcoM=
github.com/kamstrup/intmap v0.5.2 h1:qnwBm1mh4XAnW9W9Ue9tZtTff8pS6+s6iKF6JRIV2Dk=
github.com/kamstrup/intmap v0.5.2/go.mod h1:gWUVWHKzWj8xpJVFf5GC0O26bWmv3GqdnIX/LMT6Aq4=
github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=
github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
@@ -292,7 +302,6 @@ github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryy
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -303,8 +312,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.11.1 h1:wuChtj2hfsGmmx3nf1m7xC2XpK6OtelS2shMY+bGMtI=
github.com/lib/pq v1.11.1/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA=
github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4=
github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -320,18 +329,22 @@ github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byF
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw=
github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o=
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg=
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o=
github.com/mdlayher/netlink v1.8.0 h1:e7XNIYJKD7hUct3Px04RuIGJbBxy1/c4nX7D5YyvvlM=
github.com/mdlayher/netlink v1.8.0/go.mod h1:UhgKXUlDQhzb09DrCl2GuRNEglHmhYoWAHid9HK3594=
github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c=
github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE=
github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI=
github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI=
github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos=
github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ=
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/moby/api v1.53.0 h1:PihqG1ncw4W+8mZs69jlwGXdaYBeb5brF6BL7mPIS/w=
github.com/moby/moby/api v1.53.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc=
github.com/moby/moby/client v0.2.2 h1:Pt4hRMCAIlyjL3cr8M5TrXCwKzguebPAc2do2ur7dEM=
github.com/moby/moby/client v0.2.2/go.mod h1:2EkIPVNCqR05CMIzL1mfA07t0HvVUUOl85pasRz/GmQ=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
@@ -340,8 +353,8 @@ github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ=
github.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
@@ -362,13 +375,14 @@ github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXR
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 h1:QTvNkZ5ylY0PGgA+Lih+GdboMLY/G9SEGLMEGVjTVA4=
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741 h1:KPpdlQLZcHfTMQRi6bFQ7ogNO0ltFT4PmtwTLW4W+14=
github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/philip-bui/grpc-zerolog v1.0.1 h1:EMacvLRUd2O1K0eWod27ZP5CY1iTNkhBDLSN+Q4JEvA=
github.com/philip-bui/grpc-zerolog v1.0.1/go.mod h1:qXbiq/2X4ZUMMshsqlWyTHOcw7ns+GZmlqZZN05ZHcQ=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pires/go-proxyproto v0.9.2 h1:H1UdHn695zUVVmB0lQ354lOWHOy6TZSpzBl3tgN0s1U=
github.com/pires/go-proxyproto v0.9.2/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
@@ -378,16 +392,16 @@ github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Q
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4=
github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4=
github.com/prometheus-community/pro-bing v0.7.0 h1:KFYFbxC2f2Fp6c+TyxbCOEarf7rbnzr9Gw8eIb0RfZA=
github.com/prometheus-community/pro-bing v0.7.0/go.mod h1:Moob9dvlY50Bfq6i88xIwfyw7xLFHH69LUgx9n5zqCE=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
github.com/pterm/pterm v0.12.27/go.mod h1:PhQ89w4i95rhgE+xedAoqous6K9X+r6aSOI2eFF7DZI=
github.com/pterm/pterm v0.12.29/go.mod h1:WI3qxgvoQFFGKGjGnJR849gU0TsEOvKn5Q8LlY1U7lg=
github.com/pterm/pterm v0.12.30/go.mod h1:MOqLIyMOgmTDz9yorcYbcw+HsgoZo3BQfg2wtl3HEFE=
@@ -397,20 +411,19 @@ github.com/pterm/pterm v0.12.36/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5b
github.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkGTYf8s=
github.com/pterm/pterm v0.12.82 h1:+D9wYhCaeaK0FIQoZtqbNQuNpe2lB2tajKKsTd5paVQ=
github.com/pterm/pterm v0.12.82/go.mod h1:TyuyrPjnxfwP+ccJdBTeWHtd/e0ybQHkOS/TakajZCw=
github.com/puzpuzpuz/xsync/v4 v4.2.0 h1:dlxm77dZj2c3rxq0/XNvvUKISAmovoXF4a4qM6Wvkr0=
github.com/puzpuzpuz/xsync/v4 v4.2.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/puzpuzpuz/xsync/v4 v4.4.0 h1:vlSN6/CkEY0pY8KaB0yqo/pCLZvp9nhdbBdjipT4gWo=
github.com/puzpuzpuz/xsync/v4 v4.4.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0=
github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs=
github.com/safchain/ethtool v0.7.0 h1:rlJzfDetsVvT61uz8x1YIcFn12akMfuPulHtZjtb7Is=
github.com/safchain/ethtool v0.7.0/go.mod h1:MenQKEjXdfkjD3mp2QdCk8B/hwvkrlOTm/FD4gTpFxQ=
github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=
github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=
github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw=
@@ -420,14 +433,14 @@ github.com/sasha-s/go-deadlock v0.3.6/go.mod h1:CUqNyyvMxTyjFqDT7MRg9mb4Dv/btmGT
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -453,24 +466,20 @@ github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg=
github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 h1:SRL6irQkKGQKKLzvQP/ke/2ZuB7Py5+XuqtOgSj+iMM=
github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ=
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio=
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8=
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33 h1:idh63uw+gsG05HwjZsAENCG4KZfyvjK03bpjxa5qRRk=
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo=
github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a h1:a6TNDN9CgG+cYjaeN8l2mc4kSz2iMiCDQxPEyltUV/I=
github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo=
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU=
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0=
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA=
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc=
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d h1:mnqtPWYyvNiPU9l9tzO2YbHXU/xV664XthZYA26lOiE=
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d/go.mod h1:9BzmlFc3OLqLzLTF/5AY+BMs+clxMqyhSGzgXIm8mNI=
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694 h1:95eIP97c88cqAFU/8nURjgI9xxPbD+Ci6mY/a79BI/w=
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694/go.mod h1:veguaG8tVg1H/JG5RfpoUW41I+O8ClPElo/fTYr8mMk=
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993 h1:FyiiAvDAxpB0DrW2GW3KOVfi3YFOtsQUEeFWbf55JJU=
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993/go.mod h1:xJkMmR3t+thnUQhA3Q4m2VSlS5pcOq+CIjmU/xfKKx4=
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97 h1:JJkDnrAhHvOCttk8z9xeZzcDlzzkRA7+Duxj9cwOyxk=
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97/go.mod h1:9jS8HxwsP2fU4ESZ7DZL+fpH/U66EVlVMzdgznH12RM=
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14=
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
github.com/tailscale/setec v0.0.0-20260115174028-19d190c5556d h1:N+TtzIaGYREbLbKZB0WU0vVnMSfaqUkSf3qMEi03hwE=
github.com/tailscale/setec v0.0.0-20260115174028-19d190c5556d/go.mod h1:6NU8H/GLPVX2TnXAY1duyy9ylLaHwFpr0X93UPiYmNI=
github.com/tailscale/squibble v0.0.0-20251104223530-a961feffb67f h1:CL6gu95Y1o2ko4XiWPvWkJka0QmQWcUyPywWVWDPQbQ=
github.com/tailscale/squibble v0.0.0-20251104223530-a961feffb67f/go.mod h1:xJkMmR3t+thnUQhA3Q4m2VSlS5pcOq+CIjmU/xfKKx4=
github.com/tailscale/tailsql v0.0.0-20260105194658-001575c3ca09 h1:Fc9lE2cDYJbBLpCqnVmoLdf7McPqoHZiDxDPPpkJM04=
github.com/tailscale/tailsql v0.0.0-20260105194658-001575c3ca09/go.mod h1:QMNhC4XGFiXKngHVLXE+ERDmQoH0s5fD7AUxupykocQ=
github.com/tailscale/web-client-prebuilt v0.0.0-20251127225136-f19339b67368 h1:0tpDdAj9sSfSZg4gMwNTdqMP592sBrq2Sm0w6ipnh7k=
github.com/tailscale/web-client-prebuilt v0.0.0-20251127225136-f19339b67368/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y=
github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da h1:jVRUZPRs9sqyKlYHHzHjAqKN+6e/Vog6NpHYeNPJqOw=
@@ -481,13 +490,12 @@ github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA=
github.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk=
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e h1:IWllFTiDjjLIf2oeKxpIUmtiDV5sn71VgeQgg6vcE7k=
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e/go.mod h1:d7u6HkTYKSv5m6MCKkOQlHwaShTMl3HjqSGW3XtVhXM=
github.com/tink-crypto/tink-go/v2 v2.1.0 h1:QXFBguwMwTIaU17EgZpEJWsUSc60b1BAGTzBIoMdmok=
github.com/tink-crypto/tink-go/v2 v2.1.0/go.mod h1:y1TnYFt1i2eZVfx4OGc+C+EMp4CoKWAw2VSEuoicHHI=
github.com/tink-crypto/tink-go/v2 v2.6.0 h1:+KHNBHhWH33Vn+igZWcsgdEPUxKwBMEe0QC60t388v4=
github.com/tink-crypto/tink-go/v2 v2.6.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8=
github.com/u-root/u-root v0.14.0 h1:Ka4T10EEML7dQ5XDvO9c3MBN8z4nuSnGjcd1jmU2ivg=
github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1WMluqE=
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM=
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
@@ -503,30 +511,30 @@ github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1z
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0=
go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40=
go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek=
@@ -536,36 +544,34 @@ go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/W
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b h1:18qgiDvlvH7kk8Ioa8Ov+K6xCi0GMvmGfGW0sgd/SYA=
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8=
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w=
golang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -576,18 +582,16 @@ golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -595,77 +599,75 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
|
||||
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
|
||||
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
|
||||
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
|
||||
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
|
||||
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
|
||||
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
|
||||
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 h1:i8QOKZfYg6AbGVZzUAY3LrNWCKF8O6zFisU9Wl9RER4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
|
||||
google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI=
|
||||
google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
|
||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 h1:7ei4lp52gK1uSejlA8AZl5AJjeLUOHBQscRQZUgAcu0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20/go.mod h1:ZdbssH/1SOVnjnDlXzxDHK2MCidiqXtbYccJNzNYPEE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 h1:Jr5R2J6F6qWyzINc+4AM8t5pfUz6beZpHp678GNrMbE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
||||
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
|
||||
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
|
||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
|
||||
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
|
||||
gorm.io/gorm v1.31.0 h1:0VlycGreVhK7RF/Bwt51Fk8v0xLiiiFdbGDPIZQ7mJY=
|
||||
gorm.io/gorm v1.31.0/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
|
||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg=
|
||||
gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
|
||||
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
|
||||
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k=
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM=
|
||||
honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
|
||||
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
|
||||
honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 h1:5SXjd4ET5dYijLaf0O3aOenC0Z4ZafIWSpjUzsQaNho=
|
||||
honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0/go.mod h1:EPDDhEZqVHhWuPI5zPAsjU0U7v9xNIWjoOVyZ5ZcniQ=
|
||||
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
|
||||
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
||||
modernc.org/cc/v4 v4.26.5 h1:xM3bX7Mve6G8K8b+T11ReenJOT+BmVqQj0FY5T4+5Y4=
|
||||
modernc.org/cc/v4 v4.26.5/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.28.1 h1:wPKYn5EC/mYTqBO373jKjvX2n+3+aK7+sICCv4Fjy1A=
|
||||
modernc.org/ccgo/v4 v4.28.1/go.mod h1:uD+4RnfrVgE6ec9NGguUNdhqzNIeeomeXf6CL0GTE5Q=
|
||||
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
||||
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
|
||||
modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=
|
||||
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
|
||||
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||
modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=
|
||||
modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
||||
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
||||
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
||||
modernc.org/libc v1.66.10 h1:yZkb3YeLx4oynyR+iUsXsybsX4Ubx7MQlSYEw4yj59A=
|
||||
modernc.org/libc v1.66.10/go.mod h1:8vGSEwvoUoltr4dlywvHqjtAqHBaw0j1jI7iFBTAr2I=
|
||||
modernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI=
|
||||
modernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE=
|
||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||
@@ -674,16 +676,18 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||
modernc.org/sqlite v1.39.1 h1:H+/wGFzuSCIEVCvXYVHX5RQglwhMOvtHSv+VtidL2r4=
|
||||
modernc.org/sqlite v1.39.1/go.mod h1:9fjQZ0mB1LLP0GYrp39oOJXx/I2sxEnZtzCmEQIKvGE=
|
||||
modernc.org/sqlite v1.44.3 h1:+39JvV/HWMcYslAwRxHb8067w+2zowvFOUrOWIy9PjY=
|
||||
modernc.org/sqlite v1.44.3/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA=
|
||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk=
|
||||
pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
|
||||
software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
|
||||
software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
|
||||
tailscale.com v1.86.5 h1:yBtWFjuLYDmxVnfnvPbZNZcKADCYgNfMd0rUAOA9XCs=
|
||||
tailscale.com v1.86.5/go.mod h1:Lm8dnzU2i/Emw15r6sl3FRNp/liSQ/nYw6ZSQvIdZ1M=
|
||||
tailscale.com v1.94.1 h1:0dAst/ozTuFkgmxZULc3oNwR9+qPIt5ucvzH7kaM0Jw=
|
||||
tailscale.com v1.94.1/go.mod h1:gLnVrEOP32GWvroaAHHGhjSGMPJ1i4DvqNwEg+Yuov4=
|
||||
zgo.at/zcache/v2 v2.4.1 h1:Dfjoi8yI0Uq7NCc4lo2kaQJJmp9Mijo21gef+oJstbY=
|
||||
zgo.at/zcache/v2 v2.4.1/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
|
||||
zombiezen.com/go/postgrestest v1.0.1 h1:aXoADQAJmZDU3+xilYVut0pHhgc0sF8ZspPW9gFNwP4=
|
||||
|
||||
240
hscontrol/app.go
@@ -5,6 +5,7 @@ import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof" // nolint
|
||||
@@ -114,13 +115,14 @@ var (
|
||||
|
||||
func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
var err error
|
||||
|
||||
if profilingEnabled {
|
||||
runtime.SetBlockProfileRate(1)
|
||||
}
|
||||
|
||||
noisePrivateKey, err := readOrCreatePrivateKey(cfg.NoisePrivateKeyPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err)
|
||||
return nil, fmt.Errorf("reading or creating Noise protocol private key: %w", err)
|
||||
}
|
||||
|
||||
s, err := state.NewState(cfg)
|
||||
@@ -139,27 +141,30 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
ephemeralGC := db.NewEphemeralGarbageCollector(func(ni types.NodeID) {
|
||||
node, ok := app.state.GetNodeByID(ni)
|
||||
if !ok {
|
||||
log.Error().Uint64("node.id", ni.Uint64()).Msg("Ephemeral node deletion failed")
|
||||
log.Debug().Caller().Uint64("node.id", ni.Uint64()).Msg("Ephemeral node deletion failed because node not found in NodeStore")
|
||||
log.Error().Uint64("node.id", ni.Uint64()).Msg("ephemeral node deletion failed")
|
||||
log.Debug().Caller().Uint64("node.id", ni.Uint64()).Msg("ephemeral node deletion failed because node not found in NodeStore")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
policyChanged, err := app.state.DeleteNode(node)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Uint64("node.id", ni.Uint64()).Str("node.name", node.Hostname()).Msg("Ephemeral node deletion failed")
|
||||
log.Error().Err(err).EmbedObject(node).Msg("ephemeral node deletion failed")
|
||||
return
|
||||
}
|
||||
|
||||
app.Change(policyChanged)
|
||||
log.Debug().Caller().Uint64("node.id", ni.Uint64()).Str("node.name", node.Hostname()).Msg("Ephemeral node deleted because garbage collection timeout reached")
|
||||
log.Debug().Caller().EmbedObject(node).Msg("ephemeral node deleted because garbage collection timeout reached")
|
||||
})
|
||||
app.ephemeralGC = ephemeralGC
|
||||
|
||||
var authProvider AuthProvider
|
||||
|
||||
authProvider = NewAuthProviderWeb(cfg.ServerURL)
|
||||
if cfg.OIDC.Issuer != "" {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
oidcProvider, err := NewAuthProviderOIDC(
|
||||
ctx,
|
||||
&app,
|
||||
@@ -176,17 +181,18 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
authProvider = oidcProvider
|
||||
}
|
||||
}
|
||||
|
||||
app.authProvider = authProvider
|
||||
|
||||
if app.cfg.TailcfgDNSConfig != nil && app.cfg.TailcfgDNSConfig.Proxied { // if MagicDNS
|
||||
// TODO(kradalby): revisit why this takes a list.
|
||||
|
||||
var magicDNSDomains []dnsname.FQDN
|
||||
if cfg.PrefixV4 != nil {
|
||||
magicDNSDomains = append(
|
||||
magicDNSDomains,
|
||||
util.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...)
|
||||
}
|
||||
|
||||
if cfg.PrefixV6 != nil {
|
||||
magicDNSDomains = append(
|
||||
magicDNSDomains,
|
||||
@@ -197,6 +203,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
if app.cfg.TailcfgDNSConfig.Routes == nil {
|
||||
app.cfg.TailcfgDNSConfig.Routes = make(map[string][]*dnstype.Resolver)
|
||||
}
|
||||
|
||||
for _, d := range magicDNSDomains {
|
||||
app.cfg.TailcfgDNSConfig.Routes[d.WithoutTrailingDot()] = nil
|
||||
}
|
||||
@@ -205,7 +212,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
if cfg.DERP.ServerEnabled {
|
||||
derpServerKey, err := readOrCreatePrivateKey(cfg.DERP.ServerPrivateKeyPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read or create DERP server private key: %w", err)
|
||||
return nil, fmt.Errorf("reading or creating DERP server private key: %w", err)
|
||||
}
|
||||
|
||||
if derpServerKey.Equal(*noisePrivateKey) {
|
||||
@@ -231,6 +238,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
app.DERPServer = embeddedDERPServer
|
||||
}
|
||||
|
||||
@@ -250,9 +258,11 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
|
||||
lastExpiryCheck := time.Unix(0, 0)
|
||||
|
||||
derpTickerChan := make(<-chan time.Time)
|
||||
|
||||
if h.cfg.DERP.AutoUpdate && h.cfg.DERP.UpdateFrequency != 0 {
|
||||
derpTicker := time.NewTicker(h.cfg.DERP.UpdateFrequency)
|
||||
defer derpTicker.Stop()
|
||||
|
||||
derpTickerChan = derpTicker.C
|
||||
}
|
||||
|
||||
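The hunk above leans on a small Go idiom: a freshly made, writer-less `<-chan time.Time` never delivers a value, so its `select` case stays dormant unless DERP auto-update is actually configured. A standalone sketch of the same idea, with illustrative names, interval, and timeout rather than headscale's configuration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	autoUpdate := false // flip to true to enable the periodic branch

	// A receive-only channel with no sender never fires, which quietly
	// disables the matching select case below.
	tickerChan := make(<-chan time.Time)

	if autoUpdate {
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()
		tickerChan = ticker.C
	}

	done := time.After(3 * time.Second)
	for {
		select {
		case <-tickerChan:
			fmt.Println("periodic work")
		case <-done:
			fmt.Println("done")
			return
		}
	}
}
```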
@@ -270,8 +280,10 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
|
||||
return
|
||||
|
||||
case <-expireTicker.C:
|
||||
var expiredNodeChanges []change.ChangeSet
|
||||
var changed bool
|
||||
var (
|
||||
expiredNodeChanges []change.Change
|
||||
changed bool
|
||||
)
|
||||
|
||||
lastExpiryCheck, expiredNodeChanges, changed = h.state.ExpireExpiredNodes(lastExpiryCheck)
|
||||
|
||||
@@ -285,12 +297,14 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
|
||||
}
|
||||
|
||||
case <-derpTickerChan:
|
||||
log.Info().Msg("Fetching DERPMap updates")
|
||||
derpMap, err := backoff.Retry(ctx, func() (*tailcfg.DERPMap, error) {
|
||||
log.Info().Msg("fetching DERPMap updates")
|
||||
|
||||
derpMap, err := backoff.Retry(ctx, func() (*tailcfg.DERPMap, error) { //nolint:contextcheck
|
||||
derpMap, err := derp.GetDERPMap(h.cfg.DERP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
|
||||
region, _ := h.DERPServer.GenerateRegion()
|
||||
derpMap.Regions[region.RegionID] = &region
|
||||
@@ -302,17 +316,19 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
|
||||
log.Error().Err(err).Msg("failed to build new DERPMap, retrying later")
|
||||
continue
|
||||
}
|
||||
|
||||
h.state.SetDERPMap(derpMap)
|
||||
|
||||
h.Change(change.DERPSet)
|
||||
h.Change(change.DERPMap())
|
||||
|
||||
case records, ok := <-extraRecordsUpdate:
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
h.cfg.TailcfgDNSConfig.ExtraRecords = records
|
||||
|
||||
h.Change(change.ExtraRecordsSet)
|
||||
h.Change(change.ExtraRecords())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -338,7 +354,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
||||
if !ok {
|
||||
return ctx, status.Errorf(
|
||||
codes.InvalidArgument,
|
||||
"Retrieving metadata is failed",
|
||||
"retrieving metadata",
|
||||
)
|
||||
}
|
||||
|
||||
@@ -346,7 +362,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
||||
if !ok {
|
||||
return ctx, status.Errorf(
|
||||
codes.Unauthenticated,
|
||||
"Authorization token is not supplied",
|
||||
"authorization token not supplied",
|
||||
)
|
||||
}
|
||||
|
||||
@@ -361,7 +377,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
||||
|
||||
valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix))
|
||||
if err != nil {
|
||||
return ctx, status.Error(codes.Internal, "failed to validate token")
|
||||
return ctx, status.Error(codes.Internal, "validating token")
|
||||
}
|
||||
|
||||
if !valid {
|
||||
@@ -389,7 +405,8 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
|
||||
|
||||
writeUnauthorized := func(statusCode int) {
|
||||
writer.WriteHeader(statusCode)
|
||||
if _, err := writer.Write([]byte("Unauthorized")); err != nil {
|
||||
|
||||
if _, err := writer.Write([]byte("Unauthorized")); err != nil { //nolint:noinlineerr
|
||||
log.Error().Err(err).Msg("writing HTTP response failed")
|
||||
}
|
||||
}
|
||||
@@ -400,6 +417,7 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg(`missing "Bearer " prefix in "Authorization" header`)
|
||||
writeUnauthorized(http.StatusUnauthorized)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -411,6 +429,7 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg("failed to validate token")
|
||||
writeUnauthorized(http.StatusUnauthorized)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -419,6 +438,7 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg("invalid token")
|
||||
writeUnauthorized(http.StatusUnauthorized)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -430,7 +450,7 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
|
||||
// and will remove it if it is not.
|
||||
func (h *Headscale) ensureUnixSocketIsAbsent() error {
|
||||
// File does not exist, all fine
|
||||
if _, err := os.Stat(h.cfg.UnixSocket); errors.Is(err, os.ErrNotExist) {
|
||||
if _, err := os.Stat(h.cfg.UnixSocket); errors.Is(err, os.ErrNotExist) { //nolint:noinlineerr
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -454,6 +474,7 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
|
||||
if provider, ok := h.authProvider.(*AuthProviderOIDC); ok {
|
||||
router.HandleFunc("/oidc/callback", provider.OIDCCallbackHandler).Methods(http.MethodGet)
|
||||
}
|
||||
|
||||
router.HandleFunc("/apple", h.AppleConfigMessage).Methods(http.MethodGet)
|
||||
router.HandleFunc("/apple/{platform}", h.ApplePlatformConfig).
|
||||
Methods(http.MethodGet)
|
||||
@@ -476,15 +497,18 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
|
||||
apiRouter := router.PathPrefix("/api").Subrouter()
|
||||
apiRouter.Use(h.httpAuthenticationMiddleware)
|
||||
apiRouter.PathPrefix("/v1/").HandlerFunc(grpcMux.ServeHTTP)
|
||||
|
||||
router.PathPrefix("/").HandlerFunc(notFoundHandler)
|
||||
router.HandleFunc("/favicon.ico", FaviconHandler)
|
||||
router.PathPrefix("/").HandlerFunc(BlankHandler)
|
||||
|
||||
return router
|
||||
}
|
||||
|
||||
// Serve launches the HTTP and gRPC server service Headscale and the API.
|
||||
//
|
||||
//nolint:gocyclo // complex server startup function
|
||||
func (h *Headscale) Serve() error {
|
||||
var err error
|
||||
|
||||
capver.CanOldCodeBeCleanedUp()
|
||||
|
||||
if profilingEnabled {
|
||||
@@ -505,12 +529,13 @@ func (h *Headscale) Serve() error {
|
||||
}
|
||||
|
||||
versionInfo := types.GetVersionInfo()
|
||||
log.Info().Str("version", versionInfo.Version).Str("commit", versionInfo.Commit).Msg("Starting Headscale")
|
||||
log.Info().Str("version", versionInfo.Version).Str("commit", versionInfo.Commit).Msg("starting headscale")
|
||||
log.Info().
|
||||
Str("minimum_version", capver.TailscaleVersion(capver.MinSupportedCapabilityVersion)).
|
||||
Msg("Clients with a lower minimum version will be rejected")
|
||||
|
||||
h.mapBatcher = mapper.NewBatcherAndMapper(h.cfg, h.state)
|
||||
|
||||
h.mapBatcher.Start()
|
||||
defer h.mapBatcher.Close()
|
||||
|
||||
@@ -525,7 +550,7 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
derpMap, err := derp.GetDERPMap(h.cfg.DERP)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get DERPMap: %w", err)
|
||||
return fmt.Errorf("getting DERPMap: %w", err)
|
||||
}
|
||||
|
||||
if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
|
||||
@@ -544,6 +569,7 @@ func (h *Headscale) Serve() error {
|
||||
// around between restarts, they will reconnect and the GC will
|
||||
// be cancelled.
|
||||
go h.ephemeralGC.Start()
|
||||
|
||||
ephmNodes := h.state.ListEphemeralNodes()
|
||||
for _, node := range ephmNodes.All() {
|
||||
h.ephemeralGC.Schedule(node.ID(), h.cfg.EphemeralNodeInactivityTimeout)
|
||||
@@ -554,7 +580,9 @@ func (h *Headscale) Serve() error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting up extrarecord manager: %w", err)
|
||||
}
|
||||
|
||||
h.cfg.TailcfgDNSConfig.ExtraRecords = h.extraRecordMan.Records()
|
||||
|
||||
go h.extraRecordMan.Run()
|
||||
defer h.extraRecordMan.Close()
|
||||
}
|
||||
@@ -563,6 +591,7 @@ func (h *Headscale) Serve() error {
|
||||
// records updates
|
||||
scheduleCtx, scheduleCancel := context.WithCancel(context.Background())
|
||||
defer scheduleCancel()
|
||||
|
||||
go h.scheduledTasks(scheduleCtx)
|
||||
|
||||
if zl.GlobalLevel() == zl.TraceLevel {
|
||||
@@ -575,6 +604,7 @@ func (h *Headscale) Serve() error {
|
||||
errorGroup := new(errgroup.Group)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
@@ -585,29 +615,30 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
err = h.ensureUnixSocketIsAbsent()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to remove old socket file: %w", err)
|
||||
return fmt.Errorf("removing old socket file: %w", err)
|
||||
}
|
||||
|
||||
socketDir := filepath.Dir(h.cfg.UnixSocket)
|
||||
|
||||
err = util.EnsureDir(socketDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting up unix socket: %w", err)
|
||||
}
|
||||
|
||||
socketListener, err := net.Listen("unix", h.cfg.UnixSocket)
|
||||
socketListener, err := new(net.ListenConfig).Listen(context.Background(), "unix", h.cfg.UnixSocket)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set up gRPC socket: %w", err)
|
||||
return fmt.Errorf("setting up gRPC socket: %w", err)
|
||||
}
|
||||
|
||||
// Change socket permissions
|
||||
if err := os.Chmod(h.cfg.UnixSocket, h.cfg.UnixSocketPermission); err != nil {
|
||||
return fmt.Errorf("failed change permission of gRPC socket: %w", err)
|
||||
if err := os.Chmod(h.cfg.UnixSocket, h.cfg.UnixSocketPermission); err != nil { //nolint:noinlineerr
|
||||
return fmt.Errorf("changing gRPC socket permission: %w", err)
|
||||
}
|
||||
|
||||
grpcGatewayMux := grpcRuntime.NewServeMux()
|
||||
|
||||
// Make the grpc-gateway connect to grpc over socket
|
||||
grpcGatewayConn, err := grpc.Dial(
|
||||
grpcGatewayConn, err := grpc.Dial( //nolint:staticcheck // SA1019: deprecated but supported in 1.x
|
||||
h.cfg.UnixSocket,
|
||||
[]grpc.DialOption{
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
@@ -658,10 +689,13 @@ func (h *Headscale) Serve() error {
|
||||
// https://github.com/soheilhy/cmux/issues/68
|
||||
// https://github.com/soheilhy/cmux/issues/91
|
||||
|
||||
var grpcServer *grpc.Server
|
||||
var grpcListener net.Listener
|
||||
var (
|
||||
grpcServer *grpc.Server
|
||||
grpcListener net.Listener
|
||||
)
|
||||
|
||||
if tlsConfig != nil || h.cfg.GRPCAllowInsecure {
|
||||
log.Info().Msgf("Enabling remote gRPC at %s", h.cfg.GRPCAddr)
|
||||
log.Info().Msgf("enabling remote gRPC at %s", h.cfg.GRPCAddr)
|
||||
|
||||
grpcOptions := []grpc.ServerOption{
|
||||
grpc.ChainUnaryInterceptor(
|
||||
@@ -684,9 +718,9 @@ func (h *Headscale) Serve() error {
|
||||
v1.RegisterHeadscaleServiceServer(grpcServer, newHeadscaleV1APIServer(h))
|
||||
reflection.Register(grpcServer)
|
||||
|
||||
grpcListener, err = net.Listen("tcp", h.cfg.GRPCAddr)
|
||||
grpcListener, err = new(net.ListenConfig).Listen(context.Background(), "tcp", h.cfg.GRPCAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to bind to TCP address: %w", err)
|
||||
return fmt.Errorf("binding to TCP address: %w", err)
|
||||
}
|
||||
|
||||
errorGroup.Go(func() error { return grpcServer.Serve(grpcListener) })
|
||||
@@ -714,14 +748,16 @@ func (h *Headscale) Serve() error {
|
||||
}
|
||||
|
||||
var httpListener net.Listener
|
||||
|
||||
if tlsConfig != nil {
|
||||
httpServer.TLSConfig = tlsConfig
|
||||
httpListener, err = tls.Listen("tcp", h.cfg.Addr, tlsConfig)
|
||||
} else {
|
||||
httpListener, err = net.Listen("tcp", h.cfg.Addr)
|
||||
httpListener, err = new(net.ListenConfig).Listen(context.Background(), "tcp", h.cfg.Addr)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to bind to TCP address: %w", err)
|
||||
return fmt.Errorf("binding to TCP address: %w", err)
|
||||
}
|
||||
|
||||
errorGroup.Go(func() error { return httpServer.Serve(httpListener) })
|
||||
@@ -729,29 +765,45 @@ func (h *Headscale) Serve() error {
|
||||
log.Info().
|
||||
Msgf("listening and serving HTTP on: %s", h.cfg.Addr)
|
||||
|
||||
debugHTTPListener, err := net.Listen("tcp", h.cfg.MetricsAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to bind to TCP address: %w", err)
|
||||
// Only start debug/metrics server if address is configured
|
||||
var debugHTTPServer *http.Server
|
||||
|
||||
var debugHTTPListener net.Listener
|
||||
|
||||
if h.cfg.MetricsAddr != "" {
|
||||
debugHTTPListener, err = (&net.ListenConfig{}).Listen(ctx, "tcp", h.cfg.MetricsAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("binding to TCP address: %w", err)
|
||||
}
|
||||
|
||||
debugHTTPServer = h.debugHTTPServer()
|
||||
|
||||
errorGroup.Go(func() error { return debugHTTPServer.Serve(debugHTTPListener) })
|
||||
|
||||
log.Info().
|
||||
Msgf("listening and serving debug and metrics on: %s", h.cfg.MetricsAddr)
|
||||
} else {
|
||||
log.Info().Msg("metrics server disabled (metrics_listen_addr is empty)")
|
||||
}
|
||||
|
||||
debugHTTPServer := h.debugHTTPServer()
|
||||
errorGroup.Go(func() error { return debugHTTPServer.Serve(debugHTTPListener) })
|
||||
|
||||
log.Info().
|
||||
Msgf("listening and serving debug and metrics on: %s", h.cfg.MetricsAddr)
|
||||
|
||||
var tailsqlContext context.Context
|
||||
|
||||
if tailsqlEnabled {
|
||||
if h.cfg.Database.Type != types.DatabaseSqlite {
|
||||
//nolint:gocritic // exitAfterDefer: Fatal exits during initialization before servers start
|
||||
log.Fatal().
|
||||
Str("type", h.cfg.Database.Type).
|
||||
Msgf("tailsql only support %q", types.DatabaseSqlite)
|
||||
}
|
||||
|
||||
if tailsqlTSKey == "" {
|
||||
//nolint:gocritic // exitAfterDefer: Fatal exits during initialization before servers start
|
||||
log.Fatal().Msg("tailsql requires TS_AUTHKEY to be set")
|
||||
}
|
||||
|
||||
tailsqlContext = context.Background()
|
||||
go runTailSQLService(ctx, util.TSLogfWrapper(), tailsqlStateDir, h.cfg.Database.Sqlite.Path)
|
||||
|
||||
go runTailSQLService(ctx, util.TSLogfWrapper(), tailsqlStateDir, h.cfg.Database.Sqlite.Path) //nolint:errcheck
|
||||
}
|
||||
|
||||
// Handle common process-killing signals so we can gracefully shut down:
|
||||
@@ -762,6 +814,7 @@ func (h *Headscale) Serve() error {
|
||||
syscall.SIGTERM,
|
||||
syscall.SIGQUIT,
|
||||
syscall.SIGHUP)
|
||||
|
||||
sigFunc := func(c chan os.Signal) {
|
||||
// Wait for a SIGINT or SIGKILL:
|
||||
for {
|
||||
@@ -786,6 +839,7 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
default:
|
||||
info := func(msg string) { log.Info().Msg(msg) }
|
||||
|
||||
log.Info().
|
||||
Str("signal", sig.String()).
|
||||
Msg("Received signal to stop, shutting down gracefully")
|
||||
@@ -794,16 +848,25 @@ func (h *Headscale) Serve() error {
|
||||
h.ephemeralGC.Close()
|
||||
|
||||
// Gracefully shut down servers
|
||||
ctx, cancel := context.WithTimeout(
|
||||
context.Background(),
|
||||
shutdownCtx, cancel := context.WithTimeout(
|
||||
context.WithoutCancel(ctx),
|
||||
types.HTTPShutdownTimeout,
|
||||
)
|
||||
info("shutting down debug http server")
|
||||
if err := debugHTTPServer.Shutdown(ctx); err != nil {
|
||||
log.Error().Err(err).Msg("failed to shutdown prometheus http")
|
||||
defer cancel()
|
||||
|
||||
if debugHTTPServer != nil {
|
||||
info("shutting down debug http server")
|
||||
|
||||
err := debugHTTPServer.Shutdown(shutdownCtx)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("failed to shutdown prometheus http")
|
||||
}
|
||||
}
|
||||
|
||||
info("shutting down main http server")
|
||||
if err := httpServer.Shutdown(ctx); err != nil {
|
||||
|
||||
err := httpServer.Shutdown(shutdownCtx)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("failed to shutdown http")
|
||||
}
|
||||
|
||||
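The shutdown hunk above swaps a fresh `context.Background()` for `context.WithoutCancel(ctx)` plus a timeout, so servers still get a bounded grace period even though the serve context is already being torn down, and it nil-checks the now-optional debug server. A hedged sketch of that pattern in isolation; the 30-second timeout and `log.Printf` reporting are placeholders (the diff itself uses `types.HTTPShutdownTimeout` and zerolog):

```go
package main

import (
	"context"
	"log"
	"net/http"
	"time"
)

// shutdownServers gives every server a bounded grace period, even when the
// parent context has already been cancelled by a signal handler.
func shutdownServers(parent context.Context, servers ...*http.Server) {
	// WithoutCancel keeps parent's values but drops its cancellation, so only
	// the timeout below ends the shutdown window.
	shutdownCtx, cancel := context.WithTimeout(context.WithoutCancel(parent), 30*time.Second)
	defer cancel()

	for _, srv := range servers {
		if srv == nil { // e.g. an optional debug/metrics server that never started
			continue
		}
		if err := srv.Shutdown(shutdownCtx); err != nil {
			log.Printf("shutdown failed: %v", err)
		}
	}
}

func main() {
	srv := &http.Server{Addr: "127.0.0.1:0"}
	shutdownServers(context.Background(), srv, nil)
}
```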
@@ -829,7 +892,11 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
// Close network listeners
|
||||
info("closing network listeners")
|
||||
debugHTTPListener.Close()
|
||||
|
||||
if debugHTTPListener != nil {
|
||||
debugHTTPListener.Close()
|
||||
}
|
||||
|
||||
httpListener.Close()
|
||||
grpcGatewayConn.Close()
|
||||
|
||||
@@ -839,6 +906,7 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
// Close state connections
|
||||
info("closing state and database")
|
||||
|
||||
err = h.state.Close()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("failed to close state")
|
||||
@@ -847,13 +915,11 @@ func (h *Headscale) Serve() error {
|
||||
log.Info().
|
||||
Msg("Headscale stopped")
|
||||
|
||||
// And we're done:
|
||||
cancel()
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
errorGroup.Go(func() error {
|
||||
sigFunc(sigc)
|
||||
|
||||
@@ -865,6 +931,7 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
func (h *Headscale) getTLSSettings() (*tls.Config, error) {
|
||||
var err error
|
||||
|
||||
if h.cfg.TLS.LetsEncrypt.Hostname != "" {
|
||||
if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
|
||||
log.Warn().
|
||||
@@ -877,6 +944,11 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
|
||||
Cache: autocert.DirCache(h.cfg.TLS.LetsEncrypt.CacheDir),
|
||||
Client: &acme.Client{
|
||||
DirectoryURL: h.cfg.ACMEURL,
|
||||
HTTPClient: &http.Client{
|
||||
Transport: &acmeLogger{
|
||||
rt: http.DefaultTransport,
|
||||
},
|
||||
},
|
||||
},
|
||||
Email: h.cfg.ACMEEmail,
|
||||
}
|
||||
@@ -892,7 +964,6 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
|
||||
// Configuration via autocert with HTTP-01. This requires listening on
|
||||
// port 80 for the certificate validation in addition to the headscale
|
||||
// service, which can be configured to run on any other port.
|
||||
|
||||
server := &http.Server{
|
||||
Addr: h.cfg.TLS.LetsEncrypt.Listen,
|
||||
Handler: certManager.HTTPHandler(http.HandlerFunc(h.redirect)),
|
||||
@@ -914,13 +985,13 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
|
||||
}
|
||||
} else if h.cfg.TLS.CertPath == "" {
|
||||
if !strings.HasPrefix(h.cfg.ServerURL, "http://") {
|
||||
log.Warn().Msg("Listening without TLS but ServerURL does not start with http://")
|
||||
log.Warn().Msg("listening without TLS but ServerURL does not start with http://")
|
||||
}
|
||||
|
||||
return nil, err
|
||||
} else {
|
||||
if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
|
||||
log.Warn().Msg("Listening with TLS but ServerURL does not start with https://")
|
||||
log.Warn().Msg("listening with TLS but ServerURL does not start with https://")
|
||||
}
|
||||
|
||||
tlsConfig := &tls.Config{
|
||||
@@ -935,20 +1006,9 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
|
||||
}
|
||||
}
|
||||
|
||||
func notFoundHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
log.Trace().
|
||||
Interface("header", req.Header).
|
||||
Interface("proto", req.Proto).
|
||||
Interface("url", req.URL).
|
||||
Msg("Request did not match")
|
||||
writer.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
|
||||
func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
|
||||
dir := filepath.Dir(path)
|
||||
|
||||
err := util.EnsureDir(dir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ensuring private key directory: %w", err)
|
||||
@@ -956,21 +1016,22 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
|
||||
|
||||
privateKey, err := os.ReadFile(path)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
log.Info().Str("path", path).Msg("No private key file at path, creating...")
|
||||
log.Info().Str("path", path).Msg("no private key file at path, creating...")
|
||||
|
||||
machineKey := key.NewMachine()
|
||||
|
||||
machineKeyStr, err := machineKey.MarshalText()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to convert private key to string for saving: %w",
|
||||
"converting private key to string for saving: %w",
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
err = os.WriteFile(path, machineKeyStr, privateKeyFileMode)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to save private key to disk at path %q: %w",
|
||||
"saving private key to disk at path %q: %w",
|
||||
path,
|
||||
err,
|
||||
)
|
||||
@@ -978,14 +1039,14 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
|
||||
|
||||
return &machineKey, nil
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("failed to read private key file: %w", err)
|
||||
return nil, fmt.Errorf("reading private key file: %w", err)
|
||||
}
|
||||
|
||||
trimmedPrivateKey := strings.TrimSpace(string(privateKey))
|
||||
|
||||
var machineKey key.MachinePrivate
|
||||
if err = machineKey.UnmarshalText([]byte(trimmedPrivateKey)); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse private key: %w", err)
|
||||
if err = machineKey.UnmarshalText([]byte(trimmedPrivateKey)); err != nil { //nolint:noinlineerr
|
||||
return nil, fmt.Errorf("parsing private key: %w", err)
|
||||
}
|
||||
|
||||
return &machineKey, nil
|
||||
@@ -994,6 +1055,31 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
|
||||
// Change is used to send changes to nodes.
|
||||
// All change should be enqueued here and empty will be automatically
|
||||
// ignored.
|
||||
func (h *Headscale) Change(cs ...change.ChangeSet) {
|
||||
func (h *Headscale) Change(cs ...change.Change) {
|
||||
h.mapBatcher.AddWork(cs...)
|
||||
}
|
||||
|
||||
// Provide some middleware that can inspect the ACME/autocert https calls
|
||||
// and log when things are failing.
|
||||
type acmeLogger struct {
|
||||
rt http.RoundTripper
|
||||
}
|
||||
|
||||
// RoundTrip will log when ACME/autocert failures happen either when err != nil OR
|
||||
// when http status codes indicate a failure has occurred.
|
||||
func (l *acmeLogger) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
resp, err := l.rt.RoundTrip(req)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("url", req.URL.String()).Msg("acme request failed")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode >= http.StatusBadRequest {
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
log.Error().Int("status_code", resp.StatusCode).Str("url", req.URL.String()).Bytes("body", body).Msg("acme request returned error")
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
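Several hunks in this file replace plain `net.Listen` calls with `new(net.ListenConfig).Listen(ctx, ...)`, which threads a `context.Context` through listener creation. A minimal, self-contained sketch of that call, using an illustrative loopback address rather than headscale's configured ones:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Context-aware listener creation: ctx is honored while the address is
	// resolved; the returned listener itself is not tied to ctx.
	ln, err := new(net.ListenConfig).Listen(ctx, "tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatalf("binding to TCP address: %v", err)
	}
	defer ln.Close()

	fmt.Println("listening on", ln.Addr())
}
```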
24
hscontrol/assets/assets.go
Normal file
@@ -0,0 +1,24 @@
|
||||
// Package assets provides embedded static assets for Headscale.
|
||||
// All static files (favicon, CSS, SVG) are embedded here for
|
||||
// centralized asset management.
|
||||
package assets
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
)
|
||||
|
||||
// Favicon is the embedded favicon.png file served at /favicon.ico
|
||||
//
|
||||
//go:embed favicon.png
|
||||
var Favicon []byte
|
||||
|
||||
// CSS is the embedded style.css stylesheet used in HTML templates.
|
||||
// Contains Material for MkDocs design system styles.
|
||||
//
|
||||
//go:embed style.css
|
||||
var CSS string
|
||||
|
||||
// SVG is the embedded headscale.svg logo used in HTML templates.
|
||||
//
|
||||
//go:embed headscale.svg
|
||||
var SVG string
|
||||
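The new `assets` package above exposes the embedded files as package-level variables, and the router changes earlier in this diff register a `FaviconHandler` for `/favicon.ico`. A hedged sketch of how such a handler could serve the embedded bytes; the handler body, content type, and cache header here are assumptions for illustration, not the repository's actual implementation:

```go
package main

import (
	"net/http"

	"github.com/juanfont/headscale/hscontrol/assets"
)

// faviconHandler is illustrative only: it writes the embedded favicon bytes
// with a long-lived cache header.
func faviconHandler(w http.ResponseWriter, _ *http.Request) {
	w.Header().Set("Content-Type", "image/png")
	w.Header().Set("Cache-Control", "public, max-age=86400")
	_, _ = w.Write(assets.Favicon)
}

func main() {
	http.HandleFunc("/favicon.ico", faviconHandler)
	_ = http.ListenAndServe("127.0.0.1:8080", nil) // sketch only
}
```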
BIN
hscontrol/assets/favicon.png
Normal file
|
Size: 22 KiB (binary image added)
1
hscontrol/assets/headscale.svg
Normal file
|
Size: 7.8 KiB (SVG image added)
@@ -1,307 +0,0 @@
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
||||
<title>Headscale Authentication Succeeded</title>
|
||||
<style>
|
||||
body {
|
||||
font-size: 14px;
|
||||
font-family:
|
||||
system-ui,
|
||||
-apple-system,
|
||||
BlinkMacSystemFont,
|
||||
"Segoe UI",
|
||||
"Roboto",
|
||||
"Oxygen",
|
||||
"Ubuntu",
|
||||
"Cantarell",
|
||||
"Fira Sans",
|
||||
"Droid Sans",
|
||||
"Helvetica Neue",
|
||||
sans-serif;
|
||||
}
|
||||
|
||||
hr {
|
||||
border-color: #fdfdfe;
|
||||
margin: 24px 0;
|
||||
}
|
||||
|
||||
.container {
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
height: 70vh;
|
||||
}
|
||||
|
||||
#logo {
|
||||
display: block;
|
||||
margin-left: -20px;
|
||||
margin-bottom: 16px;
|
||||
}
|
||||
|
||||
.message {
|
||||
display: flex;
|
||||
min-width: 40vw;
|
||||
background: #fafdfa;
|
||||
border: 1px solid #c6e9c9;
|
||||
margin-bottom: 12px;
|
||||
padding: 12px 16px 16px 12px;
|
||||
position: relative;
|
||||
border-radius: 2px;
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
.message-content {
|
||||
margin-left: 4px;
|
||||
}
|
||||
|
||||
.message #checkbox {
|
||||
fill: #2eb039;
|
||||
}
|
||||
|
||||
.message .message-title {
|
||||
color: #1e7125;
|
||||
font-size: 16px;
|
||||
font-weight: 700;
|
||||
line-height: 1.25;
|
||||
}
|
||||
|
||||
.message .message-body {
|
||||
border: 0;
|
||||
margin-top: 4px;
|
||||
}
|
||||
|
||||
.message p {
|
||||
font-size: 12px;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
color: #17421b;
|
||||
}
|
||||
|
||||
a {
|
||||
display: block;
|
||||
margin: 8px 0;
|
||||
color: #1563ff;
|
||||
text-decoration: none;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
a:hover {
|
||||
color: black;
|
||||
}
|
||||
|
||||
a svg {
|
||||
fill: currentcolor;
|
||||
}
|
||||
|
||||
.icon {
|
||||
align-items: center;
|
||||
display: inline-flex;
|
||||
justify-content: center;
|
||||
height: 21px;
|
||||
width: 21px;
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
h1 {
|
||||
font-size: 17.5px;
|
||||
font-weight: 700;
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
h1 + p {
|
||||
margin: 8px 0 16px 0;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body translate="no">
|
||||
<div class="container">
|
||||
<div>
|
||||
<svg
|
||||
id="logo"
|
||||
width="146"
|
||||
height="51"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xml:space="preserve"
|
||||
style="
|
||||
fill-rule: evenodd;
|
||||
clip-rule: evenodd;
|
||||
stroke-linejoin: round;
|
||||
stroke-miterlimit: 2;
|
||||
"
|
||||
viewBox="0 0 1280 640"
|
||||
>
|
||||
<path
|
||||
d="M.08 0v-.736h.068v.3C.203-.509.27-.545.347-.545c.029 0 .055.005.079.015.024.01.045.025.062.045.017.02.031.045.041.075.009.03.014.065.014.105V0H.475v-.289C.475-.352.464-.4.443-.433.422-.466.385-.483.334-.483c-.027 0-.052.006-.075.017C.236-.455.216-.439.2-.419c-.017.02-.029.044-.038.072-.009.028-.014.059-.014.093V0H.08Z"
|
||||
style="fill: #f8b5cb; fill-rule: nonzero"
|
||||
transform="translate(32.92220721 521.8022953) scale(235.3092)"
|
||||
/>
|
||||
<path
|
||||
d="M.051-.264c0-.036.007-.071.02-.105.013-.034.031-.064.055-.09.023-.026.052-.047.086-.063.033-.015.071-.023.112-.023.039 0 .076.007.109.021.033.014.062.033.087.058.025.025.044.054.058.088.014.035.021.072.021.113v.005H.121c.001.031.007.059.018.084.01.025.024.047.042.065.018.019.04.033.065.043.025.01.052.015.082.015.026 0 .049-.003.069-.01.02-.007.038-.016.054-.028C.466-.102.48-.115.492-.13c.011-.015.022-.03.032-.046l.057.03C.556-.097.522-.058.48-.03.437-.001.387.013.328.013.284.013.245.006.21-.01.175-.024.146-.045.123-.07.1-.095.082-.125.07-.159.057-.192.051-.227.051-.264ZM.128-.32h.396C.51-.375.485-.416.449-.441.412-.466.371-.479.325-.479c-.048 0-.089.013-.123.039-.034.026-.059.066-.074.12Z"
|
||||
style="fill: #8d8d8d; fill-rule: nonzero"
|
||||
transform="translate(177.16674681 521.8022953) scale(235.3092)"
|
||||
/>
|
||||
<path
|
||||
d="M.051-.267c0-.038.007-.074.021-.108.014-.033.033-.063.058-.088.025-.025.054-.045.087-.06.033-.015.069-.022.108-.022.043 0 .083.009.119.027.035.019.066.047.093.084v-.097h.067V0H.537v-.091C.508-.056.475-.029.44-.013.404.005.365.013.323.013.284.013.248.006.215-.01.182-.024.153-.045.129-.071.104-.096.085-.126.072-.16.058-.193.051-.229.051-.267Zm.279.218c.027 0 .054-.005.079-.015.025-.01.048-.024.068-.043.019-.018.035-.04.047-.067.012-.027.018-.056.018-.089 0-.031-.005-.059-.016-.086C.515-.375.501-.398.482-.417.462-.436.44-.452.415-.463.389-.474.361-.479.331-.479c-.031 0-.059.006-.084.017C.221-.45.199-.434.18-.415c-.019.02-.033.043-.043.068-.011.026-.016.053-.016.082 0 .029.005.056.016.082.011.026.025.049.044.069.019.02.041.036.066.047.025.012.053.018.083.018Z"
|
||||
style="fill: #8d8d8d; fill-rule: nonzero"
|
||||
transform="translate(327.76463481 521.8022953) scale(235.3092)"
|
||||
/>
|
||||
<path
|
||||
d="M.051-.267c0-.038.007-.074.021-.108.014-.033.033-.063.058-.088.025-.025.054-.045.087-.06.033-.015.069-.022.108-.022.043 0 .083.009.119.027.035.019.066.047.093.084v-.302h.068V0H.537v-.091C.508-.056.475-.029.44-.013.404.005.365.013.323.013.284.013.248.006.215-.01.182-.024.153-.045.129-.071.104-.096.085-.126.072-.16.058-.193.051-.229.051-.267Zm.279.218c.027 0 .054-.005.079-.015.025-.01.048-.024.068-.043.019-.018.035-.04.047-.067.011-.027.017-.056.017-.089 0-.031-.005-.059-.016-.086C.514-.375.5-.398.481-.417.462-.436.439-.452.414-.463.389-.474.361-.479.331-.479c-.031 0-.059.006-.084.017C.221-.45.199-.434.18-.415c-.019.02-.033.043-.043.068-.011.026-.016.053-.016.082 0 .029.005.056.016.082.011.026.025.049.044.069.019.02.041.036.066.047.025.012.053.018.083.018Z"
|
||||
style="fill: #8d8d8d; fill-rule: nonzero"
|
||||
transform="translate(488.71612761 521.8022953) scale(235.3092)"
|
||||
/>
|
||||
<path
|
||||
d="m.034-.062.043-.049c.017.019.035.034.054.044.018.01.037.015.057.015.013 0 .026-.002.038-.007.011-.004.021-.01.031-.018.009-.008.016-.017.021-.028.005-.011.008-.022.008-.035 0-.019-.005-.034-.014-.047C.263-.199.248-.21.229-.221.205-.234.183-.247.162-.259.14-.271.122-.284.107-.298.092-.311.08-.327.071-.344.062-.361.058-.381.058-.404c0-.021.004-.04.012-.058.007-.016.018-.031.031-.044.013-.013.028-.022.046-.029.018-.007.037-.01.057-.01.029 0 .056.006.079.019s.045.031.068.053l-.044.045C.291-.443.275-.456.258-.465.241-.474.221-.479.2-.479c-.022 0-.041.007-.056.02C.128-.445.12-.428.12-.408c0 .019.006.035.017.048.011.013.027.026.048.037.027.015.05.028.071.04.021.013.038.026.052.039.014.013.025.028.032.044.007.016.011.035.011.057 0 .021-.004.041-.011.059-.008.019-.019.036-.033.05-.014.015-.031.026-.05.035C.237.01.215.014.191.014c-.03 0-.059-.006-.086-.02C.077-.019.053-.037.034-.062Z"
|
||||
style="fill: #8d8d8d; fill-rule: nonzero"
|
||||
transform="translate(649.90292961 521.8022953) scale(235.3092)"
|
||||
/>
|
||||
<path
|
||||
d="M.051-.266c0-.04.007-.077.022-.111.014-.034.034-.063.059-.089.025-.025.054-.044.089-.058.035-.014.072-.021.113-.021.051 0 .098.01.139.03.041.021.075.049.1.085l-.05.043C.498-.418.47-.441.439-.456.408-.471.372-.479.331-.479c-.03 0-.058.005-.083.016C.222-.452.2-.436.181-.418.162-.399.148-.376.137-.35c-.011.026-.016.054-.016.084 0 .031.005.06.016.086.011.027.025.049.044.068.019.019.041.034.067.044.025.011.053.016.084.016.077 0 .141-.03.191-.09l.051.04c-.028.036-.062.064-.103.085C.43.004.384.014.332.014.291.014.254.007.219-.008.184-.022.155-.042.13-.067.105-.092.086-.121.072-.156.058-.19.051-.227.051-.266Z"
|
||||
style="fill: #8d8d8d; fill-rule: nonzero"
|
||||
transform="translate(741.20289921 521.8022953) scale(235.3092)"
|
||||
/>
|
||||
<path
|
||||
d="M.051-.267c0-.038.007-.074.021-.108.014-.033.033-.063.058-.088.025-.025.054-.045.087-.06.033-.015.069-.022.108-.022.043 0 .083.009.119.027.035.019.066.047.093.084v-.097h.067V0H.537v-.091C.508-.056.475-.029.44-.013.404.005.365.013.323.013.284.013.248.006.215-.01.182-.024.153-.045.129-.071.104-.096.085-.126.072-.16.058-.193.051-.229.051-.267Zm.279.218c.027 0 .054-.005.079-.015.025-.01.048-.024.068-.043.019-.018.035-.04.047-.067.012-.027.018-.056.018-.089 0-.031-.005-.059-.016-.086C.515-.375.501-.398.482-.417.462-.436.44-.452.415-.463.389-.474.361-.479.331-.479c-.031 0-.059.006-.084.017C.221-.45.199-.434.18-.415c-.019.02-.033.043-.043.068-.011.026-.016.053-.016.082 0 .029.005.056.016.082.011.026.025.049.044.069.019.02.041.036.066.047.025.012.053.018.083.018Z"
|
||||
style="fill: #8d8d8d; fill-rule: nonzero"
|
||||
transform="translate(884.27089281 521.8022953) scale(235.3092)"
|
||||
/>
|
||||
<path
|
||||
d="M.066-.736h.068V0H.066z"
|
||||
style="fill: #8d8d8d; fill-rule: nonzero"
|
||||
transform="translate(1045.22238561 521.8022953) scale(235.3092)"
|
||||
/>
|
||||
<path
|
||||
d="M.051-.264c0-.036.007-.071.02-.105.013-.034.031-.064.055-.09.023-.026.052-.047.086-.063.033-.015.071-.023.112-.023.039 0 .076.007.109.021.033.014.062.033.087.058.025.025.044.054.058.088.014.035.021.072.021.113v.005H.121c.001.031.007.059.018.084.01.025.024.047.042.065.018.019.04.033.065.043.025.01.052.015.082.015.026 0 .049-.003.069-.01.02-.007.038-.016.054-.028C.466-.102.48-.115.492-.13c.011-.015.022-.03.032-.046l.057.03C.556-.097.522-.058.48-.03.437-.001.387.013.328.013.284.013.245.006.21-.01.175-.024.146-.045.123-.07.1-.095.082-.125.07-.159.057-.192.051-.227.051-.264ZM.128-.32h.396C.51-.375.485-.416.449-.441.412-.466.371-.479.325-.479c-.048 0-.089.013-.123.039-.034.026-.059.066-.074.12Z"
|
||||
style="fill: #8d8d8d; fill-rule: nonzero"
|
||||
transform="translate(1092.28422561 521.8022953) scale(235.3092)"
|
||||
/>
|
||||
<circle
|
||||
cx="141.023"
|
||||
cy="338.36"
|
||||
r="117.472"
|
||||
style="fill: #f8b5cb"
|
||||
transform="matrix(.581302 0 0 .58613 40.06479894 12.59842153)"
|
||||
/>
|
||||
<circle
|
||||
cx="352.014"
|
||||
cy="268.302"
|
||||
r="33.095"
|
||||
style="fill: #a2a2a2"
|
||||
transform="matrix(.59308 0 0 .58289 32.39345942 21.2386)"
|
||||
/>
|
||||
<circle
|
||||
cx="352.014"
|
||||
cy="268.302"
|
||||
r="33.095"
|
||||
style="fill: #a2a2a2"
|
||||
transform="matrix(.59308 0 0 .58289 32.39345942 88.80371146)"
|
||||
/>
|
||||
<circle
|
||||
cx="352.014"
|
||||
cy="268.302"
|
||||
r="33.095"
|
||||
style="fill: #a2a2a2"
|
||||
transform="matrix(.59308 0 0 .58289 120.7528627 88.80371146)"
|
||||
/>
|
||||
<circle
|
||||
cx="352.014"
|
||||
cy="268.302"
|
||||
r="33.095"
|
||||
style="fill: #a2a2a2"
|
||||
transform="matrix(.59308 0 0 .58289 120.99825939 21.2386)"
|
||||
/>
|
||||
<circle
|
||||
cx="805.557"
|
||||
cy="336.915"
|
||||
r="118.199"
|
||||
style="fill: #8d8d8d"
|
||||
transform="matrix(.5782 0 0 .58289 36.19871106 15.26642564)"
|
||||
/>
|
||||
<circle
|
||||
cx="805.557"
|
||||
cy="336.915"
|
||||
r="118.199"
|
||||
style="fill: #8d8d8d"
|
||||
transform="matrix(.5782 0 0 .58289 183.24041937 15.26642564)"
|
||||
/>
|
||||
<path
|
||||
d="M680.282 124.808h-68.093v390.325h68.081v-28.23H640V153.228h40.282v-28.42Z"
|
||||
style="fill: #303030"
|
||||
transform="translate(34.2345 21.2386) scale(.58289)"
|
||||
/>
|
||||
<path
|
||||
d="M680.282 124.808h-68.093v390.325h68.081v-28.23H640V153.228h40.282v-28.42Z"
|
||||
style="fill: #303030"
|
||||
transform="matrix(-.58289 0 0 .58289 1116.7719791 21.2386)"
|
||||
/>
|
||||
</svg>
|
||||
<div class="message is-success">
|
||||
<svg
|
||||
id="checkbox"
|
||||
aria-hidden="true"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
width="20"
|
||||
height="20"
|
||||
viewBox="0 0 512 512"
|
||||
>
|
||||
<path
|
||||
d="M256 32C132.3 32 32 132.3 32 256s100.3 224 224 224 224-100.3 224-224S379.7 32 256 32zm114.9 149.1L231.8 359.6c-1.1 1.1-2.9 3.5-5.1 3.5-2.3 0-3.8-1.6-5.1-2.9-1.3-1.3-78.9-75.9-78.9-75.9l-1.5-1.5c-.6-.9-1.1-2-1.1-3.2 0-1.2.5-2.3 1.1-3.2.4-.4.7-.7 1.1-1.2 7.7-8.1 23.3-24.5 24.3-25.5 1.3-1.3 2.4-3 4.8-3 2.5 0 4.1 2.1 5.3 3.3 1.2 1.2 45 43.3 45 43.3l111.3-143c1-.8 2.2-1.4 3.5-1.4 1.3 0 2.5.5 3.5 1.3l30.6 24.1c.8 1 1.3 2.2 1.3 3.5.1 1.3-.4 2.4-1 3.3z"
|
||||
></path>
|
||||
</svg>
|
||||
<div class="message-content">
|
||||
<div class="message-title">Signed in via your OIDC provider</div>
|
||||
<p class="message-body">
|
||||
{{.Verb}} as {{.User}}, you can now close this window.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
<hr />
|
||||
<h1>Not sure how to get started?</h1>
|
||||
<p class="learn">
|
||||
Check out beginner and advanced guides on, or read more in the
|
||||
documentation.
|
||||
</p>
|
||||
<a
|
||||
href="https://github.com/juanfont/headscale/tree/main/docs"
|
||||
rel="noreferrer noopener"
|
||||
target="_blank"
|
||||
>
|
||||
<span class="icon">
|
||||
<svg
|
||||
width="16"
|
||||
height="16"
|
||||
viewBox="0 0 16 16"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
>
|
||||
<path
|
||||
d="M13.307 1H11.5a.5.5 0 1 1 0-1h3a.499.499 0 0 1 .5.65V3.5a.5.5 0 1 1-1 0V1.72l-1.793 1.774a.5.5 0 0 1-.713-.701L13.307 1zM12 14V8a.5.5 0 1 1 1 0v6.5a.5.5 0 0 1-.5.5H.563a.5.5 0 0 1-.5-.5v-13a.5.5 0 0 1 .5-.5H8a.5.5 0 0 1 0 1H1v12h11zM4 6a.5.5 0 0 1 0-1h3a.5.5 0 0 1 0 1H4zm0 2.5a.5.5 0 0 1 0-1h5a.5.5 0 0 1 0 1H4zM4 11a.5.5 0 1 1 0-1h5a.5.5 0 1 1 0 1H4z"
|
||||
/>
|
||||
</svg>
|
||||
</span>
|
||||
View the headscale documentation
|
||||
</a>
|
||||
<a
|
||||
href="https://tailscale.com/kb/"
|
||||
rel="noreferrer noopener"
|
||||
target="_blank"
|
||||
>
|
||||
<span class="icon">
|
||||
<svg
|
||||
width="16"
|
||||
height="16"
|
||||
viewBox="0 0 16 16"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
>
|
||||
<path
|
||||
d="M13.307 1H11.5a.5.5 0 1 1 0-1h3a.499.499 0 0 1 .5.65V3.5a.5.5 0 1 1-1 0V1.72l-1.793 1.774a.5.5 0 0 1-.713-.701L13.307 1zM12 14V8a.5.5 0 1 1 1 0v6.5a.5.5 0 0 1-.5.5H.563a.5.5 0 0 1-.5-.5v-13a.5.5 0 0 1 .5-.5H8a.5.5 0 0 1 0 1H1v12h11zM4 6a.5.5 0 0 1 0-1h3a.5.5 0 0 1 0 1H4zm0 2.5a.5.5 0 0 1 0-1h5a.5.5 0 0 1 0 1H4zM4 11a.5.5 0 1 1 0-1h5a.5.5 0 1 1 0 1H4z"
|
||||
/>
|
||||
</svg>
|
||||
</span>
|
||||
View the tailscale documentation
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
143
hscontrol/assets/style.css
Normal file
@@ -0,0 +1,143 @@
/* CSS Variables from Material for MkDocs */
:root {
  --md-default-fg-color: rgba(0, 0, 0, 0.87);
  --md-default-fg-color--light: rgba(0, 0, 0, 0.54);
  --md-default-fg-color--lighter: rgba(0, 0, 0, 0.32);
  --md-default-fg-color--lightest: rgba(0, 0, 0, 0.07);
  --md-code-fg-color: #36464e;
  --md-code-bg-color: #f5f5f5;
  --md-primary-fg-color: #4051b5;
  --md-accent-fg-color: #526cfe;
  --md-typeset-a-color: var(--md-primary-fg-color);
  --md-text-font: "Roboto", -apple-system, BlinkMacSystemFont, "Segoe UI", "Helvetica Neue", Arial, sans-serif;
  --md-code-font: "Roboto Mono", "SF Mono", Monaco, "Cascadia Code", Consolas, "Courier New", monospace;
}

/* Base Typography */
.md-typeset {
  font-size: 0.8rem;
  line-height: 1.6;
  color: var(--md-default-fg-color);
  font-family: var(--md-text-font);
  overflow-wrap: break-word;
  text-align: left;
}

/* Headings */
.md-typeset h1 {
  color: var(--md-default-fg-color--light);
  font-size: 2em;
  line-height: 1.3;
  margin: 0 0 1.25em;
  font-weight: 300;
  letter-spacing: -0.01em;
}

.md-typeset h1:not(:first-child) {
  margin-top: 2em;
}

.md-typeset h2 {
  font-size: 1.5625em;
  line-height: 1.4;
  margin: 2.4em 0 0.64em;
  font-weight: 300;
  letter-spacing: -0.01em;
  color: var(--md-default-fg-color--light);
}

.md-typeset h3 {
  font-size: 1.25em;
  line-height: 1.5;
  margin: 2em 0 0.8em;
  font-weight: 400;
  letter-spacing: -0.01em;
  color: var(--md-default-fg-color--light);
}

/* Paragraphs and block elements */
.md-typeset p {
  margin: 1em 0;
}

.md-typeset blockquote,
.md-typeset dl,
.md-typeset figure,
.md-typeset ol,
.md-typeset pre,
.md-typeset ul {
  margin-bottom: 1em;
  margin-top: 1em;
}

/* Lists */
.md-typeset ol,
.md-typeset ul {
  padding-left: 2em;
}

/* Links */
.md-typeset a {
  color: var(--md-typeset-a-color);
  text-decoration: none;
  word-break: break-word;
}

.md-typeset a:hover,
.md-typeset a:focus {
  color: var(--md-accent-fg-color);
}

/* Code (inline) */
.md-typeset code {
  background-color: var(--md-code-bg-color);
  color: var(--md-code-fg-color);
  border-radius: 0.1rem;
  font-size: 0.85em;
  font-family: var(--md-code-font);
  padding: 0 0.2941176471em;
  word-break: break-word;
}

/* Code blocks (pre) */
.md-typeset pre {
  display: block;
  line-height: 1.4;
  margin: 1em 0;
  overflow-x: auto;
}

.md-typeset pre > code {
  background-color: var(--md-code-bg-color);
  color: var(--md-code-fg-color);
  display: block;
  padding: 0.7720588235em 1.1764705882em;
  font-family: var(--md-code-font);
  font-size: 0.85em;
  line-height: 1.4;
  overflow-wrap: break-word;
  word-wrap: break-word;
  white-space: pre-wrap;
}

/* Links in code */
.md-typeset a code {
  color: currentcolor;
}

/* Logo */
.headscale-logo {
  display: block;
  width: 400px;
  max-width: 100%;
  height: auto;
  margin: 0 0 3rem 0;
  padding: 0;
}

@media (max-width: 768px) {
  .headscale-logo {
    width: 200px;
    margin-left: 0;
  }
}
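The stylesheet above is added as a plain static asset under hscontrol/assets/. The diff does not show how it is served; as a rough sketch only, an embedded asset like this could be exposed with go:embed and the standard library. The package name, Handler function, and "/assets/" prefix below are assumptions for illustration, not headscale's actual wiring.

```go
package assets

import (
	"embed"
	"net/http"
)

//go:embed style.css
var files embed.FS

// Handler serves the embedded static assets (e.g. style.css).
// The "/assets/" prefix is an assumption for this sketch, not the
// route headscale actually uses.
func Handler() http.Handler {
	return http.StripPrefix("/assets/", http.FileServer(http.FS(files)))
}
```

A caller could then mount it with something like mux.Handle("/assets/", assets.Handler()).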
@@ -11,7 +11,6 @@ import (
    "time"

    "github.com/juanfont/headscale/hscontrol/types"
    "github.com/juanfont/headscale/hscontrol/types/change"
    "github.com/juanfont/headscale/hscontrol/util"
    "github.com/rs/zerolog/log"
    "gorm.io/gorm"
@@ -21,8 +20,8 @@ import (
)

type AuthProvider interface {
    RegisterHandler(http.ResponseWriter, *http.Request)
    AuthURL(types.RegistrationID) string
    RegisterHandler(w http.ResponseWriter, r *http.Request)
    AuthURL(regID types.RegistrationID) string
}

func (h *Headscale) handleRegister(
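The interface change above only names the previously anonymous parameters, so existing implementations keep compiling. For illustration, a minimal hypothetical implementation might look like the sketch below; RegistrationID and cliAuthProvider are stand-ins invented for the example, not types from the headscale tree.

```go
package main

import (
	"fmt"
	"net/http"
)

// RegistrationID stands in for types.RegistrationID in this sketch.
type RegistrationID string

// AuthProvider mirrors the interface shown in the hunk above.
type AuthProvider interface {
	RegisterHandler(w http.ResponseWriter, r *http.Request)
	AuthURL(regID RegistrationID) string
}

// cliAuthProvider is a hypothetical provider that tells the user to
// approve the node on the command line.
type cliAuthProvider struct {
	serverURL string
}

var _ AuthProvider = (*cliAuthProvider)(nil)

func (p *cliAuthProvider) RegisterHandler(w http.ResponseWriter, r *http.Request) {
	// A real provider would look up the registration ID from the URL
	// and render an approval page; this sketch only acknowledges it.
	fmt.Fprintln(w, "register this node with: headscale nodes register --key <id>")
}

func (p *cliAuthProvider) AuthURL(regID RegistrationID) string {
	return fmt.Sprintf("%s/register/%s", p.serverURL, regID)
}

func main() {
	var p AuthProvider = &cliAuthProvider{serverURL: "https://headscale.example.com"}
	fmt.Println(p.AuthURL("abc123"))
}
```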
@@ -43,8 +42,7 @@ func (h *Headscale) handleRegister(
    // This is a logout attempt (expiry in the past)
    if node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok {
        log.Debug().
            Uint64("node.id", node.ID().Uint64()).
            Str("node.name", node.Hostname()).
            EmbedObject(node).
            Bool("is_ephemeral", node.IsEphemeral()).
            Bool("has_authkey", node.AuthKey().Valid()).
            Msg("Found existing node for logout, calling handleLogout")
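Several log sites in this file swap hand-written Uint64("node.id", ...) and Str("node.name", ...) pairs for zerolog's EmbedObject, which pulls the fields from any value implementing zerolog.LogObjectMarshaler (NodeView must satisfy it for EmbedObject(node) to compile). A self-contained sketch of the pattern, with an invented nodeInfo type standing in for NodeView:

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

// nodeInfo is an invented example type; headscale's NodeView plays this
// role in the diff above.
type nodeInfo struct {
	ID       uint64
	Hostname string
}

// MarshalZerologObject lets zerolog embed the node's fields via EmbedObject.
func (n nodeInfo) MarshalZerologObject(e *zerolog.Event) {
	e.Uint64("node.id", n.ID).Str("node.name", n.Hostname)
}

func main() {
	logger := zerolog.New(os.Stderr)
	n := nodeInfo{ID: 1, Hostname: "example-node"}

	// Equivalent to chaining Uint64(...).Str(...) by hand at every call site.
	logger.Debug().EmbedObject(n).Msg("Found existing node for logout")
}
```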
@@ -53,6 +51,7 @@ func (h *Headscale) handleRegister(
        if err != nil {
            return nil, fmt.Errorf("handling logout: %w", err)
        }

        if resp != nil {
            return resp, nil
        }
@@ -134,7 +133,7 @@ func (h *Headscale) handleRegister(
}

// handleLogout checks if the [tailcfg.RegisterRequest] is a
// logout attempt from a node. If the node is not attempting to
// logout attempt from a node. If the node is not attempting to.
func (h *Headscale) handleLogout(
    node types.NodeView,
    req tailcfg.RegisterRequest,
@@ -156,11 +155,12 @@ func (h *Headscale) handleLogout(
    // force the client to re-authenticate.
    // TODO(kradalby): I wonder if this is a path we ever hit?
    if node.IsExpired() {
        log.Trace().Str("node.name", node.Hostname()).
            Uint64("node.id", node.ID().Uint64()).
        log.Trace().
            EmbedObject(node).
            Interface("reg.req", req).
            Bool("unexpected", true).
            Msg("Node key expired, forcing re-authentication")

        return &tailcfg.RegisterResponse{
            NodeKeyExpired:    true,
            MachineAuthorized: false,
@@ -183,8 +183,7 @@ func (h *Headscale) handleLogout(
    // Zero expiry is handled in handleRegister() before calling this function.
    if req.Expiry.Before(time.Now()) {
        log.Debug().
            Uint64("node.id", node.ID().Uint64()).
            Str("node.name", node.Hostname()).
            EmbedObject(node).
            Bool("is_ephemeral", node.IsEphemeral()).
            Bool("has_authkey", node.AuthKey().Valid()).
            Time("req.expiry", req.Expiry).
@@ -192,8 +191,7 @@ func (h *Headscale) handleLogout(

        if node.IsEphemeral() {
            log.Info().
                Uint64("node.id", node.ID().Uint64()).
                Str("node.name", node.Hostname()).
                EmbedObject(node).
                Msg("Deleting ephemeral node during logout")

            c, err := h.state.DeleteNode(node)
@@ -210,8 +208,7 @@ func (h *Headscale) handleLogout(
        }

        log.Debug().
            Uint64("node.id", node.ID().Uint64()).
            Str("node.name", node.Hostname()).
            EmbedObject(node).
            Msg("Node is not ephemeral, setting expiry instead of deleting")
    }

@@ -234,11 +231,7 @@ func isAuthKey(req tailcfg.RegisterRequest) bool {
}

func nodeToRegisterResponse(node types.NodeView) *tailcfg.RegisterResponse {
    return &tailcfg.RegisterResponse{
        // TODO(kradalby): Only send for user-owned nodes
        // and not tagged nodes when tags is working.
        User:  node.UserView().TailscaleUser(),
        Login: node.UserView().TailscaleLogin(),
    resp := &tailcfg.RegisterResponse{
        NodeKeyExpired: node.IsExpired(),

        // Headscale does not implement the concept of machine authorization
@@ -246,6 +239,18 @@ func nodeToRegisterResponse(node types.NodeView) *tailcfg.RegisterResponse {
        // Revisit this if #2176 gets implemented.
        MachineAuthorized: true,
    }

    // For tagged nodes, use the TaggedDevices special user
    // For user-owned nodes, include User and Login information from the actual user
    if node.IsTagged() {
        resp.User = types.TaggedDevices.View().TailscaleUser()
        resp.Login = types.TaggedDevices.View().TailscaleLogin()
    } else if node.Owner().Valid() {
        resp.User = node.Owner().TailscaleUser()
        resp.Login = node.Owner().TailscaleLogin()
    }

    return resp
}

func (h *Headscale) waitForFollowup(
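The reworked nodeToRegisterResponse builds the response first and only then fills in User and Login: tagged nodes get the shared TaggedDevices identity, user-owned nodes get their actual owner, and anything else leaves the fields unset. The same branching, reduced to placeholder types for illustration (none of these names come from headscale):

```go
package main

import "fmt"

// identity is a simplified stand-in for the Tailscale user/login pair.
type identity struct {
	User  string
	Login string
}

type node struct {
	tagged bool
	owner  *identity
}

// taggedDevices plays the role of the shared TaggedDevices user.
var taggedDevices = identity{User: "tagged-devices", Login: "tagged-devices"}

// pickIdentity mirrors the branching in nodeToRegisterResponse above:
// tagged first, then a valid owner, otherwise nothing.
func pickIdentity(n node) (identity, bool) {
	switch {
	case n.tagged:
		return taggedDevices, true
	case n.owner != nil:
		return *n.owner, true
	default:
		return identity{}, false
	}
}

func main() {
	fmt.Println(pickIdentity(node{tagged: true}))
	fmt.Println(pickIdentity(node{owner: &identity{User: "alice", Login: "alice@example.com"}}))
	fmt.Println(pickIdentity(node{}))
}
```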
@@ -272,6 +277,7 @@ func (h *Headscale) waitForFollowup(
            // registration is expired in the cache, instruct the client to try a new registration
            return h.reqToNewRegisterResponse(req, machineKey)
        }

        return nodeToRegisterResponse(node.View()), nil
    }
}
@@ -317,7 +323,7 @@ func (h *Headscale) reqToNewRegisterResponse(
        nodeToRegister.Node.Expiry = &req.Expiry
    }

    log.Info().Msgf("New followup node registration using key: %s", newRegID)
    log.Info().Msgf("new followup node registration using key: %s", newRegID)
    h.state.SetRegistrationCacheEntry(newRegID, nodeToRegister)

    return &tailcfg.RegisterResponse{
@@ -337,6 +343,7 @@ func (h *Headscale) handleRegisterWithAuthKey(
        if errors.Is(err, gorm.ErrRecordNotFound) {
            return nil, NewHTTPError(http.StatusUnauthorized, "invalid pre auth key", nil)
        }

        var perr types.PAKError
        if errors.As(err, &perr) {
            return nil, NewHTTPError(http.StatusUnauthorized, perr.Error(), nil)
@@ -348,7 +355,7 @@ func (h *Headscale) handleRegisterWithAuthKey(
    // If node is not valid, it means an ephemeral node was deleted during logout
    if !node.Valid() {
        h.Change(changed)
        return nil, nil
        return nil, nil //nolint:nilnil // intentional: no node to return when ephemeral deleted
    }

    // This is a bit of a back and forth, but we have a bit of a chicken and egg
@@ -364,16 +371,13 @@ func (h *Headscale) handleRegisterWithAuthKey(
    // eventbus.
    // TODO(kradalby): This needs to be ran as part of the batcher maybe?
    // now since we dont update the node/pol here anymore
    routeChange := h.state.AutoApproveRoutes(node)

    if _, _, err := h.state.SaveNode(node); err != nil {
        return nil, fmt.Errorf("saving auto approved routes to node: %w", err)
    routesChange, err := h.state.AutoApproveRoutes(node)
    if err != nil {
        return nil, fmt.Errorf("auto approving routes: %w", err)
    }

    if routeChange && changed.Empty() {
        changed = change.NodeAdded(node.ID())
    }
    h.Change(changed)
    // Send both changes. Empty changes are ignored by Change().
    h.Change(changed, routesChange)

    // TODO(kradalby): I think this is covered above, but we need to validate that.
    // // If policy changed due to node registration, send a separate policy change
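In the hunk above, AutoApproveRoutes now returns a change set and an error instead of a bare bool, and the registration change and route change are passed to h.Change in a single variadic call that drops empty changes. A stripped-down sketch of that dispatch pattern, using placeholder types rather than headscale's change package:

```go
package main

import "fmt"

// ChangeSet is a simplified placeholder for headscale's change type.
type ChangeSet struct {
	Reason string
}

// Empty reports whether there is anything to propagate.
func (c ChangeSet) Empty() bool { return c.Reason == "" }

// Change mimics the variadic dispatch used above: empty change sets are
// silently dropped, so callers can pass them unconditionally.
func Change(changes ...ChangeSet) {
	for _, c := range changes {
		if c.Empty() {
			continue
		}
		fmt.Println("propagating change:", c.Reason)
	}
}

func main() {
	registered := ChangeSet{Reason: "node added"}
	routes := ChangeSet{} // nothing auto-approved this time

	// Both are sent in one call; only the non-empty one is propagated.
	Change(registered, routes)
}
```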
@@ -385,16 +389,15 @@ func (h *Headscale) handleRegisterWithAuthKey(
    resp := &tailcfg.RegisterResponse{
        MachineAuthorized: true,
        NodeKeyExpired:    node.IsExpired(),
        User:              node.UserView().TailscaleUser(),
        Login:             node.UserView().TailscaleLogin(),
        User:              node.Owner().TailscaleUser(),
        Login:             node.Owner().TailscaleLogin(),
    }

    log.Trace().
        Caller().
        Interface("reg.resp", resp).
        Interface("reg.req", req).
        Str("node.name", node.Hostname()).
        Uint64("node.id", node.ID().Uint64()).
        EmbedObject(node).
        Msg("RegisterResponse")

    return resp, nil
@@ -431,6 +434,7 @@ func (h *Headscale) handleRegisterInteractive(
            Str("generated.hostname", hostname).
            Msg("Received registration request with empty hostname, generated default")
    }

    hostinfo.Hostname = hostname

    nodeToRegister := types.NewRegisterNode(
@@ -452,7 +456,7 @@ func (h *Headscale) handleRegisterInteractive(
        nodeToRegister,
    )

    log.Info().Msgf("Starting node registration using key: %s", registrationId)
    log.Info().Msgf("starting node registration using key: %s", registrationId)

    return &tailcfg.RegisterResponse{
        AuthURL: h.authProvider.AuthURL(registrationId),