Mirror of https://github.com/juanfont/headscale.git (synced 2026-02-13 19:27:40 +01:00)

Compare commits: v0.28.0-be...v0.28.0 (96 commits)
Commits (SHA1):

97fa117c48, b5329ff0f3, eac8a57bce, 44af046196, 4a744f423b, ca75e096e6, ce7c256d1e, 4912ceaaf5,
d7f7f2c85e, df184e5276, 0630fd32e5, 306aabbbce, a09b0d1d69, 362696a5ef, 1f32c8bf61, fb137a8fe3,
c2f28efbd7, 11f0d4cfdd, 5d300273dc, 7f003ecaff, 2695d1527e, d32f6707f7, 89e436f0e6, 46daa659e2,
49b70db7f2, 04b4071888, ee127edbf7, 606e5f68a0, a04b21abc6, 92caadcee6, aa29fd95a3, 0565e01c2f,
aee1d2a640, ee303186b3, e9a94f00a9, d40203e153, 5688c201e9, 4e1834adaf, 22afb2c61b, b3c4d0ec81,
b82c9c9c0e, e0bae9b769, a194712c34, 8776745428, b01eda721c, 42bd9cd058, 515a22e696, 6654142fbe,
424e26d636, d9cbb96603, c1cfb59b91, 4be13baf3f, 98c0817b95, 951fd5a8e7, b8f3e09046, 4ab06930a2,
165c5f0491, c8c3c9d4a0, 4dd1b49a35, db6882b5f5, 1325fd8b27, 8631581852, 1398d01bd8, 00da5361b3,
740d2b5a2c, 3b4b9a4436, 1b6db34b93, 07a4b1b1fd, 2e180d2587, 0451dd4718, a6696582a4, 00f22a8443,
1d9900273e, 18e13f6ffa, a445278f76, 8387c9cd82, 25a7434830, 183a38715c, 99d35fbbbc, d50108c722,
6d21a4a3fe, 7d81dca9aa, 3689f05407, bb30208f97, c3e2e57f8e, e43f19df79, 0516c0ec37, eec54cbbf3,
72fcb93ef3, f5c779626a, d227b3a135, 0bcfdc29ad, 87c230d251, 84c092a9f9, 9146140217, 5103b35f3c
````diff
@@ -71,7 +71,7 @@ go run ./cmd/hi run "TestName" --timeout=60s
 - **Slow tests** (5+ min): Node expiration, HA failover
 - **Long-running tests** (10+ min): `TestNodeOnlineStatus` runs for 12 minutes
 
-**CRITICAL**: Only ONE test can run at a time due to Docker port conflicts and resource constraints.
+**CONCURRENT EXECUTION**: Multiple tests CAN run simultaneously. Each test run gets a unique Run ID for isolation. See "Concurrent Execution and Run ID Isolation" section below.
 
 ## Test Artifacts and Log Analysis
 
````
````diff
@@ -98,6 +98,97 @@ When tests fail, examine artifacts in this specific order:
 4. **Client status dumps** (`*_status.json`): Network state and peer connectivity information
 5. **Database snapshots** (`.db` files): For data consistency and state persistence issues
 
+## Concurrent Execution and Run ID Isolation
+
+### Overview
+
+The integration test system supports running multiple tests concurrently on the same Docker daemon. Each test run is isolated through a unique Run ID that ensures containers, networks, and cleanup operations don't interfere with each other.
+
+### Run ID Format and Usage
+
+Each test run generates a unique Run ID in the format: `YYYYMMDD-HHMMSS-{6-char-hash}`
+- Example: `20260109-104215-mdjtzx`
+
+The Run ID is used for:
+- **Container naming**: `ts-{runIDShort}-{version}-{hash}` (e.g., `ts-mdjtzx-1-74-fgdyls`)
+- **Docker labels**: All containers get `hi.run-id={runID}` label
+- **Log directories**: `control_logs/{runID}/`
+- **Cleanup isolation**: Only containers with matching run ID are cleaned up
+
+### Container Isolation Mechanisms
+
+1. **Unique Container Names**: Each container includes the run ID for identification
+2. **Docker Labels**: `hi.run-id` and `hi.test-type` labels on all containers
+3. **Dynamic Port Allocation**: All ports use `{HostPort: "0"}` to let the kernel assign free ports
+4. **Per-Run Networks**: Network names include the scenario hash for isolation
+5. **Isolated Cleanup**: `killTestContainersByRunID()` only removes containers matching the run ID
+
+### ⚠️ CRITICAL: Never Interfere with Other Test Runs
+
+**FORBIDDEN OPERATIONS** when other tests may be running:
+
+```bash
+# ❌ NEVER do global container cleanup while tests are running
+docker rm -f $(docker ps -q --filter "name=hs-")
+docker rm -f $(docker ps -q --filter "name=ts-")
+
+# ❌ NEVER kill all test containers
+# This will destroy other agents' test sessions!
+
+# ❌ NEVER prune all Docker resources during active tests
+docker system prune -f  # Only safe when NO tests are running
+```
+
+**SAFE OPERATIONS**:
+
+```bash
+# ✅ Clean up only YOUR test run's containers (by run ID)
+# The test runner does this automatically via cleanup functions
+
+# ✅ Clean stale (stopped/exited) containers only
+# Pre-test cleanup only removes stopped containers, not running ones
+
+# ✅ Check what's running before cleanup
+docker ps --filter "name=headscale-test-suite" --format "{{.Names}}"
+```
+
+### Running Concurrent Tests
+
+```bash
+# Start multiple tests in parallel - each gets a unique run ID
+go run ./cmd/hi run "TestPingAllByIP" &
+go run ./cmd/hi run "TestACLAllowUserDst" &
+go run ./cmd/hi run "TestOIDCAuthenticationPingAll" &
+
+# Monitor running test suites
+docker ps --filter "name=headscale-test-suite" --format "table {{.Names}}\t{{.Status}}"
+```
+
+### Agent Session Isolation Rules
+
+When working as an agent:
+
+1. **Your run ID is unique**: Each test you start gets its own run ID
+2. **Never clean up globally**: Only use run ID-specific cleanup
+3. **Check before cleanup**: Verify no other tests are running if you need to prune resources
+4. **Respect other sessions**: Other agents may have tests running concurrently
+5. **Log directories are isolated**: Your artifacts are in `control_logs/{your-run-id}/`
+
+### Identifying Your Containers
+
+Your test containers can be identified by:
+- The run ID in the container name
+- The `hi.run-id` Docker label
+- The test suite container: `headscale-test-suite-{your-run-id}`
+
+```bash
+# List containers for a specific run ID
+docker ps --filter "label=hi.run-id=20260109-104215-mdjtzx"
+
+# Get your run ID from the test output
+# Look for: "Run ID: 20260109-104215-mdjtzx"
+```
+
 ## Common Failure Patterns and Root Cause Analysis
 
 ### CRITICAL MINDSET: Code Issues vs Infrastructure Issues
````
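The Run ID and labeling scheme added above is mechanical enough to sketch. The following is a minimal, self-contained Go illustration of the pattern (generate a `YYYYMMDD-HHMMSS-{6-char-hash}` ID, stamp it on a container as a name component and `hi.run-id` label, and use `HostPort: "0"` for kernel-assigned ports); the helper name `newRunID` and the base36 suffix are assumptions, not the actual `cmd/hi` implementation:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// newRunID builds a run ID in the YYYYMMDD-HHMMSS-{6-char-hash} format
// described above. The base36 suffix is a hypothetical choice.
func newRunID() string {
	const alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
	suffix := make([]byte, 6)
	for i := range suffix {
		suffix[i] = alphabet[rand.Intn(len(alphabet))]
	}
	return time.Now().Format("20060102-150405") + "-" + string(suffix)
}

func main() {
	runID := newRunID() // e.g. "20260109-104215-mdjtzx"

	// Values a test harness would hand to the Docker API:
	name := fmt.Sprintf("ts-%s-1-74-%s", runID[16:], "fgdyls") // runID[16:] is the 6-char hash part
	labels := map[string]string{
		"hi.run-id":    runID,         // cleanup filters on this label
		"hi.test-type": "test-runner", // used to detect running tests
	}
	hostPort := "0" // HostPort "0" lets the kernel pick a free port

	fmt.Println(name, labels, hostPort)
}
```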
````diff
@@ -250,10 +341,10 @@ require.NotNil(t, targetNode, "should find expected node")
    - **Detection**: No progress in logs for >2 minutes during initialization
    - **Solution**: `docker system prune -f` and retry
 
-3. **Docker Port Conflicts**: Multiple tests trying to use same ports
-   - **Pattern**: "bind: address already in use" errors
-   - **Detection**: Port binding failures in Docker logs
-   - **Solution**: Only run ONE test at a time
+3. **Docker Resource Exhaustion**: Too many concurrent tests overwhelming system
+   - **Pattern**: Container creation timeouts, OOM kills, slow test execution
+   - **Detection**: System load high, Docker daemon slow to respond
+   - **Solution**: Reduce number of concurrent tests, wait for completion before starting more
 
 **CODE ISSUES (99% of failures)**:
 1. **Route Approval Process Failures**: Routes not getting approved when they should be
````
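The "Docker Resource Exhaustion" detection steps in the hunk above are easy to script with standard tooling; a quick pre-flight check (not part of the repo) might look like:

```bash
# Quick resource-pressure check before starting another concurrent test run
docker stats --no-stream --format "table {{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}"
uptime  # high load averages suggest the daemon is overwhelmed
docker info --format '{{.ContainersRunning}} running containers'
```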
````diff
@@ -273,12 +364,22 @@ require.NotNil(t, targetNode, "should find expected node")
 
 ### Critical Test Environment Setup
 
-**Pre-Test Cleanup (MANDATORY)**:
+**Pre-Test Cleanup**:
+
+The test runner automatically handles cleanup:
+- **Before test**: Removes only stale (stopped/exited) containers - does NOT affect running tests
+- **After test**: Removes only containers belonging to the specific run ID
 
 ```bash
-# ALWAYS run this before each test
+# Only clean old log directories if disk space is low
 rm -rf control_logs/202507*
 docker system prune -f
 df -h  # Verify sufficient disk space
+
+# SAFE: Clean only stale/stopped containers (does not affect running tests)
+# The test runner does this automatically via cleanupStaleTestContainers()
+
+# ⚠️ DANGEROUS: Only use when NO tests are running
+docker system prune -f
 ```
 
 **Environment Verification**:
````
````diff
@@ -286,8 +387,8 @@ df -h # Verify sufficient disk space
 # Verify system readiness
 go run ./cmd/hi doctor
 
-# Check for running containers that might conflict
-docker ps
+# Check what tests are currently running (ALWAYS check before global cleanup)
+docker ps --filter "name=headscale-test-suite" --format "{{.Names}}"
 ```
 
 ### Specific Test Categories and Known Issues
````
````diff
@@ -756,8 +857,14 @@ assert.EventuallyWithT(t, func(c *assert.CollectT) {
    - **Why security focus**: Integration tests are the last line of defense against security regressions
    - **EventuallyWithT Usage**: Proper use prevents race conditions without weakening security assertions
 
+6. **Concurrent Execution Awareness**: Respect run ID isolation and never interfere with other agents' test sessions. Each test run has a unique run ID - only clean up YOUR containers (by run ID label), never perform global cleanup while tests may be running.
+   - **Why this matters**: Multiple agents/users may run tests concurrently on the same Docker daemon
+   - **Key Rule**: NEVER use global container cleanup commands - the test runner handles cleanup automatically per run ID
+
 **CRITICAL PRINCIPLE**: Test expectations are sacred contracts that define correct system behavior. When tests fail, fix the code to match the test, never change the test to match broken code. Only timing and observability improvements are allowed - business logic expectations are immutable.
 
+**ISOLATION PRINCIPLE**: Each test run is isolated by its unique Run ID. Never interfere with other test sessions. The system handles cleanup automatically - manual global cleanup commands are forbidden when other tests may be running.
+
 **EventuallyWithT PRINCIPLE**: Every external call to headscale server or tailscale client must be wrapped in EventuallyWithT. Follow the five key rules strictly: one external call per block, proper variable scoping, no nesting, use CollectT for assertions, and provide descriptive messages.
 
 **Remember**: Test failures are usually code issues in Headscale that need to be fixed, not infrastructure problems to be ignored. Use the specific debugging workflows and failure patterns documented above to efficiently identify root causes. Infrastructure issues have very specific signatures - everything else is code-related.
````
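The EventuallyWithT principle above is concrete enough to illustrate. Below is a minimal sketch of the pattern using testify's `assert.EventuallyWithT`, with a hypothetical `client.Status()` standing in for a tailscale client call; note the five rules in action (one external call per block, CollectT assertions, no nesting, descriptive message):

```go
package integration

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// waitForOnline demonstrates the EventuallyWithT pattern described above.
// client is a hypothetical stand-in for a tailscale client handle.
func waitForOnline(t *testing.T, client interface{ Status() (bool, error) }) {
	assert.EventuallyWithT(t, func(c *assert.CollectT) {
		online, err := client.Status() // the single external call in this block
		assert.NoError(c, err, "status call should succeed")
		assert.True(c, online, "client should report itself online")
	}, 30*time.Second, 500*time.Millisecond, "client never came online")
}
```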
.github/workflows/test-integration.yaml (11 changes, vendored)
````diff
@@ -137,6 +137,11 @@ jobs:
           - TestACLPolicyPropagationOverTime
           - TestACLTagPropagation
           - TestACLTagPropagationPortSpecific
+          - TestACLGroupWithUnknownUser
+          - TestACLGroupAfterUserDeletion
+          - TestACLGroupDeletionExactReproduction
+          - TestACLDynamicUnknownUserAddition
+          - TestACLDynamicUnknownUserRemoval
           - TestAPIAuthenticationBypass
           - TestAPIAuthenticationBypassCurl
           - TestGRPCAuthenticationBypass
@@ -165,6 +170,7 @@ jobs:
           - TestPreAuthKeyCommandWithoutExpiry
           - TestPreAuthKeyCommandReusableEphemeral
           - TestPreAuthKeyCorrectUserLoggedInCommand
+          - TestTaggedNodesCLIOutput
           - TestApiKeyCommand
           - TestNodeCommand
           - TestNodeExpireCommand
@@ -236,7 +242,12 @@ jobs:
           - TestTagsAdminAPICannotSetNonExistentTag
           - TestTagsAdminAPICanSetUnownedTag
           - TestTagsAdminAPICannotRemoveAllTags
+          - TestTagsIssue2978ReproTagReplacement
           - TestTagsAdminAPICannotSetInvalidFormat
+          - TestTagsUserLoginReauthWithEmptyTagsRemovesAllTags
+          - TestTagsAuthKeyWithoutUserInheritsTags
+          - TestTagsAuthKeyWithoutUserRejectsAdvertisedTags
+          - TestTagsAuthKeyConvertToUserViaCLIRegister
     uses: ./.github/workflows/integration-test-template.yml
     secrets: inherit
     with:
````
AGENTS.md (20 changes)
````diff
@@ -405,13 +405,29 @@ go run ./cmd/hi run "TestName" --postgres
 
 # Pattern matching for related tests
 go run ./cmd/hi run "TestPattern*"
+
+# Run multiple tests concurrently (each gets isolated run ID)
+go run ./cmd/hi run "TestPingAllByIP" &
+go run ./cmd/hi run "TestACLAllowUserDst" &
+go run ./cmd/hi run "TestOIDCAuthenticationPingAll" &
 ```
 
+**Concurrent Execution Support**:
+
+The test runner supports running multiple tests concurrently on the same Docker daemon:
+
+- Each test run gets a **unique Run ID** (format: `YYYYMMDD-HHMMSS-{6-char-hash}`)
+- All containers are labeled with `hi.run-id` for isolation
+- Container names include the run ID for easy identification (e.g., `ts-{runID}-1-74-{hash}`)
+- Dynamic port allocation prevents port conflicts between concurrent runs
+- Cleanup only affects containers belonging to the specific run ID
+- Log directories are isolated per run: `control_logs/{runID}/`
+
 **Critical Notes**:
 
-- Only ONE test can run at a time (Docker port conflicts)
 - Tests generate ~100MB of logs per run in `control_logs/`
-- Clean environment before each test: `sudo rm -rf control_logs/202* && docker system prune -f`
+- Running many tests concurrently may cause resource contention (CPU/memory)
+- Clean stale containers periodically: `docker system prune -f`
 
 ### Test Artifacts Location
````
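To make "cleanup only affects containers belonging to the specific run ID" concrete, a manual equivalent of what the runner does automatically could look like this sketch (`RUN_ID` is whatever your test printed on its "Run ID: ..." line):

```bash
# Manually remove ONLY the containers belonging to one run ID
RUN_ID="20260109-104215-mdjtzx"  # taken from the test output
docker ps -aq --filter "label=hi.run-id=${RUN_ID}" | xargs -r docker rm -f
```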
CHANGELOG.md (102 changes)
````diff
@@ -1,6 +1,8 @@
 # CHANGELOG
 
-## 0.28.0 (202x-xx-xx)
+## 0.29.0 (202x-xx-xx)
+
+## 0.28.0 (2026-02-04)
 
 **Minimum supported Tailscale client version: v1.74.0**
 
@@ -12,7 +14,11 @@ tags rather than users, making them suitable for servers and infrastructure. App
 ownership. See the [Tailscale tags documentation](https://tailscale.com/kb/1068/tags) for details on how tags work.
 
 User-owned nodes can now request tags during registration using `--advertise-tags`. Tags are validated against the `tagOwners` policy
-and applied at registration time. Tags can be managed via the CLI or API after registration.
+and applied at registration time. Tags can be managed via the CLI or API after registration. Tagged nodes can return to user-owned
+by re-authenticating with `tailscale up --advertise-tags= --force-reauth`.
+
+A one-time migration will validate and migrate any `RequestTags` (stored in hostinfo) to the tags column. Tags are validated against
+your policy's `tagOwners` rules during migration. [#3011](https://github.com/juanfont/headscale/pull/3011)
 
 ### Smarter map updates
 
@@ -38,7 +44,34 @@ sequentially through each stable release, selecting the latest patch version ava
 
 ### BREAKING
 
-- **Tags**: The gRPC `SetTags` endpoint now allows converting user-owned nodes to tagged nodes by setting tags. Once a node is tagged, it cannot be converted back to a user-owned node. [#2885](https://github.com/juanfont/headscale/pull/2885)
 - **API**: The Node message in the gRPC/REST API has been simplified - the `ForcedTags`, `InvalidTags`, and `ValidTags` fields have been removed and replaced with a single `Tags` field that contains the node's applied tags [#2993](https://github.com/juanfont/headscale/pull/2993)
   - API clients should use the `Tags` field instead of `ValidTags`
   - The `headscale nodes list` CLI command now always shows a Tags column and the `--tags` flag has been removed
 - **PreAuthKey CLI**: Commands now use ID-based operations instead of user+key combinations [#2992](https://github.com/juanfont/headscale/pull/2992)
+  - `headscale preauthkeys create` no longer requires `--user` flag (optional for tracking creation)
+  - `headscale preauthkeys list` lists all keys (no longer filtered by user)
+  - `headscale preauthkeys expire --id <ID>` replaces `--user <USER> <KEY>`
+  - `headscale preauthkeys delete --id <ID>` replaces `--user <USER> <KEY>`
+
+  **Before:**
+
+  ```bash
+  headscale preauthkeys create --user 1 --reusable --tags tag:server
+  headscale preauthkeys list --user 1
+  headscale preauthkeys expire --user 1 <KEY>
+  headscale preauthkeys delete --user 1 <KEY>
+  ```
+
+  **After:**
+
+  ```bash
+  headscale preauthkeys create --reusable --tags tag:server
+  headscale preauthkeys list
+  headscale preauthkeys expire --id 123
+  headscale preauthkeys delete --id 123
+  ```
+
+- **Tags**: The gRPC `SetTags` endpoint now allows converting user-owned nodes to tagged nodes by setting tags. [#2885](https://github.com/juanfont/headscale/pull/2885)
 - **Tags**: Tags are now resolved from the node's stored Tags field only [#2931](https://github.com/juanfont/headscale/pull/2931)
   - `--advertise-tags` is processed during registration, not on every policy evaluation
   - PreAuthKey tagged devices ignore `--advertise-tags` from clients
````
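Since the Node message now carries a single `Tags` field, reading tags through the REST API reduces to something like the sketch below; the host and key are placeholders, and the `/api/v1/node` list endpoint plus lower-case JSON field names are assumptions based on headscale's usual API conventions:

```bash
# Sketch: list each node's applied tags via the consolidated Tags field
curl -s -H "Authorization: Bearer ${HEADSCALE_API_KEY}" \
  "https://headscale.example.com/api/v1/node" \
  | jq '.nodes[] | {name: .name, tags: .tags}'
```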
````diff
@@ -51,15 +84,69 @@ sequentially through each stable release, selecting the latest patch version ava
 - Remove ability to move nodes between users [#2922](https://github.com/juanfont/headscale/pull/2922)
   - The `headscale nodes move` CLI command has been removed
   - The `MoveNode` API endpoint has been removed
-  - Nodes are permanently associated with their user at registration time
+  - Nodes are permanently associated with their user or tag at registration time
+- Add `oidc.email_verified_required` config option to control email verification requirement [#2860](https://github.com/juanfont/headscale/pull/2860)
+  - When `true` (default), only verified emails can authenticate via OIDC in conjunction with `oidc.allowed_domains` or
+    `oidc.allowed_users`. Previous versions allowed to authenticate with an unverified email but did not store the email
+    address in the user profile. This is now rejected during authentication with an `unverified email` error.
+  - When `false`, unverified emails are allowed for OIDC authentication and the email address is stored in the user
+    profile regardless of its verification state.
+- **SSH Policy**: Wildcard (`*`) is no longer supported as an SSH destination [#3009](https://github.com/juanfont/headscale/issues/3009)
+  - Use `autogroup:member` for user-owned devices
+  - Use `autogroup:tagged` for tagged devices
+  - Use specific tags (e.g., `tag:server`) for targeted access
+
+  **Before:**
+
+  ```json
+  { "action": "accept", "src": ["group:admins"], "dst": ["*"], "users": ["root"] }
+  ```
+
+  **After:**
+
+  ```json
+  { "action": "accept", "src": ["group:admins"], "dst": ["autogroup:member", "autogroup:tagged"], "users": ["root"] }
+  ```
+
+- **SSH Policy**: SSH source/destination validation now enforces Tailscale's security model [#3010](https://github.com/juanfont/headscale/issues/3010)
+
+  Per [Tailscale SSH documentation](https://tailscale.com/kb/1193/tailscale-ssh), the following rules are now enforced:
+  1. **Tags cannot SSH to user-owned devices**: SSH rules with `tag:*` or `autogroup:tagged` as source cannot have username destinations (e.g., `alice@`) or `autogroup:member`/`autogroup:self` as destination
+  2. **Username destinations require same-user source**: If the destination is a specific username (e.g., `alice@`), the source must be that exact same user only. Use `autogroup:self` for same-user SSH access instead
+
+  **Invalid policies now rejected at load time:**
+
+  ```json
+  // INVALID: tag source to user destination
+  {"src": ["tag:server"], "dst": ["alice@"], ...}
+
+  // INVALID: autogroup:tagged to autogroup:member
+  {"src": ["autogroup:tagged"], "dst": ["autogroup:member"], ...}
+
+  // INVALID: group to specific user (use autogroup:self instead)
+  {"src": ["group:admins"], "dst": ["alice@"], ...}
+  ```
+
+  **Valid patterns:**
+
+  ```json
+  // Users/groups can SSH to their own devices via autogroup:self
+  {"src": ["group:admins"], "dst": ["autogroup:self"], ...}
+
+  // Users/groups can SSH to tagged devices
+  {"src": ["group:admins"], "dst": ["autogroup:tagged"], ...}
+
+  // Tagged devices can SSH to other tagged devices
+  {"src": ["autogroup:tagged"], "dst": ["autogroup:tagged"], ...}
+
+  // Same user can SSH to their own devices
+  {"src": ["alice@"], "dst": ["alice@"], ...}
+  ```
 
 ### Changes
 
 - Smarter change notifications send partial map updates and node removals instead of full maps [#2961](https://github.com/juanfont/headscale/pull/2961)
 - Send lightweight endpoint and DERP region updates instead of full maps [#2856](https://github.com/juanfont/headscale/pull/2856)
-- Add `oidc.email_verified_required` config option to control email verification requirement [#2860](https://github.com/juanfont/headscale/pull/2860)
-  - When `true` (default), only verified emails can authenticate via OIDC with `allowed_domains` or `allowed_users`
-  - When `false`, unverified emails are allowed for OIDC authentication
 - Add NixOS module in repository for faster iteration [#2857](https://github.com/juanfont/headscale/pull/2857)
 - Add favicon to webpages [#2858](https://github.com/juanfont/headscale/pull/2858)
 - Redesign OIDC callback and registration web templates [#2832](https://github.com/juanfont/headscale/pull/2832)
@@ -77,6 +164,7 @@ sequentially through each stable release, selecting the latest patch version ava
 - Fix autogroup:self preventing visibility of nodes matched by other ACL rules [#2882](https://github.com/juanfont/headscale/pull/2882)
 - Fix nodes being rejected after pre-authentication key expiration [#2917](https://github.com/juanfont/headscale/pull/2917)
 - Fix list-routes command respecting identifier filter with JSON output [#2927](https://github.com/juanfont/headscale/pull/2927)
+- Add `--id` flag to expire/delete commands as alternative to `--prefix` for API Keys [#3016](https://github.com/juanfont/headscale/pull/3016)
 
 ## 0.27.1 (2025-11-11)
 
````
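For reference, a minimal config sketch of the `oidc.email_verified_required` option described above (the `issuer` value is a placeholder; only `email_verified_required` is the new knob):

```yaml
oidc:
  issuer: https://idp.example.com  # placeholder identity provider
  # New in 0.28.0: require the "email_verified: true" claim before storing
  # the email and matching against allowed_domains/allowed_users.
  email_verified_required: true    # set to false to accept unverified emails
```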
````diff
@@ -9,7 +9,6 @@ import (
 	"github.com/juanfont/headscale/hscontrol/util"
 	"github.com/prometheus/common/model"
 	"github.com/pterm/pterm"
-	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
 	"google.golang.org/protobuf/types/known/timestamppb"
 )
@@ -29,15 +28,11 @@ func init() {
 	apiKeysCmd.AddCommand(createAPIKeyCmd)
 
 	expireAPIKeyCmd.Flags().StringP("prefix", "p", "", "ApiKey prefix")
-	if err := expireAPIKeyCmd.MarkFlagRequired("prefix"); err != nil {
-		log.Fatal().Err(err).Msg("")
-	}
+	expireAPIKeyCmd.Flags().Uint64P("id", "i", 0, "ApiKey ID")
 	apiKeysCmd.AddCommand(expireAPIKeyCmd)
 
 	deleteAPIKeyCmd.Flags().StringP("prefix", "p", "", "ApiKey prefix")
-	if err := deleteAPIKeyCmd.MarkFlagRequired("prefix"); err != nil {
-		log.Fatal().Err(err).Msg("")
-	}
+	deleteAPIKeyCmd.Flags().Uint64P("id", "i", 0, "ApiKey ID")
 	apiKeysCmd.AddCommand(deleteAPIKeyCmd)
 }
@@ -154,11 +149,20 @@ var expireAPIKeyCmd = &cobra.Command{
 	Run: func(cmd *cobra.Command, args []string) {
 		output, _ := cmd.Flags().GetString("output")
 
-		prefix, err := cmd.Flags().GetString("prefix")
-		if err != nil {
+		id, _ := cmd.Flags().GetUint64("id")
+		prefix, _ := cmd.Flags().GetString("prefix")
+
+		switch {
+		case id == 0 && prefix == "":
 			ErrorOutput(
-				err,
-				fmt.Sprintf("Error getting prefix from CLI flag: %s", err),
+				errMissingParameter,
+				"Either --id or --prefix must be provided",
+				output,
+			)
+		case id != 0 && prefix != "":
+			ErrorOutput(
+				errMissingParameter,
+				"Only one of --id or --prefix can be provided",
 				output,
 			)
 		}
@@ -167,8 +171,11 @@ var expireAPIKeyCmd = &cobra.Command{
 		defer cancel()
 		defer conn.Close()
 
-		request := &v1.ExpireApiKeyRequest{
-			Prefix: prefix,
+		request := &v1.ExpireApiKeyRequest{}
+		if id != 0 {
+			request.Id = id
+		} else {
+			request.Prefix = prefix
 		}
 
 		response, err := client.ExpireApiKey(ctx, request)
@@ -191,11 +198,20 @@ var deleteAPIKeyCmd = &cobra.Command{
 	Run: func(cmd *cobra.Command, args []string) {
 		output, _ := cmd.Flags().GetString("output")
 
-		prefix, err := cmd.Flags().GetString("prefix")
-		if err != nil {
+		id, _ := cmd.Flags().GetUint64("id")
+		prefix, _ := cmd.Flags().GetString("prefix")
+
+		switch {
+		case id == 0 && prefix == "":
 			ErrorOutput(
-				err,
-				fmt.Sprintf("Error getting prefix from CLI flag: %s", err),
+				errMissingParameter,
+				"Either --id or --prefix must be provided",
+				output,
+			)
+		case id != 0 && prefix != "":
+			ErrorOutput(
+				errMissingParameter,
+				"Only one of --id or --prefix can be provided",
+				output,
+			)
 		}
@@ -204,8 +220,11 @@ var deleteAPIKeyCmd = &cobra.Command{
 		defer cancel()
 		defer conn.Close()
 
-		request := &v1.DeleteApiKeyRequest{
-			Prefix: prefix,
+		request := &v1.DeleteApiKeyRequest{}
+		if id != 0 {
+			request.Id = id
+		} else {
+			request.Prefix = prefix
 		}
 
 		response, err := client.DeleteApiKey(ctx, request)
````
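Usage of the reworked API key flags, for reference (IDs come from `headscale apikeys list`; `--prefix` continues to work, but the hunk above makes the two flags mutually exclusive):

```bash
headscale apikeys expire --id 123
headscale apikeys delete --id 123
headscale apikeys expire --prefix <PREFIX>  # legacy prefix-based form
```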
````diff
@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"log"
 	"net/netip"
-	"slices"
 	"strconv"
 	"strings"
 	"time"
@@ -22,7 +21,6 @@ import (
 func init() {
 	rootCmd.AddCommand(nodeCmd)
 	listNodesCmd.Flags().StringP("user", "u", "", "Filter by user")
-	listNodesCmd.Flags().BoolP("tags", "t", false, "Show tags")
 
 	listNodesCmd.Flags().StringP("namespace", "n", "", "User")
 	listNodesNamespaceFlag := listNodesCmd.Flags().Lookup("namespace")
@@ -148,10 +146,6 @@ var listNodesCmd = &cobra.Command{
 	if err != nil {
 		ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
 	}
-	showTags, err := cmd.Flags().GetBool("tags")
-	if err != nil {
-		ErrorOutput(err, fmt.Sprintf("Error getting tags flag: %s", err), output)
-	}
 
 	ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
 	defer cancel()
@@ -174,7 +168,7 @@ var listNodesCmd = &cobra.Command{
 		SuccessOutput(response.GetNodes(), "", output)
 	}
 
-	tableData, err := nodesToPtables(user, showTags, response.GetNodes())
+	tableData, err := nodesToPtables(user, response.GetNodes())
 	if err != nil {
 		ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
 	}
@@ -282,7 +276,8 @@ var expireNodeCmd = &cobra.Command{
 
 			return
 		}
-		expiryTime := time.Now()
+		now := time.Now()
+		expiryTime := now
 		if expiry != "" {
 			expiryTime, err = time.Parse(time.RFC3339, expiry)
 			if err != nil {
@@ -317,7 +312,11 @@ var expireNodeCmd = &cobra.Command{
 			)
 		}
 
-		SuccessOutput(response.GetNode(), "Node expired", output)
+		if now.Equal(expiryTime) || now.After(expiryTime) {
+			SuccessOutput(response.GetNode(), "Node expired", output)
+		} else {
+			SuccessOutput(response.GetNode(), "Node expiration updated", output)
+		}
 	},
 }
````
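The branch added above changes only the success message: expiring immediately prints "Node expired", while setting a future timestamp prints "Node expiration updated". Assuming the RFC 3339 timestamp is exposed as an `--expiry` flag (the flag name is not visible in this hunk), usage looks roughly like:

```bash
headscale nodes expire --identifier 1                                # -> "Node expired"
headscale nodes expire --identifier 1 --expiry 2026-03-01T00:00:00Z  # -> "Node expiration updated"
```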
````diff
@@ -482,7 +481,6 @@ be assigned to nodes.`,
 
 func nodesToPtables(
 	currentUser string,
-	showTags bool,
 	nodes []*v1.Node,
 ) (pterm.TableData, error) {
 	tableHeader := []string{
@@ -492,6 +490,7 @@ func nodesToPtables(
 		"MachineKey",
 		"NodeKey",
 		"User",
+		"Tags",
 		"IP addresses",
 		"Ephemeral",
 		"Last seen",
@@ -499,13 +498,6 @@ func nodesToPtables(
 		"Connected",
 		"Expired",
 	}
-	if showTags {
-		tableHeader = append(tableHeader, []string{
-			"ForcedTags",
-			"InvalidTags",
-			"ValidTags",
-		}...)
-	}
 	tableData := pterm.TableData{tableHeader}
 
 	for _, node := range nodes {
@@ -560,28 +552,17 @@ func nodesToPtables(
 			expired = pterm.LightRed("yes")
 		}
 
-		var forcedTags string
-		for _, tag := range node.GetForcedTags() {
-			forcedTags += "\n" + tag
+		// TODO(kradalby): as part of CLI rework, we should add the possibility to show "unusable" tags as mentioned in
+		// https://github.com/juanfont/headscale/issues/2981
+		var tagsBuilder strings.Builder
+
+		for _, tag := range node.GetTags() {
+			tagsBuilder.WriteString("\n" + tag)
 		}
 
-		forcedTags = strings.TrimLeft(forcedTags, "\n")
-		var invalidTags string
-		for _, tag := range node.GetInvalidTags() {
-			if !slices.Contains(node.GetForcedTags(), tag) {
-				invalidTags += "\n" + pterm.LightRed(tag)
-			}
-		}
+		tags := tagsBuilder.String()
 
-		invalidTags = strings.TrimLeft(invalidTags, "\n")
-		var validTags string
-		for _, tag := range node.GetValidTags() {
-			if !slices.Contains(node.GetForcedTags(), tag) {
-				validTags += "\n" + pterm.LightGreen(tag)
-			}
-		}
-
-		validTags = strings.TrimLeft(validTags, "\n")
+		tags = strings.TrimLeft(tags, "\n")
 
 		var user string
 		if currentUser == "" || (currentUser == node.GetUser().GetName()) {
@@ -608,6 +589,7 @@ func nodesToPtables(
 			machineKey.ShortString(),
 			nodeKey.ShortString(),
 			user,
+			tags,
 			strings.Join([]string{IPV4Address, IPV6Address}, ", "),
 			strconv.FormatBool(ephemeral),
 			lastSeenTime,
@@ -615,9 +597,6 @@ func nodesToPtables(
 			online,
 			expired,
 		}
-		if showTags {
-			nodeData = append(nodeData, []string{forcedTags, invalidTags, validTags}...)
-		}
 		tableData = append(
 			tableData,
 			nodeData,
````
````diff
@@ -69,8 +69,7 @@ var getPolicy = &cobra.Command{
 	}
 
 	d, err := db.NewHeadscaleDatabase(
-		cfg.Database,
-		cfg.BaseDomain,
+		cfg,
 		nil,
 	)
 	if err != nil {
@@ -145,8 +144,7 @@ var setPolicy = &cobra.Command{
 	}
 
 	d, err := db.NewHeadscaleDatabase(
-		cfg.Database,
-		cfg.BaseDomain,
+		cfg,
 		nil,
 	)
 	if err != nil {
````
````diff
@@ -9,7 +9,6 @@ import (
 	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
 	"github.com/prometheus/common/model"
 	"github.com/pterm/pterm"
-	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
 	"google.golang.org/protobuf/types/known/timestamppb"
 )
@@ -20,17 +19,6 @@ const (
 
 func init() {
 	rootCmd.AddCommand(preauthkeysCmd)
-	preauthkeysCmd.PersistentFlags().Uint64P("user", "u", 0, "User identifier (ID)")
-
-	preauthkeysCmd.PersistentFlags().StringP("namespace", "n", "", "User")
-	pakNamespaceFlag := preauthkeysCmd.PersistentFlags().Lookup("namespace")
-	pakNamespaceFlag.Deprecated = deprecateNamespaceMessage
-	pakNamespaceFlag.Hidden = true
-
-	err := preauthkeysCmd.MarkPersistentFlagRequired("user")
-	if err != nil {
-		log.Fatal().Err(err).Msg("")
-	}
 	preauthkeysCmd.AddCommand(listPreAuthKeys)
 	preauthkeysCmd.AddCommand(createPreAuthKeyCmd)
 	preauthkeysCmd.AddCommand(expirePreAuthKeyCmd)
@@ -43,6 +31,9 @@ func init() {
 		StringP("expiration", "e", DefaultPreAuthKeyExpiry, "Human-readable expiration of the key (e.g. 30m, 24h)")
 	createPreAuthKeyCmd.Flags().
 		StringSlice("tags", []string{}, "Tags to automatically assign to node")
+	createPreAuthKeyCmd.PersistentFlags().Uint64P("user", "u", 0, "User identifier (ID)")
+	expirePreAuthKeyCmd.PersistentFlags().Uint64P("id", "i", 0, "Authkey ID")
+	deletePreAuthKeyCmd.PersistentFlags().Uint64P("id", "i", 0, "Authkey ID")
 }
 
 var preauthkeysCmd = &cobra.Command{
@@ -53,25 +44,16 @@ var preauthkeysCmd = &cobra.Command{
 
 var listPreAuthKeys = &cobra.Command{
 	Use:     "list",
-	Short:   "List the preauthkeys for this user",
+	Short:   "List all preauthkeys",
 	Aliases: []string{"ls", "show"},
 	Run: func(cmd *cobra.Command, args []string) {
 		output, _ := cmd.Flags().GetString("output")
 
-		user, err := cmd.Flags().GetUint64("user")
-		if err != nil {
-			ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
-		}
-
 		ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
 		defer cancel()
 		defer conn.Close()
 
-		request := &v1.ListPreAuthKeysRequest{
-			User: user,
-		}
-
-		response, err := client.ListPreAuthKeys(ctx, request)
+		response, err := client.ListPreAuthKeys(ctx, &v1.ListPreAuthKeysRequest{})
 		if err != nil {
 			ErrorOutput(
 				err,
@@ -95,7 +77,7 @@ var listPreAuthKeys = &cobra.Command{
 			"Used",
 			"Expiration",
 			"Created",
-			"Tags",
+			"Owner",
 		},
 	}
 	for _, key := range response.GetPreAuthKeys() {
@@ -104,14 +86,15 @@ var listPreAuthKeys = &cobra.Command{
 			expiration = ColourTime(key.GetExpiration().AsTime())
 		}
 
-		aclTags := ""
-
-		for _, tag := range key.GetAclTags() {
-			aclTags += "\n" + tag
+		var owner string
+		if len(key.GetAclTags()) > 0 {
+			owner = strings.Join(key.GetAclTags(), "\n")
+		} else if key.GetUser() != nil {
+			owner = key.GetUser().GetName()
+		} else {
+			owner = "-"
 		}
 
-		aclTags = strings.TrimLeft(aclTags, "\n")
-
 		tableData = append(tableData, []string{
 			strconv.FormatUint(key.GetId(), 10),
 			key.GetKey(),
@@ -120,7 +103,7 @@ var listPreAuthKeys = &cobra.Command{
 			strconv.FormatBool(key.GetUsed()),
 			expiration,
 			key.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
-			aclTags,
+			owner,
 		})
 	}
@@ -137,16 +120,12 @@ var listPreAuthKeys = &cobra.Command{
 
 var createPreAuthKeyCmd = &cobra.Command{
 	Use:     "create",
-	Short:   "Creates a new preauthkey in the specified user",
+	Short:   "Creates a new preauthkey",
 	Aliases: []string{"c", "new"},
 	Run: func(cmd *cobra.Command, args []string) {
 		output, _ := cmd.Flags().GetString("output")
 
-		user, err := cmd.Flags().GetUint64("user")
-		if err != nil {
-			ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
-		}
-
+		user, _ := cmd.Flags().GetUint64("user")
 		reusable, _ := cmd.Flags().GetBool("reusable")
 		ephemeral, _ := cmd.Flags().GetBool("ephemeral")
 		tags, _ := cmd.Flags().GetStringSlice("tags")
@@ -171,10 +150,6 @@ var createPreAuthKeyCmd = &cobra.Command{
 
 		expiration := time.Now().UTC().Add(time.Duration(duration))
 
-		log.Trace().
-			Dur("expiration", time.Duration(duration)).
-			Msg("expiration has been set")
-
 		request.Expiration = timestamppb.New(expiration)
 
 		ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
@@ -195,21 +170,21 @@ var createPreAuthKeyCmd = &cobra.Command{
 }
 
 var expirePreAuthKeyCmd = &cobra.Command{
-	Use:     "expire KEY",
+	Use:     "expire",
 	Short:   "Expire a preauthkey",
 	Aliases: []string{"revoke", "exp", "e"},
-	Args: func(cmd *cobra.Command, args []string) error {
-		if len(args) < 1 {
-			return errMissingParameter
-		}
-
-		return nil
-	},
 	Run: func(cmd *cobra.Command, args []string) {
 		output, _ := cmd.Flags().GetString("output")
-		user, err := cmd.Flags().GetUint64("user")
-		if err != nil {
-			ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
+		id, _ := cmd.Flags().GetUint64("id")
+
+		if id == 0 {
+			ErrorOutput(
+				errMissingParameter,
+				"Error: missing --id parameter",
+				output,
+			)
+
+			return
 		}
 
 		ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
@@ -217,8 +192,7 @@ var expirePreAuthKeyCmd = &cobra.Command{
 		defer conn.Close()
 
 		request := &v1.ExpirePreAuthKeyRequest{
-			User: user,
-			Key:  args[0],
+			Id: id,
 		}
 
 		response, err := client.ExpirePreAuthKey(ctx, request)
@@ -235,21 +209,21 @@ var expirePreAuthKeyCmd = &cobra.Command{
 }
 
 var deletePreAuthKeyCmd = &cobra.Command{
-	Use:     "delete KEY",
+	Use:     "delete",
 	Short:   "Delete a preauthkey",
 	Aliases: []string{"del", "rm", "d"},
-	Args: func(cmd *cobra.Command, args []string) error {
-		if len(args) < 1 {
-			return errMissingParameter
-		}
-
-		return nil
-	},
 	Run: func(cmd *cobra.Command, args []string) {
 		output, _ := cmd.Flags().GetString("output")
-		user, err := cmd.Flags().GetUint64("user")
-		if err != nil {
-			ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
+		id, _ := cmd.Flags().GetUint64("id")
+
+		if id == 0 {
+			ErrorOutput(
+				errMissingParameter,
+				"Error: missing --id parameter",
+				output,
+			)
+
+			return
 		}
 
 		ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
@@ -257,8 +231,7 @@ var deletePreAuthKeyCmd = &cobra.Command{
 		defer conn.Close()
 
 		request := &v1.DeletePreAuthKeyRequest{
-			User: user,
-			Key:  args[0],
+			Id: id,
 		}
 
 		response, err := client.DeletePreAuthKey(ctx, request)
````
````diff
@@ -9,34 +9,17 @@ import (
 	"github.com/juanfont/headscale/hscontrol/types"
 	"github.com/juanfont/headscale/hscontrol/util"
 	"github.com/spf13/viper"
-	"gopkg.in/check.v1"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
-func Test(t *testing.T) {
-	check.TestingT(t)
-}
-
-var _ = check.Suite(&Suite{})
-
-type Suite struct{}
-
-func (s *Suite) SetUpSuite(c *check.C) {
-}
-
-func (s *Suite) TearDownSuite(c *check.C) {
-}
-
-func (*Suite) TestConfigFileLoading(c *check.C) {
+func TestConfigFileLoading(t *testing.T) {
 	tmpDir, err := os.MkdirTemp("", "headscale")
-	if err != nil {
-		c.Fatal(err)
-	}
+	require.NoError(t, err)
 	defer os.RemoveAll(tmpDir)
 
 	path, err := os.Getwd()
-	if err != nil {
-		c.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	cfgFile := filepath.Join(tmpDir, "config.yaml")
 
@@ -45,70 +28,54 @@ func (*Suite) TestConfigFileLoading(c *check.C) {
 		filepath.Clean(path+"/../../config-example.yaml"),
 		cfgFile,
 	)
-	if err != nil {
-		c.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Load example config, it should load without validation errors
 	err = types.LoadConfig(cfgFile, true)
-	c.Assert(err, check.IsNil)
+	require.NoError(t, err)
 
 	// Test that config file was interpreted correctly
-	c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
-	c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
-	c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
-	c.Assert(viper.GetString("database.type"), check.Equals, "sqlite")
-	c.Assert(viper.GetString("database.sqlite.path"), check.Equals, "/var/lib/headscale/db.sqlite")
-	c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
-	c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
-	c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
-	c.Assert(
-		util.GetFileMode("unix_socket_permission"),
-		check.Equals,
-		fs.FileMode(0o770),
-	)
-	c.Assert(viper.GetBool("logtail.enabled"), check.Equals, false)
+	assert.Equal(t, "http://127.0.0.1:8080", viper.GetString("server_url"))
+	assert.Equal(t, "127.0.0.1:8080", viper.GetString("listen_addr"))
+	assert.Equal(t, "127.0.0.1:9090", viper.GetString("metrics_listen_addr"))
+	assert.Equal(t, "sqlite", viper.GetString("database.type"))
+	assert.Equal(t, "/var/lib/headscale/db.sqlite", viper.GetString("database.sqlite.path"))
+	assert.Empty(t, viper.GetString("tls_letsencrypt_hostname"))
+	assert.Equal(t, ":http", viper.GetString("tls_letsencrypt_listen"))
+	assert.Equal(t, "HTTP-01", viper.GetString("tls_letsencrypt_challenge_type"))
+	assert.Equal(t, fs.FileMode(0o770), util.GetFileMode("unix_socket_permission"))
+	assert.False(t, viper.GetBool("logtail.enabled"))
 }
 
-func (*Suite) TestConfigLoading(c *check.C) {
+func TestConfigLoading(t *testing.T) {
 	tmpDir, err := os.MkdirTemp("", "headscale")
-	if err != nil {
-		c.Fatal(err)
-	}
+	require.NoError(t, err)
 	defer os.RemoveAll(tmpDir)
 
 	path, err := os.Getwd()
-	if err != nil {
-		c.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Symlink the example config file
 	err = os.Symlink(
 		filepath.Clean(path+"/../../config-example.yaml"),
 		filepath.Join(tmpDir, "config.yaml"),
 	)
-	if err != nil {
-		c.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Load example config, it should load without validation errors
 	err = types.LoadConfig(tmpDir, false)
-	c.Assert(err, check.IsNil)
+	require.NoError(t, err)
 
 	// Test that config file was interpreted correctly
-	c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
-	c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
-	c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
-	c.Assert(viper.GetString("database.type"), check.Equals, "sqlite")
-	c.Assert(viper.GetString("database.sqlite.path"), check.Equals, "/var/lib/headscale/db.sqlite")
-	c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
-	c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
-	c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
-	c.Assert(
-		util.GetFileMode("unix_socket_permission"),
-		check.Equals,
-		fs.FileMode(0o770),
-	)
-	c.Assert(viper.GetBool("logtail.enabled"), check.Equals, false)
-	c.Assert(viper.GetBool("randomize_client_port"), check.Equals, false)
+	assert.Equal(t, "http://127.0.0.1:8080", viper.GetString("server_url"))
+	assert.Equal(t, "127.0.0.1:8080", viper.GetString("listen_addr"))
+	assert.Equal(t, "127.0.0.1:9090", viper.GetString("metrics_listen_addr"))
+	assert.Equal(t, "sqlite", viper.GetString("database.type"))
+	assert.Equal(t, "/var/lib/headscale/db.sqlite", viper.GetString("database.sqlite.path"))
+	assert.Empty(t, viper.GetString("tls_letsencrypt_hostname"))
+	assert.Equal(t, ":http", viper.GetString("tls_letsencrypt_listen"))
+	assert.Equal(t, "HTTP-01", viper.GetString("tls_letsencrypt_challenge_type"))
+	assert.Equal(t, fs.FileMode(0o770), util.GetFileMode("unix_socket_permission"))
+	assert.False(t, viper.GetBool("logtail.enabled"))
+	assert.False(t, viper.GetBool("randomize_client_port"))
 }
````
````diff
@@ -18,9 +18,11 @@ import (
 )
 
 // cleanupBeforeTest performs cleanup operations before running tests.
+// Only removes stale (stopped/exited) test containers to avoid interfering with concurrent test runs.
 func cleanupBeforeTest(ctx context.Context) error {
-	if err := killTestContainers(ctx); err != nil {
-		return fmt.Errorf("failed to kill test containers: %w", err)
+	err := cleanupStaleTestContainers(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to clean stale test containers: %w", err)
 	}
 
 	if err := pruneDockerNetworks(ctx); err != nil {
@@ -30,11 +32,25 @@ func cleanupBeforeTest(ctx context.Context) error {
 	return nil
 }
 
-// cleanupAfterTest removes the test container after completion.
-func cleanupAfterTest(ctx context.Context, cli *client.Client, containerID string) error {
-	return cli.ContainerRemove(ctx, containerID, container.RemoveOptions{
+// cleanupAfterTest removes the test container and all associated integration test containers for the run.
+func cleanupAfterTest(ctx context.Context, cli *client.Client, containerID, runID string) error {
+	// Remove the main test container
+	err := cli.ContainerRemove(ctx, containerID, container.RemoveOptions{
 		Force: true,
 	})
+	if err != nil {
+		return fmt.Errorf("failed to remove test container: %w", err)
+	}
+
+	// Clean up integration test containers for this run only
+	if runID != "" {
+		err := killTestContainersByRunID(ctx, runID)
+		if err != nil {
+			return fmt.Errorf("failed to clean up containers for run %s: %w", runID, err)
+		}
+	}
+
+	return nil
 }
 
 // killTestContainers terminates and removes all test containers.
@@ -87,6 +103,100 @@ func killTestContainers(ctx context.Context) error {
 	return nil
 }
 
+// killTestContainersByRunID terminates and removes all test containers for a specific run ID.
+// This function filters containers by the hi.run-id label to only affect containers
+// belonging to the specified test run, leaving other concurrent test runs untouched.
+func killTestContainersByRunID(ctx context.Context, runID string) error {
+	cli, err := createDockerClient()
+	if err != nil {
+		return fmt.Errorf("failed to create Docker client: %w", err)
+	}
+	defer cli.Close()
+
+	// Filter containers by hi.run-id label
+	containers, err := cli.ContainerList(ctx, container.ListOptions{
+		All: true,
+		Filters: filters.NewArgs(
+			filters.Arg("label", "hi.run-id="+runID),
+		),
+	})
+	if err != nil {
+		return fmt.Errorf("failed to list containers for run %s: %w", runID, err)
+	}
+
+	removed := 0
+
+	for _, cont := range containers {
+		// Kill the container if it's running
+		if cont.State == "running" {
+			_ = cli.ContainerKill(ctx, cont.ID, "KILL")
+		}
+
+		// Remove the container with retry logic
+		if removeContainerWithRetry(ctx, cli, cont.ID) {
+			removed++
+		}
+	}
+
+	if removed > 0 {
+		fmt.Printf("Removed %d containers for run ID %s\n", removed, runID)
+	}
+
+	return nil
+}
+
+// cleanupStaleTestContainers removes stopped/exited test containers without affecting running tests.
+// This is useful for cleaning up leftover containers from previous crashed or interrupted test runs
+// without interfering with currently running concurrent tests.
+func cleanupStaleTestContainers(ctx context.Context) error {
+	cli, err := createDockerClient()
+	if err != nil {
+		return fmt.Errorf("failed to create Docker client: %w", err)
+	}
+	defer cli.Close()
+
+	// Only get stopped/exited containers
+	containers, err := cli.ContainerList(ctx, container.ListOptions{
+		All: true,
+		Filters: filters.NewArgs(
+			filters.Arg("status", "exited"),
+			filters.Arg("status", "dead"),
+		),
+	})
+	if err != nil {
+		return fmt.Errorf("failed to list stopped containers: %w", err)
+	}
+
+	removed := 0
+
+	for _, cont := range containers {
+		// Only remove containers that look like test containers
+		shouldRemove := false
+
+		for _, name := range cont.Names {
+			if strings.Contains(name, "headscale-test-suite") ||
+				strings.Contains(name, "hs-") ||
+				strings.Contains(name, "ts-") ||
+				strings.Contains(name, "derp-") {
+				shouldRemove = true
+				break
+			}
+		}
+
+		if shouldRemove {
+			if removeContainerWithRetry(ctx, cli, cont.ID) {
+				removed++
+			}
+		}
+	}
+
+	if removed > 0 {
+		fmt.Printf("Removed %d stale test containers\n", removed)
+	}
+
+	return nil
+}
+
 const (
 	containerRemoveInitialInterval = 100 * time.Millisecond
 	containerRemoveMaxElapsedTime  = 2 * time.Second
````
````diff
@@ -26,93 +26,8 @@ var (
 	ErrTestFailed              = errors.New("test failed")
 	ErrUnexpectedContainerWait = errors.New("unexpected end of container wait")
 	ErrNoDockerContext         = errors.New("no docker context found")
-	ErrAnotherRunInProgress    = errors.New("another integration test run is already in progress")
 )
 
-// RunningTestInfo contains information about a currently running integration test.
-type RunningTestInfo struct {
-	RunID         string
-	ContainerID   string
-	ContainerName string
-	StartTime     time.Time
-	Duration      time.Duration
-	TestPattern   string
-}
-
-// ErrNoRunningTests indicates that no integration test is currently running.
-var ErrNoRunningTests = errors.New("no running tests found")
-
-// checkForRunningTests checks if there's already an integration test running.
-// Returns ErrNoRunningTests if no test is running, or RunningTestInfo with details about the running test.
-func checkForRunningTests(ctx context.Context) (*RunningTestInfo, error) {
-	cli, err := createDockerClient()
-	if err != nil {
-		return nil, fmt.Errorf("failed to create Docker client: %w", err)
-	}
-	defer cli.Close()
-
-	// List all running containers
-	containers, err := cli.ContainerList(ctx, container.ListOptions{
-		All: false, // Only running containers
-	})
-	if err != nil {
-		return nil, fmt.Errorf("failed to list containers: %w", err)
-	}
-
-	// Look for containers with hi.test-type=test-runner label
-	for _, cont := range containers {
-		if cont.Labels != nil && cont.Labels["hi.test-type"] == "test-runner" {
-			// Found a running test runner container
-			runID := cont.Labels["hi.run-id"]
-
-			containerName := ""
-			for _, name := range cont.Names {
-				containerName = strings.TrimPrefix(name, "/")
-
-				break
-			}
-
-			// Get more details via inspection
-			inspect, err := cli.ContainerInspect(ctx, cont.ID)
-			if err != nil {
-				// Return basic info if inspection fails
-				return &RunningTestInfo{
-					RunID:         runID,
-					ContainerID:   cont.ID,
-					ContainerName: containerName,
-				}, nil
-			}
-
-			startTime, _ := time.Parse(time.RFC3339Nano, inspect.State.StartedAt)
-			duration := time.Since(startTime)
-
-			// Try to extract test pattern from command
-			testPattern := ""
-
-			if len(inspect.Config.Cmd) > 0 {
-				for i, arg := range inspect.Config.Cmd {
-					if arg == "-run" && i+1 < len(inspect.Config.Cmd) {
-						testPattern = inspect.Config.Cmd[i+1]
-
-						break
-					}
-				}
-			}
-
-			return &RunningTestInfo{
-				RunID:         runID,
-				ContainerID:   cont.ID,
-				ContainerName: containerName,
-				StartTime:     startTime,
-				Duration:      duration,
-				TestPattern:   testPattern,
-			}, nil
-		}
-	}
-
-	return nil, ErrNoRunningTests
-}
-
 // runTestContainer executes integration tests in a Docker container.
 func runTestContainer(ctx context.Context, config *RunConfig) error {
 	cli, err := createDockerClient()
@@ -174,6 +89,9 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
 	}
 
 	log.Printf("Starting test: %s", config.TestPattern)
+	log.Printf("Run ID: %s", runID)
+	log.Printf("Monitor with: docker logs -f %s", containerName)
+	log.Printf("Logs directory: %s", logsDir)
 
 	// Start stats collection for container resource monitoring (if enabled)
 	var statsCollector *StatsCollector
@@ -234,9 +152,12 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
 	shouldCleanup := config.CleanAfter && (!config.KeepOnFailure || exitCode == 0)
 	if shouldCleanup {
 		if config.Verbose {
-			log.Printf("Running post-test cleanup...")
+			log.Printf("Running post-test cleanup for run %s...", runID)
 		}
-		if cleanErr := cleanupAfterTest(ctx, cli, resp.ID); cleanErr != nil && config.Verbose {
+
+		cleanErr := cleanupAfterTest(ctx, cli, resp.ID, runID)
+
+		if cleanErr != nil && config.Verbose {
 			log.Printf("Warning: post-test cleanup failed: %v", cleanErr)
 		}
````
@@ -6,7 +6,6 @@ import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/creachadair/command"
|
||||
@@ -14,65 +13,13 @@ import (
|
||||
|
||||
var ErrTestPatternRequired = errors.New("test pattern is required as first argument or use --test flag")
|
||||
|
||||
// formatRunningTestError creates a detailed error message about a running test.
|
||||
func formatRunningTestError(info *RunningTestInfo) error {
|
||||
var msg strings.Builder
|
||||
msg.WriteString("\n")
|
||||
msg.WriteString("╔══════════════════════════════════════════════════════════════════╗\n")
|
||||
msg.WriteString("║ Another integration test run is already in progress! ║\n")
|
||||
msg.WriteString("╚══════════════════════════════════════════════════════════════════╝\n")
|
||||
msg.WriteString("\n")
|
||||
msg.WriteString("Running test details:\n")
|
||||
msg.WriteString(fmt.Sprintf(" Run ID: %s\n", info.RunID))
|
||||
msg.WriteString(fmt.Sprintf(" Container: %s\n", info.ContainerName))
|
||||
|
||||
if info.TestPattern != "" {
|
||||
msg.WriteString(fmt.Sprintf(" Test: %s\n", info.TestPattern))
|
||||
}
|
||||
|
||||
if !info.StartTime.IsZero() {
|
||||
msg.WriteString(fmt.Sprintf(" Started: %s\n", info.StartTime.Format("2006-01-02 15:04:05")))
|
||||
msg.WriteString(fmt.Sprintf(" Running for: %s\n", formatDuration(info.Duration)))
|
||||
}
|
||||
|
||||
msg.WriteString("\n")
|
||||
msg.WriteString("Please wait for the current test to complete, or stop it with:\n")
|
||||
msg.WriteString(" go run ./cmd/hi clean containers\n")
|
||||
msg.WriteString("\n")
|
||||
msg.WriteString("To monitor the running test:\n")
|
||||
msg.WriteString(fmt.Sprintf(" docker logs -f %s\n", info.ContainerName))
|
||||
|
||||
return fmt.Errorf("%w\n%s", ErrAnotherRunInProgress, msg.String())
|
||||
}
|
||||
|
||||
const secondsPerMinute = 60
|
||||
|
||||
// formatDuration formats a duration in a human-readable way.
|
||||
func formatDuration(d time.Duration) string {
|
||||
if d < time.Minute {
|
||||
return fmt.Sprintf("%d seconds", int(d.Seconds()))
|
||||
}
|
||||
|
||||
if d < time.Hour {
|
||||
minutes := int(d.Minutes())
|
||||
seconds := int(d.Seconds()) % secondsPerMinute
|
||||
|
||||
return fmt.Sprintf("%d minutes, %d seconds", minutes, seconds)
|
||||
}
|
||||
|
||||
hours := int(d.Hours())
|
||||
minutes := int(d.Minutes()) % secondsPerMinute
|
||||
|
||||
return fmt.Sprintf("%d hours, %d minutes", hours, minutes)
|
||||
}
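For reference, a self-contained sketch of the helper above (which this change removes along with the single-run check), with a few illustrative inputs and the outputs its branches yield:

```go
package main

import (
	"fmt"
	"time"
)

const secondsPerMinute = 60

// formatDuration mirrors the removed helper above.
func formatDuration(d time.Duration) string {
	if d < time.Minute {
		return fmt.Sprintf("%d seconds", int(d.Seconds()))
	}
	if d < time.Hour {
		return fmt.Sprintf("%d minutes, %d seconds", int(d.Minutes()), int(d.Seconds())%secondsPerMinute)
	}
	return fmt.Sprintf("%d hours, %d minutes", int(d.Hours()), int(d.Minutes())%secondsPerMinute)
}

func main() {
	fmt.Println(formatDuration(42 * time.Second))              // 42 seconds
	fmt.Println(formatDuration(5*time.Minute + 7*time.Second)) // 5 minutes, 7 seconds
	fmt.Println(formatDuration(2*time.Hour + 30*time.Minute))  // 2 hours, 30 minutes
}
```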
|
||||
|
||||
type RunConfig struct {
|
||||
TestPattern string `flag:"test,Test pattern to run"`
|
||||
Timeout time.Duration `flag:"timeout,default=120m,Test timeout"`
|
||||
FailFast bool `flag:"failfast,default=true,Stop on first test failure"`
|
||||
UsePostgres bool `flag:"postgres,default=false,Use PostgreSQL instead of SQLite"`
|
||||
GoVersion string `flag:"go-version,Go version to use (auto-detected from go.mod)"`
|
||||
CleanBefore bool `flag:"clean-before,default=true,Clean resources before test"`
|
||||
CleanBefore bool `flag:"clean-before,default=true,Clean stale resources before test"`
|
||||
CleanAfter bool `flag:"clean-after,default=true,Clean resources after test"`
|
||||
KeepOnFailure bool `flag:"keep-on-failure,default=false,Keep containers on test failure"`
|
||||
LogsDir string `flag:"logs-dir,default=control_logs,Control logs directory"`
|
||||
@@ -80,7 +27,6 @@ type RunConfig struct {
|
||||
Stats bool `flag:"stats,default=false,Collect and display container resource usage statistics"`
|
||||
HSMemoryLimit float64 `flag:"hs-memory-limit,default=0,Fail test if any Headscale container exceeds this memory limit in MB (0 = disabled)"`
|
||||
TSMemoryLimit float64 `flag:"ts-memory-limit,default=0,Fail test if any Tailscale container exceeds this memory limit in MB (0 = disabled)"`
|
||||
Force bool `flag:"force,default=false,Kill any running test and start a new one"`
|
||||
}
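Taken together, these flags map onto invocations of the `hi` runner. A sketch of a typical run, with the flag names taken from the struct above and the test pattern purely illustrative:

```console
go run ./cmd/hi run "TestSubnetRouterMultiNetwork" --postgres --keep-on-failure --timeout=30m
```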
|
||||
|
||||
// runIntegrationTest executes the integration test workflow.
|
||||
@@ -98,23 +44,6 @@ func runIntegrationTest(env *command.Env) error {
|
||||
runConfig.GoVersion = detectGoVersion()
|
||||
}
|
||||
|
||||
// Check if another test run is already in progress
|
||||
runningTest, err := checkForRunningTests(env.Context())
|
||||
if err != nil && !errors.Is(err, ErrNoRunningTests) {
|
||||
log.Printf("Warning: failed to check for running tests: %v", err)
|
||||
} else if runningTest != nil {
|
||||
if runConfig.Force {
|
||||
log.Printf("Force flag set, killing existing test run: %s", runningTest.RunID)
|
||||
|
||||
err = killTestContainers(env.Context())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to kill existing test containers: %w", err)
|
||||
}
|
||||
} else {
|
||||
return formatRunningTestError(runningTest)
|
||||
}
|
||||
}
|
||||
|
||||
// Run pre-flight checks
|
||||
if runConfig.Verbose {
|
||||
log.Printf("Running pre-flight system checks...")
|
||||
|
||||
@@ -20,6 +20,7 @@ listen_addr: 127.0.0.1:8080
|
||||
|
||||
# Address to listen to /metrics and /debug, you may want
|
||||
# to keep this endpoint private to your internal network
|
||||
# Use an empty value to disable the metrics listener.
|
||||
metrics_listen_addr: 127.0.0.1:9090
|
||||
|
||||
# Address to listen for gRPC.
|
||||
@@ -361,6 +362,12 @@ unix_socket_permission: "0770"
|
||||
# # required "openid" scope.
|
||||
# scope: ["openid", "profile", "email"]
|
||||
#
|
||||
# # Only verified email addresses are synchronized to the user profile by
|
||||
# # default. Unverified emails may be allowed in case an identity provider
|
||||
# # does not send the "email_verified: true" claim or email verification is
|
||||
# # not required.
|
||||
# email_verified_required: true
|
||||
#
|
||||
# # Provide custom key/value pairs which get sent to the identity provider's
|
||||
# # authorization endpoint.
|
||||
# extra_params:
|
||||
|
||||
@@ -5,15 +5,16 @@ to provide self-hosters and hobbyists with an open-source server they can use fo
|
||||
provides an overview of Headscale's features and compatibility with the Tailscale control server:
|
||||
|
||||
- [x] Full "base" support of Tailscale's features
|
||||
- [x] Node registration
|
||||
- [x] Interactive
|
||||
- [x] Pre authenticated key
|
||||
- [x] [Node registration](../ref/registration.md)
|
||||
- [x] [Web authentication](../ref/registration.md#web-authentication)
|
||||
- [x] [Pre authenticated key](../ref/registration.md#pre-authenticated-key)
|
||||
- [x] [DNS](../ref/dns.md)
|
||||
- [x] [MagicDNS](https://tailscale.com/kb/1081/magicdns)
|
||||
- [x] [Global and restricted nameservers (split DNS)](https://tailscale.com/kb/1054/dns#nameservers)
|
||||
- [x] [search domains](https://tailscale.com/kb/1054/dns#search-domains)
|
||||
- [x] [Extra DNS records (Headscale only)](../ref/dns.md#setting-extra-dns-records)
|
||||
- [x] [Taildrop (File Sharing)](https://tailscale.com/kb/1106/taildrop)
|
||||
- [x] [Tags](../ref/tags.md)
|
||||
- [x] [Routes](../ref/routes.md)
|
||||
- [x] [Subnet routers](../ref/routes.md#subnet-router)
|
||||
- [x] [Exit nodes](../ref/routes.md#exit-node)
|
||||
|
||||
@@ -222,7 +222,7 @@ Allows access to the internet through [exit nodes](routes.md#exit-node). Can onl
|
||||
|
||||
### `autogroup:member`
|
||||
|
||||
Includes all users who are direct members of the tailnet. Does not include users from shared devices.
|
||||
Includes all [personal (untagged) devices](registration.md#identity-model).
|
||||
|
||||
```json
|
||||
{
|
||||
@@ -234,7 +234,7 @@ Includes all users who are direct members of the tailnet. Does not include users
|
||||
|
||||
### `autogroup:tagged`
|
||||
|
||||
Includes all devices that have at least one tag.
|
||||
Includes all devices that [have at least one tag](registration.md#identity-model).
|
||||
|
||||
```json
|
||||
{
|
||||
|
||||
@@ -29,6 +29,7 @@ headscale apikeys expire --prefix <PREFIX>
|
||||
|
||||
- API endpoint: `/api/v1`, e.g. `https://headscale.example.com/api/v1`
|
||||
- Documentation: `/swagger`, e.g. `https://headscale.example.com/swagger`
|
||||
- Headscale Version: `/version`, e.g. `https://headscale.example.com/version`
|
||||
- Authenticate using HTTP Bearer authentication by sending the [API key](#api) with the HTTP `Authorization: Bearer
|
||||
<API_KEY>` header.
|
||||
|
||||
@@ -53,7 +54,7 @@ Headscale server at `/swagger` for details.
|
||||
|
||||
```console
|
||||
curl -H "Authorization: Bearer <API_KEY>" \
|
||||
-d user=<USER> -d key=<KEY> \
|
||||
-d user=<USER> -d key=<REGISTRATION_KEY> \
|
||||
https://headscale.example.com/api/v1/node/register
|
||||
```
|
||||
|
||||
|
||||
@@ -64,6 +64,9 @@ Headscale provides a metrics and debug endpoint. It allows to introspect differe
|
||||
|
||||
Keep the metrics and debug endpoint private to your internal network and don't expose it to the Internet.
|
||||
|
||||
The metrics and debug interface can be disabled completely by setting `metrics_listen_addr: null` in the
|
||||
[configuration file](./configuration.md).
|
||||
|
||||
Query metrics via <http://localhost:9090/metrics> and get an overview of available debug information via
|
||||
<http://localhost:9090/debug/>. Metrics may be queried from outside localhost but the debug interface is subject to
|
||||
additional protection despite listening on all interfaces.
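For example, scraping the metrics endpoint on the default `metrics_listen_addr`:

```console
curl http://localhost:9090/metrics
```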
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
|
||||
This page collects third-party tools, client libraries, and scripts related to headscale.
|
||||
|
||||
- [headscale-operator](https://github.com/infradohq/headscale-operator) - Headscale Kubernetes Operator
|
||||
- [tailscale-manager](https://github.com/singlestore-labs/tailscale-manager) - Dynamically manage Tailscale route
|
||||
advertisements
|
||||
- [headscalebacktosqlite](https://github.com/bigbozza/headscalebacktosqlite) - Migrate headscale from PostgreSQL back to
|
||||
|
||||
@@ -19,5 +19,6 @@ Headscale doesn't provide a built-in web interface but users may pick one from t
|
||||
it offers Local (`docker exec`) and API Mode
|
||||
- [headscale-console](https://github.com/rickli-cloud/headscale-console) - WebAssembly-based client supporting SSH, VNC
|
||||
and RDP with optional self-service capabilities
|
||||
- [headscale-piying](https://github.com/wszgrcy/headscale-piying) - headscale web UI with support for visual ACL configuration
|
||||
|
||||
You can ask for support on our [Discord server](https://discord.gg/c84AZQhmpx) in the "web-interfaces" channel.
|
||||
|
||||
@@ -77,6 +77,7 @@ are configured, a user needs to pass all of them.
|
||||
|
||||
* Check the email domain of each authenticating user against the list of allowed domains and only authorize users
|
||||
whose email domain matches `example.com`.
|
||||
* A verified email address is required [unless email verification is disabled](#control-email-verification).
|
||||
* Access allowed: `alice@example.com`
|
||||
* Access denied: `bob@example.net`
|
||||
|
||||
@@ -93,6 +94,7 @@ are configured, a user needs to pass all of them.
|
||||
|
||||
* Check the email address of each authenticating user against the list of allowed email addresses and only authorize
|
||||
users whose email is part of the `allowed_users` list.
|
||||
* A verified email address is required [unless email verification is disabled](#control-email-verification).
|
||||
* Access allowed: `alice@example.com`, `bob@example.net`
|
||||
* Access denied: `mallory@example.net`
|
||||
|
||||
@@ -123,6 +125,23 @@ are configured, a user needs to pass all of them.
|
||||
- "headscale_users"
|
||||
```
|
||||
|
||||
### Control email verification
|
||||
|
||||
Headscale uses the `email` claim from the identity provider to synchronize the email address to its user profile. By
|
||||
default, a user's email address is only synchronized when the identity provider reports the email address as verified
|
||||
via the `email_verified: true` claim.
|
||||
|
||||
Unverified emails may be allowed in case an identity provider does not send the `email_verified` claim or email
|
||||
verification is not required. In that case, a user's email address is always synchronized to the user profile.
|
||||
|
||||
```yaml hl_lines="5"
|
||||
oidc:
|
||||
issuer: "https://sso.example.com"
|
||||
client_id: "headscale"
|
||||
client_secret: "generated-secret"
|
||||
email_verified_required: false
|
||||
```
|
||||
|
||||
### Customize node expiration
|
||||
|
||||
The node expiration is the amount of time a node is authenticated with OpenID Connect until it expires and needs to
|
||||
@@ -189,7 +208,7 @@ endpoint.
|
||||
|
||||
| Headscale profile | OIDC claim | Notes / examples |
|
||||
| ------------------- | -------------------- | ------------------------------------------------------------------------------------------------- |
|
||||
| email address | `email` | Only used when `email_verified: true` |
|
||||
| email address | `email` | Only verified emails are synchronized, unless `email_verified_required: false` is configured |
|
||||
| display name | `name` | eg: `Sam Smith` |
|
||||
| username | `preferred_username` | Depends on identity provider, eg: `ssmith`, `ssmith@idp.example.com`, `\\example.com\ssmith` |
|
||||
| profile picture | `picture` | URL to a profile picture or avatar |
|
||||
@@ -205,8 +224,6 @@ endpoint.
|
||||
- The username must be at least two characters long.
|
||||
- It must only contain letters, digits, hyphens, dots, underscores, and up to a single `@`.
|
||||
- The username must start with a letter.
|
||||
- A user's email address is only synchronized to the local user profile when the identity provider marks the email
|
||||
address as verified (`email_verified: true`).
|
||||
|
||||
Please see the [GitHub label "OIDC"](https://github.com/juanfont/headscale/labels/OIDC) for OIDC related issues.
|
||||
|
||||
@@ -233,7 +250,7 @@ Authelia is fully supported by Headscale.
|
||||
### Authentik
|
||||
|
||||
- Authentik is fully supported by Headscale.
|
||||
- [Headscale does not JSON Web Encryption](https://github.com/juanfont/headscale/issues/2446). Leave the field
|
||||
- [Headscale does not support JSON Web Encryption](https://github.com/juanfont/headscale/issues/2446). Leave the field
|
||||
`Encryption Key` in the providers section unset.
|
||||
|
||||
### Google OAuth
|
||||
|
||||
141 docs/ref/registration.md Normal file
@@ -0,0 +1,141 @@
|
||||
# Registration methods
|
||||
|
||||
Headscale supports multiple ways to register a node. The preferred registration method depends on the identity of a node
|
||||
and your use case.
|
||||
|
||||
## Identity model
|
||||
|
||||
Tailscale's identity model distinguishes between personal and tagged nodes:
|
||||
|
||||
- A personal node (or user-owned node) is owned by a human and typically refers to end-user devices such as laptops,
|
||||
workstations or mobile phones. End-user devices are managed by a single user.
|
||||
- A tagged node (or service-based node or non-human node) provides services to the network. Common examples include web-
|
||||
and database servers. Those nodes are typically managed by a team of users. Some additional restrictions apply for
|
||||
tagged nodes, e.g. a tagged node is not allowed to [Tailscale SSH](https://tailscale.com/kb/1193/tailscale-ssh) into a
|
||||
personal node.
|
||||
|
||||
Headscale implements Tailscale's identity model and distinguishes between personal and tagged nodes where a personal
|
||||
node is owned by a Headscale user and a tagged node is owned by a tag. Tagged devices are grouped under the special user
|
||||
`tagged-devices`.
|
||||
|
||||
## Registration methods
|
||||
|
||||
There are two main ways to register new nodes, [web authentication](#web-authentication) and [registration with a pre
|
||||
authenticated key](#pre-authenticated-key). Both methods can be used to register personal and tagged nodes.
|
||||
|
||||
### Web authentication
|
||||
|
||||
Web authentication is the default method to register a new node. It's interactive: the client initiates the
|
||||
registration and the Headscale administrator needs to approve the new node before it is allowed to join the network. A
|
||||
node can be approved with:
|
||||
|
||||
- Headscale CLI (described in this documentation)
|
||||
- [Headscale API](api.md)
|
||||
- Or delegated to an identity provider via [OpenID Connect](oidc.md)
|
||||
|
||||
Web authentication relies on the presence of a Headscale user. Use the `headscale users` command to create a new user:
|
||||
|
||||
```console
|
||||
headscale users create <USER>
|
||||
```
|
||||
|
||||
=== "Personal devices"
|
||||
|
||||
Run `tailscale up` to log in your personal device:
|
||||
|
||||
```console
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL>
|
||||
```
|
||||
|
||||
Usually, a browser window with further instructions is opened. This page explains how to complete the registration
|
||||
on your Headscale server and it also prints the registration key required to approve the node:
|
||||
|
||||
```console
|
||||
headscale nodes register --user <USER> --key <REGISTRATION_KEY>
|
||||
```
|
||||
|
||||
Congratulations, the registration of your personal node is complete and it should be listed as "online" in the output of
|
||||
`headscale nodes list`. The "User" column displays `<USER>` as the owner of the node.
|
||||
|
||||
=== "Tagged devices"
|
||||
|
||||
Your Headscale user needs to be authorized to register tagged devices. This authorization is specified in the
|
||||
[`tagOwners`](https://tailscale.com/kb/1337/policy-syntax#tag-owners) section of the [ACL](acls.md). A simple
|
||||
example looks like this:
|
||||
|
||||
```json title="The user alice can register nodes tagged with tag:server"
|
||||
{
|
||||
"tagOwners": {
|
||||
"tag:server": ["alice@"]
|
||||
},
|
||||
// more rules
|
||||
}
|
||||
```
|
||||
|
||||
Run `tailscale up` and provide at least one tag to log in a tagged device:
|
||||
|
||||
```console
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags tag:<TAG>
|
||||
```
|
||||
|
||||
Usually, a browser window with further instructions is opened. This page explains how to complete the registration
|
||||
on your Headscale server and it also prints the registration key required to approve the node:
|
||||
|
||||
```console
|
||||
headscale nodes register --user <USER> --key <REGISTRATION_KEY>
|
||||
```
|
||||
|
||||
Headscale checks that `<USER>` is allowed to register a node with the specified tag(s) and then transfers ownership
|
||||
of the new node to the special user `tagged-devices`. The registration of a tagged node is complete and it should be
|
||||
listed as "online" in the output of `headscale nodes list`. The "User" column displays `tagged-devices` as the owner
|
||||
of the node. See the "Tags" column for the list of assigned tags.
|
||||
|
||||
### Pre authenticated key
|
||||
|
||||
Registration with a pre authenticated key (or auth key) is a non-interactive way to register a new node. The Headscale
|
||||
administrator creates a preauthkey upfront and this preauthkey can then be used to register a node non-interactively.
|
||||
It's best suited for automation.
|
||||
|
||||
=== "Personal devices"
|
||||
|
||||
A personal node is always assigned to a Headscale user. Use the `headscale users` command to create a new user:
|
||||
|
||||
```console
|
||||
headscale users create <USER>
|
||||
```
|
||||
|
||||
Use the `headscale users list` command to learn its `<USER_ID>` and create a new pre authenticated key for your user:
|
||||
|
||||
```console
|
||||
headscale preauthkeys create --user <USER_ID>
|
||||
```
|
||||
|
||||
The above prints a pre authenticated key with the default settings (can be used once and is valid for one hour). Use
|
||||
this auth key to register a node non-interactively:
|
||||
|
||||
```console
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
```
|
||||
|
||||
Congratulations, the registration of your personal node is complete and it should be listed as "online" in the output of
|
||||
`headscale nodes list`. The "User" column displays `<USER>` as the owner of the node.
|
||||
|
||||
=== "Tagged devices"
|
||||
|
||||
Create a new pre authenticated key and provide at least one tag:
|
||||
|
||||
```console
|
||||
headscale preauthkeys create --tags tag:<TAG>
|
||||
```
|
||||
|
||||
The above prints a pre authenticated key with the default settings (can be used once and is valid for one hour). Use
|
||||
this auth key to register a node non-interactively. You don't need to provide the `--advertise-tags` parameter as
|
||||
the tags are automatically read from the pre authenticated key:
|
||||
|
||||
```console
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
```
|
||||
|
||||
The registration of a tagged node is complete and it should be listed as "online" in the output of `headscale nodes
|
||||
list`. The "User" column displays `tagged-devices` as the owner of the node. See the "Tags" column for the list of
|
||||
assigned tags.
|
||||
@@ -42,8 +42,9 @@ can be used.
|
||||
|
||||
```console
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myrouter | | 10.0.0.0/8, 192.168.0.0/24 |
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myrouter | | 10.0.0.0/8 |
|
||||
| | | 192.168.0.0/24 |
|
||||
```
|
||||
|
||||
Approve all desired routes of a subnet router by specifying them as a comma-separated list:
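The approval command itself falls outside this hunk; an illustrative sketch, with flag names assumed from the headscale CLI:

```console
headscale nodes approve-routes --identifier 1 --routes 10.0.0.0/8,192.168.0.0/24
```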
|
||||
@@ -57,8 +58,9 @@ The node `myrouter` can now route the IPv4 networks `10.0.0.0/8` and `192.168.0.
|
||||
|
||||
```console
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myrouter | 10.0.0.0/8, 192.168.0.0/24 | 10.0.0.0/8, 192.168.0.0/24 | 10.0.0.0/8, 192.168.0.0/24
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myrouter | 10.0.0.0/8 | 10.0.0.0/8 | 10.0.0.0/8
|
||||
| | 192.168.0.0/24 | 192.168.0.0/24 | 192.168.0.0/24
|
||||
```
|
||||
|
||||
#### Use the subnet router
|
||||
@@ -109,9 +111,9 @@ approval of routes served with a subnet router.
|
||||
|
||||
The ACL snippet below defines the tag `tag:router` owned by the user `alice`. This tag is used for `routes` in the
|
||||
`autoApprovers` section. The IPv4 route `192.168.0.0/24` is automatically approved once announced by a subnet router
|
||||
owned by the user `alice` and that also advertises the tag `tag:router`.
|
||||
that advertises the tag `tag:router`.
|
||||
|
||||
```json title="Subnet routers owned by alice and tagged with tag:router are automatically approved"
|
||||
```json title="Subnet routers tagged with tag:router are automatically approved"
|
||||
{
|
||||
"tagOwners": {
|
||||
"tag:router": ["alice@"]
|
||||
@@ -168,8 +170,9 @@ available, but needs to be approved:
|
||||
|
||||
```console
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myexit | | 0.0.0.0/0, ::/0 |
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myexit | | 0.0.0.0/0 |
|
||||
| | | ::/0 |
|
||||
```
|
||||
|
||||
For exit nodes, it is sufficient to approve either the IPv4 or IPv6 route. The other will be approved automatically.
|
||||
@@ -183,8 +186,9 @@ The node `myexit` is now approved as exit node for the tailnet:
|
||||
|
||||
```console
|
||||
$ headscale nodes list-routes
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myexit | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0
|
||||
ID | Hostname | Approved | Available | Serving (Primary)
|
||||
1 | myexit | 0.0.0.0/0 | 0.0.0.0/0 | 0.0.0.0/0
|
||||
| | ::/0 | ::/0 | ::/0
|
||||
```
|
||||
|
||||
#### Use the exit node
|
||||
@@ -256,10 +260,9 @@ in a tailnet. Headscale supports the `autoApprovers` section of an ACL to automa
|
||||
soon as it joins the tailnet.
|
||||
|
||||
The ACL snippet below defines the tag `tag:exit` owned by the user `alice`. This tag is used for `exitNode` in the
|
||||
`autoApprovers` section. A new exit node which is owned by the user `alice` and that also advertises the tag `tag:exit`
|
||||
is automatically approved:
|
||||
`autoApprovers` section. A new exit node that advertises the tag `tag:exit` is automatically approved:
|
||||
|
||||
```json title="Exit nodes owned by alice and tagged with tag:exit are automatically approved"
|
||||
```json title="Exit nodes tagged with tag:exit are automatically approved"
|
||||
{
|
||||
"tagOwners": {
|
||||
"tag:exit": ["alice@"]
|
||||
|
||||
54 docs/ref/tags.md Normal file
@@ -0,0 +1,54 @@
|
||||
# Tags
|
||||
|
||||
Headscale supports Tailscale tags. Please read [Tailscale's tag documentation](https://tailscale.com/kb/1068/tags) to
|
||||
learn how tags work and how to use them.
|
||||
|
||||
Tags can be applied during [node registration](registration.md):
|
||||
|
||||
- using the `--advertise-tags` flag, see [web authentication for tagged devices](registration.md#__tabbed_1_2)
|
||||
- using a tagged pre authenticated key, see [how to create and use it](registration.md#__tabbed_2_2)
|
||||
|
||||
Administrators can manage tags with:
|
||||
|
||||
- Headscale CLI
|
||||
- [Headscale API](api.md)
|
||||
|
||||
## Common operations
|
||||
|
||||
### Manage tags for a node
|
||||
|
||||
Run `headscale nodes list` to list the tags for a node.
|
||||
|
||||
Use the `headscale nodes tag` command to modify the tags for a node. At least one tag is required and multiple tags can
|
||||
be provided as a comma-separated list. The following command sets the tags `tag:server` and `tag:prod` on the node with ID 1:
|
||||
|
||||
```console
|
||||
headscale nodes tag -i 1 -t tag:server,tag:prod
|
||||
```
|
||||
|
||||
### Convert from personal to tagged node
|
||||
|
||||
Use the `headscale nodes tag` command to convert a personal (user-owned) node to a tagged node:
|
||||
|
||||
```console
|
||||
headscale nodes tag -i <NODE_ID> -t <TAG>
|
||||
```
|
||||
|
||||
The node is now owned by the special user `tagged-devices` and has the specified tags assigned to it.
|
||||
|
||||
### Convert from tagged to personal node
|
||||
|
||||
Tagged nodes can be converted back to personal (user-owned) nodes by re-authenticating with:
|
||||
|
||||
```console
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags= --force-reauth
|
||||
```
|
||||
|
||||
Usually, a browser window with further instructions is opened. This page explains how to complete the registration on
|
||||
your Headscale server and it also prints the registration key required to approve the node:
|
||||
|
||||
```console
|
||||
headscale nodes register --user <USER> --key <REGISTRATION_KEY>
|
||||
```
|
||||
|
||||
All previously assigned tags get removed and the node is now owned by the user specified in the above command.
|
||||
@@ -6,7 +6,7 @@ This documentation has the goal of showing how a user can use the official Andro
|
||||
|
||||
Install the official Tailscale Android client from the [Google Play Store](https://play.google.com/store/apps/details?id=com.tailscale.ipn) or [F-Droid](https://f-droid.org/packages/com.tailscale.ipn/).
|
||||
|
||||
## Connect via normal, interactive login
|
||||
## Connect via web authentication
|
||||
|
||||
- Open the app and select the settings menu in the upper-right corner
|
||||
- Tap on `Accounts`
|
||||
@@ -15,7 +15,7 @@ Install the official Tailscale Android client from the [Google Play Store](https
|
||||
- The client connects automatically as soon as the node registration is complete on headscale. Until then, nothing is
|
||||
visible in the server logs.
|
||||
|
||||
## Connect using a preauthkey
|
||||
## Connect using a pre authenticated key
|
||||
|
||||
- Open the app and select the settings menu in the upper-right corner
|
||||
- Tap on `Accounts`
|
||||
@@ -24,5 +24,5 @@ Install the official Tailscale Android client from the [Google Play Store](https
|
||||
- Open the settings menu in the upper-right corner
|
||||
- Tap on `Accounts`
|
||||
- In the kebab menu icon (three dots) in the upper-right corner select `Use an auth key`
|
||||
- Enter your [preauthkey generated from headscale](../getting-started.md#using-a-preauthkey)
|
||||
- Enter your [preauthkey generated from headscale](../../ref/registration.md#pre-authenticated-key)
|
||||
- If needed, tap `Log in` on the main screen. You should now be connected to your headscale.
|
||||
|
||||
@@ -60,10 +60,9 @@ options, run:
|
||||
|
||||
## Manage headscale users
|
||||
|
||||
In headscale, a node (also known as machine or device) is always assigned to a
|
||||
headscale user. Such a headscale user may have many nodes assigned to them and
|
||||
can be managed with the `headscale users` command. Invoke the built-in help for
|
||||
more information: `headscale users --help`.
|
||||
In headscale, a node (also known as machine or device) is [typically assigned to a headscale
|
||||
user](../ref/registration.md#identity-model). Such a headscale user may have many nodes assigned to them and can be
|
||||
managed with the `headscale users` command. Invoke the built-in help for more information: `headscale users --help`.
|
||||
|
||||
### Create a headscale user
|
||||
|
||||
@@ -97,11 +96,12 @@ more information: `headscale users --help`.
|
||||
|
||||
## Register a node
|
||||
|
||||
One has to register a node first to use headscale as coordination with Tailscale. The following examples work for the
|
||||
Tailscale client on Linux/BSD operating systems. Alternatively, follow the instructions to connect
|
||||
[Android](connect/android.md), [Apple](connect/apple.md) or [Windows](connect/windows.md) devices.
|
||||
One has to [register a node](../ref/registration.md) first to use headscale as coordination server with Tailscale. The
|
||||
following examples work for the Tailscale client on Linux/BSD operating systems. Alternatively, follow the instructions
|
||||
to connect [Android](connect/android.md), [Apple](connect/apple.md) or [Windows](connect/windows.md) devices. Read
|
||||
[registration methods](../ref/registration.md) for an overview of available registration methods.
|
||||
|
||||
### Normal, interactive login
|
||||
### [Web authentication](../ref/registration.md#web-authentication)
|
||||
|
||||
On a client machine, run the `tailscale up` command and provide the FQDN of your headscale instance as argument:
|
||||
|
||||
@@ -109,23 +109,23 @@ On a client machine, run the `tailscale up` command and provide the FQDN of your
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL>
|
||||
```
|
||||
|
||||
Usually, a browser window with further instructions is opened and contains the value for `<YOUR_MACHINE_KEY>`. Approve
|
||||
and register the node on your headscale server:
|
||||
Usually, a browser window with further instructions is opened. This page explains how to complete the registration on
|
||||
your headscale server and it also prints the registration key required to approve the node:
|
||||
|
||||
=== "Native"
|
||||
|
||||
```shell
|
||||
headscale nodes register --user <USER> --key <YOUR_MACHINE_KEY>
|
||||
headscale nodes register --user <USER> --key <REGISTRATION_KEY>
|
||||
```
|
||||
|
||||
=== "Container"
|
||||
|
||||
```shell
|
||||
docker exec -it headscale \
|
||||
headscale nodes register --user <USER> --key <YOUR_MACHINE_KEY>
|
||||
headscale nodes register --user <USER> --key <REGISTRATION_KEY>
|
||||
```
|
||||
|
||||
### Using a preauthkey
|
||||
### [Pre authenticated key](../ref/registration.md#pre-authenticated-key)
|
||||
|
||||
It is also possible to generate a preauthkey and register a node non-interactively. First, generate a preauthkey on the
|
||||
headscale instance. By default, the key is valid for one hour and can only be used once (see `headscale preauthkeys
|
||||
|
||||
6 flake.lock generated
@@ -20,11 +20,11 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1760533177,
|
||||
"narHash": "sha256-OwM1sFustLHx+xmTymhucZuNhtq98fHIbfO8Swm5L8A=",
|
||||
"lastModified": 1770141374,
|
||||
"narHash": "sha256-yD4K/vRHPwXbJf5CK3JkptBA6nFWUKNX/jlFp2eKEQc=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "35f590344ff791e6b1d6d6b8f3523467c9217caf",
|
||||
"rev": "41965737c1797c1d83cfb0b644ed0840a6220bd1",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
36 flake.nix
@@ -23,11 +23,11 @@
|
||||
default = headscale;
|
||||
};
|
||||
|
||||
overlay = _: prev:
|
||||
overlays.default = _: prev:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${prev.system};
|
||||
pkgs = nixpkgs.legacyPackages.${prev.stdenv.hostPlatform.system};
|
||||
buildGo = pkgs.buildGo125Module;
|
||||
vendorHash = "sha256-VOi4PGZ8I+2MiwtzxpKc/4smsL5KcH/pHVkjJfAFPJ0=";
|
||||
vendorHash = "sha256-jkeB9XUTEGt58fPOMpE4/e3+JQoMQTgf0RlthVBmfG0=";
|
||||
in
|
||||
{
|
||||
headscale = buildGo {
|
||||
@@ -62,16 +62,16 @@
|
||||
|
||||
protoc-gen-grpc-gateway = buildGo rec {
|
||||
pname = "grpc-gateway";
|
||||
version = "2.24.0";
|
||||
version = "2.27.7";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "grpc-ecosystem";
|
||||
repo = "grpc-gateway";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-lUEoqXJF1k4/il9bdDTinkUV5L869njZNYqObG/mHyA=";
|
||||
sha256 = "sha256-6R0EhNnOBEISJddjkbVTcBvUuU5U3r9Hu2UPfAZDep4=";
|
||||
};
|
||||
|
||||
vendorHash = "sha256-Ttt7bPKU+TMKRg5550BS6fsPwYp0QJqcZ7NLrhttSdw=";
|
||||
vendorHash = "sha256-SOAbRrzMf2rbKaG9PGSnPSLY/qZVgbHcNjOLmVonycY=";
|
||||
|
||||
nativeBuildInputs = [ pkgs.installShellFiles ];
|
||||
|
||||
@@ -80,16 +80,16 @@
|
||||
|
||||
protobuf-language-server = buildGo rec {
|
||||
pname = "protobuf-language-server";
|
||||
version = "2546944";
|
||||
version = "1cf777d";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "lasorda";
|
||||
repo = "protobuf-language-server";
|
||||
rev = "${version}";
|
||||
sha256 = "sha256-Cbr3ktT86RnwUntOiDKRpNTClhdyrKLTQG2ZEd6fKDc=";
|
||||
rev = "1cf777de4d35a6e493a689e3ca1a6183ce3206b6";
|
||||
sha256 = "sha256-9MkBQPxr/TDr/sNz/Sk7eoZwZwzdVbE5u6RugXXk5iY=";
|
||||
};
|
||||
|
||||
vendorHash = "sha256-PfT90dhfzJZabzLTb1D69JCO+kOh2khrlpF5mCDeypk=";
|
||||
vendorHash = "sha256-4nTpKBe7ekJsfQf+P6edT/9Vp2SBYbKz1ITawD3bhkI=";
|
||||
|
||||
subPackages = [ "." ];
|
||||
};
|
||||
@@ -129,7 +129,7 @@
|
||||
(system:
|
||||
let
|
||||
pkgs = import nixpkgs {
|
||||
overlays = [ self.overlay ];
|
||||
overlays = [ self.overlays.default ];
|
||||
inherit system;
|
||||
};
|
||||
buildDeps = with pkgs; [ git go_1_25 gnumake ];
|
||||
@@ -182,9 +182,9 @@
|
||||
config.Entrypoint = [ (pkgs.headscale + "/bin/headscale") ];
|
||||
};
|
||||
in
|
||||
rec {
|
||||
{
|
||||
# `nix develop`
|
||||
devShell = pkgs.mkShell {
|
||||
devShells.default = pkgs.mkShell {
|
||||
buildInputs =
|
||||
devDeps
|
||||
++ [
|
||||
@@ -219,17 +219,19 @@
|
||||
packages = with pkgs; {
|
||||
inherit headscale;
|
||||
inherit headscale-docker;
|
||||
default = headscale;
|
||||
};
|
||||
defaultPackage = pkgs.headscale;
|
||||
|
||||
# `nix run`
|
||||
apps.headscale = flake-utils.lib.mkApp {
|
||||
drv = packages.headscale;
|
||||
drv = pkgs.headscale;
|
||||
};
|
||||
apps.default = flake-utils.lib.mkApp {
|
||||
drv = pkgs.headscale;
|
||||
};
|
||||
apps.default = apps.headscale;
|
||||
|
||||
checks = {
|
||||
headscale = pkgs.nixosTest (import ./nix/tests/headscale.nix);
|
||||
headscale = pkgs.testers.nixosTest (import ./nix/tests/headscale.nix);
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.10
|
||||
// protoc-gen-go v1.36.11
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/apikey.proto
|
||||
|
||||
@@ -189,6 +189,7 @@ func (x *CreateApiKeyResponse) GetApiKey() string {
|
||||
type ExpireApiKeyRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
|
||||
Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
@@ -230,6 +231,13 @@ func (x *ExpireApiKeyRequest) GetPrefix() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ExpireApiKeyRequest) GetId() uint64 {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type ExpireApiKeyResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
@@ -349,6 +357,7 @@ func (x *ListApiKeysResponse) GetApiKeys() []*ApiKey {
|
||||
type DeleteApiKeyRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
|
||||
Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
@@ -390,6 +399,13 @@ func (x *DeleteApiKeyRequest) GetPrefix() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *DeleteApiKeyRequest) GetId() uint64 {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type DeleteApiKeyResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
@@ -445,15 +461,17 @@ const file_headscale_v1_apikey_proto_rawDesc = "" +
|
||||
"expiration\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\n" +
|
||||
"expiration\"/\n" +
|
||||
"\x14CreateApiKeyResponse\x12\x17\n" +
|
||||
"\aapi_key\x18\x01 \x01(\tR\x06apiKey\"-\n" +
|
||||
"\aapi_key\x18\x01 \x01(\tR\x06apiKey\"=\n" +
|
||||
"\x13ExpireApiKeyRequest\x12\x16\n" +
|
||||
"\x06prefix\x18\x01 \x01(\tR\x06prefix\"\x16\n" +
|
||||
"\x06prefix\x18\x01 \x01(\tR\x06prefix\x12\x0e\n" +
|
||||
"\x02id\x18\x02 \x01(\x04R\x02id\"\x16\n" +
|
||||
"\x14ExpireApiKeyResponse\"\x14\n" +
|
||||
"\x12ListApiKeysRequest\"F\n" +
|
||||
"\x13ListApiKeysResponse\x12/\n" +
|
||||
"\bapi_keys\x18\x01 \x03(\v2\x14.headscale.v1.ApiKeyR\aapiKeys\"-\n" +
|
||||
"\bapi_keys\x18\x01 \x03(\v2\x14.headscale.v1.ApiKeyR\aapiKeys\"=\n" +
|
||||
"\x13DeleteApiKeyRequest\x12\x16\n" +
|
||||
"\x06prefix\x18\x01 \x01(\tR\x06prefix\"\x16\n" +
|
||||
"\x06prefix\x18\x01 \x01(\tR\x06prefix\x12\x0e\n" +
|
||||
"\x02id\x18\x02 \x01(\x04R\x02id\"\x16\n" +
|
||||
"\x14DeleteApiKeyResponseB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
|
||||
var (
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.10
|
||||
// protoc-gen-go v1.36.11
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/device.proto
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.10
|
||||
// protoc-gen-go v1.36.11
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/headscale.proto
|
||||
|
||||
|
||||
@@ -43,6 +43,9 @@ func request_HeadscaleService_CreateUser_0(ctx context.Context, marshaler runtim
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.CreateUser(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
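The `io.Copy(io.Discard, req.Body)` calls inserted throughout these generated handlers drain any unread request body. In Go's `net/http`, returning from a handler without consuming the body can prevent the underlying keep-alive connection from being reused; draining avoids that. A minimal self-contained sketch of the idiom outside the generated code:

```go
package main

import (
	"io"
	"net/http"
)

func handler(w http.ResponseWriter, req *http.Request) {
	// Decode whatever the endpoint needs from req.Body here (possibly nothing).

	// Drain the remainder so the keep-alive connection can be reused.
	if req.Body != nil {
		_, _ = io.Copy(io.Discard, req.Body)
	}

	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
```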
|
||||
@@ -65,6 +68,9 @@ func request_HeadscaleService_RenameUser_0(ctx context.Context, marshaler runtim
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["old_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "old_id")
|
||||
@@ -117,6 +123,9 @@ func request_HeadscaleService_DeleteUser_0(ctx context.Context, marshaler runtim
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
|
||||
@@ -154,6 +163,9 @@ func request_HeadscaleService_ListUsers_0(ctx context.Context, marshaler runtime
|
||||
protoReq ListUsersRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
@@ -187,6 +199,9 @@ func request_HeadscaleService_CreatePreAuthKey_0(ctx context.Context, marshaler
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.CreatePreAuthKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -211,6 +226,9 @@ func request_HeadscaleService_ExpirePreAuthKey_0(ctx context.Context, marshaler
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.ExpirePreAuthKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -234,6 +252,9 @@ func request_HeadscaleService_DeletePreAuthKey_0(ctx context.Context, marshaler
|
||||
protoReq DeletePreAuthKeyRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
@@ -259,18 +280,13 @@ func local_request_HeadscaleService_DeletePreAuthKey_0(ctx context.Context, mars
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
var filter_HeadscaleService_ListPreAuthKeys_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
|
||||
|
||||
func request_HeadscaleService_ListPreAuthKeys_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq ListPreAuthKeysRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ListPreAuthKeys_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.ListPreAuthKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
@@ -281,12 +297,6 @@ func local_request_HeadscaleService_ListPreAuthKeys_0(ctx context.Context, marsh
|
||||
protoReq ListPreAuthKeysRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ListPreAuthKeys_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := server.ListPreAuthKeys(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -299,6 +309,9 @@ func request_HeadscaleService_DebugCreateNode_0(ctx context.Context, marshaler r
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.DebugCreateNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -321,6 +334,9 @@ func request_HeadscaleService_GetNode_0(ctx context.Context, marshaler runtime.M
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
@@ -360,6 +376,9 @@ func request_HeadscaleService_SetTags_0(ctx context.Context, marshaler runtime.M
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
@@ -402,6 +421,9 @@ func request_HeadscaleService_SetApprovedRoutes_0(ctx context.Context, marshaler
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
@@ -442,6 +464,9 @@ func request_HeadscaleService_RegisterNode_0(ctx context.Context, marshaler runt
|
||||
protoReq RegisterNodeRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
@@ -473,6 +498,9 @@ func request_HeadscaleService_DeleteNode_0(ctx context.Context, marshaler runtim
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
@@ -511,6 +539,9 @@ func request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler runtim
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
@@ -559,6 +590,9 @@ func request_HeadscaleService_RenameNode_0(ctx context.Context, marshaler runtim
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["node_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
|
||||
@@ -612,6 +646,9 @@ func request_HeadscaleService_ListNodes_0(ctx context.Context, marshaler runtime
|
||||
protoReq ListNodesRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
@@ -644,6 +681,9 @@ func request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marshaler r
|
||||
protoReq BackfillNodeIPsRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
@@ -677,6 +717,9 @@ func request_HeadscaleService_CreateApiKey_0(ctx context.Context, marshaler runt
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.CreateApiKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -701,6 +744,9 @@ func request_HeadscaleService_ExpireApiKey_0(ctx context.Context, marshaler runt
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.ExpireApiKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -722,6 +768,9 @@ func request_HeadscaleService_ListApiKeys_0(ctx context.Context, marshaler runti
|
||||
protoReq ListApiKeysRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.ListApiKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -735,12 +784,17 @@ func local_request_HeadscaleService_ListApiKeys_0(ctx context.Context, marshaler
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
var filter_HeadscaleService_DeleteApiKey_0 = &utilities.DoubleArray{Encoding: map[string]int{"prefix": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
|
||||
|
||||
func request_HeadscaleService_DeleteApiKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq DeleteApiKeyRequest
|
||||
metadata runtime.ServerMetadata
|
||||
err error
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
val, ok := pathParams["prefix"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "prefix")
|
||||
@@ -749,6 +803,12 @@ func request_HeadscaleService_DeleteApiKey_0(ctx context.Context, marshaler runt
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "prefix", err)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_DeleteApiKey_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := client.DeleteApiKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -767,6 +827,12 @@ func local_request_HeadscaleService_DeleteApiKey_0(ctx context.Context, marshale
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "prefix", err)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_DeleteApiKey_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := server.DeleteApiKey(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -776,6 +842,9 @@ func request_HeadscaleService_GetPolicy_0(ctx context.Context, marshaler runtime
|
||||
protoReq GetPolicyRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.GetPolicy(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -797,6 +866,9 @@ func request_HeadscaleService_SetPolicy_0(ctx context.Context, marshaler runtime
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.SetPolicy(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -818,6 +890,9 @@ func request_HeadscaleService_Health_0(ctx context.Context, marshaler runtime.Ma
|
||||
protoReq HealthRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.Health(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.5.1
|
||||
// - protoc-gen-go-grpc v1.6.0
|
||||
// - protoc (unknown)
|
||||
// source: headscale/v1/headscale.proto
|
||||
|
||||
@@ -387,79 +387,79 @@ type HeadscaleServiceServer interface {
|
||||
type UnimplementedHeadscaleServiceServer struct{}
|
||||
|
||||
func (UnimplementedHeadscaleServiceServer) CreateUser(context.Context, *CreateUserRequest) (*CreateUserResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateUser not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method CreateUser not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) RenameUser(context.Context, *RenameUserRequest) (*RenameUserResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RenameUser not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method RenameUser not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DeleteUser(context.Context, *DeleteUserRequest) (*DeleteUserResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteUser not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method DeleteUser not implemented")
|
||||
}
|
func (UnimplementedHeadscaleServiceServer) ListUsers(context.Context, *ListUsersRequest) (*ListUsersResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method ListUsers not implemented")
+	return nil, status.Error(codes.Unimplemented, "method ListUsers not implemented")
}
func (UnimplementedHeadscaleServiceServer) CreatePreAuthKey(context.Context, *CreatePreAuthKeyRequest) (*CreatePreAuthKeyResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method CreatePreAuthKey not implemented")
+	return nil, status.Error(codes.Unimplemented, "method CreatePreAuthKey not implemented")
}
func (UnimplementedHeadscaleServiceServer) ExpirePreAuthKey(context.Context, *ExpirePreAuthKeyRequest) (*ExpirePreAuthKeyResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method ExpirePreAuthKey not implemented")
+	return nil, status.Error(codes.Unimplemented, "method ExpirePreAuthKey not implemented")
}
func (UnimplementedHeadscaleServiceServer) DeletePreAuthKey(context.Context, *DeletePreAuthKeyRequest) (*DeletePreAuthKeyResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method DeletePreAuthKey not implemented")
+	return nil, status.Error(codes.Unimplemented, "method DeletePreAuthKey not implemented")
}
func (UnimplementedHeadscaleServiceServer) ListPreAuthKeys(context.Context, *ListPreAuthKeysRequest) (*ListPreAuthKeysResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method ListPreAuthKeys not implemented")
+	return nil, status.Error(codes.Unimplemented, "method ListPreAuthKeys not implemented")
}
func (UnimplementedHeadscaleServiceServer) DebugCreateNode(context.Context, *DebugCreateNodeRequest) (*DebugCreateNodeResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method DebugCreateNode not implemented")
+	return nil, status.Error(codes.Unimplemented, "method DebugCreateNode not implemented")
}
func (UnimplementedHeadscaleServiceServer) GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method GetNode not implemented")
+	return nil, status.Error(codes.Unimplemented, "method GetNode not implemented")
}
func (UnimplementedHeadscaleServiceServer) SetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method SetTags not implemented")
+	return nil, status.Error(codes.Unimplemented, "method SetTags not implemented")
}
func (UnimplementedHeadscaleServiceServer) SetApprovedRoutes(context.Context, *SetApprovedRoutesRequest) (*SetApprovedRoutesResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method SetApprovedRoutes not implemented")
+	return nil, status.Error(codes.Unimplemented, "method SetApprovedRoutes not implemented")
}
func (UnimplementedHeadscaleServiceServer) RegisterNode(context.Context, *RegisterNodeRequest) (*RegisterNodeResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RegisterNode not implemented")
+	return nil, status.Error(codes.Unimplemented, "method RegisterNode not implemented")
}
func (UnimplementedHeadscaleServiceServer) DeleteNode(context.Context, *DeleteNodeRequest) (*DeleteNodeResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method DeleteNode not implemented")
+	return nil, status.Error(codes.Unimplemented, "method DeleteNode not implemented")
}
func (UnimplementedHeadscaleServiceServer) ExpireNode(context.Context, *ExpireNodeRequest) (*ExpireNodeResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method ExpireNode not implemented")
+	return nil, status.Error(codes.Unimplemented, "method ExpireNode not implemented")
}
func (UnimplementedHeadscaleServiceServer) RenameNode(context.Context, *RenameNodeRequest) (*RenameNodeResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RenameNode not implemented")
+	return nil, status.Error(codes.Unimplemented, "method RenameNode not implemented")
}
func (UnimplementedHeadscaleServiceServer) ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method ListNodes not implemented")
+	return nil, status.Error(codes.Unimplemented, "method ListNodes not implemented")
}
func (UnimplementedHeadscaleServiceServer) BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method BackfillNodeIPs not implemented")
+	return nil, status.Error(codes.Unimplemented, "method BackfillNodeIPs not implemented")
}
func (UnimplementedHeadscaleServiceServer) CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method CreateApiKey not implemented")
+	return nil, status.Error(codes.Unimplemented, "method CreateApiKey not implemented")
}
func (UnimplementedHeadscaleServiceServer) ExpireApiKey(context.Context, *ExpireApiKeyRequest) (*ExpireApiKeyResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method ExpireApiKey not implemented")
+	return nil, status.Error(codes.Unimplemented, "method ExpireApiKey not implemented")
}
func (UnimplementedHeadscaleServiceServer) ListApiKeys(context.Context, *ListApiKeysRequest) (*ListApiKeysResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method ListApiKeys not implemented")
+	return nil, status.Error(codes.Unimplemented, "method ListApiKeys not implemented")
}
func (UnimplementedHeadscaleServiceServer) DeleteApiKey(context.Context, *DeleteApiKeyRequest) (*DeleteApiKeyResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method DeleteApiKey not implemented")
+	return nil, status.Error(codes.Unimplemented, "method DeleteApiKey not implemented")
}
func (UnimplementedHeadscaleServiceServer) GetPolicy(context.Context, *GetPolicyRequest) (*GetPolicyResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method GetPolicy not implemented")
+	return nil, status.Error(codes.Unimplemented, "method GetPolicy not implemented")
}
func (UnimplementedHeadscaleServiceServer) SetPolicy(context.Context, *SetPolicyRequest) (*SetPolicyResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method SetPolicy not implemented")
+	return nil, status.Error(codes.Unimplemented, "method SetPolicy not implemented")
}
func (UnimplementedHeadscaleServiceServer) Health(context.Context, *HealthRequest) (*HealthResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Health not implemented")
+	return nil, status.Error(codes.Unimplemented, "method Health not implemented")
}
func (UnimplementedHeadscaleServiceServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
func (UnimplementedHeadscaleServiceServer) testEmbeddedByValue() {}
@@ -472,7 +472,7 @@ type UnsafeHeadscaleServiceServer interface {
}

func RegisterHeadscaleServiceServer(s grpc.ServiceRegistrar, srv HeadscaleServiceServer) {
-	// If the following call pancis, it indicates UnimplementedHeadscaleServiceServer was
+	// If the following call panics, it indicates UnimplementedHeadscaleServiceServer was
	// embedded by pointer and is nil. This will cause panics if an
	// unimplemented method is ever invoked, so we test this at initialization
	// time to prevent it from happening at runtime later due to I/O.
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.10
+// protoc-gen-go v1.36.11
// protoc (unknown)
// source: headscale/v1/node.proto
@@ -75,27 +75,29 @@ func (RegisterMethod) EnumDescriptor() ([]byte, []int) {
}

type Node struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
-	Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
-	MachineKey string `protobuf:"bytes,2,opt,name=machine_key,json=machineKey,proto3" json:"machine_key,omitempty"`
-	NodeKey string `protobuf:"bytes,3,opt,name=node_key,json=nodeKey,proto3" json:"node_key,omitempty"`
-	DiscoKey string `protobuf:"bytes,4,opt,name=disco_key,json=discoKey,proto3" json:"disco_key,omitempty"`
-	IpAddresses []string `protobuf:"bytes,5,rep,name=ip_addresses,json=ipAddresses,proto3" json:"ip_addresses,omitempty"`
-	Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
-	User *User `protobuf:"bytes,7,opt,name=user,proto3" json:"user,omitempty"`
-	LastSeen *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"`
-	Expiry *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=expiry,proto3" json:"expiry,omitempty"`
-	PreAuthKey *PreAuthKey `protobuf:"bytes,11,opt,name=pre_auth_key,json=preAuthKey,proto3" json:"pre_auth_key,omitempty"`
-	CreatedAt *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
-	RegisterMethod RegisterMethod `protobuf:"varint,13,opt,name=register_method,json=registerMethod,proto3,enum=headscale.v1.RegisterMethod" json:"register_method,omitempty"`
-	ForcedTags []string `protobuf:"bytes,18,rep,name=forced_tags,json=forcedTags,proto3" json:"forced_tags,omitempty"`
-	InvalidTags []string `protobuf:"bytes,19,rep,name=invalid_tags,json=invalidTags,proto3" json:"invalid_tags,omitempty"`
-	ValidTags []string `protobuf:"bytes,20,rep,name=valid_tags,json=validTags,proto3" json:"valid_tags,omitempty"`
-	GivenName string `protobuf:"bytes,21,opt,name=given_name,json=givenName,proto3" json:"given_name,omitempty"`
-	Online bool `protobuf:"varint,22,opt,name=online,proto3" json:"online,omitempty"`
-	ApprovedRoutes []string `protobuf:"bytes,23,rep,name=approved_routes,json=approvedRoutes,proto3" json:"approved_routes,omitempty"`
-	AvailableRoutes []string `protobuf:"bytes,24,rep,name=available_routes,json=availableRoutes,proto3" json:"available_routes,omitempty"`
-	SubnetRoutes []string `protobuf:"bytes,25,rep,name=subnet_routes,json=subnetRoutes,proto3" json:"subnet_routes,omitempty"`
+	state protoimpl.MessageState `protogen:"open.v1"`
+	Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+	MachineKey string `protobuf:"bytes,2,opt,name=machine_key,json=machineKey,proto3" json:"machine_key,omitempty"`
+	NodeKey string `protobuf:"bytes,3,opt,name=node_key,json=nodeKey,proto3" json:"node_key,omitempty"`
+	DiscoKey string `protobuf:"bytes,4,opt,name=disco_key,json=discoKey,proto3" json:"disco_key,omitempty"`
+	IpAddresses []string `protobuf:"bytes,5,rep,name=ip_addresses,json=ipAddresses,proto3" json:"ip_addresses,omitempty"`
+	Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+	User *User `protobuf:"bytes,7,opt,name=user,proto3" json:"user,omitempty"`
+	LastSeen *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"`
+	Expiry *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=expiry,proto3" json:"expiry,omitempty"`
+	PreAuthKey *PreAuthKey `protobuf:"bytes,11,opt,name=pre_auth_key,json=preAuthKey,proto3" json:"pre_auth_key,omitempty"`
+	CreatedAt *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
+	RegisterMethod RegisterMethod `protobuf:"varint,13,opt,name=register_method,json=registerMethod,proto3,enum=headscale.v1.RegisterMethod" json:"register_method,omitempty"`
+	// Deprecated
+	// repeated string forced_tags = 18;
+	// repeated string invalid_tags = 19;
+	// repeated string valid_tags = 20;
+	GivenName string `protobuf:"bytes,21,opt,name=given_name,json=givenName,proto3" json:"given_name,omitempty"`
+	Online bool `protobuf:"varint,22,opt,name=online,proto3" json:"online,omitempty"`
+	ApprovedRoutes []string `protobuf:"bytes,23,rep,name=approved_routes,json=approvedRoutes,proto3" json:"approved_routes,omitempty"`
+	AvailableRoutes []string `protobuf:"bytes,24,rep,name=available_routes,json=availableRoutes,proto3" json:"available_routes,omitempty"`
+	SubnetRoutes []string `protobuf:"bytes,25,rep,name=subnet_routes,json=subnetRoutes,proto3" json:"subnet_routes,omitempty"`
+	Tags []string `protobuf:"bytes,26,rep,name=tags,proto3" json:"tags,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
@@ -214,27 +216,6 @@ func (x *Node) GetRegisterMethod() RegisterMethod {
	return RegisterMethod_REGISTER_METHOD_UNSPECIFIED
}

-func (x *Node) GetForcedTags() []string {
-	if x != nil {
-		return x.ForcedTags
-	}
-	return nil
-}
-
-func (x *Node) GetInvalidTags() []string {
-	if x != nil {
-		return x.InvalidTags
-	}
-	return nil
-}
-
-func (x *Node) GetValidTags() []string {
-	if x != nil {
-		return x.ValidTags
-	}
-	return nil
-}
-
func (x *Node) GetGivenName() string {
	if x != nil {
		return x.GivenName
@@ -270,6 +251,13 @@ func (x *Node) GetSubnetRoutes() []string {
	return nil
}

+func (x *Node) GetTags() []string {
+	if x != nil {
+		return x.Tags
+	}
+	return nil
+}
+
type RegisterNodeRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
@@ -1210,7 +1198,7 @@ var File_headscale_v1_node_proto protoreflect.FileDescriptor

const file_headscale_v1_node_proto_rawDesc = "" +
	"\n" +
-	"\x17headscale/v1/node.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/user.proto\"\x98\x06\n" +
+	"\x17headscale/v1/node.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/user.proto\"\xc9\x05\n" +
	"\x04Node\x12\x0e\n" +
	"\x02id\x18\x01 \x01(\x04R\x02id\x12\x1f\n" +
	"\vmachine_key\x18\x02 \x01(\tR\n" +
@@ -1227,19 +1215,15 @@ const file_headscale_v1_node_proto_rawDesc = "" +
	"preAuthKey\x129\n" +
	"\n" +
	"created_at\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x12E\n" +
-	"\x0fregister_method\x18\r \x01(\x0e2\x1c.headscale.v1.RegisterMethodR\x0eregisterMethod\x12\x1f\n" +
-	"\vforced_tags\x18\x12 \x03(\tR\n" +
-	"forcedTags\x12!\n" +
-	"\finvalid_tags\x18\x13 \x03(\tR\vinvalidTags\x12\x1d\n" +
-	"\n" +
-	"valid_tags\x18\x14 \x03(\tR\tvalidTags\x12\x1d\n" +
+	"\x0fregister_method\x18\r \x01(\x0e2\x1c.headscale.v1.RegisterMethodR\x0eregisterMethod\x12\x1d\n" +
	"\n" +
	"given_name\x18\x15 \x01(\tR\tgivenName\x12\x16\n" +
	"\x06online\x18\x16 \x01(\bR\x06online\x12'\n" +
	"\x0fapproved_routes\x18\x17 \x03(\tR\x0eapprovedRoutes\x12)\n" +
	"\x10available_routes\x18\x18 \x03(\tR\x0favailableRoutes\x12#\n" +
-	"\rsubnet_routes\x18\x19 \x03(\tR\fsubnetRoutesJ\x04\b\t\x10\n" +
-	"J\x04\b\x0e\x10\x12\";\n" +
+	"\rsubnet_routes\x18\x19 \x03(\tR\fsubnetRoutes\x12\x12\n" +
+	"\x04tags\x18\x1a \x03(\tR\x04tagsJ\x04\b\t\x10\n" +
+	"J\x04\b\x0e\x10\x15\";\n" +
	"\x13RegisterNodeRequest\x12\x12\n" +
	"\x04user\x18\x01 \x01(\tR\x04user\x12\x10\n" +
	"\x03key\x18\x02 \x01(\tR\x03key\">\n" +
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.10
+// protoc-gen-go v1.36.11
// protoc (unknown)
// source: headscale/v1/policy.proto
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.10
+// protoc-gen-go v1.36.11
// protoc (unknown)
// source: headscale/v1/preauthkey.proto
@@ -252,8 +252,7 @@ func (x *CreatePreAuthKeyResponse) GetPreAuthKey() *PreAuthKey {

type ExpirePreAuthKeyRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
-	User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
-	Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
@@ -288,20 +287,13 @@ func (*ExpirePreAuthKeyRequest) Descriptor() ([]byte, []int) {
	return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{3}
}

-func (x *ExpirePreAuthKeyRequest) GetUser() uint64 {
+func (x *ExpirePreAuthKeyRequest) GetId() uint64 {
	if x != nil {
-		return x.User
+		return x.Id
	}
	return 0
}

-func (x *ExpirePreAuthKeyRequest) GetKey() string {
-	if x != nil {
-		return x.Key
-	}
-	return ""
-}
-
type ExpirePreAuthKeyResponse struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
@@ -340,8 +332,7 @@ func (*ExpirePreAuthKeyResponse) Descriptor() ([]byte, []int) {

type DeletePreAuthKeyRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
-	User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
-	Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
@@ -376,20 +367,13 @@ func (*DeletePreAuthKeyRequest) Descriptor() ([]byte, []int) {
	return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{5}
}

-func (x *DeletePreAuthKeyRequest) GetUser() uint64 {
+func (x *DeletePreAuthKeyRequest) GetId() uint64 {
	if x != nil {
-		return x.User
+		return x.Id
	}
	return 0
}

-func (x *DeletePreAuthKeyRequest) GetKey() string {
-	if x != nil {
-		return x.Key
-	}
-	return ""
-}
-
type DeletePreAuthKeyResponse struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
@@ -428,7 +412,6 @@ func (*DeletePreAuthKeyResponse) Descriptor() ([]byte, []int) {

type ListPreAuthKeysRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
-	User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
@@ -463,13 +446,6 @@ func (*ListPreAuthKeysRequest) Descriptor() ([]byte, []int) {
	return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{7}
}

-func (x *ListPreAuthKeysRequest) GetUser() uint64 {
-	if x != nil {
-		return x.User
-	}
-	return 0
-}
-
type ListPreAuthKeysResponse struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	PreAuthKeys []*PreAuthKey `protobuf:"bytes,1,rep,name=pre_auth_keys,json=preAuthKeys,proto3" json:"pre_auth_keys,omitempty"`
@@ -543,17 +519,14 @@ const file_headscale_v1_preauthkey_proto_rawDesc = "" +
	"\bacl_tags\x18\x05 \x03(\tR\aaclTags\"V\n" +
	"\x18CreatePreAuthKeyResponse\x12:\n" +
	"\fpre_auth_key\x18\x01 \x01(\v2\x18.headscale.v1.PreAuthKeyR\n" +
-	"preAuthKey\"?\n" +
-	"\x17ExpirePreAuthKeyRequest\x12\x12\n" +
-	"\x04user\x18\x01 \x01(\x04R\x04user\x12\x10\n" +
-	"\x03key\x18\x02 \x01(\tR\x03key\"\x1a\n" +
-	"\x18ExpirePreAuthKeyResponse\"?\n" +
-	"\x17DeletePreAuthKeyRequest\x12\x12\n" +
-	"\x04user\x18\x01 \x01(\x04R\x04user\x12\x10\n" +
-	"\x03key\x18\x02 \x01(\tR\x03key\"\x1a\n" +
-	"\x18DeletePreAuthKeyResponse\",\n" +
-	"\x16ListPreAuthKeysRequest\x12\x12\n" +
-	"\x04user\x18\x01 \x01(\x04R\x04user\"W\n" +
+	"preAuthKey\")\n" +
+	"\x17ExpirePreAuthKeyRequest\x12\x0e\n" +
+	"\x02id\x18\x01 \x01(\x04R\x02id\"\x1a\n" +
+	"\x18ExpirePreAuthKeyResponse\")\n" +
+	"\x17DeletePreAuthKeyRequest\x12\x0e\n" +
+	"\x02id\x18\x01 \x01(\x04R\x02id\"\x1a\n" +
+	"\x18DeletePreAuthKeyResponse\"\x18\n" +
+	"\x16ListPreAuthKeysRequest\"W\n" +
	"\x17ListPreAuthKeysResponse\x12<\n" +
	"\rpre_auth_keys\x18\x01 \x03(\v2\x18.headscale.v1.PreAuthKeyR\vpreAuthKeysB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.10
+// protoc-gen-go v1.36.11
// protoc (unknown)
// source: headscale/v1/user.proto
@@ -124,6 +124,13 @@
            "in": "path",
            "required": true,
            "type": "string"
          },
+         {
+           "name": "id",
+           "in": "query",
+           "required": false,
+           "type": "string",
+           "format": "uint64"
+         }
        ],
        "tags": [
@@ -566,15 +573,6 @@
            }
          }
        },
-       "parameters": [
-         {
-           "name": "user",
-           "in": "query",
-           "required": false,
-           "type": "string",
-           "format": "uint64"
-         }
-       ],
        "tags": [
          "HeadscaleService"
        ]
@@ -597,17 +595,11 @@
        },
        "parameters": [
          {
-           "name": "user",
+           "name": "id",
            "in": "query",
            "required": false,
            "type": "string",
            "format": "uint64"
-         },
-         {
-           "name": "key",
-           "in": "query",
-           "required": false,
-           "type": "string"
          }
        ],
        "tags": [
@@ -1027,6 +1019,10 @@
      "properties": {
        "prefix": {
          "type": "string"
        },
+       "id": {
+         "type": "string",
+         "format": "uint64"
+       }
      }
    },
@@ -1044,12 +1040,9 @@
    "v1ExpirePreAuthKeyRequest": {
      "type": "object",
      "properties": {
-       "user": {
+       "id": {
          "type": "string",
          "format": "uint64"
-       },
-       "key": {
-         "type": "string"
        }
      }
    },
@@ -1178,26 +1171,9 @@
        "registerMethod": {
          "$ref": "#/definitions/v1RegisterMethod"
        },
-       "forcedTags": {
-         "type": "array",
-         "items": {
-           "type": "string"
-         }
-       },
-       "invalidTags": {
-         "type": "array",
-         "items": {
-           "type": "string"
-         }
-       },
-       "validTags": {
-         "type": "array",
-         "items": {
-           "type": "string"
-         }
-       },
        "givenName": {
-         "type": "string"
+         "type": "string",
+         "title": "Deprecated\nrepeated string forced_tags = 18;\nrepeated string invalid_tags = 19;\nrepeated string valid_tags = 20;"
        },
        "online": {
          "type": "boolean"
@@ -1219,6 +1195,12 @@
          "items": {
            "type": "string"
          }
        },
+       "tags": {
+         "type": "array",
+         "items": {
+           "type": "string"
+         }
+       }
      }
    },
go.mod (202 changes)
@@ -1,58 +1,57 @@
module github.com/juanfont/headscale

-go 1.25
+go 1.25.5

require (
-	github.com/arl/statsviz v0.7.2
+	github.com/arl/statsviz v0.8.0
	github.com/cenkalti/backoff/v5 v5.0.3
	github.com/chasefleming/elem-go v0.31.0
	github.com/coder/websocket v1.8.14
-	github.com/coreos/go-oidc/v3 v3.16.0
+	github.com/coreos/go-oidc/v3 v3.17.0
	github.com/creachadair/command v0.2.0
	github.com/creachadair/flax v0.0.5
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
-	github.com/docker/docker v28.5.1+incompatible
+	github.com/docker/docker v28.5.2+incompatible
	github.com/fsnotify/fsnotify v1.9.0
	github.com/glebarez/sqlite v1.11.0
	github.com/go-gormigrate/gormigrate/v2 v2.1.5
-	github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced
-	github.com/gofrs/uuid/v5 v5.3.2
+	github.com/go-json-experiment/json v0.0.0-20251027170946-4849db3c2f7e
+	github.com/gofrs/uuid/v5 v5.4.0
	github.com/google/go-cmp v0.7.0
	github.com/gorilla/mux v1.8.1
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7
	github.com/jagottsicher/termcolor v1.0.2
	github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25
	github.com/ory/dockertest/v3 v3.12.0
	github.com/philip-bui/grpc-zerolog v1.0.1
	github.com/pkg/profile v1.7.0
	github.com/prometheus/client_golang v1.23.2
-	github.com/prometheus/common v0.66.1
+	github.com/prometheus/common v0.67.5
	github.com/pterm/pterm v0.12.82
-	github.com/puzpuzpuz/xsync/v4 v4.2.0
+	github.com/puzpuzpuz/xsync/v4 v4.4.0
	github.com/rs/zerolog v1.34.0
	github.com/samber/lo v1.52.0
	github.com/sasha-s/go-deadlock v0.3.6
-	github.com/spf13/cobra v1.10.1
+	github.com/spf13/cobra v1.10.2
	github.com/spf13/viper v1.21.0
	github.com/stretchr/testify v1.11.1
-	github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33
-	github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993
-	github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97
+	github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a
+	github.com/tailscale/squibble v0.0.0-20251104223530-a961feffb67f
+	github.com/tailscale/tailsql v0.0.0-20260105194658-001575c3ca09
	github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
	go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
-	golang.org/x/crypto v0.43.0
-	golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b
-	golang.org/x/net v0.46.0
-	golang.org/x/oauth2 v0.32.0
-	golang.org/x/sync v0.17.0
-	google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4
-	google.golang.org/grpc v1.75.1
-	google.golang.org/protobuf v1.36.10
-	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
+	golang.org/x/crypto v0.47.0
+	golang.org/x/exp v0.0.0-20260112195511-716be5621a96
+	golang.org/x/net v0.49.0
+	golang.org/x/oauth2 v0.34.0
+	golang.org/x/sync v0.19.0
+	google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20
+	google.golang.org/grpc v1.78.0
+	google.golang.org/protobuf v1.36.11
	gopkg.in/yaml.v3 v3.0.1
	gorm.io/driver/postgres v1.6.0
-	gorm.io/gorm v1.31.0
-	tailscale.com v1.86.5
+	gorm.io/gorm v1.31.1
+	tailscale.com v1.94.1
	zgo.at/zcache/v2 v2.4.1
	zombiezen.com/go/postgrestest v1.0.1
)
@@ -75,12 +74,20 @@ require (
// together, e.g:
// go get modernc.org/libc@v1.55.3 modernc.org/sqlite@v1.33.1
require (
-	modernc.org/libc v1.66.10 // indirect
+	modernc.org/libc v1.67.6 // indirect
	modernc.org/mathutil v1.7.1 // indirect
	modernc.org/memory v1.11.0 // indirect
-	modernc.org/sqlite v1.39.1
+	modernc.org/sqlite v1.44.3
)

+// NOTE: gvisor must be updated in lockstep with
+// tailscale.com. The version used here should match
+// the version required by the tailscale.com dependency.
+// To find the correct version, check tailscale.com's
+// go.mod file for the gvisor.dev/gvisor version:
+// https://github.com/tailscale/tailscale/blob/main/go.mod
+require gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 // indirect
+
require (
	atomicgo.dev/cursor v0.2.0 // indirect
	atomicgo.dev/keyboard v0.2.9 // indirect
@@ -91,147 +98,140 @@ require (
	github.com/Microsoft/go-winio v0.6.2 // indirect
	github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
	github.com/akutz/memconn v0.1.0 // indirect
-	github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
-	github.com/aws/aws-sdk-go-v2 v1.36.0 // indirect
-	github.com/aws/aws-sdk-go-v2/config v1.29.5 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.17.58 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 // indirect
-	github.com/aws/smithy-go v1.22.2 // indirect
+	github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e // indirect
+	github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect
+	github.com/aws/aws-sdk-go-v2/config v1.32.7 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
+	github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
+	github.com/aws/smithy-go v1.24.0 // indirect
	github.com/axiomhq/hyperloglog v0.2.6 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
-	github.com/clipperhouse/uax29/v2 v2.2.0 // indirect
+	github.com/clipperhouse/stringish v0.1.1 // indirect
+	github.com/clipperhouse/uax29/v2 v2.5.0 // indirect
	github.com/containerd/console v1.0.5 // indirect
	github.com/containerd/continuity v0.4.5 // indirect
-	github.com/containerd/errdefs v0.3.0 // indirect
+	github.com/containerd/errdefs v1.0.0 // indirect
	github.com/containerd/errdefs/pkg v0.3.0 // indirect
	github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
-	github.com/creachadair/mds v0.25.10 // indirect
-	github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 // indirect
-	github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect
+	github.com/creachadair/mds v0.25.15 // indirect
+	github.com/creachadair/msync v0.8.2 // indirect
+	github.com/dblohm7/wingoes v0.0.0-20250822163801-6d8e6105c62d // indirect
	github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 // indirect
	github.com/distribution/reference v0.6.0 // indirect
-	github.com/docker/cli v28.5.1+incompatible // indirect
+	github.com/docker/cli v29.2.1+incompatible // indirect
	github.com/docker/go-connections v0.6.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/felixge/fgprof v0.9.5 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
-	github.com/gaissmai/bart v0.18.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.9.0 // indirect
+	github.com/gaissmai/bart v0.26.1 // indirect
	github.com/glebarez/go-sqlite v1.22.0 // indirect
	github.com/go-jose/go-jose/v3 v3.0.4 // indirect
	github.com/go-jose/go-jose/v4 v4.1.3 // indirect
	github.com/go-logr/logr v1.4.3 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-ole/go-ole v1.3.0 // indirect
-	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
-	github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
-	github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
+	github.com/godbus/dbus/v5 v5.2.2 // indirect
+	github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
+	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/google/btree v1.1.2 // indirect
+	github.com/google/btree v1.1.3 // indirect
	github.com/google/go-github v17.0.0+incompatible // indirect
-	github.com/google/go-querystring v1.1.0 // indirect
-	github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect
-	github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect
+	github.com/google/go-querystring v1.2.0 // indirect
+	github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef // indirect
	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/gookit/color v1.6.0 // indirect
-	github.com/gorilla/websocket v1.5.3 // indirect
-	github.com/hashicorp/go-version v1.7.0 // indirect
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
+	github.com/hashicorp/go-version v1.8.0 // indirect
	github.com/hdevalence/ed25519consensus v0.2.0 // indirect
-	github.com/illarion/gonotify/v3 v3.0.2 // indirect
+	github.com/huin/goupnp v1.3.0 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/jackc/pgpassfile v1.0.0 // indirect
	github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
-	github.com/jackc/pgx/v5 v5.7.6 // indirect
+	github.com/jackc/pgx/v5 v5.8.0 // indirect
	github.com/jackc/puddle/v2 v2.2.2 // indirect
	github.com/jinzhu/inflection v1.0.0 // indirect
	github.com/jinzhu/now v1.1.5 // indirect
	github.com/jmespath/go-jmespath v0.4.0 // indirect
-	github.com/jsimonetti/rtnetlink v1.4.1 // indirect
-	github.com/klauspost/compress v1.18.1 // indirect
-	github.com/kr/pretty v0.3.1 // indirect
-	github.com/kr/text v0.2.0 // indirect
-	github.com/lib/pq v1.10.9 // indirect
+	github.com/jsimonetti/rtnetlink v1.4.2 // indirect
+	github.com/kamstrup/intmap v0.5.2 // indirect
+	github.com/klauspost/compress v1.18.3 // indirect
+	github.com/lib/pq v1.11.1 // indirect
	github.com/lithammer/fuzzysearch v1.1.8 // indirect
	github.com/mattn/go-colorable v0.1.14 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/mattn/go-runewidth v0.0.19 // indirect
	github.com/mdlayher/genetlink v1.3.2 // indirect
-	github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 // indirect
-	github.com/mdlayher/sdnotify v1.0.0 // indirect
-	github.com/mdlayher/socket v0.5.0 // indirect
-	github.com/miekg/dns v1.1.58 // indirect
+	github.com/mdlayher/netlink v1.8.0 // indirect
+	github.com/mdlayher/socket v0.5.1 // indirect
	github.com/mitchellh/go-ps v1.0.0 // indirect
	github.com/moby/docker-image-spec v1.3.1 // indirect
	github.com/moby/moby/api v1.53.0 // indirect
	github.com/moby/moby/client v0.2.2 // indirect
	github.com/moby/sys/atomicwriter v0.1.0 // indirect
	github.com/moby/sys/user v0.4.0 // indirect
	github.com/moby/term v0.5.2 // indirect
-	github.com/morikuni/aec v1.0.0 // indirect
+	github.com/morikuni/aec v1.1.0 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/ncruces/go-strftime v1.0.0 // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/image-spec v1.1.1 // indirect
	github.com/opencontainers/runc v1.3.2 // indirect
	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
-	github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 // indirect
+	github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741 // indirect
	github.com/pires/go-proxyproto v0.9.2 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus-community/pro-bing v0.4.0 // indirect
+	github.com/prometheus-community/pro-bing v0.7.0 // indirect
	github.com/prometheus/client_model v0.6.2 // indirect
-	github.com/prometheus/procfs v0.16.1 // indirect
+	github.com/prometheus/procfs v0.19.2 // indirect
	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
	github.com/rogpeppe/go-internal v1.14.1 // indirect
-	github.com/safchain/ethtool v0.3.0 // indirect
+	github.com/safchain/ethtool v0.7.0 // indirect
	github.com/sagikazarmark/locafero v0.12.0 // indirect
-	github.com/sirupsen/logrus v1.9.3 // indirect
+	github.com/sirupsen/logrus v1.9.4 // indirect
	github.com/spf13/afero v1.15.0 // indirect
	github.com/spf13/cast v1.10.0 // indirect
	github.com/spf13/pflag v1.0.10 // indirect
	github.com/subosito/gotenv v1.6.0 // indirect
	github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect
	github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect
	github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 // indirect
	github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 // indirect
	github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc // indirect
-	github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d // indirect
-	github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 // indirect
+	github.com/tailscale/setec v0.0.0-20260115174028-19d190c5556d // indirect
+	github.com/tailscale/web-client-prebuilt v0.0.0-20251127225136-f19339b67368 // indirect
	github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da // indirect
	github.com/vishvananda/netns v0.0.5 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
-	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
-	go.opentelemetry.io/otel v1.37.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect
-	go.opentelemetry.io/otel/metric v1.37.0 // indirect
-	go.opentelemetry.io/otel/trace v1.37.0 // indirect
-	go.yaml.in/yaml/v2 v2.4.2 // indirect
+	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect
+	go.opentelemetry.io/otel v1.40.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect
+	go.opentelemetry.io/otel/metric v1.40.0 // indirect
+	go.opentelemetry.io/otel/trace v1.40.0 // indirect
+	go.yaml.in/yaml/v2 v2.4.3 // indirect
	go.yaml.in/yaml/v3 v3.0.4 // indirect
	go4.org/mem v0.0.0-20240501181205-ae6ca9944745 // indirect
-	golang.org/x/mod v0.29.0 // indirect
-	golang.org/x/sys v0.37.0 // indirect
-	golang.org/x/term v0.36.0 // indirect
-	golang.org/x/text v0.30.0 // indirect
-	golang.org/x/time v0.11.0 // indirect
-	golang.org/x/tools v0.38.0 // indirect
+	golang.org/x/mod v0.32.0 // indirect
+	golang.org/x/sys v0.40.0 // indirect
+	golang.org/x/term v0.39.0 // indirect
+	golang.org/x/text v0.33.0 // indirect
+	golang.org/x/time v0.14.0 // indirect
+	golang.org/x/tools v0.41.0 // indirect
	golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
	golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect
-	gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 // indirect
)

tool (
go.sum (438 changes)
@@ -16,8 +16,8 @@ filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc=
filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
-github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/MarvinJWendt/testza v0.1.0/go.mod h1:7AxNvlfeHP7Z/hDQ5JtE3OKYT3XFUeLCDE2DQninSqs=
github.com/MarvinJWendt/testza v0.2.1/go.mod h1:God7bhG8n6uQxwdScay+gjm9/LnO4D3kkcZX4hv9Rp8=
github.com/MarvinJWendt/testza v0.2.8/go.mod h1:nwIcjmr0Zz+Rcwfh3/4UhBp7ePKVhuBExvZqnKYWlII=
@@ -33,51 +33,55 @@ github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEV
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A=
github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw=
-github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
-github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
+github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI=
+github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
-github.com/arl/statsviz v0.7.2 h1:xnuIfRiXE4kvxEcfGL+IE3mKH1BXNHuE+eJELIh7oOA=
-github.com/arl/statsviz v0.7.2/go.mod h1:XlrbiT7xYT03xaW9JMMfD8KFUhBOESJwfyNJu83PbB0=
+github.com/arl/statsviz v0.8.0 h1:O6GjjVxEDxcByAucOSl29HaGYLXsuwA3ujJw8H9E7/U=
+github.com/arl/statsviz v0.8.0/go.mod h1:XlrbiT7xYT03xaW9JMMfD8KFUhBOESJwfyNJu83PbB0=
github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=
-github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk=
-github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg=
-github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k=
-github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 h1:8IwBjuLdqIO1dGB+dZ9zJEl8wzY3bVYxcs0Xyu/Lsc0=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31/go.mod h1:8tMBcuVjL4kP/ECEIWTCWtwV2kj6+ouEKl4cqR4iWLw=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 h1:siiQ+jummya9OLPDEyHVb2dLW4aOMe22FGDd0sAfuSw=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5/go.mod h1:iHVx2J9pWzITdP5MJY6qWfG34TfD9EA+Qi3eV6qQCXw=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 h1:tkVNm99nkJnFo1H9IIQb5QkCiPcvCDn3Pos+IeTbGRA=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12/go.mod h1:dIVlquSPUMqEJtx2/W17SM2SuESRaVEhEV9alcMqxjw=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 h1:JBod0SnNqcWQ0+uAyzeRFG1zCHotW8DukumYYyNy0zo=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3/go.mod h1:FHSHmyEUkzRbaFFqqm6bkLAOQHgqhsLmfCahvCBMiyA=
+github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
+github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
+github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
+github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 h1:CjMzUs78RDDv4ROu3JnJn/Ig1r6ZD7/T2DXLLRpejic=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16/go.mod h1:uVW4OLBqbJXSHJYA9svT9BluSvvwbzLQ2Crf6UPzR3c=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 h1:DIBqIrJ7hv+e4CmIk2z3pyKT+3B6qVMgRsawHiR3qso=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7/go.mod h1:vLm00xmBke75UmpNvOcZQ/Q30ZFjbczeLFqGx5urmGo=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 h1:NSbvS17MlI2lurYgXnCOLvCFX38sBW4eiVER7+kkgsU=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16/go.mod h1:SwT8Tmqd4sA6G1qaGdzWCJN99bUmPGHfRwwq3G5Qb+A=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.93.2 h1:U3ygWUhCpiSPYSHOrRhb3gOl9T5Y3kB8k5Vjs//57bE=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.93.2/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0 h1:IOdss+igJDFdic9w3WKwxGCmHqUxydvIhJOm9LJ32Dk=
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM=
-github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok=
-github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U=
-github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc=
-github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w=
-github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
-github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
+github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
+github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/axiomhq/hyperloglog v0.2.6 h1:sRhvvF3RIXWQgAXaTphLp4yJiX4S0IN3MWTaAgZoRJw=
github.com/axiomhq/hyperloglog v0.2.6/go.mod h1:YjX/dQqCR/7QYX0g8mu8UZAjpIenz1FKM71UEsjFoTo=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
@@ -99,8 +103,10 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/cilium/ebpf v0.17.3 h1:FnP4r16PWYSE4ux6zN+//jMcW4nMVRvuTLVTvCjyyjg=
github.com/cilium/ebpf v0.17.3/go.mod h1:G5EDHij8yiLzaqn0WjyfJHvRa+3aDlReIaLVRMvOyJk=
-github.com/clipperhouse/uax29/v2 v2.2.0 h1:ChwIKnQN3kcZteTXMgb1wztSgaU+ZemkgWdohwgs8tY=
-github.com/clipperhouse/uax29/v2 v2.2.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM=
+github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=
+github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
+github.com/clipperhouse/uax29/v2 v2.5.0 h1:x7T0T4eTHDONxFJsL94uKNKPHrclyFI0lm7+w94cO8U=
+github.com/clipperhouse/uax29/v2 v2.5.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g=
github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
@@ -108,45 +114,48 @@ github.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/q
github.com/containerd/console v1.0.5/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=
github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
-github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
-github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
-github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow=
-github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
+github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
+github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creachadair/command v0.2.0 h1:qTA9cMMhZePAxFoNdnk6F6nn94s1qPndIg9hJbqI9cA=
github.com/creachadair/command v0.2.0/go.mod h1:j+Ar+uYnFsHpkMeV9kGj6lJ45y9u2xqtg8FYy6cm+0o=
github.com/creachadair/flax v0.0.5 h1:zt+CRuXQASxwQ68e9GHAOnEgAU29nF0zYMHOCrL5wzE=
github.com/creachadair/flax v0.0.5/go.mod h1:F1PML0JZLXSNDMNiRGK2yjm5f+L9QCHchyHBldFymj8=
-github.com/creachadair/mds v0.25.10 h1:9k9JB35D1xhOCFl0liBhagBBp8fWWkKZrA7UXsfoHtA=
-github.com/creachadair/mds v0.25.10/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs=
+github.com/creachadair/mds v0.25.15 h1:i8CUqtfgbCqbvZ++L7lm8No3cOeic9YKF4vHEvEoj+Y=
+github.com/creachadair/mds v0.25.15/go.mod h1:XtMfRW15sjd1iOi1Z1k+dq0pRsR5xPbulpoTrpyhk8w=
+github.com/creachadair/msync v0.8.2 h1:ujvc/SVJPn+bFwmjUHucXNTTn3opVe2YbQ46mBCnP08=
+github.com/creachadair/msync v0.8.2/go.mod h1:LzxqD9kfIl/O3DczkwOgJplLPqwrTbIhINlf9bHIsEY=
github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc=
github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0=
-github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
+github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
+github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 h1:vrC07UZcgPzu/OjWsmQKMGg3LoPSz9jh/pQXIrHjUj4=
-github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0/go.mod h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ=
+github.com/dblohm7/wingoes v0.0.0-20250822163801-6d8e6105c62d h1:QRKpU+9ZBDs62LyBfwhZkJdB5DJX2Sm3p4kUh7l1aA0=
+github.com/dblohm7/wingoes v0.0.0-20250822163801-6d8e6105c62d/go.mod h1:SUxUaAK/0UG5lYyZR1L1nC4AaYYvSSYTWQSH3FPcxKU=
github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 h1:ucRHb6/lvW/+mTEIGbvhcYU3S8+uSNkuMjx/qZFfhtM=
github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
-github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q=
-github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
-github.com/docker/cli v28.5.1+incompatible h1:ESutzBALAD6qyCLqbQSEf1a/U8Ybms5agw59yGVc+yY=
-github.com/docker/cli v28.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM=
-github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/cli v29.2.1+incompatible h1:n3Jt0QVCN65eiVBoUTZQM9mcQICCJt3akW4pKAbKdJg=
+github.com/docker/cli v29.2.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
+github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -162,10 +171,10 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
-github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
-github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
-github.com/gaissmai/bart v0.18.0 h1:jQLBT/RduJu0pv/tLwXE+xKPgtWJejbxuXAR+wLJafo=
-github.com/gaissmai/bart v0.18.0/go.mod h1:JJzMAhNF5Rjo4SF4jWBrANuJfqY+FvsFhW7t1UZJ+XY=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
+github.com/gaissmai/bart v0.26.1 h1:+w4rnLGNlA2GDVn382Tfe3jOsK5vOr5n4KmigJ9lbTo=
+github.com/gaissmai/bart v0.26.1/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c=
github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I=
github.com/github/fakeca v0.1.0/go.mod h1:+bormgoGMMuamOscx7N91aOuUST7wdaJ2rNjeohylyo=
github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=
@@ -178,8 +187,8 @@ github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFS
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
-github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced h1:Q311OHjMh/u5E2TITc++WlTP5We0xNseRMkHDyvhW7I=
-github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
+github.com/go-json-experiment/json v0.0.0-20251027170946-4849db3c2f7e h1:Lf/gRkoycfOBPa42vU2bbgPurFong6zXeFtPoxholzU=
+github.com/go-json-experiment/json v0.0.0-20251027170946-4849db3c2f7e/go.mod h1:uNVvRXArCGbZ508SxYYTC5v1JWoz2voff5pm25jU1Ok=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
@@ -189,42 +198,42 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
|
||||
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
|
||||
github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 h1:cf60tHxREO3g1nroKr2osU3JWZsJzkfi7rEg+oAB0Lo=
|
||||
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737/go.mod h1:MIS0jDzbU/vuM9MC4YnBITCv+RYuTRq8dJzmCrFsK9g=
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg=
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU=
|
||||
github.com/gofrs/uuid/v5 v5.3.2 h1:2jfO8j3XgSwlz/wHqemAEugfnTlikAYHhnqQ8Xh4fE0=
|
||||
github.com/gofrs/uuid/v5 v5.3.2/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/godbus/dbus/v5 v5.2.2 h1:TUR3TgtSVDmjiXOgAAyaZbYmIeP3DPkld3jgKGV8mXQ=
|
||||
github.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c=
|
||||
github.com/gofrs/uuid/v5 v5.4.0 h1:EfbpCTjqMuGyq5ZJwxqzn3Cbr2d0rUZU7v5ycAk/e/0=
|
||||
github.com/gofrs/uuid/v5 v5.4.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
|
||||
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
|
||||
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
|
||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||
github.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0=
|
||||
github.com/google/go-querystring v1.2.0/go.mod h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU=
|
||||
github.com/google/go-tpm v0.9.4 h1:awZRf9FwOeTunQmHoDYSHJps3ie6f1UlhS1fOdPEt1I=
|
||||
github.com/google/go-tpm v0.9.4/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI=
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4=
|
||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d h1:KJIErDwbSHjnp/SGzE5ed8Aol7JsKiI5X7yWKAtzhM0=
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
|
||||
github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef h1:xpF9fUHpoIrrjX24DURVKiwHcFpw19ndIs+FwTSMbno=
|
||||
github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
@@ -237,14 +246,19 @@ github.com/gookit/color v1.6.0 h1:JjJXBTk1ETNyqyilJhkTXJYYigHG24TM9Xa2M1xAhRA=
|
||||
github.com/gookit/color v1.6.0/go.mod h1:9ACFc7/1IpHGBW8RwuDm/0YEnhg3dwwXpoMsmtyHfjs=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
|
||||
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
|
||||
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
|
||||
github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
|
||||
github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU=
|
||||
github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo=
|
||||
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
|
||||
github.com/illarion/gonotify/v3 v3.0.2 h1:O7S6vcopHexutmpObkeWsnzMJt/r1hONIEogeVNmJMk=
|
||||
@@ -257,8 +271,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo=
github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jagottsicher/termcolor v1.0.2 h1:fo0c51pQSuLBN1+yVX2ZE+hE+P7ULb/TY8eRowJnrsM=
@@ -271,13 +285,13 @@ github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jsimonetti/rtnetlink v1.4.1 h1:JfD4jthWBqZMEffc5RjgmlzpYttAVw1sdnmiNaPO3hE=
github.com/jsimonetti/rtnetlink v1.4.1/go.mod h1:xJjT7t59UIZ62GLZbv6PLLo8VFrostJMPBAheR6OM8w=
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
github.com/jsimonetti/rtnetlink v1.4.2 h1:Df9w9TZ3npHTyDn0Ev9e1uzmN2odmXd0QX+J5GTEn90=
github.com/jsimonetti/rtnetlink v1.4.2/go.mod h1:92s6LJdE+1iOrw+F2/RO7LYI2Qd8pPpFNNUYW06gcoM=
github.com/kamstrup/intmap v0.5.2 h1:qnwBm1mh4XAnW9W9Ue9tZtTff8pS6+s6iKF6JRIV2Dk=
github.com/kamstrup/intmap v0.5.2/go.mod h1:gWUVWHKzWj8xpJVFf5GC0O26bWmv3GqdnIX/LMT6Aq4=
github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=
github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
@@ -288,7 +302,6 @@ github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryy
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -299,8 +312,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.11.1 h1:wuChtj2hfsGmmx3nf1m7xC2XpK6OtelS2shMY+bGMtI=
github.com/lib/pq v1.11.1/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA=
github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4=
github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -316,18 +329,22 @@ github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byF
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw=
github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o=
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg=
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o=
github.com/mdlayher/netlink v1.8.0 h1:e7XNIYJKD7hUct3Px04RuIGJbBxy1/c4nX7D5YyvvlM=
github.com/mdlayher/netlink v1.8.0/go.mod h1:UhgKXUlDQhzb09DrCl2GuRNEglHmhYoWAHid9HK3594=
github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c=
github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE=
github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI=
github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI=
github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos=
github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ=
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/moby/api v1.53.0 h1:PihqG1ncw4W+8mZs69jlwGXdaYBeb5brF6BL7mPIS/w=
github.com/moby/moby/api v1.53.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc=
github.com/moby/moby/client v0.2.2 h1:Pt4hRMCAIlyjL3cr8M5TrXCwKzguebPAc2do2ur7dEM=
github.com/moby/moby/client v0.2.2/go.mod h1:2EkIPVNCqR05CMIzL1mfA07t0HvVUUOl85pasRz/GmQ=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
@@ -336,8 +353,8 @@ github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ=
github.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
@@ -358,13 +375,14 @@ github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXR
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 h1:QTvNkZ5ylY0PGgA+Lih+GdboMLY/G9SEGLMEGVjTVA4=
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741 h1:KPpdlQLZcHfTMQRi6bFQ7ogNO0ltFT4PmtwTLW4W+14=
github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/philip-bui/grpc-zerolog v1.0.1 h1:EMacvLRUd2O1K0eWod27ZP5CY1iTNkhBDLSN+Q4JEvA=
github.com/philip-bui/grpc-zerolog v1.0.1/go.mod h1:qXbiq/2X4ZUMMshsqlWyTHOcw7ns+GZmlqZZN05ZHcQ=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pires/go-proxyproto v0.9.2 h1:H1UdHn695zUVVmB0lQ354lOWHOy6TZSpzBl3tgN0s1U=
github.com/pires/go-proxyproto v0.9.2/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
@@ -374,16 +392,16 @@ github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Q
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4=
github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4=
github.com/prometheus-community/pro-bing v0.7.0 h1:KFYFbxC2f2Fp6c+TyxbCOEarf7rbnzr9Gw8eIb0RfZA=
github.com/prometheus-community/pro-bing v0.7.0/go.mod h1:Moob9dvlY50Bfq6i88xIwfyw7xLFHH69LUgx9n5zqCE=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
github.com/pterm/pterm v0.12.27/go.mod h1:PhQ89w4i95rhgE+xedAoqous6K9X+r6aSOI2eFF7DZI=
github.com/pterm/pterm v0.12.29/go.mod h1:WI3qxgvoQFFGKGjGnJR849gU0TsEOvKn5Q8LlY1U7lg=
github.com/pterm/pterm v0.12.30/go.mod h1:MOqLIyMOgmTDz9yorcYbcw+HsgoZo3BQfg2wtl3HEFE=
@@ -393,20 +411,19 @@ github.com/pterm/pterm v0.12.36/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5b
github.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkGTYf8s=
github.com/pterm/pterm v0.12.82 h1:+D9wYhCaeaK0FIQoZtqbNQuNpe2lB2tajKKsTd5paVQ=
github.com/pterm/pterm v0.12.82/go.mod h1:TyuyrPjnxfwP+ccJdBTeWHtd/e0ybQHkOS/TakajZCw=
github.com/puzpuzpuz/xsync/v4 v4.2.0 h1:dlxm77dZj2c3rxq0/XNvvUKISAmovoXF4a4qM6Wvkr0=
github.com/puzpuzpuz/xsync/v4 v4.2.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/puzpuzpuz/xsync/v4 v4.4.0 h1:vlSN6/CkEY0pY8KaB0yqo/pCLZvp9nhdbBdjipT4gWo=
github.com/puzpuzpuz/xsync/v4 v4.4.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0=
github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs=
github.com/safchain/ethtool v0.7.0 h1:rlJzfDetsVvT61uz8x1YIcFn12akMfuPulHtZjtb7Is=
github.com/safchain/ethtool v0.7.0/go.mod h1:MenQKEjXdfkjD3mp2QdCk8B/hwvkrlOTm/FD4gTpFxQ=
github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=
github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=
github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw=
@@ -416,14 +433,14 @@ github.com/sasha-s/go-deadlock v0.3.6/go.mod h1:CUqNyyvMxTyjFqDT7MRg9mb4Dv/btmGT
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -449,22 +466,20 @@ github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg=
github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 h1:SRL6irQkKGQKKLzvQP/ke/2ZuB7Py5+XuqtOgSj+iMM=
github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ=
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio=
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8=
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33 h1:idh63uw+gsG05HwjZsAENCG4KZfyvjK03bpjxa5qRRk=
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo=
github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a h1:a6TNDN9CgG+cYjaeN8l2mc4kSz2iMiCDQxPEyltUV/I=
github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo=
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU=
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0=
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA=
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc=
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d h1:mnqtPWYyvNiPU9l9tzO2YbHXU/xV664XthZYA26lOiE=
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d/go.mod h1:9BzmlFc3OLqLzLTF/5AY+BMs+clxMqyhSGzgXIm8mNI=
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993 h1:FyiiAvDAxpB0DrW2GW3KOVfi3YFOtsQUEeFWbf55JJU=
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993/go.mod h1:xJkMmR3t+thnUQhA3Q4m2VSlS5pcOq+CIjmU/xfKKx4=
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97 h1:JJkDnrAhHvOCttk8z9xeZzcDlzzkRA7+Duxj9cwOyxk=
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97/go.mod h1:9jS8HxwsP2fU4ESZ7DZL+fpH/U66EVlVMzdgznH12RM=
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14=
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
github.com/tailscale/setec v0.0.0-20260115174028-19d190c5556d h1:N+TtzIaGYREbLbKZB0WU0vVnMSfaqUkSf3qMEi03hwE=
github.com/tailscale/setec v0.0.0-20260115174028-19d190c5556d/go.mod h1:6NU8H/GLPVX2TnXAY1duyy9ylLaHwFpr0X93UPiYmNI=
github.com/tailscale/squibble v0.0.0-20251104223530-a961feffb67f h1:CL6gu95Y1o2ko4XiWPvWkJka0QmQWcUyPywWVWDPQbQ=
github.com/tailscale/squibble v0.0.0-20251104223530-a961feffb67f/go.mod h1:xJkMmR3t+thnUQhA3Q4m2VSlS5pcOq+CIjmU/xfKKx4=
github.com/tailscale/tailsql v0.0.0-20260105194658-001575c3ca09 h1:Fc9lE2cDYJbBLpCqnVmoLdf7McPqoHZiDxDPPpkJM04=
github.com/tailscale/tailsql v0.0.0-20260105194658-001575c3ca09/go.mod h1:QMNhC4XGFiXKngHVLXE+ERDmQoH0s5fD7AUxupykocQ=
github.com/tailscale/web-client-prebuilt v0.0.0-20251127225136-f19339b67368 h1:0tpDdAj9sSfSZg4gMwNTdqMP592sBrq2Sm0w6ipnh7k=
github.com/tailscale/web-client-prebuilt v0.0.0-20251127225136-f19339b67368/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y=
github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da h1:jVRUZPRs9sqyKlYHHzHjAqKN+6e/Vog6NpHYeNPJqOw=
@@ -475,13 +490,12 @@ github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA=
github.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk=
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e h1:IWllFTiDjjLIf2oeKxpIUmtiDV5sn71VgeQgg6vcE7k=
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e/go.mod h1:d7u6HkTYKSv5m6MCKkOQlHwaShTMl3HjqSGW3XtVhXM=
github.com/tink-crypto/tink-go/v2 v2.1.0 h1:QXFBguwMwTIaU17EgZpEJWsUSc60b1BAGTzBIoMdmok=
github.com/tink-crypto/tink-go/v2 v2.1.0/go.mod h1:y1TnYFt1i2eZVfx4OGc+C+EMp4CoKWAw2VSEuoicHHI=
github.com/tink-crypto/tink-go/v2 v2.6.0 h1:+KHNBHhWH33Vn+igZWcsgdEPUxKwBMEe0QC60t388v4=
github.com/tink-crypto/tink-go/v2 v2.6.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8=
github.com/u-root/u-root v0.14.0 h1:Ka4T10EEML7dQ5XDvO9c3MBN8z4nuSnGjcd1jmU2ivg=
github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1WMluqE=
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM=
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
@@ -497,30 +511,30 @@ github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1z
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0=
go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40=
go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek=
@@ -530,36 +544,34 @@ go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/W
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b h1:18qgiDvlvH7kk8Ioa8Ov+K6xCi0GMvmGfGW0sgd/SYA=
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8=
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w=
golang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -570,18 +582,16 @@ golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -589,77 +599,75 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU=
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 h1:i8QOKZfYg6AbGVZzUAY3LrNWCKF8O6zFisU9Wl9RER4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI=
google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 h1:7ei4lp52gK1uSejlA8AZl5AJjeLUOHBQscRQZUgAcu0=
google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20/go.mod h1:ZdbssH/1SOVnjnDlXzxDHK2MCidiqXtbYccJNzNYPEE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 h1:Jr5R2J6F6qWyzINc+4AM8t5pfUz6beZpHp678GNrMbE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
gorm.io/gorm v1.31.0 h1:0VlycGreVhK7RF/Bwt51Fk8v0xLiiiFdbGDPIZQ7mJY=
gorm.io/gorm v1.31.0/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg=
gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k=
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM=
honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 h1:5SXjd4ET5dYijLaf0O3aOenC0Z4ZafIWSpjUzsQaNho=
honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0/go.mod h1:EPDDhEZqVHhWuPI5zPAsjU0U7v9xNIWjoOVyZ5ZcniQ=
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
modernc.org/cc/v4 v4.26.5 h1:xM3bX7Mve6G8K8b+T11ReenJOT+BmVqQj0FY5T4+5Y4=
modernc.org/cc/v4 v4.26.5/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.28.1 h1:wPKYn5EC/mYTqBO373jKjvX2n+3+aK7+sICCv4Fjy1A=
modernc.org/ccgo/v4 v4.28.1/go.mod h1:uD+4RnfrVgE6ec9NGguUNdhqzNIeeomeXf6CL0GTE5Q=
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=
modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
modernc.org/libc v1.66.10 h1:yZkb3YeLx4oynyR+iUsXsybsX4Ubx7MQlSYEw4yj59A=
modernc.org/libc v1.66.10/go.mod h1:8vGSEwvoUoltr4dlywvHqjtAqHBaw0j1jI7iFBTAr2I=
modernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI=
modernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE=
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
@@ -668,16 +676,18 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.39.1 h1:H+/wGFzuSCIEVCvXYVHX5RQglwhMOvtHSv+VtidL2r4=
modernc.org/sqlite v1.39.1/go.mod h1:9fjQZ0mB1LLP0GYrp39oOJXx/I2sxEnZtzCmEQIKvGE=
modernc.org/sqlite v1.44.3 h1:+39JvV/HWMcYslAwRxHb8067w+2zowvFOUrOWIy9PjY=
modernc.org/sqlite v1.44.3/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk=
pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
tailscale.com v1.86.5 h1:yBtWFjuLYDmxVnfnvPbZNZcKADCYgNfMd0rUAOA9XCs=
tailscale.com v1.86.5/go.mod h1:Lm8dnzU2i/Emw15r6sl3FRNp/liSQ/nYw6ZSQvIdZ1M=
tailscale.com v1.94.1 h1:0dAst/ozTuFkgmxZULc3oNwR9+qPIt5ucvzH7kaM0Jw=
tailscale.com v1.94.1/go.mod h1:gLnVrEOP32GWvroaAHHGhjSGMPJ1i4DvqNwEg+Yuov4=
zgo.at/zcache/v2 v2.4.1 h1:Dfjoi8yI0Uq7NCc4lo2kaQJJmp9Mijo21gef+oJstbY=
zgo.at/zcache/v2 v2.4.1/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
zombiezen.com/go/postgrestest v1.0.1 h1:aXoADQAJmZDU3+xilYVut0pHhgc0sF8ZspPW9gFNwP4=
|
||||
@@ -247,9 +247,9 @@ func nodeToRegisterResponse(node types.NodeView) *tailcfg.RegisterResponse {
    if node.IsTagged() {
        resp.User = types.TaggedDevices.View().TailscaleUser()
        resp.Login = types.TaggedDevices.View().TailscaleLogin()
    } else if node.UserView().Valid() {
        resp.User = node.UserView().TailscaleUser()
        resp.Login = node.UserView().TailscaleLogin()
    } else if node.Owner().Valid() {
        resp.User = node.Owner().TailscaleUser()
        resp.Login = node.Owner().TailscaleLogin()
    }

    return resp

@@ -389,8 +389,8 @@ func (h *Headscale) handleRegisterWithAuthKey(
    resp := &tailcfg.RegisterResponse{
        MachineAuthorized: true,
        NodeKeyExpired:    node.IsExpired(),
        User:              node.UserView().TailscaleUser(),
        Login:             node.UserView().TailscaleLogin(),
        User:              node.Owner().TailscaleUser(),
        Login:             node.Owner().TailscaleLogin(),
    }

    log.Trace().
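The two hunks above swap `node.UserView()` for `node.Owner()` when filling in the register response identity, while tagged nodes keep the shared tagged-devices identity. A minimal sketch of the resulting precedence, using hypothetical simplified types (`Node`, `Identity`, and the `identity` helper are illustrations, not headscale's real API, which works on `types.NodeView` and `tailcfg.RegisterResponse`):

```go
package main

import "fmt"

// Identity is a simplified stand-in for the tailcfg User/Login pair.
type Identity struct{ Name string }

// Node is a hypothetical, simplified view of a headscale node, used
// only to illustrate the precedence shown in the diff above.
type Node struct {
    Tagged bool      // node is owned by tags, not a user
    Owner  *Identity // resolved owner; may be nil for tags-only nodes
}

// identity mirrors the fallback order: a tagged node reports the shared
// tagged-devices identity; otherwise the resolved owner, if any.
func identity(n Node) Identity {
    if n.Tagged {
        return Identity{Name: "tagged-devices"}
    }
    if n.Owner != nil {
        return *n.Owner
    }
    return Identity{} // no identity resolvable
}

func main() {
    fmt.Println(identity(Node{Tagged: true}))                 // {tagged-devices}
    fmt.Println(identity(Node{Owner: &Identity{Name: "alice"}})) // {alice}
}
```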
@@ -471,6 +471,306 @@ func TestSingleVsMultipleTags(t *testing.T) {
    assert.False(t, node2.HasTag("tag:other"))
}

// TestTaggedPreAuthKeyDisablesKeyExpiry tests that nodes registered with
// a tagged PreAuthKey have key expiry disabled (expiry is nil).
func TestTaggedPreAuthKeyDisablesKeyExpiry(t *testing.T) {
    app := createTestApp(t)

    user := app.state.CreateUserForTest("tag-creator")
    tags := []string{"tag:server", "tag:prod"}

    // Create a tagged PreAuthKey
    pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, tags)
    require.NoError(t, err)
    require.ElementsMatch(t, tags, pak.Tags)

    // Register a node using the tagged key
    machineKey := key.NewMachine()
    nodeKey := key.NewNode()

    // Client requests an expiry time, but for tagged nodes it should be ignored
    clientRequestedExpiry := time.Now().Add(24 * time.Hour)

    regReq := tailcfg.RegisterRequest{
        Auth: &tailcfg.RegisterResponseAuth{
            AuthKey: pak.Key,
        },
        NodeKey: nodeKey.Public(),
        Hostinfo: &tailcfg.Hostinfo{
            Hostname: "tagged-expiry-test",
        },
        Expiry: clientRequestedExpiry,
    }

    resp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())
    require.NoError(t, err)
    require.True(t, resp.MachineAuthorized)

    // Verify the node has key expiry DISABLED (expiry is nil/zero)
    node, found := app.state.GetNodeByNodeKey(nodeKey.Public())
    require.True(t, found)

    // Critical assertion: Tagged nodes should have expiry disabled
    assert.True(t, node.IsTagged(), "Node should be tagged")
    assert.False(t, node.Expiry().Valid(), "Tagged node should have expiry disabled (nil)")
}

// TestUntaggedPreAuthKeyPreservesKeyExpiry tests that nodes registered with
// an untagged PreAuthKey preserve the client's requested key expiry.
func TestUntaggedPreAuthKeyPreservesKeyExpiry(t *testing.T) {
    app := createTestApp(t)

    user := app.state.CreateUserForTest("node-owner")

    // Create an untagged PreAuthKey
    pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)
    require.NoError(t, err)
    require.Empty(t, pak.Tags, "PreAuthKey should not be tagged")

    // Register a node
    machineKey := key.NewMachine()
    nodeKey := key.NewNode()

    // Client requests an expiry time
    clientRequestedExpiry := time.Now().Add(24 * time.Hour)

    regReq := tailcfg.RegisterRequest{
        Auth: &tailcfg.RegisterResponseAuth{
            AuthKey: pak.Key,
        },
        NodeKey: nodeKey.Public(),
        Hostinfo: &tailcfg.Hostinfo{
            Hostname: "untagged-expiry-test",
        },
        Expiry: clientRequestedExpiry,
    }

    resp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())
    require.NoError(t, err)
    require.True(t, resp.MachineAuthorized)

    // Verify the node has the client's requested expiry
    node, found := app.state.GetNodeByNodeKey(nodeKey.Public())
    require.True(t, found)

    // Critical assertion: User-owned nodes should preserve client expiry
    assert.False(t, node.IsTagged(), "Node should not be tagged")
    assert.True(t, node.Expiry().Valid(), "User-owned node should have expiry set")
    // Allow some tolerance for test execution time
    assert.WithinDuration(t, clientRequestedExpiry, node.Expiry().Get(), 5*time.Second,
        "User-owned node should have the client's requested expiry")
}

// TestTaggedNodeReauthPreservesDisabledExpiry tests that when a tagged node
// re-authenticates, the disabled expiry is preserved (not updated from client request).
func TestTaggedNodeReauthPreservesDisabledExpiry(t *testing.T) {
    app := createTestApp(t)

    user := app.state.CreateUserForTest("tag-creator")
    tags := []string{"tag:server"}

    // Create a reusable tagged PreAuthKey
    pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, tags)
    require.NoError(t, err)

    // Initial registration
    machineKey := key.NewMachine()
    nodeKey := key.NewNode()

    regReq := tailcfg.RegisterRequest{
        Auth: &tailcfg.RegisterResponseAuth{
            AuthKey: pak.Key,
        },
        NodeKey: nodeKey.Public(),
        Hostinfo: &tailcfg.Hostinfo{
            Hostname: "tagged-reauth-test",
        },
        Expiry: time.Now().Add(24 * time.Hour),
    }

    resp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())
    require.NoError(t, err)
    require.True(t, resp.MachineAuthorized)

    // Verify initial registration has expiry disabled
    node, found := app.state.GetNodeByNodeKey(nodeKey.Public())
    require.True(t, found)
    require.True(t, node.IsTagged())
    require.False(t, node.Expiry().Valid(), "Initial registration should have expiry disabled")

    // Re-authenticate with a NEW expiry request (should be ignored for tagged nodes)
    newRequestedExpiry := time.Now().Add(48 * time.Hour)
    reAuthReq := tailcfg.RegisterRequest{
        Auth: &tailcfg.RegisterResponseAuth{
            AuthKey: pak.Key,
        },
        NodeKey: nodeKey.Public(),
        Hostinfo: &tailcfg.Hostinfo{
            Hostname: "tagged-reauth-test",
        },
        Expiry: newRequestedExpiry, // Client requests new expiry
    }

    reAuthResp, err := app.handleRegisterWithAuthKey(reAuthReq, machineKey.Public())
    require.NoError(t, err)
    require.True(t, reAuthResp.MachineAuthorized)

    // Verify expiry is STILL disabled after re-auth
    nodeAfterReauth, found := app.state.GetNodeByNodeKey(nodeKey.Public())
    require.True(t, found)

    // Critical assertion: Tagged node should preserve disabled expiry on re-auth
    assert.True(t, nodeAfterReauth.IsTagged(), "Node should still be tagged")
    assert.False(t, nodeAfterReauth.Expiry().Valid(),
        "Tagged node should have expiry PRESERVED as disabled after re-auth")
}

// TestExpiryDuringPersonalToTaggedConversion tests that when a personal node
// is converted to tagged via reauth with RequestTags, the expiry is cleared to nil.
// BUG #3048: Previously expiry was NOT cleared because expiry handling ran
// BEFORE processReauthTags.
func TestExpiryDuringPersonalToTaggedConversion(t *testing.T) {
    app := createTestApp(t)
    user := app.state.CreateUserForTest("expiry-test-user")

    // Update policy to allow user to own tags
    err := app.state.UpdatePolicyManagerUsersForTest()
    require.NoError(t, err)

    policy := `{
        "tagOwners": {
            "tag:server": ["expiry-test-user@"]
        },
        "acls": [{"action": "accept", "src": ["*"], "dst": ["*:*"]}]
    }`
    _, err = app.state.SetPolicy([]byte(policy))
    require.NoError(t, err)

    machineKey := key.NewMachine()
    nodeKey1 := key.NewNode()

    // Step 1: Create user-owned node WITH expiry set
    clientExpiry := time.Now().Add(24 * time.Hour)
    registrationID1 := types.MustRegistrationID()
    regEntry1 := types.NewRegisterNode(types.Node{
        MachineKey: machineKey.Public(),
        NodeKey:    nodeKey1.Public(),
        Hostname:   "personal-to-tagged",
        Hostinfo: &tailcfg.Hostinfo{
            Hostname:    "personal-to-tagged",
            RequestTags: []string{}, // No tags - user-owned
        },
        Expiry: &clientExpiry,
    })
    app.state.SetRegistrationCacheEntry(registrationID1, regEntry1)

    node, _, err := app.state.HandleNodeFromAuthPath(
        registrationID1, types.UserID(user.ID), nil, "webauth",
    )
    require.NoError(t, err)
    require.False(t, node.IsTagged(), "Node should be user-owned initially")
    require.True(t, node.Expiry().Valid(), "User-owned node should have expiry set")

    // Step 2: Re-auth with tags (Personal → Tagged conversion)
    nodeKey2 := key.NewNode()
    registrationID2 := types.MustRegistrationID()
    regEntry2 := types.NewRegisterNode(types.Node{
        MachineKey: machineKey.Public(),
        NodeKey:    nodeKey2.Public(),
        Hostname:   "personal-to-tagged",
        Hostinfo: &tailcfg.Hostinfo{
            Hostname:    "personal-to-tagged",
            RequestTags: []string{"tag:server"}, // Adding tags
        },
        Expiry: &clientExpiry, // Client still sends expiry
    })
    app.state.SetRegistrationCacheEntry(registrationID2, regEntry2)

    nodeAfter, _, err := app.state.HandleNodeFromAuthPath(
        registrationID2, types.UserID(user.ID), nil, "webauth",
    )
    require.NoError(t, err)
    require.True(t, nodeAfter.IsTagged(), "Node should be tagged after conversion")

    // CRITICAL ASSERTION: Tagged nodes should NOT have expiry
    assert.False(t, nodeAfter.Expiry().Valid(),
        "Tagged node should have expiry cleared to nil")
}

// TestExpiryDuringTaggedToPersonalConversion tests that when a tagged node
// is converted to personal via reauth with empty RequestTags, expiry is set
// from the client request.
// BUG #3048: Previously expiry was NOT set because expiry handling ran
// BEFORE processReauthTags (node was still tagged at check time).
func TestExpiryDuringTaggedToPersonalConversion(t *testing.T) {
    app := createTestApp(t)
    user := app.state.CreateUserForTest("expiry-test-user2")

    // Update policy to allow user to own tags
    err := app.state.UpdatePolicyManagerUsersForTest()
    require.NoError(t, err)

    policy := `{
        "tagOwners": {
            "tag:server": ["expiry-test-user2@"]
        },
        "acls": [{"action": "accept", "src": ["*"], "dst": ["*:*"]}]
    }`
    _, err = app.state.SetPolicy([]byte(policy))
    require.NoError(t, err)

    machineKey := key.NewMachine()
    nodeKey1 := key.NewNode()

    // Step 1: Create tagged node (expiry should be nil)
    registrationID1 := types.MustRegistrationID()
    regEntry1 := types.NewRegisterNode(types.Node{
        MachineKey: machineKey.Public(),
        NodeKey:    nodeKey1.Public(),
        Hostname:   "tagged-to-personal",
        Hostinfo: &tailcfg.Hostinfo{
            Hostname:    "tagged-to-personal",
            RequestTags: []string{"tag:server"}, // Tagged node
        },
    })
    app.state.SetRegistrationCacheEntry(registrationID1, regEntry1)

    node, _, err := app.state.HandleNodeFromAuthPath(
        registrationID1, types.UserID(user.ID), nil, "webauth",
    )
    require.NoError(t, err)
    require.True(t, node.IsTagged(), "Node should be tagged initially")
    require.False(t, node.Expiry().Valid(), "Tagged node should have nil expiry")

    // Step 2: Re-auth with empty tags (Tagged → Personal conversion)
    nodeKey2 := key.NewNode()
    clientExpiry := time.Now().Add(48 * time.Hour)
    registrationID2 := types.MustRegistrationID()
    regEntry2 := types.NewRegisterNode(types.Node{
        MachineKey: machineKey.Public(),
        NodeKey:    nodeKey2.Public(),
        Hostname:   "tagged-to-personal",
        Hostinfo: &tailcfg.Hostinfo{
            Hostname:    "tagged-to-personal",
            RequestTags: []string{}, // Empty tags - convert to user-owned
        },
        Expiry: &clientExpiry, // Client requests expiry
    })
    app.state.SetRegistrationCacheEntry(registrationID2, regEntry2)

    nodeAfter, _, err := app.state.HandleNodeFromAuthPath(
        registrationID2, types.UserID(user.ID), nil, "webauth",
    )
    require.NoError(t, err)
    require.False(t, nodeAfter.IsTagged(), "Node should be user-owned after conversion")

    // CRITICAL ASSERTION: User-owned nodes should have expiry from client
    assert.True(t, nodeAfter.Expiry().Valid(),
        "User-owned node should have expiry set")
    assert.WithinDuration(t, clientExpiry, nodeAfter.Expiry().Get(), 5*time.Second,
        "Expiry should match client request")
}

// TestReAuthWithDifferentMachineKey tests the edge case where a node attempts
// to re-authenticate with the same NodeKey but a DIFFERENT MachineKey.
// This scenario should be handled gracefully (currently creates a new node).
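Taken together, the four tests above pin down a single rule: a tagged node never carries a key expiry, while a user-owned node keeps whatever expiry the client requested, and the rule is re-applied on every re-auth and tag conversion. A hedged sketch of that rule in isolation (`effectiveExpiry` is a hypothetical helper for illustration, not a function in headscale):

```go
package main

import (
    "fmt"
    "time"
)

// effectiveExpiry captures the behaviour the tests assert: tagged nodes
// get key expiry disabled (nil), user-owned nodes keep the expiry the
// client requested. Hypothetical helper, shown only for illustration.
func effectiveExpiry(isTagged bool, requested *time.Time) *time.Time {
    if isTagged {
        return nil // key expiry disabled for tagged nodes
    }
    return requested // preserved for user-owned nodes
}

func main() {
    req := time.Now().Add(24 * time.Hour)
    fmt.Println(effectiveExpiry(true, &req))  // <nil>
    fmt.Println(effectiveExpiry(false, &req)) // the requested expiry
}
```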
@@ -3541,3 +3541,389 @@ func TestWebAuthRejectsUnauthorizedRequestTags(t *testing.T) {
    _, found := app.state.GetNodeByNodeKey(nodeKey.Public())
    require.False(t, found, "Node should not be created when tags are unauthorized")
}

// TestWebAuthReauthWithEmptyTagsRemovesAllTags tests that when an existing tagged node
// reauths with empty RequestTags, all tags are removed and ownership returns to user.
// This is the fix for issue #2979.
func TestWebAuthReauthWithEmptyTagsRemovesAllTags(t *testing.T) {
    t.Parallel()

    app := createTestApp(t)

    // Create a user
    user := app.state.CreateUserForTest("reauth-untag-user")

    // Update policy manager to recognize the new user
    // This is necessary because CreateUserForTest doesn't update the policy manager
    err := app.state.UpdatePolicyManagerUsersForTest()
    require.NoError(t, err, "Failed to update policy manager users")

    // Set up policy that allows the user to own these tags
    policy := `{
        "tagOwners": {
            "tag:valid-owned": ["reauth-untag-user@"],
            "tag:second": ["reauth-untag-user@"]
        },
        "acls": [{"action": "accept", "src": ["*"], "dst": ["*:*"]}]
    }`
    _, err = app.state.SetPolicy([]byte(policy))
    require.NoError(t, err, "Failed to set policy")

    machineKey := key.NewMachine()
    nodeKey1 := key.NewNode()

    // Step 1: Initial registration with tags
    registrationID1 := types.MustRegistrationID()
    regEntry1 := types.NewRegisterNode(types.Node{
        MachineKey: machineKey.Public(),
        NodeKey:    nodeKey1.Public(),
        Hostname:   "reauth-untag-node",
        Hostinfo: &tailcfg.Hostinfo{
            Hostname:    "reauth-untag-node",
            RequestTags: []string{"tag:valid-owned", "tag:second"},
        },
    })
    app.state.SetRegistrationCacheEntry(registrationID1, regEntry1)

    // Complete initial registration with tags
    node, _, err := app.state.HandleNodeFromAuthPath(
        registrationID1,
        types.UserID(user.ID),
        nil,
        "webauth",
    )
    require.NoError(t, err, "Initial registration should succeed")
    require.True(t, node.IsTagged(), "Node should be tagged after initial registration")
    require.ElementsMatch(t, []string{"tag:valid-owned", "tag:second"}, node.Tags().AsSlice())
    t.Logf("Initial registration complete - Node ID: %d, Tags: %v, IsTagged: %t",
        node.ID().Uint64(), node.Tags().AsSlice(), node.IsTagged())

    // Step 2: Reauth with EMPTY tags to untag
    nodeKey2 := key.NewNode() // New node key for reauth
    registrationID2 := types.MustRegistrationID()
    regEntry2 := types.NewRegisterNode(types.Node{
        MachineKey: machineKey.Public(), // Same machine key
        NodeKey:    nodeKey2.Public(),   // Different node key (rotation)
        Hostname:   "reauth-untag-node",
        Hostinfo: &tailcfg.Hostinfo{
            Hostname:    "reauth-untag-node",
            RequestTags: []string{}, // EMPTY - should untag
        },
    })
    app.state.SetRegistrationCacheEntry(registrationID2, regEntry2)

    // Complete reauth with empty tags
    nodeAfterReauth, _, err := app.state.HandleNodeFromAuthPath(
        registrationID2,
        types.UserID(user.ID),
        nil,
        "webauth",
    )
    require.NoError(t, err, "Reauth should succeed")

    // Verify tags were removed
    require.False(t, nodeAfterReauth.IsTagged(), "Node should NOT be tagged after reauth with empty tags")
    require.Empty(t, nodeAfterReauth.Tags().AsSlice(), "Node should have no tags")

    // Verify ownership returned to user
    require.True(t, nodeAfterReauth.UserID().Valid(), "Node should have a user ID")
    require.Equal(t, user.ID, nodeAfterReauth.UserID().Get(), "Node should be owned by the user again")

    // Verify it's the same node (not a new one)
    require.Equal(t, node.ID(), nodeAfterReauth.ID(), "Should be the same node after reauth")

    t.Logf("Reauth complete - Node ID: %d, Tags: %v, IsTagged: %t, UserID: %d",
        nodeAfterReauth.ID().Uint64(), nodeAfterReauth.Tags().AsSlice(),
        nodeAfterReauth.IsTagged(), nodeAfterReauth.UserID().Get())
}

// TestAuthKeyTaggedToUserOwnedViaReauth tests that a node originally registered
// with a tagged pre-auth key can transition to user-owned by re-authenticating
// via web auth with empty RequestTags. This ensures authkey-tagged nodes are
// not permanently locked to being tagged.
func TestAuthKeyTaggedToUserOwnedViaReauth(t *testing.T) {
    t.Parallel()

    app := createTestApp(t)

    // Create a user
    user := app.state.CreateUserForTest("authkey-to-user")

    // Create a tagged pre-auth key
    authKeyTags := []string{"tag:server", "tag:prod"}
    pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, authKeyTags)
    require.NoError(t, err, "Failed to create tagged pre-auth key")

    machineKey := key.NewMachine()
    nodeKey1 := key.NewNode()

    // Step 1: Initial registration with tagged pre-auth key
    regReq := tailcfg.RegisterRequest{
        Auth: &tailcfg.RegisterResponseAuth{
            AuthKey: pak.Key,
        },
        NodeKey: nodeKey1.Public(),
        Hostinfo: &tailcfg.Hostinfo{
            Hostname: "authkey-tagged-node",
        },
        Expiry: time.Now().Add(24 * time.Hour),
    }

    resp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())
    require.NoError(t, err, "Initial registration should succeed")
    require.True(t, resp.MachineAuthorized, "Node should be authorized")

    // Verify initial state: node is tagged via authkey
    node, found := app.state.GetNodeByNodeKey(nodeKey1.Public())
    require.True(t, found, "Node should be found")
    require.True(t, node.IsTagged(), "Node should be tagged after authkey registration")
    require.ElementsMatch(t, authKeyTags, node.Tags().AsSlice(), "Node should have authkey tags")
    require.NotNil(t, node.AuthKey(), "Node should have AuthKey reference")
    require.Positive(t, node.AuthKey().Tags().Len(), "AuthKey should have tags")

    t.Logf("Initial registration complete - Node ID: %d, Tags: %v, IsTagged: %t, AuthKey.Tags.Len: %d",
        node.ID().Uint64(), node.Tags().AsSlice(), node.IsTagged(), node.AuthKey().Tags().Len())

    // Step 2: Reauth via web auth with EMPTY tags to transition to user-owned
    nodeKey2 := key.NewNode() // New node key for reauth
    registrationID := types.MustRegistrationID()
    regEntry := types.NewRegisterNode(types.Node{
        MachineKey: machineKey.Public(), // Same machine key
        NodeKey:    nodeKey2.Public(),   // Different node key (rotation)
        Hostname:   "authkey-tagged-node",
        Hostinfo: &tailcfg.Hostinfo{
            Hostname:    "authkey-tagged-node",
            RequestTags: []string{}, // EMPTY - should untag
        },
    })
    app.state.SetRegistrationCacheEntry(registrationID, regEntry)

    // Complete reauth with empty tags
    nodeAfterReauth, _, err := app.state.HandleNodeFromAuthPath(
        registrationID,
        types.UserID(user.ID),
        nil,
        "webauth",
    )
    require.NoError(t, err, "Reauth should succeed")

    // Verify tags were removed (authkey-tagged → user-owned transition)
    require.False(t, nodeAfterReauth.IsTagged(), "Node should NOT be tagged after reauth with empty tags")
    require.Empty(t, nodeAfterReauth.Tags().AsSlice(), "Node should have no tags")

    // Verify ownership returned to user
    require.True(t, nodeAfterReauth.UserID().Valid(), "Node should have a user ID")
    require.Equal(t, user.ID, nodeAfterReauth.UserID().Get(), "Node should be owned by the user")

    // Verify it's the same node (not a new one)
    require.Equal(t, node.ID(), nodeAfterReauth.ID(), "Should be the same node after reauth")

    // AuthKey reference should still exist (for audit purposes)
    require.NotNil(t, nodeAfterReauth.AuthKey(), "AuthKey reference should be preserved")

    t.Logf("Reauth complete - Node ID: %d, Tags: %v, IsTagged: %t, UserID: %d",
        nodeAfterReauth.ID().Uint64(), nodeAfterReauth.Tags().AsSlice(),
        nodeAfterReauth.IsTagged(), nodeAfterReauth.UserID().Get())
}

// TestDeletedPreAuthKeyNotRecreatedOnNodeUpdate tests that when a PreAuthKey is deleted,
// subsequent node updates (like those triggered by MapRequests) do not recreate the key.
//
// This reproduces the bug where:
// 1. Create a tagged preauthkey and register a node
// 2. Delete the preauthkey (confirmed gone from pre_auth_keys DB table)
// 3. Node sends MapRequest (e.g., after tailscaled restart)
// 4. BUG: The preauthkey reappears because GORM's Updates() upserts the stale AuthKey
//    data that still exists in the NodeStore's in-memory cache.
//
// The fix is to use Omit("AuthKey") on all node Updates() calls to prevent GORM
// from touching the AuthKey association.
func TestDeletedPreAuthKeyNotRecreatedOnNodeUpdate(t *testing.T) {
    t.Parallel()

    app := createTestApp(t)

    // Create user and tagged pre-auth key
    user := app.state.CreateUserForTest("test-user")
    pakNew, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, nil, []string{"tag:test"})
    require.NoError(t, err)

    pakID := pakNew.ID
    t.Logf("Created PreAuthKey ID: %d", pakID)

    // Register a node with the pre-auth key
    machineKey := key.NewMachine()
    nodeKey := key.NewNode()

    registerReq := tailcfg.RegisterRequest{
        Auth: &tailcfg.RegisterResponseAuth{
            AuthKey: pakNew.Key,
        },
        NodeKey: nodeKey.Public(),
        Hostinfo: &tailcfg.Hostinfo{
            Hostname: "test-node",
        },
    }

    resp, err := app.handleRegister(context.Background(), registerReq, machineKey.Public())
    require.NoError(t, err, "registration should succeed")
    require.True(t, resp.MachineAuthorized, "node should be authorized")

    // Verify node exists and has AuthKey reference
    node, found := app.state.GetNodeByNodeKey(nodeKey.Public())
    require.True(t, found, "node should exist")
    require.True(t, node.AuthKeyID().Valid(), "node should have AuthKeyID set")
    require.Equal(t, pakID, node.AuthKeyID().Get(), "node should reference the correct PreAuthKey")
    t.Logf("Node ID: %d, AuthKeyID: %d", node.ID().Uint64(), node.AuthKeyID().Get())

    // Verify the PreAuthKey exists in the database
    var pakCount int64

    err = app.state.DB().DB.Model(&types.PreAuthKey{}).Where("id = ?", pakID).Count(&pakCount).Error
    require.NoError(t, err)
    require.Equal(t, int64(1), pakCount, "PreAuthKey should exist in database")

    // Delete the PreAuthKey
    t.Log("Deleting PreAuthKey...")

    err = app.state.DeletePreAuthKey(pakID)
    require.NoError(t, err, "deleting PreAuthKey should succeed")

    // Verify the PreAuthKey is gone from the database
    err = app.state.DB().DB.Model(&types.PreAuthKey{}).Where("id = ?", pakID).Count(&pakCount).Error
    require.NoError(t, err)
    require.Equal(t, int64(0), pakCount, "PreAuthKey should be deleted from database")
    t.Log("PreAuthKey deleted from database")

    // Verify the node's auth_key_id is now NULL in the database
    var dbNode types.Node

    err = app.state.DB().DB.First(&dbNode, node.ID().Uint64()).Error
    require.NoError(t, err)
    require.Nil(t, dbNode.AuthKeyID, "node's AuthKeyID should be NULL after PreAuthKey deletion")
    t.Log("Node's AuthKeyID is NULL in database")

    // The NodeStore may still have stale AuthKey data in memory.
    // Now simulate what happens when the node sends a MapRequest after a tailscaled restart.
    // This triggers persistNodeToDB which calls GORM's Updates().

    // Simulate a MapRequest by updating the node through the state layer
    // This mimics what poll.go does when processing MapRequests
    mapReq := tailcfg.MapRequest{
        NodeKey:  nodeKey.Public(),
        DiscoKey: node.DiscoKey(),
        Hostinfo: &tailcfg.Hostinfo{
            Hostname:  "test-node",
            GoVersion: "go1.21", // Some change to trigger an update
        },
    }

    // Process the MapRequest-like update
    // This calls UpdateNodeFromMapRequest which eventually calls persistNodeToDB
    _, err = app.state.UpdateNodeFromMapRequest(node.ID(), mapReq)
    require.NoError(t, err, "UpdateNodeFromMapRequest should succeed")
    t.Log("Simulated MapRequest update completed")

    // THE CRITICAL CHECK: Verify the PreAuthKey was NOT recreated
    err = app.state.DB().DB.Model(&types.PreAuthKey{}).Where("id = ?", pakID).Count(&pakCount).Error
    require.NoError(t, err)
    require.Equal(t, int64(0), pakCount,
        "BUG: PreAuthKey was recreated! The deleted PreAuthKey should NOT reappear after node update")

    t.Log("SUCCESS: PreAuthKey remained deleted after node update")
}
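The doc comment on the test above names the fix: omit the `AuthKey` association from GORM's `Updates()` so a stale in-memory association can never be upserted back into the database. A minimal sketch of that pattern, assuming a `*gorm.DB` handle and any model with an association named `AuthKey` (`persistNode` is a hypothetical helper, not headscale's actual `persistNodeToDB`; `Omit` is standard GORM API):

```go
package sketch

import (
    "fmt"

    "gorm.io/gorm"
)

// persistNode writes a node's scalar fields back to the database while
// skipping the AuthKey association. Without Omit, GORM's Updates() would
// also upsert the associated AuthKey struct held in memory, resurrecting
// a row that was deleted from pre_auth_keys.
func persistNode(db *gorm.DB, node any) error {
    if err := db.Omit("AuthKey").Updates(node).Error; err != nil {
        return fmt.Errorf("persisting node: %w", err)
    }

    return nil
}
```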
// TestTaggedNodeWithoutUserToDifferentUser tests that a node registered with a
// tags-only PreAuthKey (no user) can be re-registered to a different user
// without panicking. This reproduces the issue reported in #3038.
func TestTaggedNodeWithoutUserToDifferentUser(t *testing.T) {
    t.Parallel()

    app := createTestApp(t)

    // Step 1: Create a tags-only PreAuthKey (no user, only tags)
    // This is valid for tagged nodes where ownership is defined by tags, not users
    tags := []string{"tag:server", "tag:prod"}
    pak, err := app.state.CreatePreAuthKey(nil, true, false, nil, tags)
    require.NoError(t, err, "Failed to create tags-only pre-auth key")
    require.Nil(t, pak.User, "Tags-only PAK should have nil User")

    machineKey := key.NewMachine()
    nodeKey1 := key.NewNode()

    // Step 2: Register node with tags-only PreAuthKey
    regReq := tailcfg.RegisterRequest{
        Auth: &tailcfg.RegisterResponseAuth{
            AuthKey: pak.Key,
        },
        NodeKey: nodeKey1.Public(),
        Hostinfo: &tailcfg.Hostinfo{
            Hostname: "tagged-orphan-node",
        },
        Expiry: time.Now().Add(24 * time.Hour),
    }

    resp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public())
    require.NoError(t, err, "Initial registration should succeed")
    require.True(t, resp.MachineAuthorized, "Node should be authorized")

    // Verify initial state: node is tagged with no UserID
    node, found := app.state.GetNodeByNodeKey(nodeKey1.Public())
    require.True(t, found, "Node should be found")
    require.True(t, node.IsTagged(), "Node should be tagged")
    require.ElementsMatch(t, tags, node.Tags().AsSlice(), "Node should have tags from PAK")
    require.False(t, node.UserID().Valid(), "Node should NOT have a UserID (tags-only PAK)")
    require.False(t, node.User().Valid(), "Node should NOT have a User (tags-only PAK)")

    t.Logf("Initial registration complete - Node ID: %d, Tags: %v, IsTagged: %t, UserID valid: %t",
        node.ID().Uint64(), node.Tags().AsSlice(), node.IsTagged(), node.UserID().Valid())

    // Step 3: Create a new user (alice) to re-register the node to
    alice := app.state.CreateUserForTest("alice")
    require.NotNil(t, alice, "Alice user should be created")

    // Step 4: Re-register the node to alice via HandleNodeFromAuthPath
    // This is what happens when running: headscale nodes register --user alice --key ...
    nodeKey2 := key.NewNode()
    registrationID := types.MustRegistrationID()
    regEntry := types.NewRegisterNode(types.Node{
        MachineKey: machineKey.Public(), // Same machine key as the tagged node
        NodeKey:    nodeKey2.Public(),
        Hostname:   "tagged-orphan-node",
        Hostinfo: &tailcfg.Hostinfo{
            Hostname:    "tagged-orphan-node",
            RequestTags: []string{}, // Empty - transition to user-owned
        },
    })
    app.state.SetRegistrationCacheEntry(registrationID, regEntry)

    // This should NOT panic - before the fix, this would panic with:
    // panic: runtime error: invalid memory address or nil pointer dereference
    // at UserView.Name() because the existing node has no User
    nodeAfterReauth, _, err := app.state.HandleNodeFromAuthPath(
        registrationID,
        types.UserID(alice.ID),
        nil,
        "cli",
    )
    require.NoError(t, err, "Re-registration to alice should succeed without panic")

    // Verify the existing tagged node was converted to be owned by alice (same node ID)
    require.True(t, nodeAfterReauth.Valid(), "Node should be valid")
    require.True(t, nodeAfterReauth.UserID().Valid(), "Node should have a UserID")
    require.Equal(t, alice.ID, nodeAfterReauth.UserID().Get(), "Node should be owned by alice")
    require.Equal(t, node.ID(), nodeAfterReauth.ID(), "Should be the same node (converted, not new)")
    require.False(t, nodeAfterReauth.IsTagged(), "Node should no longer be tagged")
    require.Empty(t, nodeAfterReauth.Tags().AsSlice(), "Node should have no tags")

    // Verify Owner() works without panicking - this is what the mapper's
    // generateUserProfiles calls, and it would panic with a nil pointer
    // dereference if node.User was not set during the tag→user conversion.
    owner := nodeAfterReauth.Owner()
    require.True(t, owner.Valid(), "Owner should be valid after conversion (mapper would panic if nil)")
    require.Equal(t, alice.ID, owner.Model().ID, "Owner should be alice")

    t.Logf("Re-registration complete - Node ID: %d, Tags: %v, IsTagged: %t, UserID: %d",
        nodeAfterReauth.ID().Uint64(), nodeAfterReauth.Tags().AsSlice(),
        nodeAfterReauth.IsTagged(), nodeAfterReauth.UserID().Get())
}
@@ -9,89 +9,103 @@ import (
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "golang.org/x/crypto/bcrypt"
    "gopkg.in/check.v1"
)

func (*Suite) TestCreateAPIKey(c *check.C) {
func TestCreateAPIKey(t *testing.T) {
    db, err := newSQLiteTestDB()
    require.NoError(t, err)

    apiKeyStr, apiKey, err := db.CreateAPIKey(nil)
    c.Assert(err, check.IsNil)
    c.Assert(apiKey, check.NotNil)
    require.NoError(t, err)
    require.NotNil(t, apiKey)

    // Did we get a valid key?
    c.Assert(apiKey.Prefix, check.NotNil)
    c.Assert(apiKey.Hash, check.NotNil)
    c.Assert(apiKeyStr, check.Not(check.Equals), "")
    assert.NotNil(t, apiKey.Prefix)
    assert.NotNil(t, apiKey.Hash)
    assert.NotEmpty(t, apiKeyStr)

    _, err = db.ListAPIKeys()
    c.Assert(err, check.IsNil)
    require.NoError(t, err)

    keys, err := db.ListAPIKeys()
    c.Assert(err, check.IsNil)
    c.Assert(len(keys), check.Equals, 1)
    require.NoError(t, err)
    assert.Len(t, keys, 1)
}

func (*Suite) TestAPIKeyDoesNotExist(c *check.C) {
func TestAPIKeyDoesNotExist(t *testing.T) {
    db, err := newSQLiteTestDB()
    require.NoError(t, err)

    key, err := db.GetAPIKey("does-not-exist")
    c.Assert(err, check.NotNil)
    c.Assert(key, check.IsNil)
    require.Error(t, err)
    assert.Nil(t, key)
}

func (*Suite) TestValidateAPIKeyOk(c *check.C) {
func TestValidateAPIKeyOk(t *testing.T) {
    db, err := newSQLiteTestDB()
    require.NoError(t, err)

    nowPlus2 := time.Now().Add(2 * time.Hour)
    apiKeyStr, apiKey, err := db.CreateAPIKey(&nowPlus2)
    c.Assert(err, check.IsNil)
    c.Assert(apiKey, check.NotNil)
    require.NoError(t, err)
    require.NotNil(t, apiKey)

    valid, err := db.ValidateAPIKey(apiKeyStr)
    c.Assert(err, check.IsNil)
    c.Assert(valid, check.Equals, true)
    require.NoError(t, err)
    assert.True(t, valid)
}

func (*Suite) TestValidateAPIKeyNotOk(c *check.C) {
func TestValidateAPIKeyNotOk(t *testing.T) {
    db, err := newSQLiteTestDB()
    require.NoError(t, err)

    nowMinus2 := time.Now().Add(time.Duration(-2) * time.Hour)
    apiKeyStr, apiKey, err := db.CreateAPIKey(&nowMinus2)
    c.Assert(err, check.IsNil)
    c.Assert(apiKey, check.NotNil)
    require.NoError(t, err)
    require.NotNil(t, apiKey)

    valid, err := db.ValidateAPIKey(apiKeyStr)
    c.Assert(err, check.IsNil)
    c.Assert(valid, check.Equals, false)
    require.NoError(t, err)
    assert.False(t, valid)

    now := time.Now()
    apiKeyStrNow, apiKey, err := db.CreateAPIKey(&now)
    c.Assert(err, check.IsNil)
    c.Assert(apiKey, check.NotNil)
    require.NoError(t, err)
    require.NotNil(t, apiKey)

    validNow, err := db.ValidateAPIKey(apiKeyStrNow)
    c.Assert(err, check.IsNil)
    c.Assert(validNow, check.Equals, false)
    require.NoError(t, err)
    assert.False(t, validNow)

    validSilly, err := db.ValidateAPIKey("nota.validkey")
    c.Assert(err, check.NotNil)
    c.Assert(validSilly, check.Equals, false)
    require.Error(t, err)
    assert.False(t, validSilly)

    validWithErr, err := db.ValidateAPIKey("produceerrorkey")
    c.Assert(err, check.NotNil)
    c.Assert(validWithErr, check.Equals, false)
    require.Error(t, err)
    assert.False(t, validWithErr)
}

func (*Suite) TestExpireAPIKey(c *check.C) {
func TestExpireAPIKey(t *testing.T) {
    db, err := newSQLiteTestDB()
    require.NoError(t, err)

    nowPlus2 := time.Now().Add(2 * time.Hour)
    apiKeyStr, apiKey, err := db.CreateAPIKey(&nowPlus2)
    c.Assert(err, check.IsNil)
    c.Assert(apiKey, check.NotNil)
    require.NoError(t, err)
    require.NotNil(t, apiKey)

    valid, err := db.ValidateAPIKey(apiKeyStr)
    c.Assert(err, check.IsNil)
    c.Assert(valid, check.Equals, true)
    require.NoError(t, err)
    assert.True(t, valid)

    err = db.ExpireAPIKey(apiKey)
    c.Assert(err, check.IsNil)
    c.Assert(apiKey.Expiration, check.NotNil)
    require.NoError(t, err)
    assert.NotNil(t, apiKey.Expiration)

    notValid, err := db.ValidateAPIKey(apiKeyStr)
    c.Assert(err, check.IsNil)
    c.Assert(notValid, check.Equals, false)
    require.NoError(t, err)
    assert.False(t, notValid)
}

func TestAPIKeyWithPrefix(t *testing.T) {

@@ -232,3 +246,30 @@ func TestAPIKeyWithPrefix(t *testing.T) {
        })
    }
}

func TestGetAPIKeyByID(t *testing.T) {
    db, err := newSQLiteTestDB()
    require.NoError(t, err)

    // Create an API key
    _, apiKey, err := db.CreateAPIKey(nil)
    require.NoError(t, err)
    require.NotNil(t, apiKey)

    // Retrieve by ID
    retrievedKey, err := db.GetAPIKeyByID(apiKey.ID)
    require.NoError(t, err)
    require.NotNil(t, retrievedKey)
    assert.Equal(t, apiKey.ID, retrievedKey.ID)
    assert.Equal(t, apiKey.Prefix, retrievedKey.Prefix)
}

func TestGetAPIKeyByIDNotFound(t *testing.T) {
    db, err := newSQLiteTestDB()
    require.NoError(t, err)

    // Try to get a non-existent key by ID
    key, err := db.GetAPIKeyByID(99999)
    require.Error(t, err)
    assert.Nil(t, key)
}
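The hunk above is a mechanical conversion from gopkg.in/check.v1 to testify: `c.Assert(err, check.IsNil)` becomes `require.NoError(t, err)`, `c.Assert(x, check.Equals, y)` becomes `assert.Equal(t, y, x)` (expected value first), and fatal preconditions use `require` while non-fatal checks use `assert`. A minimal self-contained example of the target style, with a hypothetical `newTestStore` fixture standing in for `newSQLiteTestDB`:

```go
package store_test

import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

// newTestStore is a hypothetical fixture standing in for newSQLiteTestDB.
func newTestStore(t *testing.T) map[string]string {
    t.Helper()
    return map[string]string{"prefix": "hskey-"}
}

func TestTargetStyle(t *testing.T) {
    store := newTestStore(t)

    // require.* aborts the test on failure, like a fatal precondition.
    require.NotNil(t, store)

    // assert.* records the failure and keeps going; expected value first.
    assert.Equal(t, "hskey-", store["prefix"])
    assert.Len(t, store, 1)
}
```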
@@ -15,6 +15,7 @@ import (
    "github.com/glebarez/sqlite"
    "github.com/go-gormigrate/gormigrate/v2"
    "github.com/juanfont/headscale/hscontrol/db/sqliteconfig"
    "github.com/juanfont/headscale/hscontrol/policy"
    "github.com/juanfont/headscale/hscontrol/types"
    "github.com/juanfont/headscale/hscontrol/util"
    "github.com/rs/zerolog/log"

@@ -44,29 +45,19 @@ const (
    contextTimeoutSecs = 10
)

// KV is a key-value store in a psql table. For future use...
// TODO(kradalby): Is this used for anything?
type KV struct {
    Key   string
    Value string
}

type HSDatabase struct {
    DB       *gorm.DB
    cfg      *types.DatabaseConfig
    cfg      *types.Config
    regCache *zcache.Cache[types.RegistrationID, types.RegisterNode]

    baseDomain string
}

// TODO(kradalby): assemble this struct from toptions or something typed
// rather than arguments.
// NewHeadscaleDatabase creates a new database connection and runs migrations.
// It accepts the full configuration to allow migrations access to policy settings.
func NewHeadscaleDatabase(
    cfg types.DatabaseConfig,
    baseDomain string,
    cfg *types.Config,
    regCache *zcache.Cache[types.RegistrationID, types.RegisterNode],
) (*HSDatabase, error) {
    dbConn, err := openDB(cfg)
    dbConn, err := openDB(cfg.Database)
    if err != nil {
        return nil, err
    }

@@ -253,7 +244,7 @@ AND auth_key_id NOT IN (
    ID: "202507021200",
    Migrate: func(tx *gorm.DB) error {
        // Only run on SQLite
        if cfg.Type != types.DatabaseSqlite {
        if cfg.Database.Type != types.DatabaseSqlite {
            log.Info().Msg("Skipping schema migration on non-SQLite database")
            return nil
        }

@@ -592,6 +583,112 @@ AND auth_key_id NOT IN (
        },
        Rollback: func(db *gorm.DB) error { return nil },
    },
    {
        // Migrate RequestTags from host_info JSON to tags column.
        // In 0.27.x, tags from --advertise-tags (ValidTags) were stored only in
        // host_info.RequestTags, not in the tags column (formerly forced_tags).
        // This migration validates RequestTags against the policy's tagOwners
        // and merges validated tags into the tags column.
        // Fixes: https://github.com/juanfont/headscale/issues/3006
        ID: "202601121700-migrate-hostinfo-request-tags",
        Migrate: func(tx *gorm.DB) error {
            // 1. Load policy from file or database based on configuration
            policyData, err := PolicyBytes(tx, cfg)
            if err != nil {
                log.Warn().Err(err).Msg("Failed to load policy, skipping RequestTags migration (tags will be validated on node reconnect)")
                return nil
            }

            if len(policyData) == 0 {
                log.Info().Msg("No policy found, skipping RequestTags migration (tags will be validated on node reconnect)")
                return nil
            }

            // 2. Load users and nodes to create PolicyManager
            users, err := ListUsers(tx)
            if err != nil {
                return fmt.Errorf("loading users for RequestTags migration: %w", err)
            }

            nodes, err := ListNodes(tx)
            if err != nil {
                return fmt.Errorf("loading nodes for RequestTags migration: %w", err)
            }

            // 3. Create PolicyManager (handles HuJSON parsing, groups, nested tags, etc.)
            polMan, err := policy.NewPolicyManager(policyData, users, nodes.ViewSlice())
            if err != nil {
                log.Warn().Err(err).Msg("Failed to parse policy, skipping RequestTags migration (tags will be validated on node reconnect)")
                return nil
            }

            // 4. Process each node
            for _, node := range nodes {
                if node.Hostinfo == nil {
                    continue
                }

                requestTags := node.Hostinfo.RequestTags
                if len(requestTags) == 0 {
                    continue
                }

                existingTags := node.Tags

                var validatedTags, rejectedTags []string

                nodeView := node.View()

                for _, tag := range requestTags {
                    if polMan.NodeCanHaveTag(nodeView, tag) {
                        if !slices.Contains(existingTags, tag) {
                            validatedTags = append(validatedTags, tag)
                        }
                    } else {
                        rejectedTags = append(rejectedTags, tag)
                    }
                }

                if len(validatedTags) == 0 {
                    if len(rejectedTags) > 0 {
                        log.Debug().
                            Uint64("node.id", uint64(node.ID)).
                            Str("node.name", node.Hostname).
                            Strs("rejected_tags", rejectedTags).
                            Msg("RequestTags rejected during migration (not authorized)")
                    }

                    continue
                }

                mergedTags := append(existingTags, validatedTags...)
                slices.Sort(mergedTags)
                mergedTags = slices.Compact(mergedTags)

                tagsJSON, err := json.Marshal(mergedTags)
                if err != nil {
                    return fmt.Errorf("serializing merged tags for node %d: %w", node.ID, err)
                }

                err = tx.Exec("UPDATE nodes SET tags = ? WHERE id = ?", string(tagsJSON), node.ID).Error
                if err != nil {
                    return fmt.Errorf("updating tags for node %d: %w", node.ID, err)
                }

                log.Info().
                    Uint64("node.id", uint64(node.ID)).
                    Str("node.name", node.Hostname).
                    Strs("validated_tags", validatedTags).
                    Strs("rejected_tags", rejectedTags).
                    Strs("existing_tags", existingTags).
                    Strs("merged_tags", mergedTags).
                    Msg("Migrated validated RequestTags from host_info to tags column")
            }

            return nil
        },
        Rollback: func(db *gorm.DB) error { return nil },
    },
    },
)

@@ -648,7 +745,8 @@ AND auth_key_id NOT IN (
    return nil
})

if err := runMigrations(cfg, dbConn, migrations); err != nil {
err = runMigrations(cfg.Database, dbConn, migrations)
if err != nil {
    return nil, fmt.Errorf("migration failed: %w", err)
}

@@ -656,7 +754,7 @@ AND auth_key_id NOT IN (
// This is currently only done on sqlite as squibble does not
// support Postgres and we use our sqlite schema as our source of
// truth.
if cfg.Type == types.DatabaseSqlite {
if cfg.Database.Type == types.DatabaseSqlite {
    sqlConn, err := dbConn.DB()
    if err != nil {
        return nil, fmt.Errorf("getting DB from gorm: %w", err)

@@ -688,10 +786,8 @@ AND auth_key_id NOT IN (

db := HSDatabase{
    DB:       dbConn,
    cfg:      &cfg,
    cfg:      cfg,
    regCache: regCache,

    baseDomain: baseDomain,
}

return &db, err

@@ -934,7 +1030,7 @@ func (hsdb *HSDatabase) Close() error {
    return err
}

if hsdb.cfg.Type == types.DatabaseSqlite && hsdb.cfg.Sqlite.WriteAheadLog {
if hsdb.cfg.Database.Type == types.DatabaseSqlite && hsdb.cfg.Database.Sqlite.WriteAheadLog {
    db.Exec("VACUUM")
}
@@ -67,6 +67,83 @@ func TestSQLiteMigrationAndDataValidation(t *testing.T) {
            }
        },
    },
    // Test for RequestTags migration (202601121700-migrate-hostinfo-request-tags)
    // and forced_tags->tags rename migration (202511131445-node-forced-tags-to-tags)
    //
    // This test validates that:
    // 1. The forced_tags column is renamed to tags
    // 2. RequestTags from host_info are validated against policy tagOwners
    // 3. Authorized tags are migrated to the tags column
    // 4. Unauthorized tags are rejected
    // 5. Existing tags are preserved
    // 6. Group membership is evaluated for tag authorization
    {
        dbPath: "testdata/sqlite/request_tags_migration_test.sql",
        wantFunc: func(t *testing.T, hsdb *HSDatabase) {
            t.Helper()

            nodes, err := Read(hsdb.DB, func(rx *gorm.DB) (types.Nodes, error) {
                return ListNodes(rx)
            })
            require.NoError(t, err)
            require.Len(t, nodes, 7, "should have all 7 nodes")

            // Helper to find node by hostname
            findNode := func(hostname string) *types.Node {
                for _, n := range nodes {
                    if n.Hostname == hostname {
                        return n
                    }
                }

                return nil
            }

            // Node 1: user1 has RequestTags for tag:server (authorized)
            // Expected: tags = ["tag:server"]
            node1 := findNode("node1")
            require.NotNil(t, node1, "node1 should exist")
            assert.Contains(t, node1.Tags, "tag:server", "node1 should have tag:server migrated from RequestTags")

            // Node 2: user1 has RequestTags for tag:unauthorized (NOT authorized)
            // Expected: tags = [] (unchanged)
            node2 := findNode("node2")
            require.NotNil(t, node2, "node2 should exist")
            assert.Empty(t, node2.Tags, "node2 should have empty tags (unauthorized tag rejected)")

            // Node 3: user2 has RequestTags for tag:client (authorized) + existing tag:existing
            // Expected: tags = ["tag:client", "tag:existing"]
            node3 := findNode("node3")
            require.NotNil(t, node3, "node3 should exist")
            assert.Contains(t, node3.Tags, "tag:client", "node3 should have tag:client migrated from RequestTags")
            assert.Contains(t, node3.Tags, "tag:existing", "node3 should preserve existing tag")

            // Node 4: user1 has RequestTags for tag:server which already exists
            // Expected: tags = ["tag:server"] (no duplicates)
            node4 := findNode("node4")
            require.NotNil(t, node4, "node4 should exist")
            assert.Equal(t, []string{"tag:server"}, node4.Tags, "node4 should have tag:server without duplicates")

            // Node 5: user2 has no RequestTags
            // Expected: tags = [] (unchanged)
            node5 := findNode("node5")
            require.NotNil(t, node5, "node5 should exist")
            assert.Empty(t, node5.Tags, "node5 should have empty tags (no RequestTags)")

            // Node 6: admin1 has RequestTags for tag:admin (authorized via group:admins)
            // Expected: tags = ["tag:admin"]
            node6 := findNode("node6")
            require.NotNil(t, node6, "node6 should exist")
            assert.Contains(t, node6.Tags, "tag:admin", "node6 should have tag:admin migrated via group membership")

            // Node 7: user1 has RequestTags for tag:server (authorized) and tag:forbidden (unauthorized)
            // Expected: tags = ["tag:server"] (only authorized tag)
            node7 := findNode("node7")
            require.NotNil(t, node7, "node7 should exist")
            assert.Contains(t, node7.Tags, "tag:server", "node7 should have tag:server migrated")
            assert.NotContains(t, node7.Tags, "tag:forbidden", "node7 should NOT have tag:forbidden (unauthorized)")
        },
    },
}

for _, tt := range tests {

@@ -255,7 +332,7 @@ func TestPostgresMigrationAndDataValidation(t *testing.T) {
    t.Fatalf("failed to restore postgres database: %s", err)
}

db = newHeadscaleDBFromPostgresURL(t, u)
db := newHeadscaleDBFromPostgresURL(t, u)

if tt.wantFunc != nil {
    tt.wantFunc(t, db)

@@ -288,13 +365,17 @@ func dbForTestWithPath(t *testing.T, sqlFilePath string) *HSDatabase {
}

db, err := NewHeadscaleDatabase(
    types.DatabaseConfig{
        Type: "sqlite3",
        Sqlite: types.SqliteConfig{
            Path: dbPath,
    &types.Config{
        Database: types.DatabaseConfig{
            Type: "sqlite3",
            Sqlite: types.SqliteConfig{
                Path: dbPath,
            },
        },
        Policy: types.PolicyConfig{
            Mode: types.PolicyModeDB,
        },
    },
    "",
    emptyCache(),
)
if err != nil {

@@ -343,13 +424,17 @@ func TestSQLiteAllTestdataMigrations(t *testing.T) {
    require.NoError(t, err)

    _, err = NewHeadscaleDatabase(
        types.DatabaseConfig{
            Type: "sqlite3",
            Sqlite: types.SqliteConfig{
                Path: dbPath,
        &types.Config{
            Database: types.DatabaseConfig{
                Type: "sqlite3",
                Sqlite: types.SqliteConfig{
                    Path: dbPath,
                },
            },
            Policy: types.PolicyConfig{
                Mode: types.PolicyModeDB,
            },
        },
        "",
        emptyCache(),
    )
    require.NoError(t, err)
@@ -18,7 +18,6 @@ import (
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gopkg.in/check.v1"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
@@ -26,70 +25,85 @@ import (
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
func (s *Suite) TestGetNode(c *check.C) {
|
||||
func TestGetNode(t *testing.T) {
|
||||
db, err := newSQLiteTestDB()
|
||||
require.NoError(t, err)
|
||||
|
||||
user := db.CreateUserForTest("test")
|
||||
|
||||
_, err := db.getNode(types.UserID(user.ID), "testnode")
|
||||
c.Assert(err, check.NotNil)
|
||||
_, err = db.getNode(types.UserID(user.ID), "testnode")
|
||||
require.Error(t, err)
|
||||
|
||||
node := db.CreateNodeForTest(user, "testnode")
|
||||
|
||||
_, err = db.getNode(types.UserID(user.ID), "testnode")
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(node.Hostname, check.Equals, "testnode")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "testnode", node.Hostname)
|
||||
}
|
||||
|
||||
func (s *Suite) TestGetNodeByID(c *check.C) {
|
||||
func TestGetNodeByID(t *testing.T) {
|
||||
db, err := newSQLiteTestDB()
|
||||
require.NoError(t, err)
|
||||
|
||||
user := db.CreateUserForTest("test")
|
||||
|
||||
_, err := db.GetNodeByID(0)
|
||||
c.Assert(err, check.NotNil)
|
||||
_, err = db.GetNodeByID(0)
|
||||
require.Error(t, err)
|
||||
|
||||
node := db.CreateNodeForTest(user, "testnode")
|
||||
|
||||
retrievedNode, err := db.GetNodeByID(node.ID)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(retrievedNode.Hostname, check.Equals, "testnode")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "testnode", retrievedNode.Hostname)
|
||||
}
|
||||
|
||||
func (s *Suite) TestHardDeleteNode(c *check.C) {
|
||||
func TestHardDeleteNode(t *testing.T) {
|
||||
db, err := newSQLiteTestDB()
|
||||
require.NoError(t, err)
|
||||
|
||||
user := db.CreateUserForTest("test")
|
||||
node := db.CreateNodeForTest(user, "testnode3")
|
||||
|
||||
err := db.DeleteNode(node)
|
||||
c.Assert(err, check.IsNil)
|
||||
err = db.DeleteNode(node)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = db.getNode(types.UserID(user.ID), "testnode3")
|
||||
c.Assert(err, check.NotNil)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func (s *Suite) TestListPeers(c *check.C) {
|
||||
func TestListPeersManyNodes(t *testing.T) {
|
||||
db, err := newSQLiteTestDB()
|
||||
require.NoError(t, err)
|
||||
|
||||
user := db.CreateUserForTest("test")
|
||||
|
||||
_, err := db.GetNodeByID(0)
|
||||
c.Assert(err, check.NotNil)
|
||||
_, err = db.GetNodeByID(0)
|
||||
require.Error(t, err)
|
||||
|
||||
nodes := db.CreateNodesForTest(user, 11, "testnode")
|
||||
|
||||
firstNode := nodes[0]
|
||||
peersOfFirstNode, err := db.ListPeers(firstNode.ID)
|
||||
c.Assert(err, check.IsNil)
|
||||
require.NoError(t, err)
|
||||
|
||||
c.Assert(len(peersOfFirstNode), check.Equals, 10)
|
||||
c.Assert(peersOfFirstNode[0].Hostname, check.Equals, "testnode-1")
|
||||
c.Assert(peersOfFirstNode[5].Hostname, check.Equals, "testnode-6")
|
||||
c.Assert(peersOfFirstNode[9].Hostname, check.Equals, "testnode-10")
|
||||
assert.Len(t, peersOfFirstNode, 10)
|
||||
assert.Equal(t, "testnode-1", peersOfFirstNode[0].Hostname)
|
||||
assert.Equal(t, "testnode-6", peersOfFirstNode[5].Hostname)
|
||||
assert.Equal(t, "testnode-10", peersOfFirstNode[9].Hostname)
|
||||
}
|
||||
|
||||
func (s *Suite) TestExpireNode(c *check.C) {
|
||||
func TestExpireNode(t *testing.T) {
|
||||
db, err := newSQLiteTestDB()
|
||||
require.NoError(t, err)
|
||||
|
||||
user, err := db.CreateUser(types.User{Name: "test"})
|
||||
c.Assert(err, check.IsNil)
|
||||
require.NoError(t, err)
|
||||
|
||||
pak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = db.getNode(types.UserID(user.ID), "testnode")
|
||||
c.Assert(err, check.NotNil)
|
||||
require.Error(t, err)
|
||||
|
||||
nodeKey := key.NewNode()
|
||||
machineKey := key.NewMachine()
|
||||
@@ -107,30 +121,33 @@ func (s *Suite) TestExpireNode(c *check.C) {
|
||||
db.DB.Save(node)
|
||||
|
||||
nodeFromDB, err := db.getNode(types.UserID(user.ID), "testnode")
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(nodeFromDB, check.NotNil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, nodeFromDB)
|
||||
|
||||
c.Assert(nodeFromDB.IsExpired(), check.Equals, false)
|
||||
assert.False(t, nodeFromDB.IsExpired())
|
||||
|
||||
now := time.Now()
|
||||
err = db.NodeSetExpiry(nodeFromDB.ID, now)
|
||||
c.Assert(err, check.IsNil)
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeFromDB, err = db.getNode(types.UserID(user.ID), "testnode")
|
||||
c.Assert(err, check.IsNil)
|
||||
require.NoError(t, err)
|
||||
|
||||
c.Assert(nodeFromDB.IsExpired(), check.Equals, true)
|
||||
assert.True(t, nodeFromDB.IsExpired())
|
||||
}
|
||||
|
||||
func (s *Suite) TestSetTags(c *check.C) {
|
||||
func TestSetTags(t *testing.T) {
|
||||
db, err := newSQLiteTestDB()
|
||||
require.NoError(t, err)
|
||||
|
||||
user, err := db.CreateUser(types.User{Name: "test"})
|
||||
c.Assert(err, check.IsNil)
|
||||
require.NoError(t, err)
|
||||
|
||||
pak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = db.getNode(types.UserID(user.ID), "testnode")
|
||||
c.Assert(err, check.NotNil)
|
||||
require.Error(t, err)
|
||||
|
||||
nodeKey := key.NewNode()
|
||||
machineKey := key.NewMachine()
|
||||
@@ -146,27 +163,23 @@ func (s *Suite) TestSetTags(c *check.C) {
|
||||
}
|
||||
|
||||
trx := db.DB.Save(node)
|
||||
c.Assert(trx.Error, check.IsNil)
|
||||
require.NoError(t, trx.Error)
|
||||
|
||||
// assign simple tags
|
||||
sTags := []string{"tag:test", "tag:foo"}
|
||||
err = db.SetTags(node.ID, sTags)
|
||||
c.Assert(err, check.IsNil)
|
||||
require.NoError(t, err)
|
||||
node, err = db.getNode(types.UserID(user.ID), "testnode")
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(node.Tags, check.DeepEquals, sTags)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, sTags, node.Tags)
|
||||
|
||||
// assign duplicate tags, expect no errors but no doubles in DB
|
||||
eTags := []string{"tag:bar", "tag:test", "tag:unknown", "tag:test"}
|
||||
err = db.SetTags(node.ID, eTags)
|
||||
c.Assert(err, check.IsNil)
|
||||
require.NoError(t, err)
|
||||
node, err = db.getNode(types.UserID(user.ID), "testnode")
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(
|
||||
node.Tags,
|
||||
check.DeepEquals,
|
||||
[]string{"tag:bar", "tag:test", "tag:unknown"},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []string{"tag:bar", "tag:test", "tag:unknown"}, node.Tags)
|
||||
}
|
||||
|
||||
func TestHeadscale_generateGivenName(t *testing.T) {
|
||||
|
||||
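The hunks above apply one mechanical recipe: drop the gocheck suite plumbing and translate each `c.Assert` into its testify equivalent. A runnable sketch of that mapping, with illustrative values that are not taken from the diff:

```go
package db_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestAssertionMapping illustrates the gocheck-to-testify translation
// used throughout the hunks above:
//
//	c.Assert(err, check.IsNil)            -> require.NoError(t, err)
//	c.Assert(err, check.NotNil)           -> require.Error(t, err)
//	c.Assert(got, check.Equals, want)     -> assert.Equal(t, want, got)
//	c.Assert(got, check.DeepEquals, want) -> assert.Equal(t, want, got)
//	c.Assert(len(xs), check.Equals, n)    -> assert.Len(t, xs, n)
func TestAssertionMapping(t *testing.T) {
	var err error
	require.NoError(t, err) // was: c.Assert(err, check.IsNil)

	got, want := "testnode", "testnode"
	assert.Equal(t, want, got) // note: testify puts the expected value first

	peers := []string{"a", "b"}
	assert.Len(t, peers, 2)
}
```

Each converted test also constructs its own database via `newSQLiteTestDB()` instead of relying on suite-level `SetUpTest` state, which is what allows the old package-global `db` variable to go away.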
@@ -2,8 +2,10 @@ package db

import (
"errors"
"os"

"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
@@ -24,14 +26,22 @@ func (hsdb *HSDatabase) SetPolicy(policy string) (*types.Policy, error) {

// GetPolicy returns the latest policy in the database.
func (hsdb *HSDatabase) GetPolicy() (*types.Policy, error) {
return GetPolicy(hsdb.DB)
}

// GetPolicy returns the latest policy from the database.
// This standalone function can be used in contexts where HSDatabase is not available,
// such as during migrations.
func GetPolicy(tx *gorm.DB) (*types.Policy, error) {
var p types.Policy

// Query:
// SELECT * FROM policies ORDER BY id DESC LIMIT 1;
if err := hsdb.DB.
err := tx.
Order("id DESC").
Limit(1).
First(&p).Error; err != nil {
First(&p).Error
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, types.ErrPolicyNotFound
}
@@ -41,3 +51,41 @@ func (hsdb *HSDatabase) GetPolicy() (*types.Policy, error) {

return &p, nil
}

// PolicyBytes loads policy configuration from file or database based on the configured mode.
// Returns nil if no policy is configured, which is valid.
// This standalone function can be used in contexts where HSDatabase is not available,
// such as during migrations.
func PolicyBytes(tx *gorm.DB, cfg *types.Config) ([]byte, error) {
switch cfg.Policy.Mode {
case types.PolicyModeFile:
path := cfg.Policy.Path

// It is fine to start headscale without a policy file.
if len(path) == 0 {
return nil, nil
}

absPath := util.AbsolutePathFromConfigPath(path)

return os.ReadFile(absPath)

case types.PolicyModeDB:
p, err := GetPolicy(tx)
if err != nil {
if errors.Is(err, types.ErrPolicyNotFound) {
return nil, nil
}

return nil, err
}

if p.Data == "" {
return nil, nil
}

return []byte(p.Data), nil
}

return nil, nil
}
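The point of the standalone `GetPolicy`/`PolicyBytes` pair is that a migration, which only has a `*gorm.DB`, can still resolve the active policy. A minimal sketch of such a caller, assuming the migration step is handed a `*types.Config` (the diff shows the helper but not that wiring):

```go
package main

import (
	"gorm.io/gorm"

	"github.com/juanfont/headscale/hscontrol/db"
	"github.com/juanfont/headscale/hscontrol/types"
)

// migrateWithPolicy sketches how a migration step can resolve the active
// policy without an *HSDatabase. How cfg reaches the migration is an
// assumption; the diff only introduces the PolicyBytes helper itself.
func migrateWithPolicy(tx *gorm.DB, cfg *types.Config) error {
	raw, err := db.PolicyBytes(tx, cfg)
	if err != nil {
		return err
	}
	if raw == nil {
		// No policy configured is a valid state; nothing to consult.
		return nil
	}
	// ...parse raw and consult tagOwners/groups here...
	_ = raw

	return nil
}
```

This is exactly the shape the RequestTags migration below needs: it must check `tagOwners` before moving a node's requested tags into its tags column.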
@@ -57,10 +57,6 @@ func CreatePreAuthKey(
return nil, ErrPreAuthKeyNotTaggedOrOwned
}

// If uid != nil && len(aclTags) > 0:
// Both are allowed: UserID tracks "created by", tags define node ownership
// This is valid per the new model

var (
user *types.User
userID *uint
@@ -158,22 +154,17 @@ func CreatePreAuthKey(
}, nil
}

func (hsdb *HSDatabase) ListPreAuthKeys(uid types.UserID) ([]types.PreAuthKey, error) {
func (hsdb *HSDatabase) ListPreAuthKeys() ([]types.PreAuthKey, error) {
return Read(hsdb.DB, func(rx *gorm.DB) ([]types.PreAuthKey, error) {
return ListPreAuthKeysByUser(rx, uid)
return ListPreAuthKeys(rx)
})
}

// ListPreAuthKeysByUser returns the list of PreAuthKeys for a user.
func ListPreAuthKeysByUser(tx *gorm.DB, uid types.UserID) ([]types.PreAuthKey, error) {
user, err := GetUserByID(tx, uid)
if err != nil {
return nil, err
}
// ListPreAuthKeys returns all PreAuthKeys in the database.
func ListPreAuthKeys(tx *gorm.DB) ([]types.PreAuthKey, error) {
var keys []types.PreAuthKey

keys := []types.PreAuthKey{}

err = tx.Preload("User").Where(&types.PreAuthKey{UserID: &user.ID}).Find(&keys).Error
err := tx.Preload("User").Find(&keys).Error
if err != nil {
return nil, err
}
@@ -298,34 +289,35 @@ func GetPreAuthKey(tx *gorm.DB, key string) (*types.PreAuthKey, error) {
// DestroyPreAuthKey destroys a preauthkey. Returns error if the PreAuthKey
// does not exist. This also clears the auth_key_id on any nodes that reference
// this key.
func DestroyPreAuthKey(tx *gorm.DB, pak types.PreAuthKey) error {
func DestroyPreAuthKey(tx *gorm.DB, id uint64) error {
return tx.Transaction(func(db *gorm.DB) error {
// First, clear the foreign key reference on any nodes using this key
err := db.Model(&types.Node{}).
Where("auth_key_id = ?", pak.ID).
Where("auth_key_id = ?", id).
Update("auth_key_id", nil).Error
if err != nil {
return fmt.Errorf("failed to clear auth_key_id on nodes: %w", err)
}

// Then delete the pre-auth key
if result := db.Unscoped().Delete(pak); result.Error != nil {
return result.Error
err = tx.Unscoped().Delete(&types.PreAuthKey{}, id).Error
if err != nil {
return err
}

return nil
})
}

func (hsdb *HSDatabase) ExpirePreAuthKey(k *types.PreAuthKey) error {
func (hsdb *HSDatabase) ExpirePreAuthKey(id uint64) error {
return hsdb.Write(func(tx *gorm.DB) error {
return ExpirePreAuthKey(tx, k)
return ExpirePreAuthKey(tx, id)
})
}

func (hsdb *HSDatabase) DeletePreAuthKey(k *types.PreAuthKey) error {
func (hsdb *HSDatabase) DeletePreAuthKey(id uint64) error {
return hsdb.Write(func(tx *gorm.DB) error {
return DestroyPreAuthKey(tx, *k)
return DestroyPreAuthKey(tx, id)
})
}

@@ -341,7 +333,7 @@ func UsePreAuthKey(tx *gorm.DB, k *types.PreAuthKey) error {
}

// MarkExpirePreAuthKey marks a PreAuthKey as expired.
func ExpirePreAuthKey(tx *gorm.DB, k *types.PreAuthKey) error {
func ExpirePreAuthKey(tx *gorm.DB, id uint64) error {
now := time.Now()
return tx.Model(&types.PreAuthKey{}).Where("id = ?", k.ID).Update("expiration", now).Error
return tx.Model(&types.PreAuthKey{}).Where("id = ?", id).Update("expiration", now).Error
}
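With these signatures the write paths no longer need a loaded `*types.PreAuthKey`, only its numeric ID. A short sketch of the resulting call pattern (the surrounding function is illustrative, not from the diff):

```go
// retireKey sketches the new ID-only write path for pre-auth keys.
// hsdb is an *HSDatabase; pak.ID is the key's uint64 primary key.
func retireKey(hsdb *HSDatabase, pak types.PreAuthKey) error {
	// Mark the key expired (sets its expiration to now).
	if err := hsdb.ExpirePreAuthKey(pak.ID); err != nil {
		return err
	}

	// Hard-delete it. DestroyPreAuthKey first NULLs auth_key_id on any
	// nodes that still reference the key, then removes the row.
	return hsdb.DeletePreAuthKey(pak.ID)
}
```

Passing the ID instead of a struct also sidesteps the stale-copy problem: the delete targets the row by primary key rather than whatever snapshot the caller happened to hold.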
@@ -41,7 +41,7 @@ func TestCreatePreAuthKey(t *testing.T) {
assert.NotEmpty(t, key.Key)

// List keys for the user
keys, err := db.ListPreAuthKeys(types.UserID(user.ID))
keys, err := db.ListPreAuthKeys()
require.NoError(t, err)
assert.Len(t, keys, 1)

@@ -49,15 +49,6 @@ func TestCreatePreAuthKey(t *testing.T) {
assert.Equal(t, user.ID, keys[0].User.ID)
},
},
{
name: "error_list_invalid_user_id",
test: func(t *testing.T, db *HSDatabase) {
t.Helper()

_, err := db.ListPreAuthKeys(1000000)
assert.Error(t, err)
},
},
}

for _, tt := range tests {
@@ -101,7 +92,7 @@ func TestPreAuthKeyACLTags(t *testing.T) {
_, err = db.CreatePreAuthKey(user.TypedID(), false, false, nil, tagsWithDuplicate)
require.NoError(t, err)

listedPaks, err := db.ListPreAuthKeys(types.UserID(user.ID))
listedPaks, err := db.ListPreAuthKeys()
require.NoError(t, err)
require.Len(t, listedPaks, 1)
@@ -16,6 +16,7 @@ var (
ErrInvalidAutoVacuum = errors.New("invalid auto_vacuum")
ErrWALAutocheckpoint = errors.New("wal_autocheckpoint must be >= -1")
ErrInvalidSynchronous = errors.New("invalid synchronous")
ErrInvalidTxLock = errors.New("invalid txlock")
)

const (
@@ -225,6 +226,62 @@ func (s Synchronous) String() string {
return string(s)
}

// TxLock represents SQLite transaction lock mode.
// Transaction lock mode determines when write locks are acquired during transactions.
//
// Lock Acquisition Behavior:
//
// DEFERRED - SQLite default, acquire lock lazily:
// - Transaction starts without any lock
// - First read acquires SHARED lock
// - First write attempts to upgrade to RESERVED lock
// - If another transaction holds RESERVED: SQLITE_BUSY (potential deadlock)
// - Can cause deadlocks when multiple connections attempt concurrent writes
//
// IMMEDIATE - Recommended for write-heavy workloads:
// - Transaction immediately acquires RESERVED lock at BEGIN
// - If lock unavailable, waits up to busy_timeout before failing
// - Other writers queue orderly instead of deadlocking
// - Prevents the upgrade-lock deadlock scenario
// - Slight overhead for read-only transactions that don't need locks
//
// EXCLUSIVE - Maximum isolation:
// - Transaction immediately acquires EXCLUSIVE lock at BEGIN
// - No other connections can read or write
// - Highest isolation but lowest concurrency
// - Rarely needed in practice
type TxLock string

const (
// TxLockDeferred acquires locks lazily (SQLite default).
// Risk of SQLITE_BUSY deadlocks with concurrent writers. Use for read-heavy workloads.
TxLockDeferred TxLock = "deferred"

// TxLockImmediate acquires write lock immediately (RECOMMENDED for production).
// Prevents deadlocks by acquiring RESERVED lock at transaction start.
// Writers queue orderly, respecting busy_timeout.
TxLockImmediate TxLock = "immediate"

// TxLockExclusive acquires exclusive lock immediately.
// Maximum isolation, no concurrent reads or writes. Rarely needed.
TxLockExclusive TxLock = "exclusive"
)

// IsValid returns true if the TxLock is valid.
func (t TxLock) IsValid() bool {
switch t {
case TxLockDeferred, TxLockImmediate, TxLockExclusive, "":
return true
default:
return false
}
}

// String returns the string representation.
func (t TxLock) String() string {
return string(t)
}

// Config holds SQLite database configuration with type-safe enums.
// This configuration balances performance, durability, and operational requirements
// for Headscale's SQLite database usage patterns.
@@ -236,6 +293,7 @@ type Config struct {
WALAutocheckpoint int // pages (-1 = default/not set, 0 = disabled, >0 = enabled)
Synchronous Synchronous // synchronous mode (affects durability vs performance)
ForeignKeys bool // enable foreign key constraints (data integrity)
TxLock TxLock // transaction lock mode (affects write concurrency)
}

// Default returns the production configuration optimized for Headscale's usage patterns.
@@ -244,6 +302,7 @@ type Config struct {
// - Data durability with good performance (NORMAL synchronous)
// - Automatic space management (INCREMENTAL auto-vacuum)
// - Data integrity (foreign key constraints enabled)
// - Safe concurrent writes (IMMEDIATE transaction lock)
// - Reasonable timeout for busy database scenarios (10s)
func Default(path string) *Config {
return &Config{
@@ -254,6 +313,7 @@ func Default(path string) *Config {
WALAutocheckpoint: 1000,
Synchronous: SynchronousNormal,
ForeignKeys: true,
TxLock: TxLockImmediate,
}
}

@@ -292,6 +352,10 @@ func (c *Config) Validate() error {
return fmt.Errorf("%w: %s", ErrInvalidSynchronous, c.Synchronous)
}

if c.TxLock != "" && !c.TxLock.IsValid() {
return fmt.Errorf("%w: %s", ErrInvalidTxLock, c.TxLock)
}

return nil
}

@@ -332,12 +396,20 @@ func (c *Config) ToURL() (string, error) {
baseURL = "file:" + c.Path
}

// Add parameters without encoding = signs
if len(pragmas) > 0 {
var queryParts []string
for _, pragma := range pragmas {
queryParts = append(queryParts, "_pragma="+pragma)
}
// Build query parameters
queryParts := make([]string, 0, 1+len(pragmas))

// Add _txlock first (it's a connection parameter, not a pragma)
if c.TxLock != "" {
queryParts = append(queryParts, "_txlock="+string(c.TxLock))
}

// Add pragma parameters
for _, pragma := range pragmas {
queryParts = append(queryParts, "_pragma="+pragma)
}

if len(queryParts) > 0 {
baseURL += "?" + strings.Join(queryParts, "&")
}
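The practical effect of `TxLockImmediate` in `Default` is that every connection string now begins transactions with `BEGIN IMMEDIATE`. A hedged sketch of consuming the generated DSN, assuming a driver that honors `_txlock` and `_pragma` query parameters (modernc.org/sqlite does; headscale's actual driver wiring is not shown in this diff):

```go
package main

import (
	"database/sql"
	"log"

	_ "modernc.org/sqlite" // registers the "sqlite" driver; an assumption, see above
)

func main() {
	// Mirrors what Default("/var/lib/headscale/db.sqlite").ToURL() emits,
	// per the updated TestConfigToURL expectations below.
	dsn := "file:/var/lib/headscale/db.sqlite?_txlock=immediate" +
		"&_pragma=busy_timeout=10000&_pragma=journal_mode=WAL" +
		"&_pragma=auto_vacuum=INCREMENTAL&_pragma=wal_autocheckpoint=1000" +
		"&_pragma=synchronous=NORMAL&_pragma=foreign_keys=ON"

	sqlDB, err := sql.Open("sqlite", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer sqlDB.Close()

	// Every transaction now takes the write lock up front, so concurrent
	// writers queue (bounded by busy_timeout) instead of hitting
	// SQLITE_BUSY mid-transaction on lock upgrade.
	tx, err := sqlDB.Begin()
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback()
}
```

Note that `_txlock` is a connection parameter, not a pragma, which is why `ToURL` emits it separately and first.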
@@ -71,6 +71,52 @@ func TestSynchronous(t *testing.T) {
}
}

func TestTxLock(t *testing.T) {
tests := []struct {
mode TxLock
valid bool
}{
{TxLockDeferred, true},
{TxLockImmediate, true},
{TxLockExclusive, true},
{TxLock(""), true}, // empty is valid (uses driver default)
{TxLock("IMMEDIATE"), false}, // uppercase is invalid
{TxLock("INVALID"), false},
}

for _, tt := range tests {
name := string(tt.mode)
if name == "" {
name = "empty"
}

t.Run(name, func(t *testing.T) {
if got := tt.mode.IsValid(); got != tt.valid {
t.Errorf("TxLock(%q).IsValid() = %v, want %v", tt.mode, got, tt.valid)
}
})
}
}

func TestTxLockString(t *testing.T) {
tests := []struct {
mode TxLock
want string
}{
{TxLockDeferred, "deferred"},
{TxLockImmediate, "immediate"},
{TxLockExclusive, "exclusive"},
}

for _, tt := range tests {
t.Run(tt.want, func(t *testing.T) {
if got := tt.mode.String(); got != tt.want {
t.Errorf("TxLock.String() = %q, want %q", got, tt.want)
}
})
}
}

func TestConfigValidate(t *testing.T) {
tests := []struct {
name string
@@ -104,6 +150,21 @@ func TestConfigValidate(t *testing.T) {
},
wantErr: true,
},
{
name: "invalid txlock",
config: &Config{
Path: "/path/to/db.sqlite",
TxLock: TxLock("INVALID"),
},
wantErr: true,
},
{
name: "valid txlock immediate",
config: &Config{
Path: "/path/to/db.sqlite",
TxLock: TxLockImmediate,
},
},
}

for _, tt := range tests {
@@ -123,9 +184,9 @@ func TestConfigToURL(t *testing.T) {
want string
}{
{
name: "default config",
name: "default config includes txlock immediate",
config: Default("/path/to/db.sqlite"),
want: "file:/path/to/db.sqlite?_pragma=busy_timeout=10000&_pragma=journal_mode=WAL&_pragma=auto_vacuum=INCREMENTAL&_pragma=wal_autocheckpoint=1000&_pragma=synchronous=NORMAL&_pragma=foreign_keys=ON",
want: "file:/path/to/db.sqlite?_txlock=immediate&_pragma=busy_timeout=10000&_pragma=journal_mode=WAL&_pragma=auto_vacuum=INCREMENTAL&_pragma=wal_autocheckpoint=1000&_pragma=synchronous=NORMAL&_pragma=foreign_keys=ON",
},
{
name: "memory config",
@@ -183,6 +244,47 @@ func TestConfigToURL(t *testing.T) {
},
want: "file:/full.db?_pragma=busy_timeout=15000&_pragma=journal_mode=WAL&_pragma=auto_vacuum=FULL&_pragma=wal_autocheckpoint=1000&_pragma=synchronous=EXTRA&_pragma=foreign_keys=ON",
},
{
name: "with txlock immediate",
config: &Config{
Path: "/test.db",
BusyTimeout: 5000,
TxLock: TxLockImmediate,
WALAutocheckpoint: -1,
ForeignKeys: true,
},
want: "file:/test.db?_txlock=immediate&_pragma=busy_timeout=5000&_pragma=foreign_keys=ON",
},
{
name: "with txlock deferred",
config: &Config{
Path: "/test.db",
TxLock: TxLockDeferred,
WALAutocheckpoint: -1,
ForeignKeys: true,
},
want: "file:/test.db?_txlock=deferred&_pragma=foreign_keys=ON",
},
{
name: "with txlock exclusive",
config: &Config{
Path: "/test.db",
TxLock: TxLockExclusive,
WALAutocheckpoint: -1,
},
want: "file:/test.db?_txlock=exclusive",
},
{
name: "empty txlock omitted from URL",
config: &Config{
Path: "/test.db",
TxLock: "",
BusyTimeout: 1000,
WALAutocheckpoint: -1,
ForeignKeys: true,
},
want: "file:/test.db?_pragma=busy_timeout=1000&_pragma=foreign_keys=ON",
},
}

for _, tt := range tests {
@@ -209,3 +311,10 @@ func TestConfigToURLInvalid(t *testing.T) {
t.Error("Config.ToURL() with invalid config should return error")
}
}

func TestDefaultConfigHasTxLockImmediate(t *testing.T) {
config := Default("/test.db")
if config.TxLock != TxLockImmediate {
t.Errorf("Default().TxLock = %q, want %q", config.TxLock, TxLockImmediate)
}
}
@@ -10,48 +10,11 @@ import (

"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog"
"gopkg.in/check.v1"
"zombiezen.com/go/postgrestest"
)

func Test(t *testing.T) {
check.TestingT(t)
}

var _ = check.Suite(&Suite{})

type Suite struct{}

var (
tmpDir string
db *HSDatabase
)

func (s *Suite) SetUpTest(c *check.C) {
s.ResetDB(c)
}

func (s *Suite) TearDownTest(c *check.C) {
// os.RemoveAll(tmpDir)
}

func (s *Suite) ResetDB(c *check.C) {
// if len(tmpDir) != 0 {
// os.RemoveAll(tmpDir)
// }

var err error
db, err = newSQLiteTestDB()
if err != nil {
c.Fatal(err)
}
}

// TODO(kradalby): make this a t.Helper when we dont depend
// on check test framework.
func newSQLiteTestDB() (*HSDatabase, error) {
var err error
tmpDir, err = os.MkdirTemp("", "headscale-db-test-*")
tmpDir, err := os.MkdirTemp("", "headscale-db-test-*")
if err != nil {
return nil, err
}
@@ -59,14 +22,18 @@ func newSQLiteTestDB() (*HSDatabase, error) {
log.Printf("database path: %s", tmpDir+"/headscale_test.db")
zerolog.SetGlobalLevel(zerolog.Disabled)

db, err = NewHeadscaleDatabase(
types.DatabaseConfig{
Type: types.DatabaseSqlite,
Sqlite: types.SqliteConfig{
Path: tmpDir + "/headscale_test.db",
db, err := NewHeadscaleDatabase(
&types.Config{
Database: types.DatabaseConfig{
Type: types.DatabaseSqlite,
Sqlite: types.SqliteConfig{
Path: tmpDir + "/headscale_test.db",
},
},
Policy: types.PolicyConfig{
Mode: types.PolicyModeDB,
},
},
"",
emptyCache(),
)
if err != nil {
@@ -109,18 +76,22 @@ func newHeadscaleDBFromPostgresURL(t *testing.T, pu *url.URL) *HSDatabase {
port, _ := strconv.Atoi(pu.Port())

db, err := NewHeadscaleDatabase(
types.DatabaseConfig{
Type: types.DatabasePostgres,
Postgres: types.PostgresConfig{
Host: pu.Hostname(),
User: pu.User.Username(),
Name: strings.TrimLeft(pu.Path, "/"),
Pass: pass,
Port: port,
Ssl: "disable",
&types.Config{
Database: types.DatabaseConfig{
Type: types.DatabasePostgres,
Postgres: types.PostgresConfig{
Host: pu.Hostname(),
User: pu.User.Username(),
Name: strings.TrimLeft(pu.Path, "/"),
Pass: pass,
Port: port,
Ssl: "disable",
},
},
Policy: types.PolicyConfig{
Mode: types.PolicyModeDB,
},
},
"",
emptyCache(),
)
if err != nil {
hscontrol/db/testdata/sqlite/request_tags_migration_test.sql (new file, vendored, +119 lines)
@@ -0,0 +1,119 @@
-- Test SQL dump for RequestTags migration (202601121700-migrate-hostinfo-request-tags)
-- and forced_tags->tags rename migration (202511131445-node-forced-tags-to-tags)
--
-- This dump simulates a 0.27.x database where:
-- - Tags from --advertise-tags were stored only in host_info.RequestTags
-- - The tags column is still named forced_tags
--
-- Test scenarios:
-- 1. Node with RequestTags that user is authorized for (should be migrated)
-- 2. Node with RequestTags that user is NOT authorized for (should be rejected)
-- 3. Node with existing forced_tags that should be preserved
-- 4. Node with RequestTags that overlap with existing tags (no duplicates)
-- 5. Node without RequestTags (should be unchanged)
-- 6. Node with RequestTags via group membership (should be migrated)

PRAGMA foreign_keys=OFF;
BEGIN TRANSACTION;

-- Migrations table - includes all migrations BEFORE the two tag migrations
CREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));
INSERT INTO migrations VALUES('202312101416');
INSERT INTO migrations VALUES('202312101430');
INSERT INTO migrations VALUES('202402151347');
INSERT INTO migrations VALUES('2024041121742');
INSERT INTO migrations VALUES('202406021630');
INSERT INTO migrations VALUES('202409271400');
INSERT INTO migrations VALUES('202407191627');
INSERT INTO migrations VALUES('202408181235');
INSERT INTO migrations VALUES('202501221827');
INSERT INTO migrations VALUES('202501311657');
INSERT INTO migrations VALUES('202502070949');
INSERT INTO migrations VALUES('202502131714');
INSERT INTO migrations VALUES('202502171819');
INSERT INTO migrations VALUES('202505091439');
INSERT INTO migrations VALUES('202505141324');
INSERT INTO migrations VALUES('202507021200');
INSERT INTO migrations VALUES('202510311551');
INSERT INTO migrations VALUES('202511101554-drop-old-idx');
INSERT INTO migrations VALUES('202511011637-preauthkey-bcrypt');
INSERT INTO migrations VALUES('202511122344-remove-newline-index');
-- Note: 202511131445-node-forced-tags-to-tags is NOT included - it will run
-- Note: 202601121700-migrate-hostinfo-request-tags is NOT included - it will run

-- Users table
-- Note: User names must match the usernames in the policy (with @)
CREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);
INSERT INTO users VALUES(1,'2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL,'user1@example.com','User One','user1@example.com',NULL,NULL,NULL);
INSERT INTO users VALUES(2,'2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL,'user2@example.com','User Two','user2@example.com',NULL,NULL,NULL);
INSERT INTO users VALUES(3,'2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL,'admin1@example.com','Admin One','admin1@example.com',NULL,NULL,NULL);

-- Pre-auth keys table
CREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,`prefix` text,`hash` blob,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);

-- API keys table
CREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);

-- Nodes table - using OLD schema with forced_tags (not tags)
CREATE TABLE IF NOT EXISTS "nodes" (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`last_seen` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));

-- Node 1: user1 owns it, has RequestTags for tag:server (user1 is authorized for this tag)
-- Expected: tag:server should be added to tags
INSERT INTO nodes VALUES(1,'mkey:a0ab77456320823945ae0331823e3c0d516fae9585bd42698dfa1ac3d7679e01','nodekey:7c84167ab68f494942de14deb83587fd841843de2bac105b6c670048c1605501','discokey:53075b3c6cad3b62a2a29caea61beeb93f66b8c75cb89dac465236a5bbf57701','[]','{"RequestTags":["tag:server"]}','100.64.0.1','fd7a:115c:a1e0::1','node1','node1',1,'oidc','[]',NULL,'0001-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00','[]','2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL);

-- Node 2: user1 owns it, has RequestTags for tag:unauthorized (user1 is NOT authorized for this tag)
-- Expected: tag:unauthorized should be rejected, tags stays empty
INSERT INTO nodes VALUES(2,'mkey:a0ab77456320823945ae0331823e3c0d516fae9585bd42698dfa1ac3d7679e02','nodekey:7c84167ab68f494942de14deb83587fd841843de2bac105b6c670048c1605502','discokey:53075b3c6cad3b62a2a29caea61beeb93f66b8c75cb89dac465236a5bbf57702','[]','{"RequestTags":["tag:unauthorized"]}','100.64.0.2','fd7a:115c:a1e0::2','node2','node2',1,'oidc','[]',NULL,'0001-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00','[]','2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL);

-- Node 3: user2 owns it, has RequestTags for tag:client (user2 is authorized)
-- Also has existing forced_tags that should be preserved
-- Expected: tag:client added, tag:existing preserved
INSERT INTO nodes VALUES(3,'mkey:a0ab77456320823945ae0331823e3c0d516fae9585bd42698dfa1ac3d7679e03','nodekey:7c84167ab68f494942de14deb83587fd841843de2bac105b6c670048c1605503','discokey:53075b3c6cad3b62a2a29caea61beeb93f66b8c75cb89dac465236a5bbf57703','[]','{"RequestTags":["tag:client"]}','100.64.0.3','fd7a:115c:a1e0::3','node3','node3',2,'oidc','["tag:existing"]',NULL,'0001-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00','[]','2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL);

-- Node 4: user1 owns it, has RequestTags for tag:server which already exists in forced_tags
-- Expected: no duplicates, tags should be ["tag:server"]
INSERT INTO nodes VALUES(4,'mkey:a0ab77456320823945ae0331823e3c0d516fae9585bd42698dfa1ac3d7679e04','nodekey:7c84167ab68f494942de14deb83587fd841843de2bac105b6c670048c1605504','discokey:53075b3c6cad3b62a2a29caea61beeb93f66b8c75cb89dac465236a5bbf57704','[]','{"RequestTags":["tag:server"]}','100.64.0.4','fd7a:115c:a1e0::4','node4','node4',1,'oidc','["tag:server"]',NULL,'0001-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00','[]','2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL);

-- Node 5: user2 owns it, no RequestTags in host_info
-- Expected: tags unchanged (empty)
INSERT INTO nodes VALUES(5,'mkey:a0ab77456320823945ae0331823e3c0d516fae9585bd42698dfa1ac3d7679e05','nodekey:7c84167ab68f494942de14deb83587fd841843de2bac105b6c670048c1605505','discokey:53075b3c6cad3b62a2a29caea61beeb93f66b8c75cb89dac465236a5bbf57705','[]','{}','100.64.0.5','fd7a:115c:a1e0::5','node5','node5',2,'oidc','[]',NULL,'0001-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00','[]','2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL);

-- Node 6: admin1 owns it, has RequestTags for tag:admin (admin1 is in group:admins which owns tag:admin)
-- Expected: tag:admin should be added via group membership
INSERT INTO nodes VALUES(6,'mkey:a0ab77456320823945ae0331823e3c0d516fae9585bd42698dfa1ac3d7679e06','nodekey:7c84167ab68f494942de14deb83587fd841843de2bac105b6c670048c1605506','discokey:53075b3c6cad3b62a2a29caea61beeb93f66b8c75cb89dac465236a5bbf57706','[]','{"RequestTags":["tag:admin"]}','100.64.0.6','fd7a:115c:a1e0::6','node6','node6',3,'oidc','[]',NULL,'0001-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00','[]','2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL);

-- Node 7: user1 owns it, has multiple RequestTags (tag:server authorized, tag:forbidden not authorized)
-- Expected: tag:server added, tag:forbidden rejected
INSERT INTO nodes VALUES(7,'mkey:a0ab77456320823945ae0331823e3c0d516fae9585bd42698dfa1ac3d7679e07','nodekey:7c84167ab68f494942de14deb83587fd841843de2bac105b6c670048c1605507','discokey:53075b3c6cad3b62a2a29caea61beeb93f66b8c75cb89dac465236a5bbf57707','[]','{"RequestTags":["tag:server","tag:forbidden"]}','100.64.0.7','fd7a:115c:a1e0::7','node7','node7',1,'oidc','[]',NULL,'0001-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00','[]','2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL);

-- Policies table with tagOwners defining who can use which tags
-- Note: Usernames in policy must contain @ (e.g., user1@example.com or just user1@)
CREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);
INSERT INTO policies VALUES(1,'2024-01-01 00:00:00+00:00','2024-01-01 00:00:00+00:00',NULL,'{
"groups": {
"group:admins": ["admin1@example.com"]
},
"tagOwners": {
"tag:server": ["user1@example.com"],
"tag:client": ["user1@example.com", "user2@example.com"],
"tag:admin": ["group:admins"]
},
"acls": [
{"action": "accept", "src": ["*"], "dst": ["*:*"]}
]
}');

-- Indexes (using exact format expected by schema validation)
DELETE FROM sqlite_sequence;
INSERT INTO sqlite_sequence VALUES('users',3);
INSERT INTO sqlite_sequence VALUES('nodes',7);
INSERT INTO sqlite_sequence VALUES('policies',1);
CREATE INDEX idx_users_deleted_at ON users(deleted_at);
CREATE UNIQUE INDEX idx_api_keys_prefix ON api_keys(prefix);
CREATE INDEX idx_policies_deleted_at ON policies(deleted_at);
CREATE UNIQUE INDEX idx_provider_identifier ON users(provider_identifier) WHERE provider_identifier IS NOT NULL;
CREATE UNIQUE INDEX idx_name_provider_identifier ON users(name, provider_identifier);
CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users(name) WHERE provider_identifier IS NULL;
CREATE UNIQUE INDEX IF NOT EXISTS idx_pre_auth_keys_prefix ON pre_auth_keys(prefix) WHERE prefix IS NOT NULL AND prefix != '';

COMMIT;
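Given the scenarios encoded above, a migration test over this dump reduces to a per-node expectation table. A sketch under stated assumptions: `openDumpAndMigrate` is a hypothetical helper (load the .sql file into a fresh SQLite database, then run the migration stack, e.g. via `NewHeadscaleDatabase`), and the `types.NodeID`/`node.Tags` shapes are taken from the surrounding diffs:

```go
// TestRequestTagsMigrationDump sketches asserting this dump post-migration.
func TestRequestTagsMigrationDump(t *testing.T) {
	hsdb := openDumpAndMigrate(t, "testdata/sqlite/request_tags_migration_test.sql")

	want := map[uint64][]string{
		1: {"tag:server"},                 // authorized RequestTags migrated
		2: {},                             // unauthorized tag rejected
		3: {"tag:client", "tag:existing"}, // merged with preserved forced_tags
		4: {"tag:server"},                 // overlap deduplicated
		5: {},                             // no RequestTags, unchanged
		6: {"tag:admin"},                  // authorized via group:admins
		7: {"tag:server"},                 // tag:forbidden dropped
	}
	for id, tags := range want {
		node, err := hsdb.GetNodeByID(types.NodeID(id))
		require.NoError(t, err)
		assert.ElementsMatch(t, tags, node.Tags)
	}
}
```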
@@ -58,12 +58,12 @@ func DestroyUser(tx *gorm.DB, uid types.UserID) error {
return ErrUserStillHasNodes
}

keys, err := ListPreAuthKeysByUser(tx, uid)
keys, err := ListPreAuthKeys(tx)
if err != nil {
return err
}
for _, key := range keys {
err = DestroyPreAuthKey(tx, key)
err = DestroyPreAuthKey(tx, key.ID)
if err != nil {
return err
}
@@ -20,6 +20,7 @@ import (
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"tailscale.com/derp"
"tailscale.com/derp/derpserver"
"tailscale.com/envknob"
"tailscale.com/net/stun"
"tailscale.com/net/wsconn"
@@ -45,7 +46,7 @@ type DERPServer struct {
serverURL string
key key.NodePrivate
cfg *types.DERPConfig
tailscaleDERP *derp.Server
tailscaleDERP *derpserver.Server
}

func NewDERPServer(
@@ -54,7 +55,7 @@ func NewDERPServer(
cfg *types.DERPConfig,
) (*DERPServer, error) {
log.Trace().Caller().Msg("Creating new embedded DERP server")
server := derp.NewServer(derpKey, util.TSLogfWrapper()) // nolint // zerolinter complains
server := derpserver.New(derpKey, util.TSLogfWrapper()) // nolint // zerolinter complains

if cfg.ServerVerifyClients {
server.SetVerifyClientURL(DerpVerifyScheme + "://verify")
@@ -28,7 +28,6 @@ import (
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/state"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/types/change"
"github.com/juanfont/headscale/hscontrol/util"
)

@@ -99,13 +98,13 @@ func (api headscaleV1APIServer) DeleteUser(
return nil, err
}

err = api.h.state.DeleteUser(types.UserID(user.ID))
policyChanged, err := api.h.state.DeleteUser(types.UserID(user.ID))
if err != nil {
return nil, err
}

// User deletion may affect policy, trigger a full policy re-evaluation.
api.h.Change(change.UserRemoved())
// Use the change returned from DeleteUser which includes proper policy updates
api.h.Change(policyChanged)

return &v1.DeleteUserResponse{}, nil
}
@@ -161,13 +160,17 @@ func (api headscaleV1APIServer) CreatePreAuthKey(
}
}

user, err := api.h.state.GetUserByID(types.UserID(request.GetUser()))
if err != nil {
return nil, err
var userID *types.UserID
if request.GetUser() != 0 {
user, err := api.h.state.GetUserByID(types.UserID(request.GetUser()))
if err != nil {
return nil, err
}
userID = user.TypedID()
}

preAuthKey, err := api.h.state.CreatePreAuthKey(
user.TypedID(),
userID,
request.GetReusable(),
request.GetEphemeral(),
&expiration,
@@ -184,16 +187,7 @@ func (api headscaleV1APIServer) ExpirePreAuthKey(
ctx context.Context,
request *v1.ExpirePreAuthKeyRequest,
) (*v1.ExpirePreAuthKeyResponse, error) {
preAuthKey, err := api.h.state.GetPreAuthKey(request.Key)
if err != nil {
return nil, err
}

if uint64(preAuthKey.User.ID) != request.GetUser() {
return nil, fmt.Errorf("preauth key does not belong to user")
}

err = api.h.state.ExpirePreAuthKey(preAuthKey)
err := api.h.state.ExpirePreAuthKey(request.GetId())
if err != nil {
return nil, err
}
@@ -205,16 +199,7 @@ func (api headscaleV1APIServer) DeletePreAuthKey(
ctx context.Context,
request *v1.DeletePreAuthKeyRequest,
) (*v1.DeletePreAuthKeyResponse, error) {
preAuthKey, err := api.h.state.GetPreAuthKey(request.Key)
if err != nil {
return nil, err
}

if uint64(preAuthKey.User.ID) != request.GetUser() {
return nil, fmt.Errorf("preauth key does not belong to user")
}

err = api.h.state.DeletePreAuthKey(preAuthKey)
err := api.h.state.DeletePreAuthKey(request.GetId())
if err != nil {
return nil, err
}
@@ -226,12 +211,7 @@ func (api headscaleV1APIServer) ListPreAuthKeys(
ctx context.Context,
request *v1.ListPreAuthKeysRequest,
) (*v1.ListPreAuthKeysResponse, error) {
user, err := api.h.state.GetUserByID(types.UserID(request.GetUser()))
if err != nil {
return nil, err
}

preAuthKeys, err := api.h.state.ListPreAuthKeys(types.UserID(user.ID))
preAuthKeys, err := api.h.state.ListPreAuthKeys()
if err != nil {
return nil, err
}
@@ -339,11 +319,11 @@ func (api headscaleV1APIServer) SetTags(
// Validate tags not empty - tagged nodes must have at least one tag
if len(request.GetTags()) == 0 {
return &v1.SetTagsResponse{
Node: nil,
}, status.Error(
codes.InvalidArgument,
"cannot remove all tags from a node - tagged nodes must have at least one tag",
)
Node: nil,
}, status.Error(
codes.InvalidArgument,
"cannot remove all tags from a node - tagged nodes must have at least one tag",
)
}

// Validate tag format
@@ -551,8 +531,6 @@ func nodesToProto(state *state.State, nodes views.Slice[types.NodeView]) []*v1.N
resp.User = types.TaggedDevices.Proto()
}

resp.ValidTags = node.Tags().AsSlice()

resp.SubnetRoutes = util.PrefixesToString(append(state.GetNodePrimaryRoutes(node.ID()), node.ExitRoutes()...))
response[index] = resp
}
@@ -599,14 +577,35 @@ func (api headscaleV1APIServer) CreateApiKey(
return &v1.CreateApiKeyResponse{ApiKey: apiKey}, nil
}

// apiKeyIdentifier is implemented by requests that identify an API key.
type apiKeyIdentifier interface {
GetId() uint64
GetPrefix() string
}

// getAPIKey retrieves an API key by ID or prefix from the request.
// Returns InvalidArgument if neither or both are provided.
func (api headscaleV1APIServer) getAPIKey(req apiKeyIdentifier) (*types.APIKey, error) {
hasID := req.GetId() != 0
hasPrefix := req.GetPrefix() != ""

switch {
case hasID && hasPrefix:
return nil, status.Error(codes.InvalidArgument, "provide either id or prefix, not both")
case hasID:
return api.h.state.GetAPIKeyByID(req.GetId())
case hasPrefix:
return api.h.state.GetAPIKey(req.GetPrefix())
default:
return nil, status.Error(codes.InvalidArgument, "must provide id or prefix")
}
}

func (api headscaleV1APIServer) ExpireApiKey(
ctx context.Context,
request *v1.ExpireApiKeyRequest,
) (*v1.ExpireApiKeyResponse, error) {
var apiKey *types.APIKey
var err error

apiKey, err = api.h.state.GetAPIKey(request.Prefix)
apiKey, err := api.getAPIKey(request)
if err != nil {
return nil, err
}
@@ -644,12 +643,7 @@ func (api headscaleV1APIServer) DeleteApiKey(
ctx context.Context,
request *v1.DeleteApiKeyRequest,
) (*v1.DeleteApiKeyResponse, error) {
var (
apiKey *types.APIKey
err error
)

apiKey, err = api.h.state.GetAPIKey(request.Prefix)
apiKey, err := api.getAPIKey(request)
if err != nil {
return nil, err
}
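With `getAPIKey` in place, `ExpireApiKeyRequest` and `DeleteApiKeyRequest` accept either an `id` or a `prefix`, but not both. A client-side sketch; the connection setup is omitted and the `NewHeadscaleServiceClient` constructor name follows the generated `v1` package (an assumption, since the generated code is not part of this diff):

```go
// Sketch: expiring an API key by numeric ID over gRPC.
// conn is an established *grpc.ClientConn.
client := v1.NewHeadscaleServiceClient(conn)

// Exactly one of Id or Prefix may be set; getAPIKey rejects both or
// neither with codes.InvalidArgument, as the tests below verify.
if _, err := client.ExpireApiKey(ctx, &v1.ExpireApiKeyRequest{Id: 42}); err != nil {
	log.Fatal(err)
}
```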
@@ -260,3 +260,209 @@ func TestSetTags_CannotRemoveAllTags(t *testing.T) {
assert.Contains(t, st.Message(), "cannot remove all tags")
assert.Nil(t, resp.GetNode())
}

// TestDeleteUser_ReturnsProperChangeSignal tests issue #2967 fix:
// When a user is deleted, the state should return a non-empty change signal
// to ensure policy manager is updated and clients are notified immediately.
func TestDeleteUser_ReturnsProperChangeSignal(t *testing.T) {
t.Parallel()

app := createTestApp(t)

// Create a user
user := app.state.CreateUserForTest("test-user-to-delete")
require.NotNil(t, user)

// Delete the user and verify a non-empty change is returned
// Issue #2967: Without the fix, DeleteUser returned an empty change,
// causing stale policy state until another user operation triggered an update.
changeSignal, err := app.state.DeleteUser(*user.TypedID())
require.NoError(t, err, "DeleteUser should succeed")
assert.False(t, changeSignal.IsEmpty(), "DeleteUser should return a non-empty change signal (issue #2967)")
}

// TestExpireApiKey_ByID tests that API keys can be expired by ID.
func TestExpireApiKey_ByID(t *testing.T) {
t.Parallel()

app := createTestApp(t)
apiServer := newHeadscaleV1APIServer(app)

// Create an API key
createResp, err := apiServer.CreateApiKey(context.Background(), &v1.CreateApiKeyRequest{})
require.NoError(t, err)
require.NotEmpty(t, createResp.GetApiKey())

// List keys to get the ID
listResp, err := apiServer.ListApiKeys(context.Background(), &v1.ListApiKeysRequest{})
require.NoError(t, err)
require.Len(t, listResp.GetApiKeys(), 1)

keyID := listResp.GetApiKeys()[0].GetId()

// Expire by ID
_, err = apiServer.ExpireApiKey(context.Background(), &v1.ExpireApiKeyRequest{
Id: keyID,
})
require.NoError(t, err)

// Verify key is expired (expiration is set to now or in the past)
listResp, err = apiServer.ListApiKeys(context.Background(), &v1.ListApiKeysRequest{})
require.NoError(t, err)
require.Len(t, listResp.GetApiKeys(), 1)
assert.NotNil(t, listResp.GetApiKeys()[0].GetExpiration(), "expiration should be set")
}

// TestExpireApiKey_ByPrefix tests that API keys can still be expired by prefix.
func TestExpireApiKey_ByPrefix(t *testing.T) {
t.Parallel()

app := createTestApp(t)
apiServer := newHeadscaleV1APIServer(app)

// Create an API key
createResp, err := apiServer.CreateApiKey(context.Background(), &v1.CreateApiKeyRequest{})
require.NoError(t, err)
require.NotEmpty(t, createResp.GetApiKey())

// List keys to get the prefix
listResp, err := apiServer.ListApiKeys(context.Background(), &v1.ListApiKeysRequest{})
require.NoError(t, err)
require.Len(t, listResp.GetApiKeys(), 1)

keyPrefix := listResp.GetApiKeys()[0].GetPrefix()

// Expire by prefix
_, err = apiServer.ExpireApiKey(context.Background(), &v1.ExpireApiKeyRequest{
Prefix: keyPrefix,
})
require.NoError(t, err)
}

// TestDeleteApiKey_ByID tests that API keys can be deleted by ID.
func TestDeleteApiKey_ByID(t *testing.T) {
t.Parallel()

app := createTestApp(t)
apiServer := newHeadscaleV1APIServer(app)

// Create an API key
createResp, err := apiServer.CreateApiKey(context.Background(), &v1.CreateApiKeyRequest{})
require.NoError(t, err)
require.NotEmpty(t, createResp.GetApiKey())

// List keys to get the ID
listResp, err := apiServer.ListApiKeys(context.Background(), &v1.ListApiKeysRequest{})
require.NoError(t, err)
require.Len(t, listResp.GetApiKeys(), 1)

keyID := listResp.GetApiKeys()[0].GetId()

// Delete by ID
_, err = apiServer.DeleteApiKey(context.Background(), &v1.DeleteApiKeyRequest{
Id: keyID,
})
require.NoError(t, err)

// Verify key is deleted
listResp, err = apiServer.ListApiKeys(context.Background(), &v1.ListApiKeysRequest{})
require.NoError(t, err)
assert.Empty(t, listResp.GetApiKeys())
}

// TestDeleteApiKey_ByPrefix tests that API keys can still be deleted by prefix.
func TestDeleteApiKey_ByPrefix(t *testing.T) {
t.Parallel()

app := createTestApp(t)
apiServer := newHeadscaleV1APIServer(app)

// Create an API key
createResp, err := apiServer.CreateApiKey(context.Background(), &v1.CreateApiKeyRequest{})
require.NoError(t, err)
require.NotEmpty(t, createResp.GetApiKey())

// List keys to get the prefix
listResp, err := apiServer.ListApiKeys(context.Background(), &v1.ListApiKeysRequest{})
require.NoError(t, err)
require.Len(t, listResp.GetApiKeys(), 1)

keyPrefix := listResp.GetApiKeys()[0].GetPrefix()

// Delete by prefix
_, err = apiServer.DeleteApiKey(context.Background(), &v1.DeleteApiKeyRequest{
Prefix: keyPrefix,
})
require.NoError(t, err)

// Verify key is deleted
listResp, err = apiServer.ListApiKeys(context.Background(), &v1.ListApiKeysRequest{})
require.NoError(t, err)
assert.Empty(t, listResp.GetApiKeys())
}

// TestExpireApiKey_NoIdentifier tests that an error is returned when neither ID nor prefix is provided.
func TestExpireApiKey_NoIdentifier(t *testing.T) {
t.Parallel()

app := createTestApp(t)
apiServer := newHeadscaleV1APIServer(app)

_, err := apiServer.ExpireApiKey(context.Background(), &v1.ExpireApiKeyRequest{})
require.Error(t, err)
st, ok := status.FromError(err)
require.True(t, ok, "error should be a gRPC status error")
assert.Equal(t, codes.InvalidArgument, st.Code())
assert.Contains(t, st.Message(), "must provide id or prefix")
}

// TestDeleteApiKey_NoIdentifier tests that an error is returned when neither ID nor prefix is provided.
func TestDeleteApiKey_NoIdentifier(t *testing.T) {
t.Parallel()

app := createTestApp(t)
apiServer := newHeadscaleV1APIServer(app)

_, err := apiServer.DeleteApiKey(context.Background(), &v1.DeleteApiKeyRequest{})
require.Error(t, err)
st, ok := status.FromError(err)
require.True(t, ok, "error should be a gRPC status error")
assert.Equal(t, codes.InvalidArgument, st.Code())
assert.Contains(t, st.Message(), "must provide id or prefix")
}

// TestExpireApiKey_BothIdentifiers tests that an error is returned when both ID and prefix are provided.
func TestExpireApiKey_BothIdentifiers(t *testing.T) {
t.Parallel()

app := createTestApp(t)
apiServer := newHeadscaleV1APIServer(app)

_, err := apiServer.ExpireApiKey(context.Background(), &v1.ExpireApiKeyRequest{
Id: 1,
Prefix: "test",
})
require.Error(t, err)
st, ok := status.FromError(err)
require.True(t, ok, "error should be a gRPC status error")
assert.Equal(t, codes.InvalidArgument, st.Code())
assert.Contains(t, st.Message(), "provide either id or prefix, not both")
}

// TestDeleteApiKey_BothIdentifiers tests that an error is returned when both ID and prefix are provided.
func TestDeleteApiKey_BothIdentifiers(t *testing.T) {
t.Parallel()

app := createTestApp(t)
apiServer := newHeadscaleV1APIServer(app)

_, err := apiServer.DeleteApiKey(context.Background(), &v1.DeleteApiKeyRequest{
Id: 1,
Prefix: "test",
})
require.Error(t, err)
st, ok := status.FromError(err)
require.True(t, ok, "error should be a gRPC status error")
assert.Equal(t, codes.InvalidArgument, st.Code())
assert.Contains(t, st.Message(), "provide either id or prefix, not both")
}
@@ -92,6 +92,11 @@ func generateMapResponse(nc nodeConnection, mapper *mapper, r change.Change) (*t
return nil, nil //nolint:nilnil // No response needed for other nodes when self-only
}

// Check if this is a self-update (the changed node is the receiving node).
// When true, ensure the response includes the node's self info so it sees
// its own attribute changes (e.g., tags changed via admin API).
isSelfUpdate := r.OriginNode != 0 && r.OriginNode == nodeID

var (
mapResp *tailcfg.MapResponse
err error
@@ -110,7 +115,12 @@ func generateMapResponse(nc nodeConnection, mapper *mapper, r change.Change) (*t
}

removedPeers := nc.computePeerDiff(currentPeerIDs)
mapResp, err = mapper.policyChangeResponse(nodeID, version, removedPeers, currentPeers)
// Include self node when this is a self-update (e.g., node's own tags changed)
// so the node sees its updated self info along with new packet filters.
mapResp, err = mapper.policyChangeResponse(nodeID, version, removedPeers, currentPeers, isSelfUpdate)
} else if isSelfUpdate {
// Non-policy self-update: just send the self node info
mapResp, err = mapper.selfMapResponse(nodeID, version)
} else {
mapResp, err = mapper.buildFromChange(nodeID, version, &r)
}

@@ -213,8 +213,7 @@ func setupBatcherWithTestData(

// Create database and populate it with test data
database, err := db.NewHeadscaleDatabase(
cfg.Database,
"",
cfg,
emptyCache(),
)
if err != nil {
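After this hunk, `generateMapResponse` has three distinct outcomes depending on whether policy changed and whether the change originated from the receiving node. A condensed, purely illustrative restatement of the dispatch (not part of the diff):

```go
// routeKind summarizes the branch above: which response builder a
// change is routed to for a given receiving node.
func routeKind(policyChanged, isSelfUpdate bool) string {
	switch {
	case policyChanged:
		// Self node is appended to the response when isSelfUpdate
		// is also true.
		return "policyChangeResponse"
	case isSelfUpdate:
		return "selfMapResponse"
	default:
		return "buildFromChange"
	}
}
```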
@@ -69,18 +69,30 @@ func newMapper(
|
||||
}
|
||||
}
|
||||
|
||||
// generateUserProfiles creates user profiles for MapResponse.
|
||||
func generateUserProfiles(
|
||||
node types.NodeView,
|
||||
peers views.Slice[types.NodeView],
|
||||
) []tailcfg.UserProfile {
|
||||
userMap := make(map[uint]*types.UserView)
|
||||
ids := make([]uint, 0, len(userMap))
|
||||
user := node.User()
|
||||
user := node.Owner()
|
||||
if !user.Valid() {
|
||||
log.Error().
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Str("node.name", node.Hostname()).
|
||||
Msg("node has no valid owner, skipping user profile generation")
|
||||
|
||||
return nil
|
||||
}
|
||||
userID := user.Model().ID
|
||||
userMap[userID] = &user
|
||||
ids = append(ids, userID)
|
||||
for _, peer := range peers.All() {
|
||||
peerUser := peer.User()
|
||||
peerUser := peer.Owner()
|
||||
if !peerUser.Valid() {
|
||||
continue
|
||||
}
|
||||
peerUserID := peerUser.Model().ID
|
||||
userMap[peerUserID] = &peerUser
|
||||
ids = append(ids, peerUserID)
|
||||
@@ -186,13 +198,18 @@ func (m *mapper) selfMapResponse(
|
||||
// - PeersChanged for remaining peers (their AllowedIPs may have changed due to policy)
|
||||
// - Updated PacketFilters
|
||||
// - Updated SSHPolicy (SSH rules may reference users/groups that changed)
|
||||
// - Optionally, the node's own self info (when includeSelf is true)
|
||||
// This avoids the issue where an empty Peers slice is interpreted by Tailscale
|
||||
// clients as "no change" rather than "no peers".
|
||||
// When includeSelf is true, the node's self info is included so that a node
|
||||
// whose own attributes changed (e.g., tags via admin API) sees its updated
|
||||
// self info along with the new packet filters.
|
||||
func (m *mapper) policyChangeResponse(
|
||||
nodeID types.NodeID,
|
||||
capVer tailcfg.CapabilityVersion,
|
||||
removedPeers []tailcfg.NodeID,
|
||||
currentPeers views.Slice[types.NodeView],
|
||||
includeSelf bool,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
builder := m.NewMapResponseBuilder(nodeID).
|
||||
WithDebugType(policyResponseDebug).
|
||||
@@ -200,6 +217,10 @@ func (m *mapper) policyChangeResponse(
|
||||
WithPacketFilters().
|
||||
WithSSHPolicy()
|
||||
|
||||
if includeSelf {
|
||||
builder = builder.WithSelfNode()
|
||||
}
|
||||
|
||||
if len(removedPeers) > 0 {
|
||||
// Convert tailcfg.NodeID to types.NodeID for WithPeersRemoved
|
||||
removedIDs := make([]types.NodeID, len(removedPeers))
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
package mapper
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
func Test(t *testing.T) {
|
||||
check.TestingT(t)
|
||||
}
|
||||
|
||||
var _ = check.Suite(&Suite{})
|
||||
|
||||
type Suite struct{}
|
||||
@@ -1092,6 +1092,15 @@ func TestSSHPolicyRules(t *testing.T) {
        Tags: []string{"tag:client"},
    }

    // Create a tagged server node for valid SSH patterns
    nodeTaggedServer := types.Node{
        Hostname: "tagged-server",
        IPv4: ap("100.64.0.5"),
        UserID: ptr.To(uint(1)),
        User: ptr.To(users[0]),
        Tags: []string{"tag:server"},
    }

    tests := []struct {
        name string
        targetNode types.Node
@@ -1102,10 +1111,13 @@ func TestSSHPolicyRules(t *testing.T) {
        errorMessage string
    }{
        {
            name: "group-to-user",
            targetNode: nodeUser1,
            name: "group-to-tag",
            targetNode: nodeTaggedServer,
            peers: types.Nodes{&nodeUser2},
            policy: `{
                "tagOwners": {
                    "tag:server": ["user1@"]
                },
                "groups": {
                    "group:admins": ["user2@"]
                },
@@ -1113,7 +1125,7 @@ func TestSSHPolicyRules(t *testing.T) {
                {
                    "action": "accept",
                    "src": ["group:admins"],
                    "dst": ["user1@"],
                    "dst": ["tag:server"],
                    "users": ["autogroup:nonroot"]
                }
            ]
@@ -1138,18 +1150,21 @@ func TestSSHPolicyRules(t *testing.T) {
        },
        {
            name: "check-period-specified",
            targetNode: nodeUser1,
            peers: types.Nodes{&taggedClient},
            targetNode: taggedClient,
            peers: types.Nodes{&nodeUser2},
            policy: `{
                "tagOwners": {
                    "tag:client": ["user1@"],
                    "tag:client": ["user1@"]
                },
                "groups": {
                    "group:admins": ["user2@"]
                },
                "ssh": [
                    {
                        "action": "check",
                        "checkPeriod": "24h",
                        "src": ["tag:client"],
                        "dst": ["user1@"],
                        "src": ["group:admins"],
                        "dst": ["tag:client"],
                        "users": ["autogroup:nonroot"]
                    }
                ]
@@ -1157,7 +1172,7 @@ func TestSSHPolicyRules(t *testing.T) {
            wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
                {
                    Principals: []*tailcfg.SSHPrincipal{
                        {NodeIP: "100.64.0.4"},
                        {NodeIP: "100.64.0.2"},
                    },
                    SSHUsers: map[string]string{
                        "*": "=",
@@ -1176,16 +1191,19 @@ func TestSSHPolicyRules(t *testing.T) {
        {
            name: "no-matching-rules",
            targetNode: nodeUser2,
            peers: types.Nodes{&nodeUser1},
            peers: types.Nodes{&nodeUser1, &nodeTaggedServer},
            policy: `{
                "tagOwners": {
                    "tag:client": ["user1@"],
                    "tag:server": ["user1@"]
                },
                "groups": {
                    "group:admins": ["user1@"]
                },
                "ssh": [
                    {
                        "action": "accept",
                        "src": ["tag:client"],
                        "dst": ["user1@"],
                        "src": ["group:admins"],
                        "dst": ["tag:server"],
                        "users": ["autogroup:nonroot"]
                    }
                ]
@@ -1194,14 +1212,20 @@ func TestSSHPolicyRules(t *testing.T) {
        },
        {
            name: "invalid-action",
            targetNode: nodeUser1,
            targetNode: nodeTaggedServer,
            peers: types.Nodes{&nodeUser2},
            policy: `{
                "tagOwners": {
                    "tag:server": ["user1@"]
                },
                "groups": {
                    "group:admins": ["user2@"]
                },
                "ssh": [
                    {
                        "action": "invalid",
                        "src": ["group:admins"],
                        "dst": ["user1@"],
                        "dst": ["tag:server"],
                        "users": ["autogroup:nonroot"]
                    }
                ]
@@ -1211,15 +1235,21 @@ func TestSSHPolicyRules(t *testing.T) {
        },
        {
            name: "invalid-check-period",
            targetNode: nodeUser1,
            targetNode: nodeTaggedServer,
            peers: types.Nodes{&nodeUser2},
            policy: `{
                "tagOwners": {
                    "tag:server": ["user1@"]
                },
                "groups": {
                    "group:admins": ["user2@"]
                },
                "ssh": [
                    {
                        "action": "check",
                        "checkPeriod": "invalid",
                        "src": ["group:admins"],
                        "dst": ["user1@"],
                        "dst": ["tag:server"],
                        "users": ["autogroup:nonroot"]
                    }
                ]
@@ -1229,26 +1259,12 @@ func TestSSHPolicyRules(t *testing.T) {
        },
        {
            name: "unsupported-autogroup",
            targetNode: nodeUser1,
            peers: types.Nodes{&taggedClient},
            policy: `{
                "ssh": [
                    {
                        "action": "accept",
                        "src": ["tag:client"],
                        "dst": ["user1@"],
                        "users": ["autogroup:invalid"]
                    }
                ]
            }`,
            expectErr: true,
            errorMessage: "autogroup \"autogroup:invalid\" is not supported",
        },
        {
            name: "autogroup-nonroot-should-use-wildcard-with-root-excluded",
            targetNode: nodeUser1,
            targetNode: taggedClient,
            peers: types.Nodes{&nodeUser2},
            policy: `{
                "tagOwners": {
                    "tag:client": ["user1@"]
                },
                "groups": {
                    "group:admins": ["user2@"]
                },
@@ -1256,7 +1272,30 @@ func TestSSHPolicyRules(t *testing.T) {
                {
                    "action": "accept",
                    "src": ["group:admins"],
                    "dst": ["user1@"],
                    "dst": ["tag:client"],
                    "users": ["autogroup:invalid"]
                }
            ]
            }`,
            expectErr: true,
            errorMessage: "autogroup \"autogroup:invalid\" is not supported",
        },
        {
            name: "autogroup-nonroot-should-use-wildcard-with-root-excluded",
            targetNode: nodeTaggedServer,
            peers: types.Nodes{&nodeUser2},
            policy: `{
                "tagOwners": {
                    "tag:server": ["user1@"]
                },
                "groups": {
                    "group:admins": ["user2@"]
                },
                "ssh": [
                    {
                        "action": "accept",
                        "src": ["group:admins"],
                        "dst": ["tag:server"],
                        "users": ["autogroup:nonroot"]
                    }
                ]
@@ -1282,9 +1321,12 @@ func TestSSHPolicyRules(t *testing.T) {
        },
        {
            name: "autogroup-nonroot-plus-root-should-use-wildcard-with-root-mapped",
            targetNode: nodeUser1,
            targetNode: nodeTaggedServer,
            peers: types.Nodes{&nodeUser2},
            policy: `{
                "tagOwners": {
                    "tag:server": ["user1@"]
                },
                "groups": {
                    "group:admins": ["user2@"]
                },
@@ -1292,7 +1334,7 @@ func TestSSHPolicyRules(t *testing.T) {
                {
                    "action": "accept",
                    "src": ["group:admins"],
                    "dst": ["user1@"],
                    "dst": ["tag:server"],
                    "users": ["autogroup:nonroot", "root"]
                }
            ]
@@ -1318,9 +1360,12 @@ func TestSSHPolicyRules(t *testing.T) {
        },
        {
            name: "specific-users-should-map-to-themselves-not-equals",
            targetNode: nodeUser1,
            targetNode: nodeTaggedServer,
            peers: types.Nodes{&nodeUser2},
            policy: `{
                "tagOwners": {
                    "tag:server": ["user1@"]
                },
                "groups": {
                    "group:admins": ["user2@"]
                },
@@ -1328,7 +1373,7 @@ func TestSSHPolicyRules(t *testing.T) {
                {
                    "action": "accept",
                    "src": ["group:admins"],
                    "dst": ["user1@"],
                    "dst": ["tag:server"],
                    "users": ["ubuntu", "root"]
                }
            ]
@@ -150,8 +150,7 @@ func (pol *Policy) compileACLWithAutogroupSelf(

    ips, err := src.Resolve(pol, users, nodes)
    if err != nil {
        log.Trace().Err(err).Msgf("resolving source ips")
        continue
        log.Trace().Caller().Err(err).Msgf("resolving source ips")
    }

    if ips != nil {
@@ -164,11 +163,12 @@ func (pol *Policy) compileACLWithAutogroupSelf(
    }

    // Handle autogroup:self destinations (if any)
    if len(autogroupSelfDests) > 0 {
    // Tagged nodes don't participate in autogroup:self (identity is tag-based, not user-based)
    if len(autogroupSelfDests) > 0 && !node.IsTagged() {
        // Pre-filter to same-user untagged devices once - reuse for both sources and destinations
        sameUserNodes := make([]types.NodeView, 0)
        for _, n := range nodes.All() {
            if n.User().ID() == node.User().ID() && !n.IsTagged() {
            if !n.IsTagged() && n.User().ID() == node.User().ID() {
                sameUserNodes = append(sameUserNodes, n)
            }
        }
@@ -234,12 +234,11 @@ func (pol *Policy) compileACLWithAutogroupSelf(
    for _, dest := range otherDests {
        ips, err := dest.Resolve(pol, users, nodes)
        if err != nil {
            log.Trace().Err(err).Msgf("resolving destination ips")
            continue
            log.Trace().Caller().Err(err).Msgf("resolving destination ips")
        }

        if ips == nil {
            log.Debug().Msgf("destination resolved to nil ips: %v", dest)
            log.Debug().Caller().Msgf("destination resolved to nil ips: %v", dest)
            continue
        }

@@ -349,7 +348,7 @@ func (pol *Policy) compileSSHPolicy(
    // Build destination set for autogroup:self (same-user untagged devices only)
    var dest netipx.IPSetBuilder
    for _, n := range nodes.All() {
        if n.User().ID() == node.User().ID() && !n.IsTagged() {
        if !n.IsTagged() && n.User().ID() == node.User().ID() {
            n.AppendToIPSet(&dest)
        }
    }
@@ -365,7 +364,7 @@ func (pol *Policy) compileSSHPolicy(
    // Pre-filter to same-user untagged devices for efficiency
    sameUserNodes := make([]types.NodeView, 0)
    for _, n := range nodes.All() {
        if n.User().ID() == node.User().ID() && !n.IsTagged() {
        if !n.IsTagged() && n.User().ID() == node.User().ID() {
            sameUserNodes = append(sameUserNodes, n)
        }
    }
@@ -410,7 +409,6 @@ func (pol *Policy) compileSSHPolicy(
    ips, err := dst.Resolve(pol, users, nodes)
    if err != nil {
        log.Trace().Caller().Err(err).Msgf("resolving destination ips")
        continue
    }
    if ips != nil {
        dest.AddSet(ips)
@@ -406,21 +406,33 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
        {Name: "user2", Model: gorm.Model{ID: 2}},
    }

    // Create test nodes
    nodeUser1 := types.Node{
        Hostname: "user1-device",
    // Create test nodes - use tagged nodes as SSH destinations
    // and untagged nodes as SSH sources (since group->username destinations
    // are not allowed per Tailscale security model, but groups can SSH to tags)
    nodeTaggedServer := types.Node{
        Hostname: "tagged-server",
        IPv4: createAddr("100.64.0.1"),
        UserID: ptr.To(users[0].ID),
        User: ptr.To(users[0]),
        Tags: []string{"tag:server"},
    }
    nodeUser2 := types.Node{
        Hostname: "user2-device",
    nodeTaggedDB := types.Node{
        Hostname: "tagged-db",
        IPv4: createAddr("100.64.0.2"),
        UserID: ptr.To(users[1].ID),
        User: ptr.To(users[1]),
        Tags: []string{"tag:database"},
    }
    // Add untagged node for user2 - this will be the SSH source
    // (group:admins contains user2, so user2's untagged node provides the source IPs)
    nodeUser2Untagged := types.Node{
        Hostname: "user2-device",
        IPv4: createAddr("100.64.0.3"),
        UserID: ptr.To(users[1].ID),
        User: ptr.To(users[1]),
    }

    nodes := types.Nodes{&nodeUser1, &nodeUser2}
    nodes := types.Nodes{&nodeTaggedServer, &nodeTaggedDB, &nodeUser2Untagged}

    tests := []struct {
        name string
@@ -431,8 +443,11 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
    }{
        {
            name: "specific user mapping",
            targetNode: nodeUser1,
            targetNode: nodeTaggedServer,
            policy: &Policy{
                TagOwners: TagOwners{
                    Tag("tag:server"): Owners{up("user1@")},
                },
                Groups: Groups{
                    Group("group:admins"): []Username{Username("user2@")},
                },
@@ -440,7 +455,7 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
                    {
                        Action: "accept",
                        Sources: SSHSrcAliases{gp("group:admins")},
                        Destinations: SSHDstAliases{up("user1@")},
                        Destinations: SSHDstAliases{tp("tag:server")},
                        Users: []SSHUser{"ssh-it-user"},
                    },
                },
@@ -451,8 +466,11 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
        },
        {
            name: "multiple specific users",
            targetNode: nodeUser1,
            targetNode: nodeTaggedServer,
            policy: &Policy{
                TagOwners: TagOwners{
                    Tag("tag:server"): Owners{up("user1@")},
                },
                Groups: Groups{
                    Group("group:admins"): []Username{Username("user2@")},
                },
@@ -460,7 +478,7 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
                    {
                        Action: "accept",
                        Sources: SSHSrcAliases{gp("group:admins")},
                        Destinations: SSHDstAliases{up("user1@")},
                        Destinations: SSHDstAliases{tp("tag:server")},
                        Users: []SSHUser{"ubuntu", "admin", "deploy"},
                    },
                },
@@ -473,8 +491,11 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
        },
        {
            name: "autogroup:nonroot only",
            targetNode: nodeUser1,
            targetNode: nodeTaggedServer,
            policy: &Policy{
                TagOwners: TagOwners{
                    Tag("tag:server"): Owners{up("user1@")},
                },
                Groups: Groups{
                    Group("group:admins"): []Username{Username("user2@")},
                },
@@ -482,7 +503,7 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
                    {
                        Action: "accept",
                        Sources: SSHSrcAliases{gp("group:admins")},
                        Destinations: SSHDstAliases{up("user1@")},
                        Destinations: SSHDstAliases{tp("tag:server")},
                        Users: []SSHUser{SSHUser(AutoGroupNonRoot)},
                    },
                },
@@ -494,8 +515,11 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
        },
        {
            name: "root only",
            targetNode: nodeUser1,
            targetNode: nodeTaggedServer,
            policy: &Policy{
                TagOwners: TagOwners{
                    Tag("tag:server"): Owners{up("user1@")},
                },
                Groups: Groups{
                    Group("group:admins"): []Username{Username("user2@")},
                },
@@ -503,7 +527,7 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
                    {
                        Action: "accept",
                        Sources: SSHSrcAliases{gp("group:admins")},
                        Destinations: SSHDstAliases{up("user1@")},
                        Destinations: SSHDstAliases{tp("tag:server")},
                        Users: []SSHUser{"root"},
                    },
                },
@@ -514,8 +538,11 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
        },
        {
            name: "autogroup:nonroot plus root",
            targetNode: nodeUser1,
            targetNode: nodeTaggedServer,
            policy: &Policy{
                TagOwners: TagOwners{
                    Tag("tag:server"): Owners{up("user1@")},
                },
                Groups: Groups{
                    Group("group:admins"): []Username{Username("user2@")},
                },
@@ -523,7 +550,7 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
                    {
                        Action: "accept",
                        Sources: SSHSrcAliases{gp("group:admins")},
                        Destinations: SSHDstAliases{up("user1@")},
                        Destinations: SSHDstAliases{tp("tag:server")},
                        Users: []SSHUser{SSHUser(AutoGroupNonRoot), "root"},
                    },
                },
@@ -535,8 +562,11 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
        },
        {
            name: "mixed specific users and autogroups",
            targetNode: nodeUser1,
            targetNode: nodeTaggedServer,
            policy: &Policy{
                TagOwners: TagOwners{
                    Tag("tag:server"): Owners{up("user1@")},
                },
                Groups: Groups{
                    Group("group:admins"): []Username{Username("user2@")},
                },
@@ -544,7 +574,7 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
                    {
                        Action: "accept",
                        Sources: SSHSrcAliases{gp("group:admins")},
                        Destinations: SSHDstAliases{up("user1@")},
                        Destinations: SSHDstAliases{tp("tag:server")},
                        Users: []SSHUser{SSHUser(AutoGroupNonRoot), "root", "ubuntu", "admin"},
                    },
                },
@@ -558,8 +588,12 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
        },
        {
            name: "no matching destination",
            targetNode: nodeUser2, // Target node2, but policy only allows user1
            targetNode: nodeTaggedDB, // Target tag:database, but policy only allows tag:server
            policy: &Policy{
                TagOwners: TagOwners{
                    Tag("tag:server"): Owners{up("user1@")},
                    Tag("tag:database"): Owners{up("user1@")},
                },
                Groups: Groups{
                    Group("group:admins"): []Username{Username("user2@")},
                },
@@ -567,7 +601,7 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
                    {
                        Action: "accept",
                        Sources: SSHSrcAliases{gp("group:admins")},
                        Destinations: SSHDstAliases{up("user1@")}, // Only user1, not user2
                        Destinations: SSHDstAliases{tp("tag:server")}, // Only tag:server, not tag:database
                        Users: []SSHUser{"ssh-it-user"},
                    },
                },
@@ -600,9 +634,9 @@ func TestCompileSSHPolicy_UserMapping(t *testing.T) {
    rule := sshPolicy.Rules[0]
    assert.Equal(t, tt.wantSSHUsers, rule.SSHUsers, "SSH users mapping should match expected")

    // Verify principals are set correctly (should contain user2's IP since that's the source)
    // Verify principals are set correctly (should contain user2's untagged device IP since that's the source)
    require.Len(t, rule.Principals, 1)
    assert.Equal(t, "100.64.0.2", rule.Principals[0].NodeIP)
    assert.Equal(t, "100.64.0.3", rule.Principals[0].NodeIP)

    // Verify action is set correctly
    assert.True(t, rule.Action.Accept)
@@ -619,11 +653,13 @@ func TestCompileSSHPolicy_CheckAction(t *testing.T) {
        {Name: "user2", Model: gorm.Model{ID: 2}},
    }

    nodeUser1 := types.Node{
        Hostname: "user1-device",
    // Use tagged nodes for SSH user mapping tests
    nodeTaggedServer := types.Node{
        Hostname: "tagged-server",
        IPv4: createAddr("100.64.0.1"),
        UserID: ptr.To(users[0].ID),
        User: ptr.To(users[0]),
        Tags: []string{"tag:server"},
    }
    nodeUser2 := types.Node{
        Hostname: "user2-device",
@@ -632,9 +668,12 @@ func TestCompileSSHPolicy_CheckAction(t *testing.T) {
        User: ptr.To(users[1]),
    }

    nodes := types.Nodes{&nodeUser1, &nodeUser2}
    nodes := types.Nodes{&nodeTaggedServer, &nodeUser2}

    policy := &Policy{
        TagOwners: TagOwners{
            Tag("tag:server"): Owners{up("user1@")},
        },
        Groups: Groups{
            Group("group:admins"): []Username{Username("user2@")},
        },
@@ -643,7 +682,7 @@ func TestCompileSSHPolicy_CheckAction(t *testing.T) {
                Action: "check",
                CheckPeriod: model.Duration(24 * time.Hour),
                Sources: SSHSrcAliases{gp("group:admins")},
                Destinations: SSHDstAliases{up("user1@")},
                Destinations: SSHDstAliases{tp("tag:server")},
                Users: []SSHUser{"ssh-it-user"},
            },
        },
@@ -652,7 +691,7 @@ func TestCompileSSHPolicy_CheckAction(t *testing.T) {
    err := policy.validate()
    require.NoError(t, err)

    sshPolicy, err := policy.compileSSHPolicy(users, nodeUser1.View(), nodes.ViewSlice())
    sshPolicy, err := policy.compileSSHPolicy(users, nodeTaggedServer.View(), nodes.ViewSlice())
    require.NoError(t, err)
    require.NotNil(t, sshPolicy)
    require.Len(t, sshPolicy.Rules, 1)
@@ -697,16 +736,17 @@ func TestSSHIntegrationReproduction(t *testing.T) {
    nodes := types.Nodes{node1, node2}

    // Create a simple policy that reproduces the issue
    // Updated to use autogroup:self instead of username destination (per Tailscale security model)
    policy := &Policy{
        Groups: Groups{
            Group("group:integration-test"): []Username{Username("user1@")},
            Group("group:integration-test"): []Username{Username("user1@"), Username("user2@")},
        },
        SSHs: []SSH{
            {
                Action: "accept",
                Sources: SSHSrcAliases{gp("group:integration-test")},
                Destinations: SSHDstAliases{up("user2@")}, // Target user2
                Users: []SSHUser{SSHUser("ssh-it-user")}, // This is the key - specific user
                Destinations: SSHDstAliases{agp("autogroup:self")}, // Users can SSH to their own devices
                Users: []SSHUser{SSHUser("ssh-it-user")}, // This is the key - specific user
            },
        },
    }
@@ -715,7 +755,7 @@ func TestSSHIntegrationReproduction(t *testing.T) {
    err := policy.validate()
    require.NoError(t, err)

    // Test SSH policy compilation for node2 (target)
    // Test SSH policy compilation for node2 (owned by user2, who is in the group)
    sshPolicy, err := policy.compileSSHPolicy(users, node2.View(), nodes.ViewSlice())
    require.NoError(t, err)
    require.NotNil(t, sshPolicy)
@@ -1647,3 +1687,176 @@ func TestSSHWithAutogroupSelfAndMixedDestinations(t *testing.T) {
    require.Contains(t, routerPrincipals, "100.64.0.2", "router rule should include user1's other device (unfiltered sources)")
    require.Contains(t, routerPrincipals, "100.64.0.3", "router rule should include user2's device (unfiltered sources)")
}

// TestAutogroupSelfWithNonExistentUserInGroup verifies that when a group
// contains a non-existent user, partial resolution still works correctly.
// This reproduces the issue from https://github.com/juanfont/headscale/issues/2990
// where autogroup:self breaks when groups contain users that don't have
// registered nodes.
func TestAutogroupSelfWithNonExistentUserInGroup(t *testing.T) {
    users := types.Users{
        {Model: gorm.Model{ID: 1}, Name: "superadmin"},
        {Model: gorm.Model{ID: 2}, Name: "admin"},
        {Model: gorm.Model{ID: 3}, Name: "direction"},
    }

    nodes := types.Nodes{
        // superadmin's device
        {ID: 1, User: ptr.To(users[0]), IPv4: ap("100.64.0.1"), Hostname: "superadmin-device"},
        // admin's device
        {ID: 2, User: ptr.To(users[1]), IPv4: ap("100.64.0.2"), Hostname: "admin-device"},
        // direction's device
        {ID: 3, User: ptr.To(users[2]), IPv4: ap("100.64.0.3"), Hostname: "direction-device"},
        // tagged servers
        {ID: 4, IPv4: ap("100.64.0.10"), Hostname: "common-server", Tags: []string{"tag:common"}},
        {ID: 5, IPv4: ap("100.64.0.11"), Hostname: "tech-server", Tags: []string{"tag:tech"}},
        {ID: 6, IPv4: ap("100.64.0.12"), Hostname: "privileged-server", Tags: []string{"tag:privileged"}},
    }

    policy := &Policy{
        Groups: Groups{
            // group:superadmin contains "phantom_user" who doesn't exist
            Group("group:superadmin"): []Username{Username("superadmin@"), Username("phantom_user@")},
            Group("group:admin"): []Username{Username("admin@")},
            Group("group:direction"): []Username{Username("direction@")},
        },
        TagOwners: TagOwners{
            Tag("tag:common"): Owners{gp("group:superadmin")},
            Tag("tag:tech"): Owners{gp("group:superadmin")},
            Tag("tag:privileged"): Owners{gp("group:superadmin")},
        },
        ACLs: []ACL{
            {
                // Rule 1: all groups -> tag:common
                Action: "accept",
                Sources: []Alias{gp("group:superadmin"), gp("group:admin"), gp("group:direction")},
                Destinations: []AliasWithPorts{
                    aliasWithPorts(tp("tag:common"), tailcfg.PortRangeAny),
                },
            },
            {
                // Rule 2: superadmin + admin -> tag:tech
                Action: "accept",
                Sources: []Alias{gp("group:superadmin"), gp("group:admin")},
                Destinations: []AliasWithPorts{
                    aliasWithPorts(tp("tag:tech"), tailcfg.PortRangeAny),
                },
            },
            {
                // Rule 3: superadmin -> tag:privileged + autogroup:self
                Action: "accept",
                Sources: []Alias{gp("group:superadmin")},
                Destinations: []AliasWithPorts{
                    aliasWithPorts(tp("tag:privileged"), tailcfg.PortRangeAny),
                    aliasWithPorts(agp("autogroup:self"), tailcfg.PortRangeAny),
                },
            },
        },
    }

    err := policy.validate()
    require.NoError(t, err)

    containsIP := func(rules []tailcfg.FilterRule, ip string) bool {
        addr := netip.MustParseAddr(ip)

        for _, rule := range rules {
            for _, dp := range rule.DstPorts {
                // DstPort IPs may be bare addresses or CIDR prefixes
                pref, err := netip.ParsePrefix(dp.IP)
                if err != nil {
                    // Try as bare address
                    a, err2 := netip.ParseAddr(dp.IP)
                    if err2 != nil {
                        continue
                    }

                    if a == addr {
                        return true
                    }

                    continue
                }

                if pref.Contains(addr) {
                    return true
                }
            }
        }

        return false
    }

    containsSrcIP := func(rules []tailcfg.FilterRule, ip string) bool {
        addr := netip.MustParseAddr(ip)

        for _, rule := range rules {
            for _, srcIP := range rule.SrcIPs {
                pref, err := netip.ParsePrefix(srcIP)
                if err != nil {
                    a, err2 := netip.ParseAddr(srcIP)
                    if err2 != nil {
                        continue
                    }

                    if a == addr {
                        return true
                    }

                    continue
                }

                if pref.Contains(addr) {
                    return true
                }
            }
        }

        return false
    }

    // Test superadmin's device: should have rules with tag:common, tag:tech, tag:privileged destinations
    // and superadmin's IP should appear in sources (partial resolution of group:superadmin works)
    superadminNode := nodes[0].View()
    superadminRules, err := policy.compileFilterRulesForNode(users, superadminNode, nodes.ViewSlice())
    require.NoError(t, err)
    assert.True(t, containsIP(superadminRules, "100.64.0.10"), "rules should include tag:common server")
    assert.True(t, containsIP(superadminRules, "100.64.0.11"), "rules should include tag:tech server")
    assert.True(t, containsIP(superadminRules, "100.64.0.12"), "rules should include tag:privileged server")

    // Key assertion: superadmin's IP should appear as a source in rules
    // despite phantom_user in group:superadmin causing a partial resolution error
    assert.True(t, containsSrcIP(superadminRules, "100.64.0.1"),
        "superadmin's IP should appear in sources despite phantom_user in group:superadmin")

    // Test admin's device: admin is in group:admin which has NO phantom users.
    // The key bug was: when group:superadmin (with phantom_user) appeared as a source
    // alongside group:admin, the error from resolving group:superadmin caused its
    // partial result to be discarded via `continue`. With the fix, superadmin's IPs
    // from group:superadmin are retained alongside admin's IPs from group:admin.
    adminNode := nodes[1].View()
    adminRules, err := policy.compileFilterRulesForNode(users, adminNode, nodes.ViewSlice())
    require.NoError(t, err)

    // Rule 1 sources: [group:superadmin, group:admin, group:direction]
    // Without fix: group:superadmin discarded -> only admin + direction IPs in sources
    // With fix: superadmin IP preserved -> superadmin + admin + direction IPs in sources
    assert.True(t, containsIP(adminRules, "100.64.0.10"),
        "admin rules should include tag:common server (group:admin resolves correctly)")
    assert.True(t, containsSrcIP(adminRules, "100.64.0.1"),
        "superadmin's IP should be in sources for rules seen by admin (partial resolution preserved)")
    assert.True(t, containsSrcIP(adminRules, "100.64.0.2"),
        "admin's own IP should be in sources")

    // Test direction's device: similar to admin, verifies group:direction sources work
    directionNode := nodes[2].View()
    directionRules, err := policy.compileFilterRulesForNode(users, directionNode, nodes.ViewSlice())
    require.NoError(t, err)
    assert.True(t, containsIP(directionRules, "100.64.0.10"),
        "direction rules should include tag:common server")
    assert.True(t, containsSrcIP(directionRules, "100.64.0.3"),
        "direction's own IP should be in sources")
    // With fix: superadmin's IP preserved in rules that include group:superadmin
    assert.True(t, containsSrcIP(directionRules, "100.64.0.1"),
        "superadmin's IP should be in sources for rule 1 (partial resolution preserved)")
}
@@ -315,15 +315,23 @@ func (pm *PolicyManager) BuildPeerMap(nodes views.Slice[types.NodeView]) map[typ
    nodeMatchers := make(map[types.NodeID][]matcher.Match, nodes.Len())
    for _, node := range nodes.All() {
        filter, err := pm.compileFilterRulesForNodeLocked(node)
        if err != nil || len(filter) == 0 {
        if err != nil {
            continue
        }
        // Include all nodes in nodeMatchers, even those with empty filters.
        // Empty filters result in empty matchers where CanAccess() returns false,
        // but the node still needs to be in the map so hasFilterX is true.
        // This ensures symmetric visibility works correctly: if node A can access
        // node B, both should see each other regardless of B's filter rules.
        nodeMatchers[node.ID()] = matcher.MatchesFromFilterRules(filter)
    }

    // Check each node pair for peer relationships.
    // Start j at i+1 to avoid checking the same pair twice and creating duplicates.
    // We check both directions (i->j and j->i) since ACLs can be asymmetric.
    // We use symmetric visibility: if EITHER node can access the other, BOTH see
    // each other. This matches the global filter path behavior and ensures that
    // one-way access rules (e.g., admin -> tagged server) still allow both nodes
    // to see each other as peers, which is required for network connectivity.
    for i := range nodes.Len() {
        nodeI := nodes.At(i)
        matchersI, hasFilterI := nodeMatchers[nodeI.ID()]
@@ -332,13 +340,16 @@ func (pm *PolicyManager) BuildPeerMap(nodes views.Slice[types.NodeView]) map[typ
            nodeJ := nodes.At(j)
            matchersJ, hasFilterJ := nodeMatchers[nodeJ.ID()]

            // Check if nodeI can access nodeJ
            if hasFilterI && nodeI.CanAccess(matchersI, nodeJ) {
                ret[nodeI.ID()] = append(ret[nodeI.ID()], nodeJ)
            }
            // If either node can access the other, both should see each other as peers.
            // This symmetric visibility is required for proper network operation:
            // - Admin with *:* rule should see tagged servers (even if servers
            //   can't access admin)
            // - Servers should see admin so they can respond to admin's connections
            canIAccessJ := hasFilterI && nodeI.CanAccess(matchersI, nodeJ)
            canJAccessI := hasFilterJ && nodeJ.CanAccess(matchersJ, nodeI)

            // Check if nodeJ can access nodeI
            if hasFilterJ && nodeJ.CanAccess(matchersJ, nodeI) {
            if canIAccessJ || canJAccessI {
                ret[nodeI.ID()] = append(ret[nodeI.ID()], nodeJ)
                ret[nodeJ.ID()] = append(ret[nodeJ.ID()], nodeI)
            }
        }
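A brief usage sketch of the symmetric-visibility behavior above, mirroring how the tests later in this change consume the peer map (the node identifiers here are illustrative, not from the diff):

    // With only a one-way rule (admin -> tagged server), symmetric
    // visibility still puts each node in the other's peer list.
    peerMap := pm.BuildPeerMap(nodes.ViewSlice())
    adminPeers := peerMap[adminNode.ID]     // includes the tagged server
    serverPeers := peerMap[taggedServer.ID] // includes the admin device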
@@ -802,34 +813,55 @@ func (pm *PolicyManager) invalidateAutogroupSelfCache(oldNodes, newNodes views.S
        newNodeMap[node.ID()] = node
    }

    // Track which users are affected by changes
    // Track which users are affected by changes.
    // Tagged nodes don't participate in autogroup:self (identity is tag-based),
    // so we skip them when collecting affected users, except when tag status changes
    // (which affects the user's device set).
    affectedUsers := make(map[uint]struct{})

    // Check for removed nodes
    // Check for removed nodes (only non-tagged nodes affect autogroup:self)
    for nodeID, oldNode := range oldNodeMap {
        if _, exists := newNodeMap[nodeID]; !exists {
            affectedUsers[oldNode.User().ID()] = struct{}{}
            if !oldNode.IsTagged() {
                affectedUsers[oldNode.User().ID()] = struct{}{}
            }
        }
    }

    // Check for added nodes
    // Check for added nodes (only non-tagged nodes affect autogroup:self)
    for nodeID, newNode := range newNodeMap {
        if _, exists := oldNodeMap[nodeID]; !exists {
            affectedUsers[newNode.User().ID()] = struct{}{}
            if !newNode.IsTagged() {
                affectedUsers[newNode.User().ID()] = struct{}{}
            }
        }
    }

    // Check for modified nodes (user changes, tag changes, IP changes)
    for nodeID, newNode := range newNodeMap {
        if oldNode, exists := oldNodeMap[nodeID]; exists {
            // Check if user changed
            if oldNode.User().ID() != newNode.User().ID() {
                affectedUsers[oldNode.User().ID()] = struct{}{}
                affectedUsers[newNode.User().ID()] = struct{}{}
            // Check if tag status changed — this affects the user's autogroup:self device set.
            // Use the non-tagged version to get the user ID safely.
            if oldNode.IsTagged() != newNode.IsTagged() {
                if !oldNode.IsTagged() {
                    // Was untagged, now tagged: user lost a device
                    affectedUsers[oldNode.User().ID()] = struct{}{}
                } else {
                    // Was tagged, now untagged: user gained a device
                    affectedUsers[newNode.User().ID()] = struct{}{}
                }

                continue
            }

            // Check if tag status changed
            if oldNode.IsTagged() != newNode.IsTagged() {
            // Skip tagged nodes for remaining checks — they don't participate in autogroup:self
            if newNode.IsTagged() {
                continue
            }

            // Check if user changed (both versions are non-tagged here)
            if oldNode.User().ID() != newNode.User().ID() {
                affectedUsers[oldNode.User().ID()] = struct{}{}
                affectedUsers[newNode.User().ID()] = struct{}{}
            }

@@ -850,9 +882,9 @@ func (pm *PolicyManager) invalidateAutogroupSelfCache(oldNodes, newNodes views.S
        }
    }

    // Clear cache entries for affected users only
    // Clear cache entries for affected users only.
    // For autogroup:self, we need to clear all nodes belonging to affected users
    // because autogroup:self rules depend on the entire user's device set
    // because autogroup:self rules depend on the entire user's device set.
    for nodeID := range pm.filterRulesMap {
        // Find the user for this cached node
        var nodeUserID uint
@@ -861,6 +893,12 @@ func (pm *PolicyManager) invalidateAutogroupSelfCache(oldNodes, newNodes views.S
        // Check in new nodes first
        for _, node := range newNodes.All() {
            if node.ID() == nodeID {
                // Tagged nodes don't participate in autogroup:self,
                // so their cache doesn't need user-based invalidation.
                if node.IsTagged() {
                    found = true
                    break
                }
                nodeUserID = node.User().ID()
                found = true
                break
@@ -871,6 +909,10 @@ func (pm *PolicyManager) invalidateAutogroupSelfCache(oldNodes, newNodes views.S
        if !found {
            for _, node := range oldNodes.All() {
                if node.ID() == nodeID {
                    if node.IsTagged() {
                        found = true
                        break
                    }
                    nodeUserID = node.User().ID()
                    found = true
                    break
@@ -729,3 +729,608 @@ func TestTagPropagationToPeerMap(t *testing.T) {

    require.False(t, canAccess, "user2 should NOT be able to access user1 after tag:web is removed (ReduceNodes should filter out)")
}

// TestAutogroupSelfWithAdminOverride reproduces issue #2990:
// When autogroup:self is combined with an admin rule (group:admin -> *:*),
// tagged nodes become invisible to admins because BuildPeerMap uses asymmetric
// peer visibility in the autogroup:self path.
//
// The fix requires symmetric visibility: if admin can access tagged node,
// BOTH admin and tagged node should see each other as peers.
func TestAutogroupSelfWithAdminOverride(t *testing.T) {
    users := types.Users{
        {Model: gorm.Model{ID: 1}, Name: "admin", Email: "admin@example.com"},
        {Model: gorm.Model{ID: 2}, Name: "user1", Email: "user1@example.com"},
    }

    // Admin has a regular device
    adminNode := &types.Node{
        ID: 1,
        Hostname: "admin-device",
        IPv4: ap("100.64.0.1"),
        IPv6: ap("fd7a:115c:a1e0::1"),
        User: ptr.To(users[0]),
        UserID: ptr.To(users[0].ID),
        Hostinfo: &tailcfg.Hostinfo{},
    }

    // user1 has a tagged server
    user1TaggedNode := &types.Node{
        ID: 2,
        Hostname: "user1-server",
        IPv4: ap("100.64.0.2"),
        IPv6: ap("fd7a:115c:a1e0::2"),
        User: ptr.To(users[1]),
        UserID: ptr.To(users[1].ID),
        Tags: []string{"tag:server"},
        Hostinfo: &tailcfg.Hostinfo{},
    }

    nodes := types.Nodes{adminNode, user1TaggedNode}

    // Policy from issue #2990:
    // - group:admin has full access to everything (*:*)
    // - autogroup:member -> autogroup:self (allows users to see their own devices)
    //
    // Bug: The tagged server becomes invisible to admin because:
    // 1. Admin can access tagged server (via *:* rule)
    // 2. Tagged server CANNOT access admin (no rule for that)
    // 3. With asymmetric logic, tagged server is not added to admin's peer list
    policy := `{
        "groups": {
            "group:admin": ["admin@example.com"]
        },
        "tagOwners": {
            "tag:server": ["user1@example.com"]
        },
        "acls": [
            {
                "action": "accept",
                "src": ["group:admin"],
                "dst": ["*:*"]
            },
            {
                "action": "accept",
                "src": ["autogroup:member"],
                "dst": ["autogroup:self:*"]
            }
        ]
    }`

    pm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())
    require.NoError(t, err)

    peerMap := pm.BuildPeerMap(nodes.ViewSlice())

    // Admin should see the tagged server as a peer (via group:admin -> *:* rule)
    adminPeers := peerMap[adminNode.ID]
    require.True(t, slices.ContainsFunc(adminPeers, func(n types.NodeView) bool {
        return n.ID() == user1TaggedNode.ID
    }), "admin should see tagged server as peer via *:* rule (issue #2990)")

    // Tagged server should also see admin as a peer (symmetric visibility)
    // Even though tagged server cannot ACCESS admin, it should still SEE admin
    // because admin CAN access it. This is required for proper network operation.
    taggedPeers := peerMap[user1TaggedNode.ID]
    require.True(t, slices.ContainsFunc(taggedPeers, func(n types.NodeView) bool {
        return n.ID() == adminNode.ID
    }), "tagged server should see admin as peer (symmetric visibility)")
}

// TestAutogroupSelfSymmetricVisibility verifies that peer visibility is symmetric:
// if node A can access node B, then both A and B should see each other as peers.
// This is the same behavior as the global filter path.
func TestAutogroupSelfSymmetricVisibility(t *testing.T) {
    users := types.Users{
        {Model: gorm.Model{ID: 1}, Name: "user1", Email: "user1@example.com"},
        {Model: gorm.Model{ID: 2}, Name: "user2", Email: "user2@example.com"},
    }

    // user1 has device A
    deviceA := &types.Node{
        ID: 1,
        Hostname: "device-a",
        IPv4: ap("100.64.0.1"),
        IPv6: ap("fd7a:115c:a1e0::1"),
        User: ptr.To(users[0]),
        UserID: ptr.To(users[0].ID),
        Hostinfo: &tailcfg.Hostinfo{},
    }

    // user2 has device B (tagged)
    deviceB := &types.Node{
        ID: 2,
        Hostname: "device-b",
        IPv4: ap("100.64.0.2"),
        IPv6: ap("fd7a:115c:a1e0::2"),
        User: ptr.To(users[1]),
        UserID: ptr.To(users[1].ID),
        Tags: []string{"tag:web"},
        Hostinfo: &tailcfg.Hostinfo{},
    }

    nodes := types.Nodes{deviceA, deviceB}

    // One-way rule: user1 can access tag:web, but tag:web cannot access user1
    policy := `{
        "tagOwners": {
            "tag:web": ["user2@example.com"]
        },
        "acls": [
            {
                "action": "accept",
                "src": ["user1@example.com"],
                "dst": ["tag:web:*"]
            },
            {
                "action": "accept",
                "src": ["autogroup:member"],
                "dst": ["autogroup:self:*"]
            }
        ]
    }`

    pm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())
    require.NoError(t, err)

    peerMap := pm.BuildPeerMap(nodes.ViewSlice())

    // Device A (user1) should see device B (tag:web) as peer
    aPeers := peerMap[deviceA.ID]
    require.True(t, slices.ContainsFunc(aPeers, func(n types.NodeView) bool {
        return n.ID() == deviceB.ID
    }), "device A should see device B as peer (user1 -> tag:web rule)")

    // Device B (tag:web) should ALSO see device A as peer (symmetric visibility)
    // Even though B cannot ACCESS A, B should still SEE A as a peer
    bPeers := peerMap[deviceB.ID]
    require.True(t, slices.ContainsFunc(bPeers, func(n types.NodeView) bool {
        return n.ID() == deviceA.ID
    }), "device B should see device A as peer (symmetric visibility)")
}
// TestAutogroupSelfDoesNotBreakOtherUsersAccess reproduces the Discord scenario
// where enabling autogroup:self for superadmins should NOT break access for
// other users who don't use autogroup:self.
//
// Scenario:
// - Rule 1: [superadmin, admin, direction] -> [tag:common:*]
// - Rule 2: [superadmin, admin] -> [tag:tech:*]
// - Rule 3: [superadmin] -> [tag:privileged:*, autogroup:self:*]
//
// Expected behavior:
// - Superadmin sees: tag:common, tag:tech, tag:privileged, and own devices
// - Admin sees: tag:common, tag:tech
// - Direction sees: tag:common
// - All tagged nodes should be visible to users who can access them.
func TestAutogroupSelfDoesNotBreakOtherUsersAccess(t *testing.T) {
    users := types.Users{
        {Model: gorm.Model{ID: 1}, Name: "superadmin", Email: "superadmin@example.com"},
        {Model: gorm.Model{ID: 2}, Name: "admin", Email: "admin@example.com"},
        {Model: gorm.Model{ID: 3}, Name: "direction", Email: "direction@example.com"},
        {Model: gorm.Model{ID: 4}, Name: "tagowner", Email: "tagowner@example.com"},
    }

    // Create nodes:
    // - superadmin's device
    // - admin's device
    // - direction's device
    // - tagged server (tag:common)
    // - tagged server (tag:tech)
    // - tagged server (tag:privileged)

    superadminDevice := &types.Node{
        ID: 1,
        Hostname: "superadmin-laptop",
        User: ptr.To(users[0]),
        UserID: ptr.To(users[0].ID),
        IPv4: ap("100.64.0.1"),
        Hostinfo: &tailcfg.Hostinfo{},
    }

    adminDevice := &types.Node{
        ID: 2,
        Hostname: "admin-laptop",
        User: ptr.To(users[1]),
        UserID: ptr.To(users[1].ID),
        IPv4: ap("100.64.0.2"),
        Hostinfo: &tailcfg.Hostinfo{},
    }

    directionDevice := &types.Node{
        ID: 3,
        Hostname: "direction-laptop",
        User: ptr.To(users[2]),
        UserID: ptr.To(users[2].ID),
        IPv4: ap("100.64.0.3"),
        Hostinfo: &tailcfg.Hostinfo{},
    }

    commonServer := &types.Node{
        ID: 4,
        Hostname: "common-server",
        User: ptr.To(users[3]),
        UserID: ptr.To(users[3].ID),
        IPv4: ap("100.64.0.4"),
        Tags: []string{"tag:common"},
        Hostinfo: &tailcfg.Hostinfo{},
    }

    techServer := &types.Node{
        ID: 5,
        Hostname: "tech-server",
        User: ptr.To(users[3]),
        UserID: ptr.To(users[3].ID),
        IPv4: ap("100.64.0.5"),
        Tags: []string{"tag:tech"},
        Hostinfo: &tailcfg.Hostinfo{},
    }

    privilegedServer := &types.Node{
        ID: 6,
        Hostname: "privileged-server",
        User: ptr.To(users[3]),
        UserID: ptr.To(users[3].ID),
        IPv4: ap("100.64.0.6"),
        Tags: []string{"tag:privileged"},
        Hostinfo: &tailcfg.Hostinfo{},
    }

    nodes := types.Nodes{
        superadminDevice,
        adminDevice,
        directionDevice,
        commonServer,
        techServer,
        privilegedServer,
    }

    policy := `{
        "groups": {
            "group:superadmin": ["superadmin@example.com"],
            "group:admin": ["admin@example.com"],
            "group:direction": ["direction@example.com"]
        },
        "tagOwners": {
            "tag:common": ["tagowner@example.com"],
            "tag:tech": ["tagowner@example.com"],
            "tag:privileged": ["tagowner@example.com"]
        },
        "acls": [
            {
                "action": "accept",
                "src": ["group:superadmin", "group:admin", "group:direction"],
                "dst": ["tag:common:*"]
            },
            {
                "action": "accept",
                "src": ["group:superadmin", "group:admin"],
                "dst": ["tag:tech:*"]
            },
            {
                "action": "accept",
                "src": ["group:superadmin"],
                "dst": ["tag:privileged:*", "autogroup:self:*"]
            }
        ]
    }`

    pm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())
    require.NoError(t, err)

    peerMap := pm.BuildPeerMap(nodes.ViewSlice())

    // Helper to check if node A sees node B
    canSee := func(a, b types.NodeID) bool {
        peers := peerMap[a]

        return slices.ContainsFunc(peers, func(n types.NodeView) bool {
            return n.ID() == b
        })
    }

    // Superadmin should see all tagged servers
    require.True(t, canSee(superadminDevice.ID, commonServer.ID),
        "superadmin should see tag:common")
    require.True(t, canSee(superadminDevice.ID, techServer.ID),
        "superadmin should see tag:tech")
    require.True(t, canSee(superadminDevice.ID, privilegedServer.ID),
        "superadmin should see tag:privileged")

    // Admin should see tag:common and tag:tech (but NOT tag:privileged)
    require.True(t, canSee(adminDevice.ID, commonServer.ID),
        "admin should see tag:common")
    require.True(t, canSee(adminDevice.ID, techServer.ID),
        "admin should see tag:tech")
    require.False(t, canSee(adminDevice.ID, privilegedServer.ID),
        "admin should NOT see tag:privileged")

    // Direction should see tag:common only
    require.True(t, canSee(directionDevice.ID, commonServer.ID),
        "direction should see tag:common")
    require.False(t, canSee(directionDevice.ID, techServer.ID),
        "direction should NOT see tag:tech")
    require.False(t, canSee(directionDevice.ID, privilegedServer.ID),
        "direction should NOT see tag:privileged")

    // Tagged servers should see their authorized users (symmetric visibility)
    require.True(t, canSee(commonServer.ID, superadminDevice.ID),
        "tag:common should see superadmin (symmetric)")
    require.True(t, canSee(commonServer.ID, adminDevice.ID),
        "tag:common should see admin (symmetric)")
    require.True(t, canSee(commonServer.ID, directionDevice.ID),
        "tag:common should see direction (symmetric)")

    require.True(t, canSee(techServer.ID, superadminDevice.ID),
        "tag:tech should see superadmin (symmetric)")
    require.True(t, canSee(techServer.ID, adminDevice.ID),
        "tag:tech should see admin (symmetric)")

    require.True(t, canSee(privilegedServer.ID, superadminDevice.ID),
        "tag:privileged should see superadmin (symmetric)")
}
// TestEmptyFilterNodesStillVisible verifies that nodes with empty filter rules
// (e.g., tagged servers that are only destinations, never sources) are still
// visible to nodes that can access them.
func TestEmptyFilterNodesStillVisible(t *testing.T) {
    users := types.Users{
        {Model: gorm.Model{ID: 1}, Name: "admin", Email: "admin@example.com"},
        {Model: gorm.Model{ID: 2}, Name: "tagowner", Email: "tagowner@example.com"},
    }

    adminDevice := &types.Node{
        ID: 1,
        Hostname: "admin-laptop",
        User: ptr.To(users[0]),
        UserID: ptr.To(users[0].ID),
        IPv4: ap("100.64.0.1"),
        Hostinfo: &tailcfg.Hostinfo{},
    }

    // Tagged server - only a destination, never a source in any rule
    // This means its compiled filter rules will be empty
    taggedServer := &types.Node{
        ID: 2,
        Hostname: "server",
        User: ptr.To(users[1]),
        UserID: ptr.To(users[1].ID),
        IPv4: ap("100.64.0.2"),
        Tags: []string{"tag:server"},
        Hostinfo: &tailcfg.Hostinfo{},
    }

    nodes := types.Nodes{adminDevice, taggedServer}

    // Policy where tagged server is ONLY a destination
    policy := `{
        "groups": {
            "group:admin": ["admin@example.com"]
        },
        "tagOwners": {
            "tag:server": ["tagowner@example.com"]
        },
        "acls": [
            {
                "action": "accept",
                "src": ["group:admin"],
                "dst": ["tag:server:*", "autogroup:self:*"]
            }
        ]
    }`

    pm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())
    require.NoError(t, err)

    peerMap := pm.BuildPeerMap(nodes.ViewSlice())

    // Admin should see the tagged server
    adminPeers := peerMap[adminDevice.ID]
    require.True(t, slices.ContainsFunc(adminPeers, func(n types.NodeView) bool {
        return n.ID() == taggedServer.ID
    }), "admin should see tagged server")

    // Tagged server should see admin (symmetric visibility)
    // Even though the server has no outbound rules (empty filter)
    serverPeers := peerMap[taggedServer.ID]
    require.True(t, slices.ContainsFunc(serverPeers, func(n types.NodeView) bool {
        return n.ID() == adminDevice.ID
    }), "tagged server should see admin (symmetric visibility)")
}
// TestAutogroupSelfCombinedWithTags verifies that autogroup:self combined with
// specific tags in the same rule provides "combined access" - users get both
// tagged nodes AND their own devices.
func TestAutogroupSelfCombinedWithTags(t *testing.T) {
    users := types.Users{
        {Model: gorm.Model{ID: 1}, Name: "admin", Email: "admin@example.com"},
        {Model: gorm.Model{ID: 2}, Name: "tagowner", Email: "tagowner@example.com"},
    }

    // Admin has two devices
    adminLaptop := &types.Node{
        ID: 1,
        Hostname: "admin-laptop",
        User: ptr.To(users[0]),
        UserID: ptr.To(users[0].ID),
        IPv4: ap("100.64.0.1"),
        Hostinfo: &tailcfg.Hostinfo{},
    }

    adminPhone := &types.Node{
        ID: 2,
        Hostname: "admin-phone",
        User: ptr.To(users[0]),
        UserID: ptr.To(users[0].ID),
        IPv4: ap("100.64.0.2"),
        Hostinfo: &tailcfg.Hostinfo{},
    }

    // Tagged web server
    webServer := &types.Node{
        ID: 3,
        Hostname: "web-server",
        User: ptr.To(users[1]),
        UserID: ptr.To(users[1].ID),
        IPv4: ap("100.64.0.3"),
        Tags: []string{"tag:web"},
        Hostinfo: &tailcfg.Hostinfo{},
    }

    nodes := types.Nodes{adminLaptop, adminPhone, webServer}

    // Combined rule: admin gets both tag:web AND autogroup:self
    policy := `{
        "groups": {
            "group:admin": ["admin@example.com"]
        },
        "tagOwners": {
            "tag:web": ["tagowner@example.com"]
        },
        "acls": [
            {
                "action": "accept",
                "src": ["group:admin"],
                "dst": ["tag:web:*", "autogroup:self:*"]
            }
        ]
    }`

    pm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())
    require.NoError(t, err)

    peerMap := pm.BuildPeerMap(nodes.ViewSlice())

    // Helper to check visibility
    canSee := func(a, b types.NodeID) bool {
        peers := peerMap[a]

        return slices.ContainsFunc(peers, func(n types.NodeView) bool {
            return n.ID() == b
        })
    }

    // Admin laptop should see: admin phone (autogroup:self) AND web server (tag:web)
    require.True(t, canSee(adminLaptop.ID, adminPhone.ID),
        "admin laptop should see admin phone (autogroup:self)")
    require.True(t, canSee(adminLaptop.ID, webServer.ID),
        "admin laptop should see web server (tag:web)")

    // Admin phone should see: admin laptop (autogroup:self) AND web server (tag:web)
    require.True(t, canSee(adminPhone.ID, adminLaptop.ID),
        "admin phone should see admin laptop (autogroup:self)")
    require.True(t, canSee(adminPhone.ID, webServer.ID),
        "admin phone should see web server (tag:web)")

    // Web server should see both admin devices (symmetric visibility)
    require.True(t, canSee(webServer.ID, adminLaptop.ID),
        "web server should see admin laptop (symmetric)")
    require.True(t, canSee(webServer.ID, adminPhone.ID),
        "web server should see admin phone (symmetric)")
}
// TestIssue2990SameUserTaggedDevice reproduces the exact scenario from issue #2990:
// - One user (user1) who is in group:admin
// - node1: user device (not tagged), belongs to user1
// - node2: tagged with tag:admin, ALSO belongs to user1 (same user!)
// - Rule: group:admin -> *:*
// - Rule: autogroup:member -> autogroup:self:*
//
// Expected: node1 should be able to reach node2 via group:admin -> *:* rule.
func TestIssue2990SameUserTaggedDevice(t *testing.T) {
    users := types.Users{
        {Model: gorm.Model{ID: 1}, Name: "user1", Email: "user1@"},
    }

    // node1: user device (not tagged), belongs to user1
    node1 := &types.Node{
        ID: 1,
        Hostname: "node1",
        User: ptr.To(users[0]),
        UserID: ptr.To(users[0].ID),
        IPv4: ap("100.64.0.1"),
        IPv6: ap("fd7a:115c:a1e0::1"),
        Hostinfo: &tailcfg.Hostinfo{},
    }

    // node2: tagged with tag:admin, ALSO belongs to user1 (same user!)
    node2 := &types.Node{
        ID: 2,
        Hostname: "node2",
        User: ptr.To(users[0]),
        UserID: ptr.To(users[0].ID),
        IPv4: ap("100.64.0.2"),
        IPv6: ap("fd7a:115c:a1e0::2"),
        Tags: []string{"tag:admin"},
        Hostinfo: &tailcfg.Hostinfo{},
    }

    nodes := types.Nodes{node1, node2}

    // Exact policy from the issue report
    policy := `{
        "groups": {
            "group:admin": ["user1@"]
        },
        "tagOwners": {
            "tag:admin": ["group:admin"]
        },
        "acls": [
            {
                "action": "accept",
                "src": ["group:admin"],
                "dst": ["*:*"]
            },
            {
                "action": "accept",
                "src": ["autogroup:member"],
                "dst": ["autogroup:self:*"]
            }
        ]
    }`

    pm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())
    require.NoError(t, err)

    // Check peer visibility
    peerMap := pm.BuildPeerMap(nodes.ViewSlice())

    canSee := func(a, b types.NodeID) bool {
        peers := peerMap[a]

        return slices.ContainsFunc(peers, func(n types.NodeView) bool {
            return n.ID() == b
        })
    }

    // node1 should see node2 (via group:admin -> *:* and symmetric visibility)
    require.True(t, canSee(node1.ID, node2.ID),
        "node1 should see node2 as peer")

    // node2 should see node1 (symmetric visibility)
    require.True(t, canSee(node2.ID, node1.ID),
        "node2 should see node1 as peer (symmetric visibility)")

    // Check packet filter for node1 - should allow access to node2
    filter1, err := pm.FilterForNode(node1.View())
    require.NoError(t, err)
    t.Logf("node1 filter rules: %d", len(filter1))

    for i, rule := range filter1 {
        t.Logf(" rule %d: SrcIPs=%v DstPorts=%v", i, rule.SrcIPs, rule.DstPorts)
    }

    // node1's filter should include a rule allowing access to node2's IP
    // (via the group:admin -> *:* rule)
    require.NotEmpty(t, filter1,
        "node1's packet filter should have rules (group:admin -> *:*)")

    // Check packet filter for node2 - tagged device, should have limited access
    filter2, err := pm.FilterForNode(node2.View())
    require.NoError(t, err)
    t.Logf("node2 filter rules: %d", len(filter2))

    for i, rule := range filter2 {
        t.Logf(" rule %d: SrcIPs=%v DstPorts=%v", i, rule.SrcIPs, rule.DstPorts)
    }
}
@@ -37,6 +37,15 @@ var ErrCircularReference = errors.New("circular reference detected")

var ErrUndefinedTagReference = errors.New("references undefined tag")

// SSH validation errors.
var (
    ErrSSHTagSourceToUserDest = errors.New("tags in SSH source cannot access user-owned devices")
    ErrSSHUserDestRequiresSameUser = errors.New("user destination requires source to contain only that same user")
    ErrSSHAutogroupSelfRequiresUserSource = errors.New("autogroup:self destination requires source to contain only users or groups, not tags or autogroup:tagged")
    ErrSSHTagSourceToAutogroupMember = errors.New("tags in SSH source cannot access autogroup:member (user-owned devices)")
    ErrSSHWildcardDestination = errors.New("wildcard (*) is not supported as SSH destination")
)
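These are conventional Go sentinel errors, so code inside the package can distinguish validation failures with errors.Is. A hedged sketch of that pattern (the validation code below is the authoritative usage):

    // Sketch: matching a specific SSH validation failure.
    if err := pol.validate(); errors.Is(err, ErrSSHWildcardDestination) {
        // e.g. surface the suggested alternatives to the operator
    }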
type Asterix int
|
||||
|
||||
func (a Asterix) Validate() error {
|
||||
@@ -1613,6 +1622,63 @@ func validateAutogroupForSSHUser(user *AutoGroup) error {
	return nil
}

// validateSSHSrcDstCombination validates that SSH source/destination combinations
// follow Tailscale's security model:
// - Destination can be: tags, autogroup:self (if source is users/groups), or same-user
// - Tags/autogroup:tagged CANNOT SSH to user destinations
// - Username destinations require the source to be that same single user only.
func validateSSHSrcDstCombination(sources SSHSrcAliases, destinations SSHDstAliases) error {
	// Categorize source types
	srcHasTaggedEntities := false
	srcHasGroups := false
	srcUsernames := make(map[string]bool)

	for _, src := range sources {
		switch v := src.(type) {
		case *Tag:
			srcHasTaggedEntities = true
		case *AutoGroup:
			if v.Is(AutoGroupTagged) {
				srcHasTaggedEntities = true
			} else if v.Is(AutoGroupMember) {
				srcHasGroups = true // autogroup:member is like a group of users
			}
		case *Group:
			srcHasGroups = true
		case *Username:
			srcUsernames[string(*v)] = true
		}
	}

	// Check destinations against source constraints
	for _, dst := range destinations {
		switch v := dst.(type) {
		case *Username:
			// Rule: Tags/autogroup:tagged CANNOT SSH to user destinations
			if srcHasTaggedEntities {
				return fmt.Errorf("%w (%s); use autogroup:tagged or specific tags as destinations instead",
					ErrSSHTagSourceToUserDest, *v)
			}
			// Rule: Username destination requires source to be that same single user only
			if srcHasGroups || len(srcUsernames) != 1 || !srcUsernames[string(*v)] {
				return fmt.Errorf("%w %q; use autogroup:self instead for same-user SSH access",
					ErrSSHUserDestRequiresSameUser, *v)
			}
		case *AutoGroup:
			// Rule: autogroup:self requires source to NOT contain tags
			if v.Is(AutoGroupSelf) && srcHasTaggedEntities {
				return ErrSSHAutogroupSelfRequiresUserSource
			}
			// Rule: autogroup:member (user-owned devices) cannot be accessed by tagged entities
			if v.Is(AutoGroupMember) && srcHasTaggedEntities {
				return ErrSSHTagSourceToAutogroupMember
			}
		}
	}

	return nil
}
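A quick sketch of the rule matrix, reusing the `tp`/`up`/`gp`/`agp` alias constructors that the package's tests below use (assumed visible in the same package):

```go
// tag -> user: rejected.
err := validateSSHSrcDstCombination(
	SSHSrcAliases{tp("tag:server")},
	SSHDstAliases{up("admin@")},
)
// errors.Is(err, ErrSSHTagSourceToUserDest) == true

// group -> autogroup:self: allowed.
err = validateSSHSrcDstCombination(
	SSHSrcAliases{gp("group:admins")},
	SSHDstAliases{agp("autogroup:self")},
)
// err == nil
```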

// validate reports if there are any errors in a policy after
// the unmarshaling process.
// It runs through all rules and checks if there are any inconsistencies

@@ -1754,6 +1820,12 @@ func (p *Policy) validate() error {
			}
		}
	}

+		// Validate SSH source/destination combinations follow Tailscale's security model
+		err := validateSSHSrcDstCombination(ssh.Sources, ssh.Destinations)
+		if err != nil {
+			errs = append(errs, err)
+		}
	}

	for _, tagOwners := range p.TagOwners {

@@ -1886,15 +1958,12 @@ func (a *SSHDstAliases) UnmarshalJSON(b []byte) error {
	*a = make([]Alias, len(aliases))
	for i, alias := range aliases {
		switch alias.Alias.(type) {
-		case *Username, *Tag, *AutoGroup, *Host,
-			// Asterix and Group is actually not supposed to be supported,
-			// however we do not support autogroups at the moment
-			// so we will leave it in as there is no other option
-			// to dynamically give all access
-			// https://tailscale.com/kb/1193/tailscale-ssh#dst
-			// TODO(kradalby): remove this when we support autogroup:tagged and autogroup:member
-			Asterix:
+		case *Username, *Tag, *AutoGroup, *Host:
			(*a)[i] = alias.Alias
+		case Asterix:
+			return fmt.Errorf("%w; use 'autogroup:member' for user-owned devices, "+
+				"'autogroup:tagged' for tagged devices, or specific tags/users",
+				ErrSSHWildcardDestination)
		default:
			return fmt.Errorf(
				"alias %T is not supported for SSH destination",

@@ -1924,6 +1993,8 @@ func (a SSHDstAliases) MarshalJSON() ([]byte, error) {
		case *Host:
			aliases[i] = string(*v)
		case Asterix:
+			// Marshal wildcard as "*" so it gets rejected during unmarshal
+			// with a proper error message explaining alternatives
			aliases[i] = "*"
		default:
			return nil, fmt.Errorf("unknown SSH destination alias type: %T", v)
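One round-trip consequence of the two cases above: a stored policy that still carries a wildcard SSH destination marshals to `"*"` and then fails on the next load with the guiding error rather than silently widening access. A sketch (whether plain `encoding/json` or the package's HuJSON loader sits on top, the alias unmarshaller is what rejects it):

```go
var p Policy
err := json.Unmarshal([]byte(`{
	"ssh": [{"action": "accept", "src": ["group:admins"], "dst": ["*"], "users": ["autogroup:nonroot"]}]
}`), &p)
// err wraps ErrSSHWildcardDestination and names the alternatives.
```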
@@ -664,7 +664,8 @@ func TestUnmarshalPolicy(t *testing.T) {
		input: `
{
	"tagOwners": {
-		"tag:web": ["admin@example.com"]
+		"tag:web": ["admin@example.com"],
+		"tag:server": ["admin@example.com"]
	},
	"ssh": [
		{
@@ -673,7 +674,7 @@ func TestUnmarshalPolicy(t *testing.T) {
				"tag:web"
			],
			"dst": [
-				"admin@example.com"
+				"tag:server"
			],
			"users": ["*"]
		}
@@ -682,7 +683,8 @@ func TestUnmarshalPolicy(t *testing.T) {
`,
		want: &Policy{
			TagOwners: TagOwners{
-				Tag("tag:web"): Owners{ptr.To(Username("admin@example.com"))},
+				Tag("tag:web"):    Owners{ptr.To(Username("admin@example.com"))},
+				Tag("tag:server"): Owners{ptr.To(Username("admin@example.com"))},
			},
			SSHs: []SSH{
				{
@@ -691,7 +693,7 @@ func TestUnmarshalPolicy(t *testing.T) {
					tp("tag:web"),
				},
				Destinations: SSHDstAliases{
-					ptr.To(Username("admin@example.com")),
+					tp("tag:server"),
				},
				Users: []SSHUser{
					SSHUser("*"),
@@ -714,7 +716,7 @@ func TestUnmarshalPolicy(t *testing.T) {
				"group:admins"
			],
			"dst": [
-				"admin@example.com"
+				"autogroup:self"
			],
			"users": ["root"],
			"checkPeriod": "24h"
@@ -733,7 +735,7 @@ func TestUnmarshalPolicy(t *testing.T) {
				gp("group:admins"),
			},
			Destinations: SSHDstAliases{
-				ptr.To(Username("admin@example.com")),
+				agp("autogroup:self"),
			},
			Users: []SSHUser{
				SSHUser("root"),
@@ -1521,6 +1523,249 @@ func TestUnmarshalPolicy(t *testing.T) {
`,
		wantErr: `tag "tag:child" references undefined tag "tag:nonexistent"`,
	},
	// SSH source/destination validation tests (#3009, #3010)
	{
		name: "ssh-tag-to-user-rejected",
		input: `
{
	"tagOwners": {"tag:server": ["admin@"]},
	"ssh": [{
		"action": "accept",
		"src": ["tag:server"],
		"dst": ["admin@"],
		"users": ["autogroup:nonroot"]
	}]
}
`,
		wantErr: "tags in SSH source cannot access user-owned devices",
	},
	{
		name: "ssh-autogroup-tagged-to-user-rejected",
		input: `
{
	"ssh": [{
		"action": "accept",
		"src": ["autogroup:tagged"],
		"dst": ["admin@"],
		"users": ["autogroup:nonroot"]
	}]
}
`,
		wantErr: "tags in SSH source cannot access user-owned devices",
	},
	{
		name: "ssh-tag-to-autogroup-self-rejected",
		input: `
{
	"tagOwners": {"tag:server": ["admin@"]},
	"ssh": [{
		"action": "accept",
		"src": ["tag:server"],
		"dst": ["autogroup:self"],
		"users": ["autogroup:nonroot"]
	}]
}
`,
		wantErr: "autogroup:self destination requires source to contain only users or groups",
	},
	{
		name: "ssh-group-to-user-rejected",
		input: `
{
	"groups": {"group:admins": ["admin@", "user1@"]},
	"ssh": [{
		"action": "accept",
		"src": ["group:admins"],
		"dst": ["admin@"],
		"users": ["autogroup:nonroot"]
	}]
}
`,
		wantErr: `user destination requires source to contain only that same user "admin@"`,
	},
	{
		name: "ssh-same-user-to-user-allowed",
		input: `
{
	"ssh": [{
		"action": "accept",
		"src": ["admin@"],
		"dst": ["admin@"],
		"users": ["autogroup:nonroot"]
	}]
}
`,
		want: &Policy{
			SSHs: []SSH{
				{
					Action:       "accept",
					Sources:      SSHSrcAliases{up("admin@")},
					Destinations: SSHDstAliases{up("admin@")},
					Users:        []SSHUser{SSHUser(AutoGroupNonRoot)},
				},
			},
		},
	},
	{
		name: "ssh-group-to-autogroup-self-allowed",
		input: `
{
	"groups": {"group:admins": ["admin@", "user1@"]},
	"ssh": [{
		"action": "accept",
		"src": ["group:admins"],
		"dst": ["autogroup:self"],
		"users": ["autogroup:nonroot"]
	}]
}
`,
		want: &Policy{
			Groups: Groups{
				Group("group:admins"): []Username{Username("admin@"), Username("user1@")},
			},
			SSHs: []SSH{
				{
					Action:       "accept",
					Sources:      SSHSrcAliases{gp("group:admins")},
					Destinations: SSHDstAliases{agp("autogroup:self")},
					Users:        []SSHUser{SSHUser(AutoGroupNonRoot)},
				},
			},
		},
	},
	{
		name: "ssh-autogroup-tagged-to-autogroup-member-rejected",
		input: `
{
	"ssh": [{
		"action": "accept",
		"src": ["autogroup:tagged"],
		"dst": ["autogroup:member"],
		"users": ["autogroup:nonroot"]
	}]
}
`,
		wantErr: "tags in SSH source cannot access autogroup:member",
	},
	{
		name: "ssh-autogroup-tagged-to-autogroup-tagged-allowed",
		input: `
{
	"ssh": [{
		"action": "accept",
		"src": ["autogroup:tagged"],
		"dst": ["autogroup:tagged"],
		"users": ["autogroup:nonroot"]
	}]
}
`,
		want: &Policy{
			SSHs: []SSH{
				{
					Action:       "accept",
					Sources:      SSHSrcAliases{agp("autogroup:tagged")},
					Destinations: SSHDstAliases{agp("autogroup:tagged")},
					Users:        []SSHUser{SSHUser(AutoGroupNonRoot)},
				},
			},
		},
	},
	{
		name: "ssh-wildcard-destination-rejected",
		input: `
{
	"groups": {"group:admins": ["admin@"]},
	"ssh": [{
		"action": "accept",
		"src": ["group:admins"],
		"dst": ["*"],
		"users": ["autogroup:nonroot"]
	}]
}
`,
		wantErr: "wildcard (*) is not supported as SSH destination",
	},
	{
		name: "ssh-group-to-tag-allowed",
		input: `
{
	"tagOwners": {"tag:server": ["admin@"]},
	"groups": {"group:admins": ["admin@"]},
	"ssh": [{
		"action": "accept",
		"src": ["group:admins"],
		"dst": ["tag:server"],
		"users": ["autogroup:nonroot"]
	}]
}
`,
		want: &Policy{
			TagOwners: TagOwners{
				Tag("tag:server"): Owners{up("admin@")},
			},
			Groups: Groups{
				Group("group:admins"): []Username{Username("admin@")},
			},
			SSHs: []SSH{
				{
					Action:       "accept",
					Sources:      SSHSrcAliases{gp("group:admins")},
					Destinations: SSHDstAliases{tp("tag:server")},
					Users:        []SSHUser{SSHUser(AutoGroupNonRoot)},
				},
			},
		},
	},
	{
		name: "ssh-user-to-tag-allowed",
		input: `
{
	"tagOwners": {"tag:server": ["admin@"]},
	"ssh": [{
		"action": "accept",
		"src": ["admin@"],
		"dst": ["tag:server"],
		"users": ["autogroup:nonroot"]
	}]
}
`,
		want: &Policy{
			TagOwners: TagOwners{
				Tag("tag:server"): Owners{up("admin@")},
			},
			SSHs: []SSH{
				{
					Action:       "accept",
					Sources:      SSHSrcAliases{up("admin@")},
					Destinations: SSHDstAliases{tp("tag:server")},
					Users:        []SSHUser{SSHUser(AutoGroupNonRoot)},
				},
			},
		},
	},
	{
		name: "ssh-autogroup-member-to-autogroup-tagged-allowed",
		input: `
{
	"ssh": [{
		"action": "accept",
		"src": ["autogroup:member"],
		"dst": ["autogroup:tagged"],
		"users": ["autogroup:nonroot"]
	}]
}
`,
		want: &Policy{
			SSHs: []SSH{
				{
					Action:       "accept",
					Sources:      SSHSrcAliases{agp("autogroup:member")},
					Destinations: SSHDstAliases{agp("autogroup:tagged")},
					Users:        []SSHUser{SSHUser(AutoGroupNonRoot)},
				},
			},
		},
	},
}

cmps := append(util.Comparers,

@@ -5,6 +5,7 @@ import (
	"strings"
	"time"

+	hsdb "github.com/juanfont/headscale/hscontrol/db"
	"github.com/juanfont/headscale/hscontrol/routes"
	"github.com/juanfont/headscale/hscontrol/types"
	"tailscale.com/tailcfg"

@@ -78,7 +79,7 @@ func (s *State) DebugOverview() string {
	now := time.Now()
	for _, node := range allNodes.All() {
		if node.Valid() {
-			userName := node.User().Name()
+			userName := node.Owner().Name()
			userNodeCounts[userName]++

			if node.IsOnline().Valid() && node.IsOnline().Get() {

@@ -228,7 +229,7 @@ func (s *State) DebugPolicy() (string, error) {

		return p.Data, nil
	case types.PolicyModeFile:
-		pol, err := policyBytes(s.db, s.cfg)
+		pol, err := hsdb.PolicyBytes(s.db.DB, s.cfg)
		if err != nil {
			return "", err
		}

@@ -281,7 +282,7 @@ func (s *State) DebugOverviewJSON() DebugOverviewInfo {

	for _, node := range allNodes.All() {
		if node.Valid() {
-			userName := node.User().Name()
+			userName := node.Owner().Name()
			info.Users[userName]++

			if node.IsOnline().Valid() && node.IsOnline().Get() {

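The `User()` to `Owner()` swap matters for tagged devices: per the NodeStore notes below, `UserID` keeps tracking the internal creating user, while the display owner of a tagged node is the synthetic "tagged-devices" user. A one-line sketch of the distinction (semantics inferred from this diff's own comments):

```go
created := node.User().Name()  // internal creating user, kept for tracking
display := node.Owner().Name() // "tagged-devices" for tagged nodes
_, _ = created, display
```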
@@ -509,15 +509,27 @@ func (s *NodeStore) DebugString() string {
	sb.WriteString(fmt.Sprintf("Users with Nodes: %d\n", len(snapshot.nodesByUser)))
	sb.WriteString("\n")

-	// User distribution
-	sb.WriteString("Nodes by User:\n")
+	// User distribution (shows internal UserID tracking, not display owner)
+	sb.WriteString("Nodes by Internal User ID:\n")
	for userID, nodes := range snapshot.nodesByUser {
		if len(nodes) > 0 {
			userName := "unknown"
+			taggedCount := 0
			if len(nodes) > 0 && nodes[0].Valid() {
				userName = nodes[0].User().Name()
+				// Count tagged nodes (which have UserID set but are owned by "tagged-devices")
+				for _, n := range nodes {
+					if n.IsTagged() {
+						taggedCount++
+					}
+				}
			}

+			if taggedCount > 0 {
+				sb.WriteString(fmt.Sprintf(" - User %d (%s): %d nodes (%d tagged)\n", userID, userName, len(nodes), taggedCount))
+			} else {
+				sb.WriteString(fmt.Sprintf(" - User %d (%s): %d nodes\n", userID, userName, len(nodes)))
+			}
-			sb.WriteString(fmt.Sprintf(" - User %d (%s): %d nodes\n", userID, userName, len(nodes)))
		}
	}
	sb.WriteString("\n")

@@ -8,9 +8,7 @@ import (
	"context"
	"errors"
	"fmt"
-	"io"
	"net/netip"
-	"os"
	"slices"
	"strings"
	"sync"
@@ -24,6 +22,7 @@ import (
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/types/change"
	"github.com/juanfont/headscale/hscontrol/util"
+	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
	"golang.org/x/sync/errgroup"
	"gorm.io/gorm"
@@ -115,8 +114,7 @@ func NewState(cfg *types.Config) (*State, error) {
	)

	db, err := hsdb.NewHeadscaleDatabase(
-		cfg.Database,
-		cfg.BaseDomain,
+		cfg,
		registrationCache,
	)
	if err != nil {
@@ -138,12 +136,13 @@ func NewState(cfg *types.Config) (*State, error) {
	for _, node := range nodes {
		node.IsOnline = ptr.To(false)
	}

	users, err := db.ListUsers()
	if err != nil {
		return nil, fmt.Errorf("loading users: %w", err)
	}

-	pol, err := policyBytes(db, cfg)
+	pol, err := hsdb.PolicyBytes(db.DB, cfg)
	if err != nil {
		return nil, fmt.Errorf("loading policy: %w", err)
	}
@@ -159,6 +158,7 @@ func NewState(cfg *types.Config) (*State, error) {
	if batchSize == 0 {
		batchSize = defaultNodeStoreBatchSize
	}

	batchTimeout := cfg.Tuning.NodeStoreBatchTimeout
	if batchTimeout == 0 {
		batchTimeout = defaultNodeStoreBatchTimeout
@@ -192,54 +192,14 @@ func NewState(cfg *types.Config) (*State, error) {
func (s *State) Close() error {
	s.nodeStore.Stop()

-	if err := s.db.Close(); err != nil {
+	err := s.db.Close()
+	if err != nil {
		return fmt.Errorf("closing database: %w", err)
	}

	return nil
}

-// policyBytes loads policy configuration from file or database based on the configured mode.
-// Returns nil if no policy is configured, which is valid.
-func policyBytes(db *hsdb.HSDatabase, cfg *types.Config) ([]byte, error) {
-	switch cfg.Policy.Mode {
-	case types.PolicyModeFile:
-		path := cfg.Policy.Path
-
-		// It is fine to start headscale without a policy file.
-		if len(path) == 0 {
-			return nil, nil
-		}
-
-		absPath := util.AbsolutePathFromConfigPath(path)
-		policyFile, err := os.Open(absPath)
-		if err != nil {
-			return nil, err
-		}
-		defer policyFile.Close()
-
-		return io.ReadAll(policyFile)
-
-	case types.PolicyModeDB:
-		p, err := db.GetPolicy()
-		if err != nil {
-			if errors.Is(err, types.ErrPolicyNotFound) {
-				return nil, nil
-			}
-
-			return nil, err
-		}
-
-		if p.Data == "" {
-			return nil, nil
-		}
-
-		return []byte(p.Data), err
-	}
-
-	return nil, fmt.Errorf("%w: %s", ErrUnsupportedPolicyMode, cfg.Policy.Mode)
-}
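The loader now lives in the db package as `hsdb.PolicyBytes` and takes the raw gorm handle, which is why every call site in this diff passes `s.db.DB` (or `db.DB`) instead of the `HSDatabase` wrapper. A sketch of a caller, with the nil contract from the doc comment above:

```go
pol, err := hsdb.PolicyBytes(s.db.DB, s.cfg)
if err != nil {
	return fmt.Errorf("loading policy: %w", err)
}

if pol == nil {
	// Valid state: no policy configured in either file or DB mode.
}
```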

// SetDERPMap updates the DERP relay configuration.
func (s *State) SetDERPMap(dm *tailcfg.DERPMap) {
	s.derpMap.Store(dm)
@@ -253,7 +213,7 @@ func (s *State) DERPMap() tailcfg.DERPMapView {

// ReloadPolicy reloads the access control policy and triggers auto-approval if changed.
// Returns true if the policy changed.
func (s *State) ReloadPolicy() ([]change.Change, error) {
-	pol, err := policyBytes(s.db, s.cfg)
+	pol, err := hsdb.PolicyBytes(s.db.DB, s.cfg)
	if err != nil {
		return nil, fmt.Errorf("loading policy: %w", err)
	}
@@ -362,8 +322,29 @@ func (s *State) UpdateUser(userID types.UserID, updateFn func(*types.User) error

// DeleteUser permanently removes a user and all associated data (nodes, API keys, etc).
// This operation is irreversible.
-func (s *State) DeleteUser(userID types.UserID) error {
-	return s.db.DestroyUser(userID)
+// It also updates the policy manager to ensure ACL policies referencing the deleted
+// user are re-evaluated immediately, fixing issue #2967.
+func (s *State) DeleteUser(userID types.UserID) (change.Change, error) {
+	err := s.db.DestroyUser(userID)
+	if err != nil {
+		return change.Change{}, err
+	}
+
+	// Update policy manager with the new user list (without the deleted user)
+	// This ensures that if the policy references the deleted user, it gets
+	// re-evaluated immediately rather than when some other operation triggers it.
+	c, err := s.updatePolicyManagerUsers()
+	if err != nil {
+		return change.Change{}, fmt.Errorf("updating policy after user deletion: %w", err)
+	}
+
+	// If the policy manager doesn't detect changes, still return UserRemoved
+	// to ensure peer lists are refreshed
+	if c.IsEmpty() {
+		c = change.UserRemoved()
+	}
+
+	return c, nil
}
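Call sites change shape with this signature: the returned `change.Change` has to be propagated so peers drop the deleted user's nodes immediately. A sketch of an adjusted caller (the distribution step is illustrative; the actual notification mechanism in the tree may differ):

```go
c, err := s.DeleteUser(userID)
if err != nil {
	return err
}

// Propagate so nodes re-evaluate peer lists and filters right away (#2967).
// notifyAll is a hypothetical stand-in for however changes are fanned out.
notifyAll(c)
```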

// RenameUser changes a user's name. The new name must be unique.

@@ -426,10 +407,14 @@ func (s *State) persistNodeToDB(node types.NodeView) (types.NodeView, change.Cha

	nodePtr := node.AsStruct()

-	// Use Omit("expiry") to prevent overwriting expiry during MapRequest updates.
-	// Expiry should only be updated through explicit SetNodeExpiry calls or re-registration.
-	// See: https://github.com/juanfont/headscale/issues/2862
-	err := s.db.DB.Omit("expiry").Updates(nodePtr).Error
+	// Use Omit to prevent overwriting certain fields during MapRequest updates:
+	// - "expiry": should only be updated through explicit SetNodeExpiry calls or re-registration
+	// - "AuthKeyID", "AuthKey": prevents GORM from persisting stale PreAuthKey references that
+	//   may exist in NodeStore after a PreAuthKey has been deleted. The database handles setting
+	//   auth_key_id to NULL via ON DELETE SET NULL. Without this, Updates() would fail with a
+	//   foreign key constraint error when trying to reference a deleted PreAuthKey.
+	// See also: https://github.com/juanfont/headscale/issues/2862
+	err := s.db.DB.Omit("expiry", "AuthKeyID", "AuthKey").Updates(nodePtr).Error
	if err != nil {
		return types.NodeView{}, change.Change{}, fmt.Errorf("saving node: %w", err)
	}
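The `Omit` list is the load-bearing detail: gorm's `Updates` writes every non-zero field of the struct, so a stale `AuthKeyID` held in NodeStore would be written back even after the database NULLed it via `ON DELETE SET NULL`, tripping the foreign key. `Omit` and `Updates` are standard gorm API; a generic sketch of the pattern:

```go
// Update a row while leaving protected columns untouched.
// Roughly: UPDATE nodes SET <changed fields> WHERE id = ?, never touching
// expiry or auth_key_id, which have their own lifecycles.
err := db.Omit("expiry", "AuthKeyID", "AuthKey").Updates(&node).Error
if err != nil {
	return fmt.Errorf("saving node: %w", err)
}
```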

@@ -591,12 +576,14 @@ func (s *State) ListNodes(nodeIDs ...types.NodeID) views.Slice[types.NodeView] {

	// Filter nodes by the requested IDs
	allNodes := s.nodeStore.ListNodes()

	nodeIDSet := make(map[types.NodeID]struct{}, len(nodeIDs))
	for _, id := range nodeIDs {
		nodeIDSet[id] = struct{}{}
	}

	var filteredNodes []types.NodeView

	for _, node := range allNodes.All() {
		if _, exists := nodeIDSet[node.ID()]; exists {
			filteredNodes = append(filteredNodes, node)
@@ -619,12 +606,14 @@ func (s *State) ListPeers(nodeID types.NodeID, peerIDs ...types.NodeID) views.Sl

	// For specific peerIDs, filter from all nodes
	allNodes := s.nodeStore.ListNodes()

	nodeIDSet := make(map[types.NodeID]struct{}, len(peerIDs))
	for _, id := range peerIDs {
		nodeIDSet[id] = struct{}{}
	}

	var filteredNodes []types.NodeView

	for _, node := range allNodes.All() {
		if _, exists := nodeIDSet[node.ID()]; exists {
			filteredNodes = append(filteredNodes, node)
@@ -637,6 +626,7 @@ func (s *State) ListEphemeralNodes() views.Slice[types.NodeView] {
// ListEphemeralNodes retrieves all ephemeral (temporary) nodes in the system.
func (s *State) ListEphemeralNodes() views.Slice[types.NodeView] {
	allNodes := s.nodeStore.ListNodes()

	var ephemeralNodes []types.NodeView

	for _, node := range allNodes.All() {
@@ -719,7 +709,18 @@ func (s *State) SetNodeTags(nodeID types.NodeID, tags []string) (types.NodeView,
		return types.NodeView{}, change.Change{}, fmt.Errorf("%w: %d", ErrNodeNotInNodeStore, nodeID)
	}

-	return s.persistNodeToDB(n)
+	nodeView, c, err := s.persistNodeToDB(n)
+	if err != nil {
+		return nodeView, c, err
+	}
+
+	// Set OriginNode so the mapper knows to include self info for this node.
+	// When tags change, persistNodeToDB returns PolicyChange which doesn't set OriginNode,
+	// so the mapper's self-update check fails and the node never sees its new tags.
+	// Setting OriginNode ensures the node gets a self-update with the new tags.
+	c.OriginNode = nodeID
+
+	return nodeView, c, nil
}

// SetApprovedRoutes sets the network routes that a node is approved to advertise.
@@ -757,7 +758,8 @@ func (s *State) SetApprovedRoutes(nodeID types.NodeID, routes []netip.Prefix) (t

// RenameNode changes the display name of a node.
func (s *State) RenameNode(nodeID types.NodeID, newName string) (types.NodeView, change.Change, error) {
-	if err := util.ValidateHostname(newName); err != nil {
+	err := util.ValidateHostname(newName)
+	if err != nil {
		return types.NodeView{}, change.Change{}, fmt.Errorf("renaming node: %w", err)
	}

@@ -966,6 +968,11 @@ func (s *State) GetAPIKey(displayPrefix string) (*types.APIKey, error) {
	return s.db.GetAPIKey(prefix)
}

+// GetAPIKeyByID retrieves an API key by its database ID.
+func (s *State) GetAPIKeyByID(id uint64) (*types.APIKey, error) {
+	return s.db.GetAPIKeyByID(id)
+}

// ExpireAPIKey marks an API key as expired.
func (s *State) ExpireAPIKey(key *types.APIKey) error {
	return s.db.ExpireAPIKey(key)
@@ -1025,18 +1032,18 @@ func (s *State) GetPreAuthKey(id string) (*types.PreAuthKey, error) {
}

// ListPreAuthKeys returns all pre-authentication keys for a user.
-func (s *State) ListPreAuthKeys(userID types.UserID) ([]types.PreAuthKey, error) {
-	return s.db.ListPreAuthKeys(userID)
+func (s *State) ListPreAuthKeys() ([]types.PreAuthKey, error) {
+	return s.db.ListPreAuthKeys()
}

// ExpirePreAuthKey marks a pre-authentication key as expired.
-func (s *State) ExpirePreAuthKey(preAuthKey *types.PreAuthKey) error {
-	return s.db.ExpirePreAuthKey(preAuthKey)
+func (s *State) ExpirePreAuthKey(id uint64) error {
+	return s.db.ExpirePreAuthKey(id)
}

// DeletePreAuthKey permanently deletes a pre-authentication key.
-func (s *State) DeletePreAuthKey(preAuthKey *types.PreAuthKey) error {
-	return s.db.DeletePreAuthKey(preAuthKey)
+func (s *State) DeletePreAuthKey(id uint64) error {
+	return s.db.DeletePreAuthKey(id)
}
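The pre-auth key surface now takes plain IDs and lists keys globally instead of per user, so callers filter client-side and pass `k.ID` to expire or delete. A sketch of a migrated caller (the `Expiration` field name on `types.PreAuthKey` is assumed from context):

```go
keys, err := s.ListPreAuthKeys() // all keys; filter by user client-side if needed
if err != nil {
	return err
}

for _, k := range keys {
	if k.Expiration != nil && k.Expiration.Before(time.Now()) {
		_ = s.DeletePreAuthKey(k.ID) // by ID now, not by *types.PreAuthKey
	}
}
```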

// GetRegistrationCacheEntry retrieves a node registration from cache.

@@ -1082,6 +1089,7 @@ func preserveNetInfo(existingNode types.NodeView, nodeID types.NodeID, validHost
	if existingNode.Valid() {
		existingHostinfo = existingNode.Hostinfo().AsStruct()
	}

	return netInfoFromMapRequest(nodeID, existingHostinfo, validHostinfo)
}

@@ -1104,6 +1112,167 @@ type newNodeParams struct {
	ExistingNodeForNetinfo types.NodeView
}

// authNodeUpdateParams contains parameters for updating an existing node during auth.
type authNodeUpdateParams struct {
	// Node to update; must be valid and in NodeStore.
	ExistingNode types.NodeView
	// Client data: keys, hostinfo, endpoints.
	RegEntry *types.RegisterNode
	// Pre-validated hostinfo; NetInfo preserved from ExistingNode.
	ValidHostinfo *tailcfg.Hostinfo
	// Hostname from hostinfo, or generated from keys if client omits it.
	Hostname string
	// Auth user; may differ from ExistingNode.User() on conversion.
	User *types.User
	// Overrides RegEntry.Node.Expiry; ignored for tagged nodes.
	Expiry *time.Time
	// Only used when IsConvertFromTag=true.
	RegisterMethod string
	// Set true for tagged->user conversion. Affects RegisterMethod and expiry.
	IsConvertFromTag bool
}

// applyAuthNodeUpdate applies common update logic for re-authenticating or converting
// an existing node. It updates the node in NodeStore, processes RequestTags, and
// persists changes to the database.
func (s *State) applyAuthNodeUpdate(params authNodeUpdateParams) (types.NodeView, error) {
	// Log the operation type
	if params.IsConvertFromTag {
		log.Info().
			Str("node.name", params.ExistingNode.Hostname()).
			Uint64("node.id", params.ExistingNode.ID().Uint64()).
			Strs("old.tags", params.ExistingNode.Tags().AsSlice()).
			Msg("Converting tagged node to user-owned node")
	} else {
		log.Info().
			Str("node.name", params.ExistingNode.Hostname()).
			Uint64("node.id", params.ExistingNode.ID().Uint64()).
			Interface("hostinfo", params.RegEntry.Node.Hostinfo).
			Msg("Updating existing node registration via reauth")
	}

	// Process RequestTags during reauth (#2979)
	// Due to json:",omitempty", we treat empty/nil as "clear tags"
	var requestTags []string
	if params.RegEntry.Node.Hostinfo != nil {
		requestTags = params.RegEntry.Node.Hostinfo.RequestTags
	}

	oldTags := params.ExistingNode.Tags().AsSlice()

	// Validate tags BEFORE calling UpdateNode to ensure we don't modify NodeStore
	// if validation fails. This maintains consistency between NodeStore and database.
	rejectedTags := s.validateRequestTags(params.ExistingNode, requestTags)
	if len(rejectedTags) > 0 {
		return types.NodeView{}, fmt.Errorf(
			"%w %v are invalid or not permitted",
			ErrRequestedTagsInvalidOrNotPermitted,
			rejectedTags,
		)
	}

	// Update existing node in NodeStore - validation passed, safe to mutate
	updatedNodeView, ok := s.nodeStore.UpdateNode(params.ExistingNode.ID(), func(node *types.Node) {
		node.NodeKey = params.RegEntry.Node.NodeKey
		node.DiscoKey = params.RegEntry.Node.DiscoKey
		node.Hostname = params.Hostname

		// Preserve NetInfo from existing node when re-registering
		node.Hostinfo = params.ValidHostinfo
		node.Hostinfo.NetInfo = preserveNetInfo(
			params.ExistingNode,
			params.ExistingNode.ID(),
			params.ValidHostinfo,
		)

		node.Endpoints = params.RegEntry.Node.Endpoints
		node.IsOnline = ptr.To(false)
		node.LastSeen = ptr.To(time.Now())

		// Set RegisterMethod - for conversion this is the new method,
		// for reauth we preserve the existing one from regEntry
		if params.IsConvertFromTag {
			node.RegisterMethod = params.RegisterMethod
		} else {
			node.RegisterMethod = params.RegEntry.Node.RegisterMethod
		}

		// Track tagged status BEFORE processing tags
		wasTagged := node.IsTagged()

		// Process tags - may change node.Tags and node.UserID
		// Tags were pre-validated, so this will always succeed (no rejected tags)
		_ = s.processReauthTags(node, requestTags, params.User, oldTags)

		// Handle expiry AFTER tag processing, based on transition
		// This ensures expiry is correctly set/cleared based on the NEW tagged status
		isTagged := node.IsTagged()

		switch {
		case wasTagged && !isTagged:
			// Tagged → Personal: set expiry from client request
			if params.Expiry != nil {
				node.Expiry = params.Expiry
			} else {
				node.Expiry = params.RegEntry.Node.Expiry
			}
		case !wasTagged && isTagged:
			// Personal → Tagged: clear expiry (tagged nodes don't expire)
			node.Expiry = nil
		case params.IsConvertFromTag:
			// Explicit conversion from tagged to user-owned: set expiry from client request
			if params.Expiry != nil {
				node.Expiry = params.Expiry
			} else {
				node.Expiry = params.RegEntry.Node.Expiry
			}
		case !isTagged:
			// Personal → Personal: update expiry from client
			if params.Expiry != nil {
				node.Expiry = params.Expiry
			} else {
				node.Expiry = params.RegEntry.Node.Expiry
			}
		}
		// Tagged → Tagged: keep existing expiry (nil) - no action needed
	})

	if !ok {
		return types.NodeView{}, fmt.Errorf("%w: %d", ErrNodeNotInNodeStore, params.ExistingNode.ID())
	}

	// Persist to database
	// Omit AuthKeyID/AuthKey to prevent stale PreAuthKey references from causing FK errors.
	_, err := hsdb.Write(s.db.DB, func(tx *gorm.DB) (*types.Node, error) {
		err := tx.Omit("AuthKeyID", "AuthKey").Updates(updatedNodeView.AsStruct()).Error
		if err != nil {
			return nil, fmt.Errorf("failed to save node: %w", err)
		}

		return nil, nil //nolint:nilnil // side-effect only write
	})
	if err != nil {
		return types.NodeView{}, err
	}

	// Log completion
	if params.IsConvertFromTag {
		log.Trace().
			Str("node.name", updatedNodeView.Hostname()).
			Uint64("node.id", updatedNodeView.ID().Uint64()).
			Str("node.key", updatedNodeView.NodeKey().ShortString()).
			Msg("Tagged node converted to user-owned")
	} else {
		log.Trace().
			Str("node.name", updatedNodeView.Hostname()).
			Uint64("node.id", updatedNodeView.ID().Uint64()).
			Str("node.key", updatedNodeView.NodeKey().ShortString()).
			Msg("Node re-authorized")
	}

	return updatedNodeView, nil
}
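The switch above is effectively a small transition table on (wasTagged, isTagged). Restated as a sketch helper, purely to make the rules scannable (this function does not exist in the tree, and the explicit IsConvertFromTag case collapses into the "ends up personal" row):

```go
func nextExpiry(wasTagged, isTagged bool, requested, fromRegEntry *time.Time) *time.Time {
	switch {
	case !wasTagged && isTagged:
		return nil // personal -> tagged: tagged nodes do not expire
	case wasTagged && isTagged:
		return nil // tagged -> tagged: expiry stays disabled
	default: // node ends up personal: honor the client's requested expiry
		if requested != nil {
			return requested
		}
		return fromRegEntry
	}
}
```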

// createAndSaveNewNode creates a new node, allocates IPs, saves to DB, and adds to NodeStore.
// It preserves netinfo from an existing node if one is provided (for faster DERP connectivity).
func (s *State) createAndSaveNewNode(params newNodeParams) (types.NodeView, error) {
@@ -1142,12 +1311,16 @@ func (s *State) createAndSaveNewNode(params newNodeParams) (types.NodeView, erro
			nodeToRegister.User = params.PreAuthKey.User
		}
		// If PreAuthKey.UserID is nil, the node is "orphaned" (system-created)

		// Tagged nodes have key expiry disabled.
		nodeToRegister.Expiry = nil
	} else {
		// USER-OWNED NODE
		nodeToRegister.UserID = &params.PreAuthKey.User.ID
		nodeToRegister.User = params.PreAuthKey.User
		nodeToRegister.Tags = nil
	}

	nodeToRegister.AuthKey = params.PreAuthKey
	nodeToRegister.AuthKeyID = &params.PreAuthKey.ID
} else {
@@ -1166,25 +1339,22 @@ func (s *State) createAndSaveNewNode(params newNodeParams) (types.NodeView, erro
	// Process RequestTags (from tailscale up --advertise-tags) ONLY for non-PreAuthKey registrations.
	// Validate early before IP allocation to avoid resource leaks on failure.
	if params.PreAuthKey == nil && params.Hostinfo != nil && len(params.Hostinfo.RequestTags) > 0 {
-		var approvedTags, rejectedTags []string
-
-		for _, tag := range params.Hostinfo.RequestTags {
-			if s.polMan.NodeCanHaveTag(nodeToRegister.View(), tag) {
-				approvedTags = append(approvedTags, tag)
-			} else {
-				rejectedTags = append(rejectedTags, tag)
-			}
-		}
-
-		// Reject registration if any requested tags are unauthorized
+		// Validate all tags before applying - reject if any tag is not permitted
+		rejectedTags := s.validateRequestTags(nodeToRegister.View(), params.Hostinfo.RequestTags)
		if len(rejectedTags) > 0 {
			return types.NodeView{}, fmt.Errorf("%w %v are invalid or not permitted", ErrRequestedTagsInvalidOrNotPermitted, rejectedTags)
		}

+		// All tags are approved - apply them
+		approvedTags := params.Hostinfo.RequestTags
		if len(approvedTags) > 0 {
			nodeToRegister.Tags = approvedTags
			slices.Sort(nodeToRegister.Tags)
			nodeToRegister.Tags = slices.Compact(nodeToRegister.Tags)

			// Tagged nodes have key expiry disabled.
			nodeToRegister.Expiry = nil

			log.Info().
				Str("node.name", nodeToRegister.Hostname).
				Strs("tags", nodeToRegister.Tags).
@@ -1213,12 +1383,14 @@ func (s *State) createAndSaveNewNode(params newNodeParams) (types.NodeView, erro
		if err != nil {
			return types.NodeView{}, fmt.Errorf("failed to ensure unique given name: %w", err)
		}

		nodeToRegister.GivenName = givenName
	}

	// New node - database first to get ID, then NodeStore
	savedNode, err := hsdb.Write(s.db.DB, func(tx *gorm.DB) (*types.Node, error) {
-		if err := tx.Save(&nodeToRegister).Error; err != nil {
+		err := tx.Save(&nodeToRegister).Error
+		if err != nil {
			return nil, fmt.Errorf("failed to save node: %w", err)
		}

@@ -1239,6 +1411,114 @@ func (s *State) createAndSaveNewNode(params newNodeParams) (types.NodeView, erro
	return s.nodeStore.PutNode(*savedNode), nil
}

// validateRequestTags validates that the requested tags are permitted for the node.
// This should be called BEFORE UpdateNode to ensure we don't modify NodeStore
// if validation fails. Returns the list of rejected tags (empty if all valid).
func (s *State) validateRequestTags(node types.NodeView, requestTags []string) []string {
	// Empty tags = clear tags, always permitted
	if len(requestTags) == 0 {
		return nil
	}

	var rejectedTags []string

	for _, tag := range requestTags {
		if !s.polMan.NodeCanHaveTag(node, tag) {
			rejectedTags = append(rejectedTags, tag)
		}
	}

	return rejectedTags
}
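The ordering is the contract: validate against the policy manager first, mutate NodeStore only if the entire set passes, so store and database never diverge on a rejected request. The call pattern, as used by both auth paths above:

```go
if rejected := s.validateRequestTags(node, requestTags); len(rejected) > 0 {
	return fmt.Errorf("%w %v are invalid or not permitted",
		ErrRequestedTagsInvalidOrNotPermitted, rejected)
}
// Safe to mutate NodeStore now: every requested tag passed policy checks.
```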

// processReauthTags handles tag changes during node re-authentication.
// It processes RequestTags from the client and updates node tags accordingly.
// Returns rejected tags (if any) for post-validation error handling.
func (s *State) processReauthTags(
	node *types.Node,
	requestTags []string,
	user *types.User,
	oldTags []string,
) []string {
	wasAuthKeyTagged := node.AuthKey != nil && node.AuthKey.IsTagged()

	logEvent := log.Debug().
		Uint64("node.id", uint64(node.ID)).
		Str("node.name", node.Hostname).
		Strs("request.tags", requestTags).
		Strs("current.tags", node.Tags).
		Bool("is.tagged", node.IsTagged()).
		Bool("was.authkey.tagged", wasAuthKeyTagged)
	logEvent.Msg("Processing RequestTags during reauth")

	// Empty RequestTags means untag node (transition to user-owned)
	if len(requestTags) == 0 {
		if node.IsTagged() {
			log.Info().
				Uint64("node.id", uint64(node.ID)).
				Str("node.name", node.Hostname).
				Strs("removed.tags", node.Tags).
				Str("user.name", user.Name).
				Bool("was.authkey.tagged", wasAuthKeyTagged).
				Msg("Reauth: removing all tags, returning node ownership to user")

			node.Tags = []string{}
			node.UserID = &user.ID
			node.User = user
		}

		return nil
	}

	// Non-empty RequestTags: validate and apply
	var approvedTags, rejectedTags []string

	for _, tag := range requestTags {
		if s.polMan.NodeCanHaveTag(node.View(), tag) {
			approvedTags = append(approvedTags, tag)
		} else {
			rejectedTags = append(rejectedTags, tag)
		}
	}

	if len(rejectedTags) > 0 {
		log.Warn().
			Uint64("node.id", uint64(node.ID)).
			Str("node.name", node.Hostname).
			Strs("rejected.tags", rejectedTags).
			Msg("Reauth: requested tags are not permitted")

		return rejectedTags
	}

	if len(approvedTags) > 0 {
		slices.Sort(approvedTags)
		approvedTags = slices.Compact(approvedTags)

		wasTagged := node.IsTagged()
		node.Tags = approvedTags

		// Note: UserID is preserved as "created by" tracking, consistent with SetNodeTags
		if !wasTagged {
			log.Info().
				Uint64("node.id", uint64(node.ID)).
				Str("node.name", node.Hostname).
				Strs("new.tags", approvedTags).
				Str("old.user", user.Name).
				Msg("Reauth: applying tags, transferring node to tagged-devices")
		} else {
			log.Info().
				Uint64("node.id", uint64(node.ID)).
				Str("node.name", node.Hostname).
				Strs("old.tags", oldTags).
				Strs("new.tags", approvedTags).
				Msg("Reauth: updating tags on already-tagged node")
		}
	}

	return nil
}
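In short, processReauthTags has three outcomes: empty tags untag the node and hand it back to the auth user, any rejected tag aborts with no mutation, and a fully approved set is sorted, deduplicated, and applied while UserID stays as "created by" tracking. In applyAuthNodeUpdate the return value is discarded because tags were pre-validated; a standalone caller would check it:

```go
// Sketch of a standalone call (inside the state package).
rejected := s.processReauthTags(node, requestTags, user, oldTags)
if len(rejected) > 0 {
	// Surface the same sentinel the pre-validation path uses.
	return fmt.Errorf("%w %v are invalid or not permitted",
		ErrRequestedTagsInvalidOrNotPermitted, rejected)
}
```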

// HandleNodeFromAuthPath handles node registration through authentication flow (like OIDC).
func (s *State) HandleNodeFromAuthPath(
	registrationID types.RegistrationID,
@@ -1277,117 +1557,75 @@ func (s *State) HandleNodeFromAuthPath(
		regEntry.Node.Hostinfo,
	)

	// Lookup existing nodes
	machineKey := regEntry.Node.MachineKey
	existingNodeSameUser, _ := s.nodeStore.GetNodeByMachineKey(machineKey, types.UserID(user.ID))
	existingNodeAnyUser, _ := s.nodeStore.GetNodeByMachineKeyAnyUser(machineKey)

	// Named conditions - describe WHAT we found, not HOW we check it
	nodeExistsForSameUser := existingNodeSameUser.Valid()
	nodeExistsForAnyUser := existingNodeAnyUser.Valid()
	existingNodeIsTagged := nodeExistsForAnyUser && existingNodeAnyUser.IsTagged()
	existingNodeOwnedByOtherUser := nodeExistsForAnyUser &&
		!existingNodeIsTagged &&
		existingNodeAnyUser.UserID().Get() != user.ID

	// Create logger with common fields for all auth operations
	logger := log.With().
		Str("registration_id", registrationID.String()).
		Str("user.name", user.Name).
		Str("machine.key", machineKey.ShortString()).
		Str("method", registrationMethod).
		Logger()

	// Common params for update operations
	updateParams := authNodeUpdateParams{
		RegEntry:       regEntry,
		ValidHostinfo:  validHostinfo,
		Hostname:       hostname,
		User:           user,
		Expiry:         expiry,
		RegisterMethod: registrationMethod,
	}

	var finalNode types.NodeView

	// Check if node already exists with same machine key for this user
	existingNodeSameUser, existsSameUser := s.nodeStore.GetNodeByMachineKey(regEntry.Node.MachineKey, types.UserID(user.ID))
	if nodeExistsForSameUser {
		updateParams.ExistingNode = existingNodeSameUser

	// If this node exists for this user, update the node in place.
	if existsSameUser && existingNodeSameUser.Valid() {
		log.Debug().
			Caller().
			Str("registration_id", registrationID.String()).
			Str("user.name", user.Name).
			Str("registrationMethod", registrationMethod).
			Str("node.name", existingNodeSameUser.Hostname()).
			Uint64("node.id", existingNodeSameUser.ID().Uint64()).
			Msg("Updating existing node registration")

		// Update existing node - NodeStore first, then database
		updatedNodeView, ok := s.nodeStore.UpdateNode(existingNodeSameUser.ID(), func(node *types.Node) {
			node.NodeKey = regEntry.Node.NodeKey
			node.DiscoKey = regEntry.Node.DiscoKey
			node.Hostname = hostname

			// TODO(kradalby): We should ensure we use the same hostinfo and node merge semantics
			// when a node re-registers as we do when it sends a map request (UpdateNodeFromMapRequest).

			// Preserve NetInfo from existing node when re-registering
			node.Hostinfo = validHostinfo
			node.Hostinfo.NetInfo = preserveNetInfo(existingNodeSameUser, existingNodeSameUser.ID(), validHostinfo)

			node.Endpoints = regEntry.Node.Endpoints
			node.RegisterMethod = regEntry.Node.RegisterMethod
			node.IsOnline = ptr.To(false)
			node.LastSeen = ptr.To(time.Now())

			if expiry != nil {
				node.Expiry = expiry
			} else {
				node.Expiry = regEntry.Node.Expiry
			}
		})

		if !ok {
			return types.NodeView{}, change.Change{}, fmt.Errorf("%w: %d", ErrNodeNotInNodeStore, existingNodeSameUser.ID())
		}

		_, err = hsdb.Write(s.db.DB, func(tx *gorm.DB) (*types.Node, error) {
			// Use Updates() to preserve fields not modified by UpdateNode.
			err := tx.Updates(updatedNodeView.AsStruct()).Error
			if err != nil {
				return nil, fmt.Errorf("failed to save node: %w", err)
			}
			return nil, nil
		})
		finalNode, err = s.applyAuthNodeUpdate(updateParams)
		if err != nil {
			return types.NodeView{}, change.Change{}, err
		}
	} else if existingNodeIsTagged {
		updateParams.ExistingNode = existingNodeAnyUser
		updateParams.IsConvertFromTag = true

		log.Trace().
			Caller().
			Str("node.name", updatedNodeView.Hostname()).
			Uint64("node.id", updatedNodeView.ID().Uint64()).
			Str("machine.key", regEntry.Node.MachineKey.ShortString()).
			Str("node.key", updatedNodeView.NodeKey().ShortString()).
			Str("user.name", user.Name).
			Msg("Node re-authorized")

		finalNode = updatedNodeView
	} else {
		// Node does not exist for this user with this machine key
		// Check if node exists with this machine key for a different user (for netinfo preservation)
		existingNodeAnyUser, existsAnyUser := s.nodeStore.GetNodeByMachineKeyAnyUser(regEntry.Node.MachineKey)

		if existsAnyUser && existingNodeAnyUser.Valid() && existingNodeAnyUser.UserID().Get() != user.ID {
			// Node exists but belongs to a different user
			// Create a NEW node for the new user (do not transfer)
			// This allows the same machine to have separate node identities per user
			oldUser := existingNodeAnyUser.User()
			log.Info().
				Caller().
				Str("existing.node.name", existingNodeAnyUser.Hostname()).
				Uint64("existing.node.id", existingNodeAnyUser.ID().Uint64()).
				Str("machine.key", regEntry.Node.MachineKey.ShortString()).
				Str("old.user", oldUser.Name()).
				Str("new.user", user.Name).
				Str("method", registrationMethod).
				Msg("Creating new node for different user (same machine key exists for another user)")
		finalNode, err = s.applyAuthNodeUpdate(updateParams)
		if err != nil {
			return types.NodeView{}, change.Change{}, err
		}
	} else if existingNodeOwnedByOtherUser {
		oldUser := existingNodeAnyUser.User()

		// Create a completely new node
		log.Debug().
			Caller().
			Str("registration_id", registrationID.String()).
			Str("user.name", user.Name).
			Str("registrationMethod", registrationMethod).
			Str("expiresAt", fmt.Sprintf("%v", expiry)).
			Msg("Registering new node from auth callback")
		logger.Info().
			Str("existing.node.name", existingNodeAnyUser.Hostname()).
			Uint64("existing.node.id", existingNodeAnyUser.ID().Uint64()).
			Str("old.user", oldUser.Name()).
			Msg("Creating new node for different user (same machine key exists for another user)")

		// Create and save new node
		var err error
		finalNode, err = s.createAndSaveNewNode(newNodeParams{
			User:                   *user,
			MachineKey:             regEntry.Node.MachineKey,
			NodeKey:                regEntry.Node.NodeKey,
			DiscoKey:               regEntry.Node.DiscoKey,
			Hostname:               hostname,
			Hostinfo:               validHostinfo,
			Endpoints:              regEntry.Node.Endpoints,
			Expiry:                 cmp.Or(expiry, regEntry.Node.Expiry),
			RegisterMethod:         registrationMethod,
			ExistingNodeForNetinfo: cmp.Or(existingNodeAnyUser, types.NodeView{}),
		})
		finalNode, err = s.createNewNodeFromAuth(
			logger, user, regEntry, hostname, validHostinfo,
			expiry, registrationMethod, existingNodeAnyUser,
		)
		if err != nil {
			return types.NodeView{}, change.Change{}, err
		}
	} else {
		finalNode, err = s.createNewNodeFromAuth(
			logger, user, regEntry, hostname, validHostinfo,
			expiry, registrationMethod, types.NodeView{},
		)
		if err != nil {
			return types.NodeView{}, change.Change{}, err
		}
@@ -1420,6 +1658,37 @@ func (s *State) HandleNodeFromAuthPath(
	return finalNode, c, nil
}

// createNewNodeFromAuth creates a new node during auth callback.
// This is used for both new registrations and when a machine already has a node
// for a different user.
func (s *State) createNewNodeFromAuth(
	logger zerolog.Logger,
	user *types.User,
	regEntry *types.RegisterNode,
	hostname string,
	validHostinfo *tailcfg.Hostinfo,
	expiry *time.Time,
	registrationMethod string,
	existingNodeForNetinfo types.NodeView,
) (types.NodeView, error) {
	logger.Debug().
		Interface("expiry", expiry).
		Msg("Registering new node from auth callback")

	return s.createAndSaveNewNode(newNodeParams{
		User:                   *user,
		MachineKey:             regEntry.Node.MachineKey,
		NodeKey:                regEntry.Node.NodeKey,
		DiscoKey:               regEntry.Node.DiscoKey,
		Hostname:               hostname,
		Hostinfo:               validHostinfo,
		Endpoints:              regEntry.Node.Endpoints,
		Expiry:                 cmp.Or(expiry, regEntry.Node.Expiry),
		RegisterMethod:         registrationMethod,
		ExistingNodeForNetinfo: existingNodeForNetinfo,
	})
}

// HandleNodeFromPreAuthKey handles node registration using a pre-authentication key.
func (s *State) HandleNodeFromPreAuthKey(
	regReq tailcfg.RegisterRequest,
@@ -1430,11 +1699,29 @@ func (s *State) HandleNodeFromPreAuthKey(
		return types.NodeView{}, change.Change{}, err
	}

+	// Helper to get username for logging (handles nil User for tags-only keys)
+	pakUsername := func() string {
+		if pak.User != nil {
+			return pak.User.Username()
+		}
+
+		return types.TaggedDevices.Name
+	}

	// Check if node exists with same machine key before validating the key.
	// For #2830: container restarts send the same pre-auth key which may be used/expired.
	// Skip validation for existing nodes re-registering with the same NodeKey, as the
	// key was only needed for initial authentication. NodeKey rotation requires validation.
-	existingNodeSameUser, existsSameUser := s.nodeStore.GetNodeByMachineKey(machineKey, types.UserID(pak.User.ID))
+	//
+	// For tags-only keys (pak.User == nil), we skip the user-based lookup since there's
+	// no user to match against. These keys create tagged nodes without user ownership.
+	var existingNodeSameUser types.NodeView

+	var existsSameUser bool

+	if pak.User != nil {
+		existingNodeSameUser, existsSameUser = s.nodeStore.GetNodeByMachineKey(machineKey, types.UserID(pak.User.ID))
+	}

	// For existing nodes, skip validation if:
	// 1. MachineKey matches (cryptographic proof of machine identity)
@@ -1447,6 +1734,8 @@ func (s *State) HandleNodeFromPreAuthKey(
	// - Container restarts may use different PAKs (e.g., env var changed)
	// - Original PAK may be deleted
	// - MachineKey + User is sufficient to prove this is the same node
+	//
+	// Note: For tags-only keys, existsSameUser is always false, so we always validate.
	isExistingNodeReregistering := existsSameUser && existingNodeSameUser.Valid()

	// Check if this is a NodeKey rotation (different NodeKey)
@@ -1492,7 +1781,7 @@ func (s *State) HandleNodeFromPreAuthKey(
	logHostinfoValidation(
		machineKey.ShortString(),
		regReq.NodeKey.ShortString(),
-		pak.User.Username(),
+		pakUsername(),
		hostname,
		regReq.Hostinfo,
	)
@@ -1502,12 +1791,13 @@ func (s *State) HandleNodeFromPreAuthKey(
		Str("node.name", hostname).
		Str("machine.key", machineKey.ShortString()).
		Str("node.key", regReq.NodeKey.ShortString()).
-		Str("user.name", pak.User.Username()).
+		Str("user.name", pakUsername()).
		Msg("Registering node with pre-auth key")

	var finalNode types.NodeView

	// If this node exists for this user, update the node in place.
+	// Note: For tags-only keys (pak.User == nil), existsSameUser is always false.
	if existsSameUser && existingNodeSameUser.Valid() {
		log.Trace().
			Caller().
@@ -1515,7 +1805,7 @@ func (s *State) HandleNodeFromPreAuthKey(
			Uint64("node.id", existingNodeSameUser.ID().Uint64()).
			Str("machine.key", machineKey.ShortString()).
			Str("node.key", existingNodeSameUser.NodeKey().ShortString()).
-			Str("user.name", pak.User.Username()).
+			Str("user.name", pakUsername()).
			Msg("Node re-registering with existing machine key and user, updating in place")

		// Update existing node - NodeStore first, then database
@@ -1542,9 +1832,11 @@ func (s *State) HandleNodeFromPreAuthKey(
			node.IsOnline = ptr.To(false)
			node.LastSeen = ptr.To(time.Now())

-			// Update expiry, if it is zero, it means that the node will
-			// not have an expiry anymore. If it is non-zero, we set that.
-			node.Expiry = &regReq.Expiry
+			// Tagged nodes keep their existing expiry (disabled).
+			// User-owned nodes update expiry from the client request.
+			if !node.IsTagged() {
+				node.Expiry = &regReq.Expiry
+			}
		})
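This mirrors the invariant in applyAuthNodeUpdate: a tagged node re-registering with a pre-auth key keeps expiry disabled even when the client sends one. A regression-test sketch (the fixture helper and the exact HandleNodeFromPreAuthKey signature are abridged assumptions):

```go
node := taggedTestNode(t)                                                     // hypothetical fixture
finalNode, _, err := s.HandleNodeFromPreAuthKey(regReqWithExpiry, machineKey) // abridged
require.NoError(t, err)
require.Nil(t, finalNode.AsStruct().Expiry, "tagged nodes must not expire")
```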

		if !ok {
@@ -1553,7 +1845,8 @@ func (s *State) HandleNodeFromPreAuthKey(

		_, err = hsdb.Write(s.db.DB, func(tx *gorm.DB) (*types.Node, error) {
			// Use Updates() to preserve fields not modified by UpdateNode.
-			err := tx.Updates(updatedNodeView.AsStruct()).Error
+			// Omit AuthKeyID/AuthKey to prevent stale PreAuthKey references from causing FK errors.
+			err := tx.Omit("AuthKeyID", "AuthKey").Updates(updatedNodeView.AsStruct()).Error
			if err != nil {
				return nil, fmt.Errorf("failed to save node: %w", err)
			}
@@ -1577,7 +1870,7 @@ func (s *State) HandleNodeFromPreAuthKey(
			Uint64("node.id", updatedNodeView.ID().Uint64()).
			Str("machine.key", machineKey.ShortString()).
			Str("node.key", updatedNodeView.NodeKey().ShortString()).
-			Str("user.name", pak.User.Username()).
+			Str("user.name", pakUsername()).
			Msg("Node re-authorized")

		finalNode = updatedNodeView
@@ -1586,7 +1879,9 @@ func (s *State) HandleNodeFromPreAuthKey(
		// Check if node exists with this machine key for a different user
		existingNodeAnyUser, existsAnyUser := s.nodeStore.GetNodeByMachineKeyAnyUser(machineKey)

-		if existsAnyUser && existingNodeAnyUser.Valid() && existingNodeAnyUser.UserID().Get() != pak.User.ID {
+		// For user-owned keys, check if node exists for a different user
+		// For tags-only keys (pak.User == nil), this check is skipped
+		if pak.User != nil && existsAnyUser && existingNodeAnyUser.Valid() && existingNodeAnyUser.UserID().Get() != pak.User.ID {
			// Node exists but belongs to a different user
			// Create a NEW node for the new user (do not transfer)
			// This allows the same machine to have separate node identities per user
@@ -1597,17 +1892,25 @@ func (s *State) HandleNodeFromPreAuthKey(
				Uint64("existing.node.id", existingNodeAnyUser.ID().Uint64()).
				Str("machine.key", machineKey.ShortString()).
				Str("old.user", oldUser.Name()).
-				Str("new.user", pak.User.Username()).
+				Str("new.user", pakUsername()).
				Msg("Creating new node for different user (same machine key exists for another user)")
		}

-		// This is a new node for this user - create it
-		// (Either completely new, or new for this user while existing for another user)
+		// This is a new node - create it
+		// For user-owned keys: create for the user
+		// For tags-only keys: create as tagged node (createAndSaveNewNode handles this via PreAuthKey)

		// Create and save new node
+		// Note: For tags-only keys, User is empty but createAndSaveNewNode uses PreAuthKey for ownership
+		var pakUser types.User
+		if pak.User != nil {
+			pakUser = *pak.User
+		}

		var err error

		finalNode, err = s.createAndSaveNewNode(newNodeParams{
-			User:       *pak.User,
+			User:       pakUser,
			MachineKey: machineKey,
			NodeKey:    regReq.NodeKey,
			DiscoKey:   key.DiscoPublic{}, // DiscoKey not available in RegisterRequest
@@ -1673,6 +1976,14 @@ func (s *State) updatePolicyManagerUsers() (change.Change, error) {
	return change.Change{}, nil
}

+// UpdatePolicyManagerUsersForTest updates the policy manager's user cache.
+// This is exposed for testing purposes to sync the policy manager after
+// creating test users via CreateUserForTest().
+func (s *State) UpdatePolicyManagerUsersForTest() error {
+	_, err := s.updatePolicyManagerUsers()
+	return err
+}

// updatePolicyManagerNodes updates the policy manager with current nodes.
// Returns true if the policy changed and notifications should be sent.
// TODO(kradalby): This is a temporary stepping stone, ultimately we should
@@ -1735,7 +2046,9 @@ func (s *State) autoApproveNodes() ([]change.Change, error) {
		}

		mu.Lock()

		cs = append(cs, c)

		mu.Unlock()
	}

@@ -1805,6 +2118,7 @@ func (s *State) UpdateNodeFromMapRequest(id types.NodeID, req tailcfg.MapRequest
	if hi := req.Hostinfo; hi != nil {
		hasNewRoutes = len(hi.RoutableIPs) > 0
	}

	needsRouteApproval = hostinfoChanged && (routesChanged(currentNode.View(), req.Hostinfo) || (hasNewRoutes && len(currentNode.ApprovedRoutes) == 0))
	if needsRouteApproval {
		// Extract announced routes from request
@@ -1957,6 +2271,7 @@ func hostinfoEqual(oldNode types.NodeView, newHI *tailcfg.Hostinfo) bool {
	if !oldNode.Valid() || newHI == nil {
		return false
	}

	old := oldNode.AsStruct().Hostinfo

	return old.Equal(newHI)
@@ -63,6 +63,6 @@ func logTagOperation(existingNode types.NodeView, newTags []string) {
			Str("node.name", existingNode.Hostname()).
			Uint("created.by.user", userID).
			Strs("new.tags", newTags).
-			Msg("Converting user-owned node to tagged node (irreversible)")
+			Msg("Converting user-owned node to tagged node")
	}
}

@@ -1,56 +0,0 @@
package hscontrol

import (
    "os"
    "testing"

    "github.com/juanfont/headscale/hscontrol/types"
    "gopkg.in/check.v1"
)

func Test(t *testing.T) {
    check.TestingT(t)
}

var _ = check.Suite(&Suite{})

type Suite struct{}

var (
    tmpDir string
    app    *Headscale
)

func (s *Suite) SetUpTest(c *check.C) {
    s.ResetDB(c)
}

func (s *Suite) TearDownTest(c *check.C) {
    os.RemoveAll(tmpDir)
}

func (s *Suite) ResetDB(c *check.C) {
    if len(tmpDir) != 0 {
        os.RemoveAll(tmpDir)
    }
    var err error
    tmpDir, err = os.MkdirTemp("", "autoygg-client-test2")
    if err != nil {
        c.Fatal(err)
    }
    cfg := types.Config{
        NoisePrivateKeyPath: tmpDir + "/noise_private.key",
        Database: types.DatabaseConfig{
            Type: "sqlite3",
            Sqlite: types.SqliteConfig{
                Path: tmpDir + "/headscale_test.db",
            },
        },
        OIDC: types.OIDCConfig{},
    }

    app, err = NewHeadscale(&cfg)
    if err != nil {
        c.Fatal(err)
    }
}
@@ -57,7 +57,7 @@ func OIDCCallback(user, verb string) *elem.Element {
P(elem.Text("Check out the documentation to learn more about headscale and Tailscale:")),
Ul(
    elem.Li(nil,
        externalLink("https://github.com/juanfont/headscale/tree/main/docs", "Headscale documentation"),
        externalLink("https://headscale.net/stable/", "Headscale documentation"),
    ),
    elem.Li(nil,
        externalLink("https://tailscale.com/kb/", "Tailscale knowledge base"),

@@ -119,7 +119,7 @@ func TestTemplateExternalLinkSecurity(t *testing.T) {
name: "OIDC Callback",
html: templates.OIDCCallback("test@example.com", "Logged in").Render(),
externalURLs: []string{
    "https://github.com/juanfont/headscale/tree/main/docs",
    "https://headscale.net/stable/",
    "https://tailscale.com/kb/",
},
},
@@ -70,6 +70,18 @@ func (r Change) Merge(other Change) Change {
merged.PeersRemoved = uniqueNodeIDs(append(r.PeersRemoved, other.PeersRemoved...))
merged.PeerPatches = append(r.PeerPatches, other.PeerPatches...)

// Preserve OriginNode for self-update detection.
// If either change has OriginNode set, keep it so the mapper
// can detect self-updates and send the node its own changes.
if merged.OriginNode == 0 {
    merged.OriginNode = other.OriginNode
}

// Preserve TargetNode for targeted responses.
if merged.TargetNode == 0 {
    merged.TargetNode = other.TargetNode
}

if r.Reason != "" && other.Reason != "" && r.Reason != other.Reason {
    merged.Reason = r.Reason + "; " + other.Reason
} else if other.Reason != "" {
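The merge semantics here are first-wins: a zero `OriginNode` or `TargetNode` means "unset", so the first non-zero value survives a merge, exactly as the test table in the next hunk asserts. A minimal sketch of that rule (the `Change` type below is a simplified stand-in for the real hscontrol type, reduced to the two fields involved):

```go
package main

import "fmt"

// Simplified stand-in for the Change struct in the diff above.
type Change struct {
    OriginNode uint64 // 0 means "unset"
    TargetNode uint64 // 0 means "unset"
}

// merge keeps the receiver's value when set; otherwise adopts the other's.
func (r Change) merge(other Change) Change {
    merged := r
    if merged.OriginNode == 0 {
        merged.OriginNode = other.OriginNode
    }
    if merged.TargetNode == 0 {
        merged.TargetNode = other.TargetNode
    }
    return merged
}

func main() {
    a := Change{OriginNode: 42}
    b := Change{OriginNode: 7, TargetNode: 9}
    fmt.Println(a.merge(b)) // {42 9}: first OriginNode wins, TargetNode filled from b
}
```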
@@ -233,6 +233,36 @@ func TestChange_Merge(t *testing.T) {
    r2:   Change{Reason: "update"},
    want: Change{Reason: "update"},
},
{
    name: "OriginNode preserved from first",
    r1:   Change{OriginNode: 42},
    r2:   Change{IncludePolicy: true},
    want: Change{OriginNode: 42, IncludePolicy: true},
},
{
    name: "OriginNode preserved from second when first is zero",
    r1:   Change{IncludePolicy: true},
    r2:   Change{OriginNode: 42},
    want: Change{OriginNode: 42, IncludePolicy: true},
},
{
    name: "OriginNode first wins when both set",
    r1:   Change{OriginNode: 1},
    r2:   Change{OriginNode: 2},
    want: Change{OriginNode: 1},
},
{
    name: "TargetNode preserved from first",
    r1:   Change{TargetNode: 42},
    r2:   Change{IncludeSelf: true},
    want: Change{TargetNode: 42, IncludeSelf: true},
},
{
    name: "TargetNode preserved from second when first is zero",
    r1:   Change{IncludeSelf: true},
    r2:   Change{TargetNode: 42},
    want: Change{TargetNode: 42, IncludeSelf: true},
},
}

for _, tt := range tests {
@@ -377,7 +377,7 @@ func (node *Node) Proto() *v1.Node {
Name:       node.Hostname,
GivenName:  node.GivenName,
User:       nil, // Will be set below based on node type
ForcedTags: node.Tags,
Tags:       node.Tags,
Online:     node.IsOnline != nil && *node.IsOnline,

// Only ApprovedRoutes and AvailableRoutes are set here. SubnetRoutes has

@@ -719,7 +719,13 @@ func (node Node) DebugString() string {
return sb.String()
}

func (nv NodeView) UserView() UserView {
// Owner returns the owner for display purposes.
// For tagged nodes, returns TaggedDevices. For user-owned nodes, returns the user.
func (nv NodeView) Owner() UserView {
    if nv.IsTagged() {
        return TaggedDevices.View()
    }

    return nv.User()
}

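`Owner` folds the tagged/user distinction into a single display value: tags are the authoritative owner when present, the user otherwise. A minimal sketch of the same dispatch (`IsTagged`, `TaggedDevices`, and the types below are simplified assumptions modeled on the diff, not the real headscale views):

```go
package main

import "fmt"

type User struct{ Name string }

// TaggedDevices is a sentinel "user" shown for tag-owned nodes,
// mirroring the sentinel used in the diff above.
var TaggedDevices = User{Name: "tagged-devices"}

type Node struct {
    Tags []string
    User User
}

// IsTagged mirrors the one-way rule documented later in this diff:
// non-empty Tags means the node is tag-owned.
func (n Node) IsTagged() bool { return len(n.Tags) > 0 }

// Owner mirrors NodeView.Owner: the tag sentinel for tagged nodes,
// the owning user otherwise.
func (n Node) Owner() User {
    if n.IsTagged() {
        return TaggedDevices
    }
    return n.User
}

func main() {
    fmt.Println(Node{Tags: []string{"tag:web"}}.Owner().Name) // tagged-devices
    fmt.Println(Node{User: User{Name: "alice"}}.Owner().Name) // alice
}
```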
@@ -7,11 +7,13 @@ package types

import (
    "database/sql"
    "encoding/json"
    jsonv1 "encoding/json"
    "errors"
    "net/netip"
    "time"

    jsonv2 "github.com/go-json-experiment/json"
    "github.com/go-json-experiment/json/jsontext"
    "gorm.io/gorm"
    "tailscale.com/tailcfg"
    "tailscale.com/types/key"

@@ -48,8 +50,17 @@ func (v UserView) AsStruct() *User {
    return v.ж.Clone()
}

func (v UserView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }
// MarshalJSON implements [jsonv1.Marshaler].
func (v UserView) MarshalJSON() ([]byte, error) {
    return jsonv1.Marshal(v.ж)
}

// MarshalJSONTo implements [jsonv2.MarshalerTo].
func (v UserView) MarshalJSONTo(enc *jsontext.Encoder) error {
    return jsonv2.MarshalEncode(enc, v.ж)
}

// UnmarshalJSON implements [jsonv1.Unmarshaler].
func (v *UserView) UnmarshalJSON(b []byte) error {
    if v.ж != nil {
        return errors.New("already initialized")

@@ -58,20 +69,51 @@ func (v *UserView) UnmarshalJSON(b []byte) error {
    return nil
}
var x User
if err := json.Unmarshal(b, &x); err != nil {
if err := jsonv1.Unmarshal(b, &x); err != nil {
    return err
}
v.ж = &x
return nil
}

func (v UserView) Model() gorm.Model { return v.ж.Model }
func (v UserView) Name() string { return v.ж.Name }
func (v UserView) DisplayName() string { return v.ж.DisplayName }
func (v UserView) Email() string { return v.ж.Email }
// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom].
func (v *UserView) UnmarshalJSONFrom(dec *jsontext.Decoder) error {
    if v.ж != nil {
        return errors.New("already initialized")
    }
    var x User
    if err := jsonv2.UnmarshalDecode(dec, &x); err != nil {
        return err
    }
    v.ж = &x
    return nil
}

func (v UserView) Model() gorm.Model { return v.ж.Model }

// Name (username) for the user, is used if email is empty
// Should not be used, please use Username().
// It is unique if ProviderIdentifier is not set.
func (v UserView) Name() string { return v.ж.Name }

// Typically the full name of the user
func (v UserView) DisplayName() string { return v.ж.DisplayName }

// Email of the user
// Should not be used, please use Username().
func (v UserView) Email() string { return v.ж.Email }

// ProviderIdentifier is a unique or not set identifier of the
// user from OIDC. It is the combination of `iss`
// and `sub` claim in the OIDC token.
// It is unique if set.
// It is unique together with Name.
func (v UserView) ProviderIdentifier() sql.NullString { return v.ж.ProviderIdentifier }
func (v UserView) Provider() string { return v.ж.Provider }
func (v UserView) ProfilePicURL() string { return v.ж.ProfilePicURL }

// Provider is the origin of the user account,
// same as RegistrationMethod, without authkey.
func (v UserView) Provider() string { return v.ж.Provider }
func (v UserView) ProfilePicURL() string { return v.ж.ProfilePicURL }

// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _UserViewNeedsRegeneration = User(struct {
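The pattern in this hunk keeps the views usable from both encoders: `MarshalJSON`/`UnmarshalJSON` serve the standard library (aliased `jsonv1`), while `MarshalJSONTo`/`UnmarshalJSONFrom` serve go-json-experiment's streaming API. A minimal round-trip sketch of that dual implementation, using a simplified `View` wrapper rather than the generated headscale code (method names follow the diff; treat the exact jsonv2 interfaces as an assumption about the library version in use):

```go
package main

import (
    "errors"
    "fmt"

    jsonv2 "github.com/go-json-experiment/json"
    "github.com/go-json-experiment/json/jsontext"
)

type User struct {
    Name  string
    Email string
}

// View is a simplified read-only wrapper, like the generated headscale views.
type View struct{ u *User }

// MarshalJSONTo encodes the wrapped value through the jsonv2 encoder.
func (v View) MarshalJSONTo(enc *jsontext.Encoder) error {
    return jsonv2.MarshalEncode(enc, v.u)
}

// UnmarshalJSONFrom refuses to overwrite an initialized view, as in the diff.
func (v *View) UnmarshalJSONFrom(dec *jsontext.Decoder) error {
    if v.u != nil {
        return errors.New("already initialized")
    }
    var x User
    if err := jsonv2.UnmarshalDecode(dec, &x); err != nil {
        return err
    }
    v.u = &x
    return nil
}

func main() {
    b, _ := jsonv2.Marshal(View{u: &User{Name: "alice", Email: "a@example.com"}})
    var v View
    _ = jsonv2.Unmarshal(b, &v)
    fmt.Println(string(b), v.u.Name)
}
```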
@@ -112,8 +154,17 @@ func (v NodeView) AsStruct() *Node {
    return v.ж.Clone()
}

func (v NodeView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }
// MarshalJSON implements [jsonv1.Marshaler].
func (v NodeView) MarshalJSON() ([]byte, error) {
    return jsonv1.Marshal(v.ж)
}

// MarshalJSONTo implements [jsonv2.MarshalerTo].
func (v NodeView) MarshalJSONTo(enc *jsontext.Encoder) error {
    return jsonv2.MarshalEncode(enc, v.ж)
}

// UnmarshalJSON implements [jsonv1.Unmarshaler].
func (v *NodeView) UnmarshalJSON(b []byte) error {
    if v.ж != nil {
        return errors.New("already initialized")

@@ -122,7 +173,20 @@ func (v *NodeView) UnmarshalJSON(b []byte) error {
    return nil
}
var x Node
if err := json.Unmarshal(b, &x); err != nil {
if err := jsonv1.Unmarshal(b, &x); err != nil {
    return err
}
v.ж = &x
return nil
}

// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom].
func (v *NodeView) UnmarshalJSONFrom(dec *jsontext.Decoder) error {
    if v.ж != nil {
        return errors.New("already initialized")
    }
    var x Node
    if err := jsonv2.UnmarshalDecode(dec, &x); err != nil {
        return err
    }
    v.ж = &x

@@ -139,22 +203,52 @@ func (v NodeView) IPv4() views.ValuePointer[netip.Addr] { return views.ValuePo

func (v NodeView) IPv6() views.ValuePointer[netip.Addr] { return views.ValuePointerOf(v.ж.IPv6) }

func (v NodeView) Hostname() string { return v.ж.Hostname }
func (v NodeView) GivenName() string { return v.ж.GivenName }
// Hostname represents the name given by the Tailscale
// client during registration
func (v NodeView) Hostname() string { return v.ж.Hostname }

// GivenName represents either:
// a DNS normalized version of Hostname
// a valid name set by the User
//
// GivenName is the name used in all DNS related
// parts of headscale.
func (v NodeView) GivenName() string { return v.ж.GivenName }

// UserID is set for ALL nodes (tagged and user-owned) to track "created by".
// For tagged nodes, this is informational only - the tag is the owner.
// For user-owned nodes, this identifies the owner.
// Only nil for orphaned nodes (should not happen in normal operation).
func (v NodeView) UserID() views.ValuePointer[uint] { return views.ValuePointerOf(v.ж.UserID) }

func (v NodeView) User() UserView { return v.ж.User.View() }
func (v NodeView) RegisterMethod() string { return v.ж.RegisterMethod }
func (v NodeView) Tags() views.Slice[string] { return views.SliceOf(v.ж.Tags) }
func (v NodeView) User() UserView { return v.ж.User.View() }
func (v NodeView) RegisterMethod() string { return v.ж.RegisterMethod }

// Tags is the definitive owner for tagged nodes.
// When non-empty, the node is "tagged" and tags define its identity.
// Empty for user-owned nodes.
// Tags cannot be removed once set (one-way transition).
func (v NodeView) Tags() views.Slice[string] { return views.SliceOf(v.ж.Tags) }

// When a node has been created with a PreAuthKey, we need to
// prevent the preauthkey from being deleted before the node.
// The preauthkey can define "tags" of the node so we need it
// around.
func (v NodeView) AuthKeyID() views.ValuePointer[uint64] { return views.ValuePointerOf(v.ж.AuthKeyID) }

func (v NodeView) AuthKey() PreAuthKeyView { return v.ж.AuthKey.View() }
func (v NodeView) Expiry() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.ж.Expiry) }

// LastSeen is when the node was last in contact with
// headscale. It is best effort and not persisted.
func (v NodeView) LastSeen() views.ValuePointer[time.Time] {
    return views.ValuePointerOf(v.ж.LastSeen)
}

// ApprovedRoutes is a list of routes that the node is allowed to announce
// as a subnet router. They are not necessarily the routes that the node
// announces at the moment.
// See [Node.Hostinfo]
func (v NodeView) ApprovedRoutes() views.Slice[netip.Prefix] {
    return views.SliceOf(v.ж.ApprovedRoutes)
}
@@ -223,8 +317,17 @@ func (v PreAuthKeyView) AsStruct() *PreAuthKey {
    return v.ж.Clone()
}

func (v PreAuthKeyView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }
// MarshalJSON implements [jsonv1.Marshaler].
func (v PreAuthKeyView) MarshalJSON() ([]byte, error) {
    return jsonv1.Marshal(v.ж)
}

// MarshalJSONTo implements [jsonv2.MarshalerTo].
func (v PreAuthKeyView) MarshalJSONTo(enc *jsontext.Encoder) error {
    return jsonv2.MarshalEncode(enc, v.ж)
}

// UnmarshalJSON implements [jsonv1.Unmarshaler].
func (v *PreAuthKeyView) UnmarshalJSON(b []byte) error {
    if v.ж != nil {
        return errors.New("already initialized")

@@ -233,23 +336,50 @@ func (v *PreAuthKeyView) UnmarshalJSON(b []byte) error {
    return nil
}
var x PreAuthKey
if err := json.Unmarshal(b, &x); err != nil {
if err := jsonv1.Unmarshal(b, &x); err != nil {
    return err
}
v.ж = &x
return nil
}

func (v PreAuthKeyView) ID() uint64 { return v.ж.ID }
func (v PreAuthKeyView) Key() string { return v.ж.Key }
func (v PreAuthKeyView) Prefix() string { return v.ж.Prefix }
func (v PreAuthKeyView) Hash() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.ж.Hash) }
// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom].
func (v *PreAuthKeyView) UnmarshalJSONFrom(dec *jsontext.Decoder) error {
    if v.ж != nil {
        return errors.New("already initialized")
    }
    var x PreAuthKey
    if err := jsonv2.UnmarshalDecode(dec, &x); err != nil {
        return err
    }
    v.ж = &x
    return nil
}

func (v PreAuthKeyView) ID() uint64 { return v.ж.ID }

// Legacy plaintext key (for backwards compatibility)
func (v PreAuthKeyView) Key() string { return v.ж.Key }

// New bcrypt-based authentication
func (v PreAuthKeyView) Prefix() string { return v.ж.Prefix }

// bcrypt
func (v PreAuthKeyView) Hash() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.ж.Hash) }

// For tagged keys: UserID tracks who created the key (informational)
// For user-owned keys: UserID tracks the node owner
// Can be nil for system-created tagged keys
func (v PreAuthKeyView) UserID() views.ValuePointer[uint] { return views.ValuePointerOf(v.ж.UserID) }

func (v PreAuthKeyView) User() UserView { return v.ж.User.View() }
func (v PreAuthKeyView) Reusable() bool { return v.ж.Reusable }
func (v PreAuthKeyView) Ephemeral() bool { return v.ж.Ephemeral }
func (v PreAuthKeyView) Used() bool { return v.ж.Used }
func (v PreAuthKeyView) User() UserView { return v.ж.User.View() }
func (v PreAuthKeyView) Reusable() bool { return v.ж.Reusable }
func (v PreAuthKeyView) Ephemeral() bool { return v.ж.Ephemeral }
func (v PreAuthKeyView) Used() bool { return v.ж.Used }

// Tags to assign to nodes registered with this key.
// Tags are copied to the node during registration.
// If non-empty, this creates tagged nodes (not user-owned).
func (v PreAuthKeyView) Tags() views.Slice[string] { return views.SliceOf(v.ж.Tags) }
func (v PreAuthKeyView) CreatedAt() views.ValuePointer[time.Time] {
    return views.ValuePointerOf(v.ж.CreatedAt)
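The comments above encode the ownership rule for pre-auth keys: a key with tags produces tagged nodes and its UserID is only informational ("created by"), while a tag-free key produces nodes owned by that user. A minimal sketch of that rule as a decision function (simplified stand-in types, not headscale's actual resolution code):

```go
package main

import "fmt"

// Simplified pre-auth key, modeled on the fields documented above.
type PreAuthKey struct {
    UserID *uint    // creator for tagged keys, owner for user-owned keys; may be nil
    Tags   []string // non-empty => registrations become tagged nodes
}

// ownerLabel applies the documented rule: tags win; otherwise the user owns it.
func ownerLabel(k PreAuthKey) string {
    if len(k.Tags) > 0 {
        return fmt.Sprintf("tagged node owned by %v", k.Tags)
    }
    if k.UserID != nil {
        return fmt.Sprintf("user-owned node, user %d", *k.UserID)
    }
    return "orphaned (should not happen in normal operation)"
}

func main() {
    alice := uint(1)
    fmt.Println(ownerLabel(PreAuthKey{UserID: &alice, Tags: []string{"tag:ci"}})) // tags win
    fmt.Println(ownerLabel(PreAuthKey{UserID: &alice}))                           // user-owned
}
```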
@@ -174,9 +174,17 @@ func (u UserView) TailscaleUserProfile() tailcfg.UserProfile {
}

func (u *User) Proto() *v1.User {
    // Use Name if set, otherwise fall back to Username() which provides
    // a display-friendly identifier (Email > ProviderIdentifier > ID).
    // This ensures OIDC users (who typically have empty Name) display
    // their email, while CLI users retain their original Name.
    name := u.Name
    if name == "" {
        name = u.Username()
    }
    return &v1.User{
        Id:          uint64(u.ID),
        Name:        u.Name,
        Name:        name,
        CreatedAt:   timestamppb.New(u.CreatedAt),
        DisplayName: u.DisplayName,
        Email:       u.Email,
@@ -43,6 +43,7 @@ func ValidateUsername(username string) error {
}

atCount := 0

for _, char := range username {
    switch {
    case unicode.IsLetter(char),

@@ -87,18 +88,21 @@ func ValidateHostname(name string) error {
    strings.ToLower(name),
)
}

if strings.HasPrefix(name, "-") || strings.HasSuffix(name, "-") {
    return fmt.Errorf(
        "hostname %q cannot start or end with a hyphen",
        name,
    )
}

if strings.HasPrefix(name, ".") || strings.HasSuffix(name, ".") {
    return fmt.Errorf(
        "hostname %q cannot start or end with a dot",
        name,
    )
}

if invalidDNSRegex.MatchString(name) {
    return fmt.Errorf(
        "hostname %q contains invalid characters, only lowercase letters, numbers, hyphens and dots are allowed",

@@ -120,7 +124,8 @@ func ValidateHostname(name string) error {
// After transformation, validates the result.
func NormaliseHostname(name string) (string, error) {
    // Early return if already valid
    if err := ValidateHostname(name); err == nil {
    err := ValidateHostname(name)
    if err == nil {
        return name, nil
    }

@@ -136,7 +141,8 @@ func NormaliseHostname(name string) (string, error) {
}

// Validate result after transformation
if err := ValidateHostname(name); err != nil {
err = ValidateHostname(name)
if err != nil {
    return "", fmt.Errorf(
        "hostname invalid after normalisation: %w",
        err,
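`NormaliseHostname` follows a validate, transform, re-validate shape: return early if the input is already valid, otherwise transform it and validate the result. A self-contained sketch of that flow; the validation rule here is a simplified approximation, not headscale's exact `invalidDNSRegex`:

```go
package main

import (
    "fmt"
    "regexp"
    "strings"
)

// Simplified approximation of the DNS rules enforced above.
var validHost = regexp.MustCompile(`^[a-z0-9]([a-z0-9.-]*[a-z0-9])?$`)

func validateHostname(name string) error {
    if !validHost.MatchString(name) {
        return fmt.Errorf("hostname %q is not a valid DNS name", name)
    }
    return nil
}

// normaliseHostname mirrors the validate -> transform -> re-validate flow.
func normaliseHostname(name string) (string, error) {
    // Early return if already valid.
    err := validateHostname(name)
    if err == nil {
        return name, nil
    }

    // Transform: lower-case, replace spaces/underscores, trim edge punctuation.
    name = strings.ToLower(name)
    name = strings.NewReplacer(" ", "-", "_", "-").Replace(name)
    name = strings.Trim(name, "-.")

    // Validate result after transformation.
    err = validateHostname(name)
    if err != nil {
        return "", fmt.Errorf("hostname invalid after normalisation: %w", err)
    }
    return name, nil
}

func main() {
    fmt.Println(normaliseHostname("My Laptop_1")) // my-laptop-1 <nil>
}
```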
@@ -2814,7 +2814,7 @@ func TestACLTagPropagation(t *testing.T) {
assert.NotNil(c, node, "Node should still exist")

if node != nil {
    assert.ElementsMatch(c, tt.tagChange, node.GetValidTags(), "Tags should be updated")
    assert.ElementsMatch(c, tt.tagChange, node.GetTags(), "Tags should be updated")
}
}, 10*time.Second, 500*time.Millisecond, "verifying tag change applied")
@@ -3042,3 +3042,790 @@ func TestACLTagPropagationPortSpecific(t *testing.T) {

    t.Log("Test PASSED: Port-specific ACL changes propagated correctly")
}

// TestACLGroupWithUnknownUser tests issue #2967 where a group containing
// a reference to a non-existent user should not break connectivity for
// valid users in the same group. The expected behavior is that unknown
// users are silently ignored during group resolution.
func TestACLGroupWithUnknownUser(t *testing.T) {
    IntegrationSkip(t)

    // This test verifies that when a group contains a reference to a
    // non-existent user (e.g., "nonexistent@"), the valid users in
    // the group should still be able to connect to each other.
    //
    // Issue: https://github.com/juanfont/headscale/issues/2967

    spec := ScenarioSpec{
        NodesPerUser: 1,
        Users:        []string{"user1", "user2"},
    }

    scenario, err := NewScenario(spec)

    require.NoError(t, err)
    defer scenario.ShutdownAssertNoPanics(t)

    // Create a policy with a group that includes a non-existent user
    // alongside valid users. The group should still work for valid users.
    policy := &policyv2.Policy{
        Groups: policyv2.Groups{
            // This group contains a reference to "nonexistent@" which does not exist
            policyv2.Group("group:test"): []policyv2.Username{
                policyv2.Username("user1@"),
                policyv2.Username("user2@"),
                policyv2.Username("nonexistent@"), // This user does not exist
            },
        },
        ACLs: []policyv2.ACL{
            {
                Action:  "accept",
                Sources: []policyv2.Alias{groupp("group:test")},
                Destinations: []policyv2.AliasWithPorts{
                    aliasWithPorts(groupp("group:test"), tailcfg.PortRangeAny),
                },
            },
        },
    }

    err = scenario.CreateHeadscaleEnv(
        []tsic.Option{
            tsic.WithNetfilter("off"),
            tsic.WithPackages("curl"),
            tsic.WithWebserver(80),
            tsic.WithDockerWorkdir("/"),
        },
        hsic.WithACLPolicy(policy),
        hsic.WithTestName("acl-unknown-user"),
        hsic.WithEmbeddedDERPServerOnly(),
        hsic.WithTLS(),
    )
    require.NoError(t, err)

    _, err = scenario.ListTailscaleClientsFQDNs()
    require.NoError(t, err)

    err = scenario.WaitForTailscaleSync()
    require.NoError(t, err)

    user1Clients, err := scenario.ListTailscaleClients("user1")
    require.NoError(t, err)
    require.Len(t, user1Clients, 1)

    user2Clients, err := scenario.ListTailscaleClients("user2")
    require.NoError(t, err)
    require.Len(t, user2Clients, 1)

    user1 := user1Clients[0]
    user2 := user2Clients[0]

    // Get FQDNs for connectivity test
    user1FQDN, err := user1.FQDN()
    require.NoError(t, err)
    user2FQDN, err := user2.FQDN()
    require.NoError(t, err)

    // Test that user1 can reach user2 (valid users should be able to communicate)
    // This is the key assertion for issue #2967: valid users should work
    // even if the group contains references to non-existent users.
    t.Log("Testing connectivity: user1 -> user2 (should succeed despite unknown user in group)")
    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user2FQDN)
        result, err := user1.Curl(url)
        assert.NoError(c, err, "user1 should be able to reach user2")
        assert.Len(c, result, 13, "expected hostname response")
    }, 30*time.Second, 500*time.Millisecond, "user1 should reach user2")

    // Test that user2 can reach user1 (bidirectional)
    t.Log("Testing connectivity: user2 -> user1 (should succeed despite unknown user in group)")
    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user1FQDN)
        result, err := user2.Curl(url)
        assert.NoError(c, err, "user2 should be able to reach user1")
        assert.Len(c, result, 13, "expected hostname response")
    }, 30*time.Second, 500*time.Millisecond, "user2 should reach user1")

    t.Log("Test PASSED: Valid users can communicate despite unknown user reference in group")
}
// TestACLGroupAfterUserDeletion tests issue #2967 scenario where a user
// is deleted but their reference remains in an ACL group. The remaining
// valid users should still be able to communicate.
func TestACLGroupAfterUserDeletion(t *testing.T) {
    IntegrationSkip(t)

    // This test verifies that when a user is deleted from headscale but
    // their reference remains in an ACL group, the remaining valid users
    // in the group should still be able to connect to each other.
    //
    // Issue: https://github.com/juanfont/headscale/issues/2967

    spec := ScenarioSpec{
        NodesPerUser: 1,
        Users:        []string{"user1", "user2", "user3"},
    }

    scenario, err := NewScenario(spec)

    require.NoError(t, err)
    defer scenario.ShutdownAssertNoPanics(t)

    // Create a policy with a group containing all three users
    policy := &policyv2.Policy{
        Groups: policyv2.Groups{
            policyv2.Group("group:all"): []policyv2.Username{
                policyv2.Username("user1@"),
                policyv2.Username("user2@"),
                policyv2.Username("user3@"),
            },
        },
        ACLs: []policyv2.ACL{
            {
                Action:  "accept",
                Sources: []policyv2.Alias{groupp("group:all")},
                Destinations: []policyv2.AliasWithPorts{
                    aliasWithPorts(groupp("group:all"), tailcfg.PortRangeAny),
                },
            },
        },
    }

    err = scenario.CreateHeadscaleEnv(
        []tsic.Option{
            tsic.WithNetfilter("off"),
            tsic.WithPackages("curl"),
            tsic.WithWebserver(80),
            tsic.WithDockerWorkdir("/"),
        },
        hsic.WithACLPolicy(policy),
        hsic.WithTestName("acl-deleted-user"),
        hsic.WithEmbeddedDERPServerOnly(),
        hsic.WithTLS(),
        hsic.WithPolicyMode(types.PolicyModeDB), // Use DB mode so policy persists after user deletion
    )
    require.NoError(t, err)

    _, err = scenario.ListTailscaleClientsFQDNs()
    require.NoError(t, err)

    err = scenario.WaitForTailscaleSync()
    require.NoError(t, err)

    headscale, err := scenario.Headscale()
    require.NoError(t, err)

    user1Clients, err := scenario.ListTailscaleClients("user1")
    require.NoError(t, err)
    require.Len(t, user1Clients, 1)

    user2Clients, err := scenario.ListTailscaleClients("user2")
    require.NoError(t, err)
    require.Len(t, user2Clients, 1)

    user3Clients, err := scenario.ListTailscaleClients("user3")
    require.NoError(t, err)
    require.Len(t, user3Clients, 1)

    user1 := user1Clients[0]
    user2 := user2Clients[0]

    // Get FQDNs for connectivity test
    user1FQDN, err := user1.FQDN()
    require.NoError(t, err)
    user2FQDN, err := user2.FQDN()
    require.NoError(t, err)

    // Step 1: Verify initial connectivity - all users can reach each other
    t.Log("Step 1: Verifying initial connectivity between all users")
    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user2FQDN)
        result, err := user1.Curl(url)
        assert.NoError(c, err, "user1 should be able to reach user2 initially")
        assert.Len(c, result, 13, "expected hostname response")
    }, 30*time.Second, 500*time.Millisecond, "initial user1 -> user2 connectivity")

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user1FQDN)
        result, err := user2.Curl(url)
        assert.NoError(c, err, "user2 should be able to reach user1 initially")
        assert.Len(c, result, 13, "expected hostname response")
    }, 30*time.Second, 500*time.Millisecond, "initial user2 -> user1 connectivity")

    // Step 2: Get user3's node and user, then delete them
    t.Log("Step 2: Deleting user3's node and user from headscale")

    // First, get user3's node ID
    nodes, err := headscale.ListNodes("user3")
    require.NoError(t, err)
    require.Len(t, nodes, 1, "user3 should have exactly one node")
    user3NodeID := nodes[0].GetId()

    // Delete user3's node first (required before deleting the user)
    err = headscale.DeleteNode(user3NodeID)
    require.NoError(t, err, "failed to delete user3's node")

    // Now get user3's user ID and delete the user
    user3, err := GetUserByName(headscale, "user3")
    require.NoError(t, err, "user3 should exist")

    // Now delete user3 (after their nodes are deleted)
    err = headscale.DeleteUser(user3.GetId())
    require.NoError(t, err)

    // Verify user3 is deleted
    _, err = GetUserByName(headscale, "user3")
    require.Error(t, err, "user3 should be deleted")

    // Step 3: Verify that user1 and user2 can still communicate (before triggering policy refresh)
    // The policy still references "user3@" in the group, but since user3 is deleted,
    // connectivity may still work due to cached/stale policy state.
    t.Log("Step 3: Verifying connectivity still works immediately after user3 deletion (stale cache)")

    // Test that user1 can still reach user2
    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user2FQDN)
        result, err := user1.Curl(url)
        assert.NoError(c, err, "user1 should still be able to reach user2 after user3 deletion (stale cache)")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "user1 -> user2 after user3 deletion")

    // Step 4: Create a NEW user - this triggers updatePolicyManagerUsers() which
    // re-evaluates the policy. According to issue #2967, this is when the bug manifests:
    // the deleted user3@ in the group causes the entire group to fail resolution.
    t.Log("Step 4: Creating a new user (user4) to trigger policy re-evaluation")

    _, err = headscale.CreateUser("user4")
    require.NoError(t, err, "failed to create user4")

    // Verify user4 was created
    _, err = GetUserByName(headscale, "user4")
    require.NoError(t, err, "user4 should exist after creation")

    // Step 5: THIS IS THE CRITICAL TEST - verify connectivity STILL works after
    // creating a new user. Without the fix, the group containing the deleted user3@
    // would fail to resolve, breaking connectivity for user1 and user2.
    t.Log("Step 5: Verifying connectivity AFTER creating new user (this triggers the bug)")

    // Test that user1 can still reach user2 AFTER the policy refresh triggered by user creation
    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user2FQDN)
        result, err := user1.Curl(url)
        assert.NoError(c, err, "user1 should still reach user2 after policy refresh (BUG if this fails)")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "user1 -> user2 after policy refresh (issue #2967)")

    // Test that user2 can still reach user1
    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user1FQDN)
        result, err := user2.Curl(url)
        assert.NoError(c, err, "user2 should still reach user1 after policy refresh (BUG if this fails)")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "user2 -> user1 after policy refresh (issue #2967)")

    t.Log("Test PASSED: Remaining users can communicate after deleted user and policy refresh")
}
// TestACLGroupDeletionExactReproduction reproduces issue #2967 exactly as reported:
// The reporter had ACTIVE pinging between nodes while making changes.
// The bug is that deleting a user and then creating a new user causes
// connectivity to break for remaining users in the group.
//
// Key difference from other tests: We keep multiple nodes ACTIVE and pinging
// each other throughout the test, just like the reporter's scenario.
//
// Reporter's steps (v0.28.0-beta.1):
// 1. Start pinging between nodes
// 2. Create policy with group:admin = [user1@]
// 3. Create users "deleteable" and "existinguser"
// 4. Add deleteable@ to ACL: Pinging continues
// 5. Delete deleteable: Pinging continues
// 6. Add existinguser@ to ACL: Pinging continues
// 7. Create new user "anotheruser": Pinging continues
// 8. Add anotherinvaliduser@ to ACL: Pinging stops.
func TestACLGroupDeletionExactReproduction(t *testing.T) {
    IntegrationSkip(t)

    // Issue: https://github.com/juanfont/headscale/issues/2967

    const userToDelete = "user2"

    // We need 3 users with active nodes to properly test this:
    // - user1: will remain throughout (like "ritty" in the issue)
    // - user2: will be deleted (like "deleteable" in the issue)
    // - user3: will remain and should still be able to ping user1 after user2 deletion
    spec := ScenarioSpec{
        NodesPerUser: 1,
        Users:        []string{"user1", userToDelete, "user3"},
    }

    scenario, err := NewScenario(spec)
    require.NoError(t, err)

    defer scenario.ShutdownAssertNoPanics(t)

    // Initial policy: all three users in group, can communicate with each other
    initialPolicy := &policyv2.Policy{
        Groups: policyv2.Groups{
            policyv2.Group("group:admin"): []policyv2.Username{
                policyv2.Username("user1@"),
                policyv2.Username(userToDelete + "@"),
                policyv2.Username("user3@"),
            },
        },
        ACLs: []policyv2.ACL{
            {
                Action:  "accept",
                Sources: []policyv2.Alias{groupp("group:admin")},
                Destinations: []policyv2.AliasWithPorts{
                    // Use *:* like the reporter's ACL
                    aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
                },
            },
        },
    }

    err = scenario.CreateHeadscaleEnv(
        []tsic.Option{
            tsic.WithNetfilter("off"),
            tsic.WithPackages("curl"),
            tsic.WithWebserver(80),
            tsic.WithDockerWorkdir("/"),
        },
        hsic.WithACLPolicy(initialPolicy),
        hsic.WithTestName("acl-exact-repro"),
        hsic.WithEmbeddedDERPServerOnly(),
        hsic.WithTLS(),
        hsic.WithPolicyMode(types.PolicyModeDB),
    )
    require.NoError(t, err)

    _, err = scenario.ListTailscaleClientsFQDNs()
    require.NoError(t, err)

    err = scenario.WaitForTailscaleSync()
    require.NoError(t, err)

    headscale, err := scenario.Headscale()
    require.NoError(t, err)

    // Get all clients
    user1Clients, err := scenario.ListTailscaleClients("user1")
    require.NoError(t, err)
    require.Len(t, user1Clients, 1)
    user1 := user1Clients[0]

    user3Clients, err := scenario.ListTailscaleClients("user3")
    require.NoError(t, err)
    require.Len(t, user3Clients, 1)
    user3 := user3Clients[0]

    user1FQDN, err := user1.FQDN()
    require.NoError(t, err)
    user3FQDN, err := user3.FQDN()
    require.NoError(t, err)

    // Step 1: Verify initial connectivity - user1 and user3 can ping each other
    t.Log("Step 1: Verifying initial connectivity (user1 <-> user3)")
    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user3FQDN)
        result, err := user1.Curl(url)
        assert.NoError(c, err, "user1 should reach user3")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "user1 -> user3")

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user1FQDN)
        result, err := user3.Curl(url)
        assert.NoError(c, err, "user3 should reach user1")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "user3 -> user1")

    t.Log("Step 1: PASSED - initial connectivity works")

    // Step 2: Delete user2's node and user (like reporter deleting "deleteable")
    // The ACL still references user2@ but user2 no longer exists
    t.Log("Step 2: Deleting user2 (node + user) from database - ACL still references user2@")

    nodes, err := headscale.ListNodes(userToDelete)
    require.NoError(t, err)
    require.Len(t, nodes, 1)
    err = headscale.DeleteNode(nodes[0].GetId())
    require.NoError(t, err)

    userToDeleteObj, err := GetUserByName(headscale, userToDelete)
    require.NoError(t, err, "user to delete should exist")

    err = headscale.DeleteUser(userToDeleteObj.GetId())
    require.NoError(t, err)

    t.Log("Step 2: DONE - user2 deleted, ACL still has user2@ reference")

    // Step 3: Verify connectivity still works after user2 deletion
    // This tests the immediate effect of the fix - policy should be updated
    t.Log("Step 3: Verifying connectivity STILL works after user2 deletion")
    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user3FQDN)
        result, err := user1.Curl(url)
        assert.NoError(c, err, "user1 should still reach user3 after user2 deletion")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "user1 -> user3 after user2 deletion")

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user1FQDN)
        result, err := user3.Curl(url)
        assert.NoError(c, err, "user3 should still reach user1 after user2 deletion")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "user3 -> user1 after user2 deletion")

    t.Log("Step 3: PASSED - connectivity works after user2 deletion")

    // Step 4: Create a NEW user - this triggers updatePolicyManagerUsers()
    // According to the reporter, this is when the bug manifests
    t.Log("Step 4: Creating new user (user4) - this triggers policy re-evaluation")

    _, err = headscale.CreateUser("user4")
    require.NoError(t, err)

    // Step 5: THE CRITICAL TEST - verify connectivity STILL works
    // Without the fix: DeleteUser didn't update policy, so when CreateUser
    // triggers updatePolicyManagerUsers(), the stale user2@ is now unknown,
    // potentially breaking the group.
    t.Log("Step 5: Verifying connectivity AFTER creating new user (BUG trigger point)")

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user3FQDN)
        result, err := user1.Curl(url)
        assert.NoError(c, err, "BUG #2967: user1 should still reach user3 after user4 creation")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "user1 -> user3 after user4 creation (issue #2967)")

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user1FQDN)
        result, err := user3.Curl(url)
        assert.NoError(c, err, "BUG #2967: user3 should still reach user1 after user4 creation")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "user3 -> user1 after user4 creation (issue #2967)")

    // Additional verification: check filter rules are not empty
    filter, err := headscale.DebugFilter()
    require.NoError(t, err)
    t.Logf("Filter rules: %d", len(filter))
    require.NotEmpty(t, filter, "Filter rules should not be empty")

    t.Log("Test PASSED: Connectivity maintained throughout user deletion and creation")
    t.Log("Issue #2967 would cause 'pinging to stop' at Step 5")
}
// TestACLDynamicUnknownUserAddition tests the v0.28.0-beta.1 scenario from issue #2967:
// "Pinging still stops when a non-registered user is added to a group"
//
// This test verifies that when a policy is DYNAMICALLY updated (via SetPolicy)
// to include a non-existent user in a group, connectivity for valid users
// is maintained. The v2 policy engine should gracefully handle unknown users.
//
// Steps:
// 1. Start with a valid policy (only existing users in group)
// 2. Verify connectivity works
// 3. Update policy to add unknown user to the group
// 4. Verify connectivity STILL works for valid users.
func TestACLDynamicUnknownUserAddition(t *testing.T) {
    IntegrationSkip(t)

    // Issue: https://github.com/juanfont/headscale/issues/2967
    // Comment: "Pinging still stops when a non-registered user is added to a group"

    spec := ScenarioSpec{
        NodesPerUser: 1,
        Users:        []string{"user1", "user2"},
    }

    scenario, err := NewScenario(spec)
    require.NoError(t, err)

    defer scenario.ShutdownAssertNoPanics(t)

    // Start with a VALID policy - only existing users in the group
    validPolicy := &policyv2.Policy{
        Groups: policyv2.Groups{
            policyv2.Group("group:test"): []policyv2.Username{
                policyv2.Username("user1@"),
                policyv2.Username("user2@"),
            },
        },
        ACLs: []policyv2.ACL{
            {
                Action:  "accept",
                Sources: []policyv2.Alias{groupp("group:test")},
                Destinations: []policyv2.AliasWithPorts{
                    aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
                },
            },
        },
    }

    err = scenario.CreateHeadscaleEnv(
        []tsic.Option{
            tsic.WithNetfilter("off"),
            tsic.WithPackages("curl"),
            tsic.WithWebserver(80),
            tsic.WithDockerWorkdir("/"),
        },
        hsic.WithACLPolicy(validPolicy),
        hsic.WithTestName("acl-dynamic-unknown"),
        hsic.WithEmbeddedDERPServerOnly(),
        hsic.WithTLS(),
        hsic.WithPolicyMode(types.PolicyModeDB),
    )
    require.NoError(t, err)

    _, err = scenario.ListTailscaleClientsFQDNs()
    require.NoError(t, err)

    err = scenario.WaitForTailscaleSync()
    require.NoError(t, err)

    headscale, err := scenario.Headscale()
    require.NoError(t, err)

    user1Clients, err := scenario.ListTailscaleClients("user1")
    require.NoError(t, err)
    require.Len(t, user1Clients, 1)
    user1 := user1Clients[0]

    user2Clients, err := scenario.ListTailscaleClients("user2")
    require.NoError(t, err)
    require.Len(t, user2Clients, 1)
    user2 := user2Clients[0]

    user1FQDN, err := user1.FQDN()
    require.NoError(t, err)
    user2FQDN, err := user2.FQDN()
    require.NoError(t, err)

    // Step 1: Verify initial connectivity with VALID policy
    t.Log("Step 1: Verifying initial connectivity with valid policy (no unknown users)")
    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user2FQDN)
        result, err := user1.Curl(url)
        assert.NoError(c, err, "user1 should reach user2")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "initial user1 -> user2")

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user1FQDN)
        result, err := user2.Curl(url)
        assert.NoError(c, err, "user2 should reach user1")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "initial user2 -> user1")

    t.Log("Step 1: PASSED - connectivity works with valid policy")

    // Step 2: DYNAMICALLY update policy to add unknown user
    // This mimics the v0.28.0-beta.1 scenario where a non-existent user is added
    t.Log("Step 2: Updating policy to add unknown user (nonexistent@) to the group")

    policyWithUnknown := &policyv2.Policy{
        Groups: policyv2.Groups{
            policyv2.Group("group:test"): []policyv2.Username{
                policyv2.Username("user1@"),
                policyv2.Username("user2@"),
                policyv2.Username("nonexistent@"), // Added unknown user
            },
        },
        ACLs: []policyv2.ACL{
            {
                Action:  "accept",
                Sources: []policyv2.Alias{groupp("group:test")},
                Destinations: []policyv2.AliasWithPorts{
                    aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
                },
            },
        },
    }

    err = headscale.SetPolicy(policyWithUnknown)
    require.NoError(t, err)

    // Wait for policy to propagate
    err = scenario.WaitForTailscaleSync()
    require.NoError(t, err)

    // Step 3: THE CRITICAL TEST - verify connectivity STILL works
    // v0.28.0-beta.1 issue: "Pinging still stops when a non-registered user is added to a group"
    // With v2 policy graceful error handling, this should pass
    t.Log("Step 3: Verifying connectivity AFTER adding unknown user to policy")

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user2FQDN)
        result, err := user1.Curl(url)
        assert.NoError(c, err, "user1 should STILL reach user2 after adding unknown user")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "user1 -> user2 after unknown user added")

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user1FQDN)
        result, err := user2.Curl(url)
        assert.NoError(c, err, "user2 should STILL reach user1 after adding unknown user")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "user2 -> user1 after unknown user added")

    t.Log("Step 3: PASSED - connectivity maintained after adding unknown user")
    t.Log("Test PASSED: v0.28.0-beta.1 scenario - unknown user added dynamically, valid users still work")
}
// TestACLDynamicUnknownUserRemoval tests the scenario from issue #2967 comments:
// "Removing all invalid users from ACL restores connectivity"
//
// This test verifies that:
// 1. Start with a policy containing unknown user
// 2. Connectivity still works (v2 graceful handling)
// 3. Update policy to remove unknown user
// 4. Connectivity remains working
//
// This ensures the fix handles both:
// - Adding unknown users (tested above)
// - Removing unknown users from policy.
func TestACLDynamicUnknownUserRemoval(t *testing.T) {
    IntegrationSkip(t)

    // Issue: https://github.com/juanfont/headscale/issues/2967
    // Comment: "Removing all invalid users from ACL restores connectivity"

    spec := ScenarioSpec{
        NodesPerUser: 1,
        Users:        []string{"user1", "user2"},
    }

    scenario, err := NewScenario(spec)

    require.NoError(t, err)
    defer scenario.ShutdownAssertNoPanics(t)

    // Start with a policy that INCLUDES an unknown user
    policyWithUnknown := &policyv2.Policy{
        Groups: policyv2.Groups{
            policyv2.Group("group:test"): []policyv2.Username{
                policyv2.Username("user1@"),
                policyv2.Username("user2@"),
                policyv2.Username("invaliduser@"), // Unknown user from the start
            },
        },
        ACLs: []policyv2.ACL{
            {
                Action:  "accept",
                Sources: []policyv2.Alias{groupp("group:test")},
                Destinations: []policyv2.AliasWithPorts{
                    aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
                },
            },
        },
    }

    err = scenario.CreateHeadscaleEnv(
        []tsic.Option{
            tsic.WithNetfilter("off"),
            tsic.WithPackages("curl"),
            tsic.WithWebserver(80),
            tsic.WithDockerWorkdir("/"),
        },
        hsic.WithACLPolicy(policyWithUnknown),
        hsic.WithTestName("acl-unknown-removal"),
        hsic.WithEmbeddedDERPServerOnly(),
        hsic.WithTLS(),
        hsic.WithPolicyMode(types.PolicyModeDB),
    )
    require.NoError(t, err)

    _, err = scenario.ListTailscaleClientsFQDNs()
    require.NoError(t, err)

    err = scenario.WaitForTailscaleSync()
    require.NoError(t, err)

    headscale, err := scenario.Headscale()
    require.NoError(t, err)

    user1Clients, err := scenario.ListTailscaleClients("user1")
    require.NoError(t, err)
    require.Len(t, user1Clients, 1)
    user1 := user1Clients[0]

    user2Clients, err := scenario.ListTailscaleClients("user2")
    require.NoError(t, err)
    require.Len(t, user2Clients, 1)
    user2 := user2Clients[0]

    user1FQDN, err := user1.FQDN()
    require.NoError(t, err)
    user2FQDN, err := user2.FQDN()
    require.NoError(t, err)

    // Step 1: Verify initial connectivity WITH unknown user in policy
    // With v2 graceful handling, this should work
    t.Log("Step 1: Verifying connectivity with unknown user in policy (v2 graceful handling)")
    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user2FQDN)
        result, err := user1.Curl(url)
        assert.NoError(c, err, "user1 should reach user2 even with unknown user in policy")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "initial user1 -> user2 with unknown")

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user1FQDN)
        result, err := user2.Curl(url)
        assert.NoError(c, err, "user2 should reach user1 even with unknown user in policy")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "initial user2 -> user1 with unknown")

    t.Log("Step 1: PASSED - connectivity works even with unknown user (v2 graceful handling)")

    // Step 2: Update policy to REMOVE the unknown user
    t.Log("Step 2: Updating policy to remove unknown user")

    cleanPolicy := &policyv2.Policy{
        Groups: policyv2.Groups{
            policyv2.Group("group:test"): []policyv2.Username{
                policyv2.Username("user1@"),
                policyv2.Username("user2@"),
                // invaliduser@ removed
            },
        },
        ACLs: []policyv2.ACL{
            {
                Action:  "accept",
                Sources: []policyv2.Alias{groupp("group:test")},
                Destinations: []policyv2.AliasWithPorts{
                    aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
                },
            },
        },
    }

    err = headscale.SetPolicy(cleanPolicy)
    require.NoError(t, err)

    // Wait for policy to propagate
    err = scenario.WaitForTailscaleSync()
    require.NoError(t, err)

    // Step 3: Verify connectivity after removing unknown user
    // Issue comment: "Removing all invalid users from ACL restores connectivity"
    t.Log("Step 3: Verifying connectivity AFTER removing unknown user")

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user2FQDN)
        result, err := user1.Curl(url)
        assert.NoError(c, err, "user1 should reach user2 after removing unknown user")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "user1 -> user2 after unknown removed")

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        url := fmt.Sprintf("http://%s/etc/hostname", user1FQDN)
        result, err := user2.Curl(url)
        assert.NoError(c, err, "user2 should reach user1 after removing unknown user")
        assert.Len(c, result, 13, "expected hostname response")
    }, 60*time.Second, 500*time.Millisecond, "user2 -> user1 after unknown removed")

    t.Log("Step 3: PASSED - connectivity maintained after removing unknown user")
    t.Log("Test PASSED: Removing unknown users from policy works correctly")
}
@@ -446,10 +446,9 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
[]string{
    "headscale",
    "preauthkeys",
    "--user",
    strconv.FormatUint(userMap[userName].GetId(), 10),
    "expire",
    key.GetKey(),
    "--id",
    strconv.FormatUint(key.GetId(), 10),
})
require.NoError(t, err)
require.NoError(t, err)

@@ -530,7 +529,7 @@ func TestAuthKeyDeleteKey(t *testing.T) {
// DELETE the pre-auth key using the API
t.Logf("Deleting pre-auth key ID %d using API", authKeyID)

err = headscale.DeleteAuthKey(userID, authKeyString)
err = headscale.DeleteAuthKey(authKeyID)
require.NoError(t, err)
t.Logf("Successfully deleted auth key")
@@ -54,6 +54,7 @@ func TestUserCommand(t *testing.T) {
|
||||
}
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
@@ -63,8 +64,11 @@ func TestUserCommand(t *testing.T) {
|
||||
headscale, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
|
||||
var listUsers []*v1.User
|
||||
var result []string
|
||||
var (
|
||||
listUsers []*v1.User
|
||||
result []string
|
||||
)
|
||||
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
err := executeAndUnmarshal(headscale,
|
||||
[]string{
|
||||
@@ -102,6 +106,7 @@ func TestUserCommand(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
var listAfterRenameUsers []*v1.User
|
||||
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
err := executeAndUnmarshal(headscale,
|
||||
[]string{
|
||||
@@ -127,6 +132,7 @@ func TestUserCommand(t *testing.T) {
|
||||
}, 20*time.Second, 1*time.Second)
|
||||
|
||||
var listByUsername []*v1.User
|
||||
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
err = executeAndUnmarshal(headscale,
|
||||
[]string{
|
||||
@@ -143,6 +149,7 @@ func TestUserCommand(t *testing.T) {
|
||||
}, 10*time.Second, 200*time.Millisecond, "Waiting for user list by username")
|
||||
|
||||
slices.SortFunc(listByUsername, sortWithID)
|
||||
|
||||
want := []*v1.User{
|
||||
{
|
||||
Id: 1,
|
||||
@@ -156,6 +163,7 @@ func TestUserCommand(t *testing.T) {
|
||||
}
|
||||
|
||||
var listByID []*v1.User
|
||||
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
err = executeAndUnmarshal(headscale,
|
||||
[]string{
|
||||
@@ -172,6 +180,7 @@ func TestUserCommand(t *testing.T) {
|
||||
}, 10*time.Second, 200*time.Millisecond, "Waiting for user list by ID")
|
||||
|
||||
slices.SortFunc(listByID, sortWithID)
|
||||
|
||||
want = []*v1.User{
|
||||
{
|
||||
Id: 1,
|
||||
@@ -198,6 +207,7 @@ func TestUserCommand(t *testing.T) {
|
||||
assert.Contains(t, deleteResult, "User destroyed")
|
||||
|
||||
var listAfterIDDelete []*v1.User
|
||||
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
err := executeAndUnmarshal(headscale,
|
||||
[]string{
|
||||
@@ -212,6 +222,7 @@ func TestUserCommand(t *testing.T) {
|
||||
assert.NoError(ct, err)
|
||||
|
||||
slices.SortFunc(listAfterIDDelete, sortWithID)
|
||||
|
||||
want := []*v1.User{
|
||||
{
|
||||
Id: 2,
|
||||
@@ -238,6 +249,7 @@ func TestUserCommand(t *testing.T) {
|
||||
assert.Contains(t, deleteResult, "User destroyed")
|
||||
|
||||
var listAfterNameDelete []v1.User
|
||||
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
err = executeAndUnmarshal(headscale,
|
||||
[]string{
|
||||
@@ -265,6 +277,7 @@ func TestPreAuthKeyCommand(t *testing.T) {
|
||||
}
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
@@ -275,10 +288,12 @@ func TestPreAuthKeyCommand(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
keys := make([]*v1.PreAuthKey, count)
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
for index := range count {
|
||||
var preAuthKey v1.PreAuthKey
|
||||
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
err := executeAndUnmarshal(
|
||||
headscale,
|
||||
@@ -307,14 +322,13 @@ func TestPreAuthKeyCommand(t *testing.T) {
|
||||
assert.Len(t, keys, 3)
|
||||
|
||||
var listedPreAuthKeys []v1.PreAuthKey
|
||||
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--user",
|
||||
"1",
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
@@ -371,28 +385,26 @@ func TestPreAuthKeyCommand(t *testing.T) {
|
||||
)
|
||||
}
|
||||
|
||||
// Test key expiry - use the full key from creation, not the masked one from listing
|
||||
// Test key expiry
|
||||
_, err = headscale.Execute(
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--user",
|
||||
"1",
|
||||
"expire",
|
||||
keys[0].GetKey(),
|
||||
"--id",
|
||||
strconv.FormatUint(keys[0].GetId(), 10),
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
var listedPreAuthKeysAfterExpire []v1.PreAuthKey
|
||||
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
"--user",
|
||||
"1",
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
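The switch from key strings to numeric IDs also shows up on the CLI surface. A condensed sketch of the two ID-based subcommands, assuming the test harness's `headscale.Execute` helper and a previously created `*v1.PreAuthKey` named `key` (both hypothetical names here):

```go
// Expire a pre-auth key by ID; the key string is masked in listings now,
// so the numeric ID is the stable handle.
_, err = headscale.Execute([]string{
	"headscale", "preauthkeys", "--user", "1",
	"expire", "--id", strconv.FormatUint(key.GetId(), 10),
})
require.NoError(t, err)

// Delete a pre-auth key by ID (no --user scoping needed for delete).
_, err = headscale.Execute([]string{
	"headscale", "preauthkeys",
	"delete", "--id", strconv.FormatUint(key.GetId(), 10),
	"--output", "json",
})
require.NoError(t, err)
```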
@@ -416,6 +428,7 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
    }

    scenario, err := NewScenario(spec)

    require.NoError(t, err)
    defer scenario.ShutdownAssertNoPanics(t)

@@ -426,6 +439,7 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
    require.NoError(t, err)

    var preAuthKey v1.PreAuthKey

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(
            headscale,

@@ -445,14 +459,13 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
    }, 10*time.Second, 200*time.Millisecond, "Waiting for preauth key creation without expiry")

    var listedPreAuthKeys []v1.PreAuthKey

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(
            headscale,
            []string{
                "headscale",
                "preauthkeys",
                "--user",
                "1",
                "list",
                "--output",
                "json",

@@ -460,7 +473,7 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
            &listedPreAuthKeys,
        )
        assert.NoError(c, err)
-   }, 10*time.Second, 200*time.Millisecond, "Waiting for preauth keys list without expiry")
+   }, 10*time.Second, 200*time.Millisecond, "Waiting for preauth keys list")

    // There is one key created by "scenario.CreateHeadscaleEnv"
    assert.Len(t, listedPreAuthKeys, 2)

@@ -481,6 +494,7 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
    }

    scenario, err := NewScenario(spec)

    require.NoError(t, err)
    defer scenario.ShutdownAssertNoPanics(t)

@@ -491,6 +505,7 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
    require.NoError(t, err)

    var preAuthReusableKey v1.PreAuthKey

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(
            headscale,

@@ -510,6 +525,7 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
    }, 10*time.Second, 200*time.Millisecond, "Waiting for reusable preauth key creation")

    var preAuthEphemeralKey v1.PreAuthKey

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(
            headscale,

@@ -532,14 +548,13 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
    assert.False(t, preAuthEphemeralKey.GetReusable())

    var listedPreAuthKeys []v1.PreAuthKey

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(
            headscale,
            []string{
                "headscale",
                "preauthkeys",
                "--user",
                "1",
                "list",
                "--output",
                "json",

@@ -565,6 +580,7 @@ func TestPreAuthKeyCorrectUserLoggedInCommand(t *testing.T) {
    }

    scenario, err := NewScenario(spec)

    require.NoError(t, err)
    defer scenario.ShutdownAssertNoPanics(t)

@@ -607,8 +623,10 @@ func TestPreAuthKeyCorrectUserLoggedInCommand(t *testing.T) {
    }, 10*time.Second, 200*time.Millisecond, "Waiting for user2 preauth key creation")

    var listNodes []*v1.Node

    assert.EventuallyWithT(t, func(ct *assert.CollectT) {
        var err error

        listNodes, err = headscale.ListNodes()
        assert.NoError(ct, err)
        assert.Len(ct, listNodes, 1, "Should have exactly 1 node for user1")

@@ -650,6 +668,7 @@ func TestPreAuthKeyCorrectUserLoggedInCommand(t *testing.T) {

    assert.EventuallyWithT(t, func(ct *assert.CollectT) {
        var err error

        listNodes, err = headscale.ListNodes()
        assert.NoError(ct, err)
        assert.Len(ct, listNodes, 2, "Should have 2 nodes after re-login")

@@ -659,6 +678,123 @@ func TestPreAuthKeyCorrectUserLoggedInCommand(t *testing.T) {
    }, 20*time.Second, 1*time.Second)
}

func TestTaggedNodesCLIOutput(t *testing.T) {
    IntegrationSkip(t)

    user1 := "user1"
    user2 := "user2"

    spec := ScenarioSpec{
        NodesPerUser: 1,
        Users:        []string{user1},
    }

    scenario, err := NewScenario(spec)

    require.NoError(t, err)
    defer scenario.ShutdownAssertNoPanics(t)

    err = scenario.CreateHeadscaleEnv(
        []tsic.Option{},
        hsic.WithTestName("tagcli"),
        hsic.WithEmbeddedDERPServerOnly(),
        hsic.WithTLS(),
    )
    require.NoError(t, err)

    headscale, err := scenario.Headscale()
    require.NoError(t, err)

    u2, err := headscale.CreateUser(user2)
    require.NoError(t, err)

    var user2Key v1.PreAuthKey

    // Create a tagged PreAuthKey for user2
    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(
            headscale,
            []string{
                "headscale",
                "preauthkeys",
                "--user",
                strconv.FormatUint(u2.GetId(), 10),
                "create",
                "--reusable",
                "--expiration",
                "24h",
                "--output",
                "json",
                "--tags",
                "tag:test1,tag:test2",
            },
            &user2Key,
        )
        assert.NoError(c, err)
    }, 10*time.Second, 200*time.Millisecond, "Waiting for user2 tagged preauth key creation")

    allClients, err := scenario.ListTailscaleClients()
    requireNoErrListClients(t, err)

    require.Len(t, allClients, 1)

    client := allClients[0]

    // Log out from user1
    err = client.Logout()
    require.NoError(t, err)

    err = scenario.WaitForTailscaleLogout()
    require.NoError(t, err)

    assert.EventuallyWithT(t, func(ct *assert.CollectT) {
        status, err := client.Status()
        assert.NoError(ct, err)
        assert.NotContains(ct, []string{"Starting", "Running"}, status.BackendState,
            "Expected node to be logged out, backend state: %s", status.BackendState)
    }, 30*time.Second, 2*time.Second)

    // Log in with the tagged PreAuthKey (from user2, with tags)
    err = client.Login(headscale.GetEndpoint(), user2Key.GetKey())
    require.NoError(t, err)

    assert.EventuallyWithT(t, func(ct *assert.CollectT) {
        status, err := client.Status()
        assert.NoError(ct, err)
        assert.Equal(ct, "Running", status.BackendState, "Expected node to be logged in, backend state: %s", status.BackendState)
        // With tags-as-identity model, tagged nodes show as TaggedDevices user (2147455555)
        assert.Equal(ct, "userid:2147455555", status.Self.UserID.String(), "Expected node to be logged in as tagged-devices user")
    }, 30*time.Second, 2*time.Second)

    // Wait for the second node to appear
    var listNodes []*v1.Node

    assert.EventuallyWithT(t, func(ct *assert.CollectT) {
        var err error

        listNodes, err = headscale.ListNodes()
        assert.NoError(ct, err)
        assert.Len(ct, listNodes, 2, "Should have 2 nodes after re-login with tagged key")
        assert.Equal(ct, user1, listNodes[0].GetUser().GetName(), "First node should belong to user1")
        assert.Equal(ct, "tagged-devices", listNodes[1].GetUser().GetName(), "Second node should be tagged-devices")
    }, 20*time.Second, 1*time.Second)

    // Test: tailscale status output should show "tagged-devices" not "userid:2147455555"
    // This is the fix for issue #2970 - the Tailscale client should display user-friendly names
    assert.EventuallyWithT(t, func(ct *assert.CollectT) {
        stdout, stderr, err := client.Execute([]string{"tailscale", "status"})
        assert.NoError(ct, err, "tailscale status command should succeed, stderr: %s", stderr)

        t.Logf("Tailscale status output:\n%s", stdout)

        // The output should contain "tagged-devices" for tagged nodes
        assert.Contains(ct, stdout, "tagged-devices", "Tailscale status should show 'tagged-devices' for tagged nodes")

        // The output should NOT show the raw numeric userid to the user
        assert.NotContains(ct, stdout, "userid:2147455555", "Tailscale status should not show numeric userid for tagged nodes")
    }, 20*time.Second, 1*time.Second)
}
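The essence of the assertions above, condensed into one place; a sketch assuming the `client` handle from the scenario in the test:

```go
// Internally, a node registered with a tagged key is owned by the synthetic
// tagged-devices user (stable numeric ID 2147455555)...
status, err := client.Status()
require.NoError(t, err)
require.Equal(t, "userid:2147455555", status.Self.UserID.String())

// ...while user-facing CLI output must render the friendly name (issue #2970).
stdout, _, err := client.Execute([]string{"tailscale", "status"})
require.NoError(t, err)
require.Contains(t, stdout, "tagged-devices")
require.NotContains(t, stdout, "userid:2147455555")
```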
func TestApiKeyCommand(t *testing.T) {
    IntegrationSkip(t)

@@ -669,6 +805,7 @@ func TestApiKeyCommand(t *testing.T) {
    }

    scenario, err := NewScenario(spec)

    require.NoError(t, err)
    defer scenario.ShutdownAssertNoPanics(t)

@@ -701,6 +838,7 @@ func TestApiKeyCommand(t *testing.T) {
    assert.Len(t, keys, 5)

    var listedAPIKeys []v1.ApiKey

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(headscale,
            []string{

@@ -775,6 +913,7 @@ func TestApiKeyCommand(t *testing.T) {
    }

    var listedAfterExpireAPIKeys []v1.ApiKey

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(headscale,
            []string{

@@ -816,6 +955,7 @@ func TestApiKeyCommand(t *testing.T) {
    assert.NoError(t, err)

    var listedAPIKeysAfterDelete []v1.ApiKey

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(headscale,
            []string{

@@ -831,6 +971,77 @@ func TestApiKeyCommand(t *testing.T) {
    }, 10*time.Second, 200*time.Millisecond, "Waiting for API keys list after delete")

    assert.Len(t, listedAPIKeysAfterDelete, 4)

    // Test expire by ID (using key at index 0)
    _, err = headscale.Execute(
        []string{
            "headscale",
            "apikeys",
            "expire",
            "--id",
            strconv.FormatUint(listedAPIKeysAfterDelete[0].GetId(), 10),
        })
    require.NoError(t, err)

    var listedAPIKeysAfterExpireByID []v1.ApiKey

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(headscale,
            []string{
                "headscale",
                "apikeys",
                "list",
                "--output",
                "json",
            },
            &listedAPIKeysAfterExpireByID,
        )
        assert.NoError(c, err)
    }, 10*time.Second, 200*time.Millisecond, "Waiting for API keys list after expire by ID")

    // Verify the key was expired
    for idx := range listedAPIKeysAfterExpireByID {
        if listedAPIKeysAfterExpireByID[idx].GetId() == listedAPIKeysAfterDelete[0].GetId() {
            assert.True(t, listedAPIKeysAfterExpireByID[idx].GetExpiration().AsTime().Before(time.Now()),
                "Key expired by ID should have expiration in the past")
        }
    }

    // Test delete by ID (using key at index 1)
    deletedKeyID := listedAPIKeysAfterExpireByID[1].GetId()
    _, err = headscale.Execute(
        []string{
            "headscale",
            "apikeys",
            "delete",
            "--id",
            strconv.FormatUint(deletedKeyID, 10),
        })
    require.NoError(t, err)

    var listedAPIKeysAfterDeleteByID []v1.ApiKey

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(headscale,
            []string{
                "headscale",
                "apikeys",
                "list",
                "--output",
                "json",
            },
            &listedAPIKeysAfterDeleteByID,
        )
        assert.NoError(c, err)
    }, 10*time.Second, 200*time.Millisecond, "Waiting for API keys list after delete by ID")

    assert.Len(t, listedAPIKeysAfterDeleteByID, 3)

    // Verify the specific key was deleted
    for idx := range listedAPIKeysAfterDeleteByID {
        assert.NotEqual(t, deletedKeyID, listedAPIKeysAfterDeleteByID[idx].GetId(),
            "Deleted key should not be present in the list")
    }
}
func TestNodeCommand(t *testing.T) {

@@ -841,6 +1052,7 @@ func TestNodeCommand(t *testing.T) {
    }

    scenario, err := NewScenario(spec)

    require.NoError(t, err)
    defer scenario.ShutdownAssertNoPanics(t)

@@ -858,6 +1070,7 @@ func TestNodeCommand(t *testing.T) {
        types.MustRegistrationID().String(),
    }
    nodes := make([]*v1.Node, len(regIDs))

    assert.NoError(t, err)

    for index, regID := range regIDs {

@@ -879,6 +1092,7 @@ func TestNodeCommand(t *testing.T) {
        assert.NoError(t, err)

        var node v1.Node

        assert.EventuallyWithT(t, func(c *assert.CollectT) {
            err = executeAndUnmarshal(
                headscale,

@@ -907,6 +1121,7 @@ func TestNodeCommand(t *testing.T) {

    // Test list all nodes after added seconds
    var listAll []v1.Node

    assert.EventuallyWithT(t, func(ct *assert.CollectT) {
        err := executeAndUnmarshal(
            headscale,

@@ -940,6 +1155,7 @@ func TestNodeCommand(t *testing.T) {
        types.MustRegistrationID().String(),
    }
    otherUserMachines := make([]*v1.Node, len(otherUserRegIDs))

    assert.NoError(t, err)

    for index, regID := range otherUserRegIDs {

@@ -961,6 +1177,7 @@ func TestNodeCommand(t *testing.T) {
        assert.NoError(t, err)

        var node v1.Node

        assert.EventuallyWithT(t, func(c *assert.CollectT) {
            err = executeAndUnmarshal(
                headscale,

@@ -989,6 +1206,7 @@ func TestNodeCommand(t *testing.T) {

    // Test list all nodes after added otherUser
    var listAllWithotherUser []v1.Node

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(
            headscale,

@@ -1015,6 +1233,7 @@ func TestNodeCommand(t *testing.T) {

    // Test list all nodes after added otherUser
    var listOnlyotherUserMachineUser []v1.Node

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(
            headscale,

@@ -1066,6 +1285,7 @@ func TestNodeCommand(t *testing.T) {

    // Test: list main user after node is deleted
    var listOnlyMachineUserAfterDelete []v1.Node

    assert.EventuallyWithT(t, func(ct *assert.CollectT) {
        err := executeAndUnmarshal(
            headscale,

@@ -1093,6 +1313,7 @@ func TestNodeExpireCommand(t *testing.T) {
    }

    scenario, err := NewScenario(spec)

    require.NoError(t, err)
    defer scenario.ShutdownAssertNoPanics(t)

@@ -1130,6 +1351,7 @@ func TestNodeExpireCommand(t *testing.T) {
        assert.NoError(t, err)

        var node v1.Node

        assert.EventuallyWithT(t, func(c *assert.CollectT) {
            err = executeAndUnmarshal(
                headscale,

@@ -1155,6 +1377,7 @@ func TestNodeExpireCommand(t *testing.T) {
    assert.Len(t, nodes, len(regIDs))

    var listAll []v1.Node

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(
            headscale,

@@ -1192,6 +1415,7 @@ func TestNodeExpireCommand(t *testing.T) {
    }

    var listAllAfterExpiry []v1.Node

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(
            headscale,

@@ -1224,6 +1448,7 @@ func TestNodeRenameCommand(t *testing.T) {
    }

    scenario, err := NewScenario(spec)

    require.NoError(t, err)
    defer scenario.ShutdownAssertNoPanics(t)

@@ -1241,6 +1466,7 @@ func TestNodeRenameCommand(t *testing.T) {
        types.MustRegistrationID().String(),
    }
    nodes := make([]*v1.Node, len(regIDs))

    assert.NoError(t, err)

    for index, regID := range regIDs {

@@ -1262,6 +1488,7 @@ func TestNodeRenameCommand(t *testing.T) {
        require.NoError(t, err)

        var node v1.Node

        assert.EventuallyWithT(t, func(c *assert.CollectT) {
            err = executeAndUnmarshal(
                headscale,

@@ -1287,6 +1514,7 @@ func TestNodeRenameCommand(t *testing.T) {
    assert.Len(t, nodes, len(regIDs))

    var listAll []v1.Node

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(
            headscale,

@@ -1327,6 +1555,7 @@ func TestNodeRenameCommand(t *testing.T) {
    }

    var listAllAfterRename []v1.Node

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(
            headscale,

@@ -1364,6 +1593,7 @@ func TestNodeRenameCommand(t *testing.T) {
    assert.ErrorContains(t, err, "must not exceed 63 characters")

    var listAllAfterRenameAttempt []v1.Node

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(
            headscale,

@@ -1396,6 +1626,7 @@ func TestPolicyCommand(t *testing.T) {
    }

    scenario, err := NewScenario(spec)

    require.NoError(t, err)
    defer scenario.ShutdownAssertNoPanics(t)

@@ -1451,6 +1682,7 @@ func TestPolicyCommand(t *testing.T) {
    // Get the current policy and check
    // if it is the same as the one we set.
    var output *policyv2.Policy

    assert.EventuallyWithT(t, func(c *assert.CollectT) {
        err = executeAndUnmarshal(
            headscale,

@@ -1479,6 +1711,7 @@ func TestPolicyBrokenConfigCommand(t *testing.T) {
    }

    scenario, err := NewScenario(spec)

    require.NoError(t, err)
    defer scenario.ShutdownAssertNoPanics(t)
@@ -8,6 +8,7 @@ import (
    policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
    "github.com/juanfont/headscale/hscontrol/routes"
    "github.com/juanfont/headscale/hscontrol/types"
+   "github.com/juanfont/headscale/integration/hsic"
    "github.com/ory/dockertest/v3"
    "tailscale.com/tailcfg"
)

@@ -25,13 +26,15 @@ type ControlServer interface {
    CreateUser(user string) (*v1.User, error)
    CreateAuthKey(user uint64, reusable bool, ephemeral bool) (*v1.PreAuthKey, error)
    CreateAuthKeyWithTags(user uint64, reusable bool, ephemeral bool, tags []string) (*v1.PreAuthKey, error)
-   DeleteAuthKey(user uint64, key string) error
+   CreateAuthKeyWithOptions(opts hsic.AuthKeyOptions) (*v1.PreAuthKey, error)
+   DeleteAuthKey(id uint64) error
    ListNodes(users ...string) ([]*v1.Node, error)
    DeleteNode(nodeID uint64) error
    NodesByUser() (map[string][]*v1.Node, error)
    NodesByName() (map[string]*v1.Node, error)
    ListUsers() ([]*v1.User, error)
    MapUsers() (map[string]*v1.User, error)
+   DeleteUser(userID uint64) error
    ApproveRoutes(uint64, []netip.Prefix) (*v1.Node, error)
    SetNodeTags(nodeID uint64, tags []string) error
    GetCert() []byte
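A sketch against the reshaped interface: keys are created via an options struct and deleted by numeric ID. Assumes `cs` is any `ControlServer` implementation and `uid` an existing user's ID (both hypothetical names):

```go
// cycleAuthKey creates a reusable user-owned key, then removes it by ID.
func cycleAuthKey(cs ControlServer, uid uint64) error {
	key, err := cs.CreateAuthKeyWithOptions(hsic.AuthKeyOptions{
		User:     &uid,
		Reusable: true,
	})
	if err != nil {
		return err
	}

	// Deletion is by ID now, not by (user, key string).
	return cs.DeleteAuthKey(key.GetId())
}
```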
@@ -147,7 +147,18 @@ func New(
        return nil, err
    }

-   hostname := fmt.Sprintf("derp-%s-%s", strings.ReplaceAll(version, ".", "-"), hash)
+   // Include run ID in hostname for easier identification of which test run owns this container
+   runID := dockertestutil.GetIntegrationRunID()
+
+   var hostname string
+
+   if runID != "" {
+       // Use last 6 chars of run ID (the random hash part) for brevity
+       runIDShort := runID[len(runID)-6:]
+       hostname = fmt.Sprintf("derp-%s-%s-%s", runIDShort, strings.ReplaceAll(version, ".", "-"), hash)
+   } else {
+       hostname = fmt.Sprintf("derp-%s-%s", strings.ReplaceAll(version, ".", "-"), hash)
+   }

    tlsCert, tlsKey, err := integrationutil.CreateCertificate(hostname)
    if err != nil {
        return nil, fmt.Errorf("failed to create certificates for headscale test: %w", err)
@@ -74,6 +74,7 @@ type HeadscaleInContainer struct {
    // optional config
    port             int
    extraPorts       []string
+   hostMetricsPort  string // Dynamically assigned host port for metrics/pprof access
    caCerts          [][]byte
    hostPortBindings map[string][]string
    aclPolicy        *policyv2.Policy

@@ -330,7 +331,18 @@ func New(
        return nil, err
    }

-   hostname := "hs-" + hash
+   // Include run ID in hostname for easier identification of which test run owns this container
+   runID := dockertestutil.GetIntegrationRunID()
+
+   var hostname string
+
+   if runID != "" {
+       // Use last 6 chars of run ID (the random hash part) for brevity
+       runIDShort := runID[len(runID)-6:]
+       hostname = fmt.Sprintf("hs-%s-%s", runIDShort, hash)
+   } else {
+       hostname = "hs-" + hash
+   }

    hsic := &HeadscaleInContainer{
        hostname: hostname,

@@ -438,13 +450,13 @@ func New(
        Env: env,
    }

-   // Bind metrics port to predictable host port
+   // Bind metrics port to dynamic host port (kernel assigns free port)
    if runOptions.PortBindings == nil {
        runOptions.PortBindings = map[docker.Port][]docker.PortBinding{}
    }

    runOptions.PortBindings["9090/tcp"] = []docker.PortBinding{
-       {HostPort: "49090"},
+       {HostPort: "0"}, // Let kernel assign a free port
    }

    if len(hsic.hostPortBindings) > 0 {

@@ -540,9 +552,14 @@ func New(
    hsic.container = container

+   // Get the dynamically assigned host port for metrics/pprof
+   hsic.hostMetricsPort = container.GetHostPort("9090/tcp")
+
    log.Printf(
-       "Ports for %s: metrics/pprof=49090\n",
+       "Headscale %s metrics available at http://localhost:%s/metrics (debug at http://localhost:%s/debug/)\n",
        hsic.hostname,
+       hsic.hostMetricsPort,
+       hsic.hostMetricsPort,
    )

    // Write the CA certificates to the container

@@ -932,6 +949,13 @@ func (t *HeadscaleInContainer) GetPort() string {
    return strconv.Itoa(t.port)
}

+// GetHostMetricsPort returns the dynamically assigned host port for metrics/pprof access.
+// This port can be used by operators to access metrics at http://localhost:{port}/metrics
+// and debug endpoints at http://localhost:{port}/debug/ while tests are running.
+func (t *HeadscaleInContainer) GetHostMetricsPort() string {
+   return t.hostMetricsPort
+}
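The doc comment above invites operators to poke at the metrics endpoint while a test is running; a minimal sketch of doing that from Go, assuming a `*hsic.HeadscaleInContainer` from a live scenario and only the standard library (`fmt`, `io`, `net/http`):

```go
// dumpMetrics reads headscale's Prometheus metrics via the dynamically
// assigned host port (hypothetical helper, not part of the harness).
func dumpMetrics(hs *hsic.HeadscaleInContainer) error {
	resp, err := http.Get("http://localhost:" + hs.GetHostMetricsPort() + "/metrics")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	fmt.Printf("%s\n", body)
	return nil
}
```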
// GetHealthEndpoint returns a health endpoint for the HeadscaleInContainer
// instance.
func (t *HeadscaleInContainer) GetHealthEndpoint() string {

@@ -1043,33 +1067,52 @@ func (t *HeadscaleInContainer) CreateUser(
    return &u, nil
}

-// CreateAuthKey creates a new "authorisation key" for a User that can be used
-// to authorise a TailscaleClient with the Headscale instance.
-func (t *HeadscaleInContainer) CreateAuthKey(
-   user uint64,
-   reusable bool,
-   ephemeral bool,
-) (*v1.PreAuthKey, error) {
+// AuthKeyOptions defines options for creating an auth key.
+type AuthKeyOptions struct {
+   // User is the user ID that owns the auth key. If nil and Tags are specified,
+   // the auth key is owned by the tags only (tags-as-identity model).
+   User *uint64
+   // Reusable indicates if the key can be used multiple times
+   Reusable bool
+   // Ephemeral indicates if nodes registered with this key should be ephemeral
+   Ephemeral bool
+   // Tags are the tags to assign to the auth key
+   Tags []string
+}
+
+// CreateAuthKeyWithOptions creates a new "authorisation key" with the specified options.
+// This supports both user-owned and tags-only auth keys.
+func (t *HeadscaleInContainer) CreateAuthKeyWithOptions(opts AuthKeyOptions) (*v1.PreAuthKey, error) {
    command := []string{
        "headscale",
-       "--user",
-       strconv.FormatUint(user, 10),
+   }
+
+   // Only add --user flag if User is specified
+   if opts.User != nil {
+       command = append(command, "--user", strconv.FormatUint(*opts.User, 10))
+   }
+
+   command = append(command,
        "preauthkeys",
        "create",
        "--expiration",
        "24h",
        "--output",
        "json",
-   }
+   )

-   if reusable {
+   if opts.Reusable {
        command = append(command, "--reusable")
    }

-   if ephemeral {
+   if opts.Ephemeral {
        command = append(command, "--ephemeral")
    }

+   if len(opts.Tags) > 0 {
+       command = append(command, "--tags", strings.Join(opts.Tags, ","))
+   }
+
    result, _, err := dockertestutil.ExecuteCommand(
        t.container,
        command,

@@ -1080,6 +1123,7 @@ func (t *HeadscaleInContainer) CreateAuthKey(
    }

    var preAuthKey v1.PreAuthKey

    err = json.Unmarshal([]byte(result), &preAuthKey)
    if err != nil {
        return nil, fmt.Errorf("failed to unmarshal auth key: %w", err)

@@ -1088,6 +1132,20 @@ func (t *HeadscaleInContainer) CreateAuthKey(
    return &preAuthKey, nil
}

+// CreateAuthKey creates a new "authorisation key" for a User that can be used
+// to authorise a TailscaleClient with the Headscale instance.
+func (t *HeadscaleInContainer) CreateAuthKey(
+   user uint64,
+   reusable bool,
+   ephemeral bool,
+) (*v1.PreAuthKey, error) {
+   return t.CreateAuthKeyWithOptions(AuthKeyOptions{
+       User:      &user,
+       Reusable:  reusable,
+       Ephemeral: ephemeral,
+   })
+}
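One combination the wrappers above do not exercise is a tags-only key: per the `AuthKeyOptions` doc comment, leaving `User` nil while setting `Tags` yields a key owned by the tags alone. A sketch, assuming a `*HeadscaleInContainer` named `hs` (hypothetical variable):

```go
// Tags-only key: no --user flag is emitted, so the key is owned by its tags
// and nodes registered with it land under the synthetic tagged-devices user.
key, err := hs.CreateAuthKeyWithOptions(AuthKeyOptions{
	Reusable: true,
	Tags:     []string{"tag:test1", "tag:test2"},
})
if err != nil {
	return err
}
_ = key
```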
// CreateAuthKeyWithTags creates a new "authorisation key" for a User with the specified tags.
// This is used to create tagged PreAuthKeys for testing the tags-as-identity model.
func (t *HeadscaleInContainer) CreateAuthKeyWithTags(

@@ -1096,61 +1154,24 @@ func (t *HeadscaleInContainer) CreateAuthKeyWithTags(
    ephemeral bool,
    tags []string,
) (*v1.PreAuthKey, error) {
-   command := []string{
-       "headscale",
-       "--user",
-       strconv.FormatUint(user, 10),
-       "preauthkeys",
-       "create",
-       "--expiration",
-       "24h",
-       "--output",
-       "json",
-   }
-
-   if reusable {
-       command = append(command, "--reusable")
-   }
-
-   if ephemeral {
-       command = append(command, "--ephemeral")
-   }
-
-   if len(tags) > 0 {
-       command = append(command, "--tags", strings.Join(tags, ","))
-   }
-
-   result, _, err := dockertestutil.ExecuteCommand(
-       t.container,
-       command,
-       []string{},
-   )
-   if err != nil {
-       return nil, fmt.Errorf("failed to execute create auth key with tags command: %w", err)
-   }
-
-   var preAuthKey v1.PreAuthKey
-
-   err = json.Unmarshal([]byte(result), &preAuthKey)
-   if err != nil {
-       return nil, fmt.Errorf("failed to unmarshal auth key: %w", err)
-   }
-
-   return &preAuthKey, nil
+   return t.CreateAuthKeyWithOptions(AuthKeyOptions{
+       User:      &user,
+       Reusable:  reusable,
+       Ephemeral: ephemeral,
+       Tags:      tags,
+   })
}

-// DeleteAuthKey deletes an "authorisation key" for a User.
+// DeleteAuthKey deletes an "authorisation key" by ID.
func (t *HeadscaleInContainer) DeleteAuthKey(
-   user uint64,
-   key string,
+   id uint64,
) error {
    command := []string{
        "headscale",
-       "--user",
-       strconv.FormatUint(user, 10),
        "preauthkeys",
        "delete",
-       key,
+       "--id",
+       strconv.FormatUint(id, 10),
        "--output",
        "json",
    }

@@ -1312,6 +1333,31 @@ func (t *HeadscaleInContainer) MapUsers() (map[string]*v1.User, error) {
    return userMap, nil
}

+// DeleteUser deletes a user from the Headscale instance.
+func (t *HeadscaleInContainer) DeleteUser(userID uint64) error {
+   command := []string{
+       "headscale",
+       "users",
+       "delete",
+       "--identifier",
+       strconv.FormatUint(userID, 10),
+       "--force",
+       "--output",
+       "json",
+   }
+
+   _, _, err := dockertestutil.ExecuteCommand(
+       t.container,
+       command,
+       []string{},
+   )
+   if err != nil {
+       return fmt.Errorf("failed to execute delete user command: %w", err)
+   }
+
+   return nil
+}

func (h *HeadscaleInContainer) SetPolicy(pol *policyv2.Policy) error {
    err := h.writePolicy(pol)
    if err != nil {
@@ -247,9 +247,14 @@ func (s *Scenario) AddNetwork(name string) (*dockertest.Network, error) {

    // We run the test suite in a docker container that calls a couple of endpoints for
    // readiness checks, this ensures that we can run the tests with individual networks
-   // and have the client reach the different containers
-   // TODO(kradalby): Can the test-suite be renamed so we can have multiple?
-   err = dockertestutil.AddContainerToNetwork(s.pool, network, "headscale-test-suite")
+   // and have the client reach the different containers.
+   // The container name includes the run ID to support multiple concurrent test runs.
+   testSuiteName := "headscale-test-suite"
+   if runID := dockertestutil.GetIntegrationRunID(); runID != "" {
+       testSuiteName = "headscale-test-suite-" + runID
+   }
+
+   err = dockertestutil.AddContainerToNetwork(s.pool, network, testSuiteName)
    if err != nil {
        return nil, fmt.Errorf("failed to add test suite container to network: %w", err)
    }

@@ -473,6 +478,22 @@ func (s *Scenario) CreatePreAuthKey(
    return nil, fmt.Errorf("failed to create user: %w", errNoHeadscaleAvailable)
}

+// CreatePreAuthKeyWithOptions creates a "pre authorised key" with the specified options
+// to be created in the Headscale instance on behalf of the Scenario.
+func (s *Scenario) CreatePreAuthKeyWithOptions(opts hsic.AuthKeyOptions) (*v1.PreAuthKey, error) {
+   headscale, err := s.Headscale()
+   if err != nil {
+       return nil, fmt.Errorf("failed to create preauth key with options: %w", errNoHeadscaleAvailable)
+   }
+
+   key, err := headscale.CreateAuthKeyWithOptions(opts)
+   if err != nil {
+       return nil, fmt.Errorf("failed to create preauth key with options: %w", err)
+   }
+
+   return key, nil
+}

// CreatePreAuthKeyWithTags creates a "pre authorised key" with the specified tags
// to be created in the Headscale instance on behalf of the Scenario.
func (s *Scenario) CreatePreAuthKeyWithTags(
@@ -80,10 +80,15 @@ func TestSSHOneUserToAll(t *testing.T) {
        },
        SSHs: []policyv2.SSH{
            {
-               Action:       "accept",
-               Sources:      policyv2.SSHSrcAliases{groupp("group:integration-test")},
-               Destinations: policyv2.SSHDstAliases{wildcard()},
-               Users:        []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
+               Action:  "accept",
+               Sources: policyv2.SSHSrcAliases{groupp("group:integration-test")},
+               // Use autogroup:member and autogroup:tagged instead of wildcard
+               // since wildcard (*) is no longer supported for SSH destinations
+               Destinations: policyv2.SSHDstAliases{
+                   ptr.To(policyv2.AutoGroupMember),
+                   ptr.To(policyv2.AutoGroupTagged),
+               },
+               Users: []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
            },
        },
    },

@@ -127,6 +132,8 @@ func TestSSHOneUserToAll(t *testing.T) {
    }
}

+// TestSSHMultipleUsersAllToAll tests that users in a group can SSH to each other's devices
+// using autogroup:self as the destination, which allows same-user SSH access.
func TestSSHMultipleUsersAllToAll(t *testing.T) {
    IntegrationSkip(t)

@@ -147,9 +154,13 @@ func TestSSHMultipleUsersAllToAll(t *testing.T) {
        },
        SSHs: []policyv2.SSH{
            {
-               Action:       "accept",
-               Sources:      policyv2.SSHSrcAliases{groupp("group:integration-test")},
-               Destinations: policyv2.SSHDstAliases{usernamep("user1@"), usernamep("user2@")},
+               Action:  "accept",
+               Sources: policyv2.SSHSrcAliases{groupp("group:integration-test")},
+               // Use autogroup:self to allow users to SSH to their own devices.
+               // Username destinations (e.g., "user1@") now require the source
+               // to be that exact same user only. For group-to-group SSH access,
+               // use autogroup:self instead.
+               Destinations: policyv2.SSHDstAliases{ptr.To(policyv2.AutoGroupSelf)},
                Users:        []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
            },
        },

@@ -170,16 +181,42 @@ func TestSSHMultipleUsersAllToAll(t *testing.T) {
    _, err = scenario.ListTailscaleClientsFQDNs()
    requireNoErrListFQDN(t, err)

-   testInterUserSSH := func(sourceClients []TailscaleClient, targetClients []TailscaleClient) {
-       for _, client := range sourceClients {
-           for _, peer := range targetClients {
-               assertSSHHostname(t, client, peer)
+   // With autogroup:self, users can SSH to their own devices, but not to other users' devices.
+   // Test that user1's devices can SSH to each other
+   for _, client := range nsOneClients {
+       for _, peer := range nsOneClients {
+           if client.Hostname() == peer.Hostname() {
+               continue
+           }
+
+           assertSSHHostname(t, client, peer)
+       }
+   }

-   testInterUserSSH(nsOneClients, nsTwoClients)
-   testInterUserSSH(nsTwoClients, nsOneClients)
+   // Test that user2's devices can SSH to each other
+   for _, client := range nsTwoClients {
+       for _, peer := range nsTwoClients {
+           if client.Hostname() == peer.Hostname() {
+               continue
+           }
+
+           assertSSHHostname(t, client, peer)
+       }
+   }
+
+   // Test that user1 cannot SSH to user2's devices (autogroup:self only allows same-user)
+   for _, client := range nsOneClients {
+       for _, peer := range nsTwoClients {
+           assertSSHPermissionDenied(t, client, peer)
+       }
+   }
+
+   // Test that user2 cannot SSH to user1's devices (autogroup:self only allows same-user)
+   for _, client := range nsTwoClients {
+       for _, peer := range nsOneClients {
+           assertSSHPermissionDenied(t, client, peer)
+       }
+   }
}

func TestSSHNoSSHConfigured(t *testing.T) {

@@ -248,7 +285,7 @@ func TestSSHIsBlockedInACL(t *testing.T) {
            {
                Action:       "accept",
                Sources:      policyv2.SSHSrcAliases{groupp("group:integration-test")},
-               Destinations: policyv2.SSHDstAliases{usernamep("user1@")},
+               Destinations: policyv2.SSHDstAliases{ptr.To(policyv2.AutoGroupSelf)},
                Users:        []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
            },
        },

@@ -297,16 +334,19 @@ func TestSSHUserOnlyIsolation(t *testing.T) {
            },
        },
        SSHs: []policyv2.SSH{
+           // Use autogroup:self to allow users in each group to SSH to their own devices.
+           // Username destinations (e.g., "user1@") require the source to be that
+           // exact same user only, not a group containing that user.
            {
                Action:       "accept",
                Sources:      policyv2.SSHSrcAliases{groupp("group:ssh1")},
-               Destinations: policyv2.SSHDstAliases{usernamep("user1@")},
+               Destinations: policyv2.SSHDstAliases{ptr.To(policyv2.AutoGroupSelf)},
                Users:        []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
            },
            {
                Action:       "accept",
                Sources:      policyv2.SSHSrcAliases{groupp("group:ssh2")},
-               Destinations: policyv2.SSHDstAliases{usernamep("user2@")},
+               Destinations: policyv2.SSHDstAliases{ptr.To(policyv2.AutoGroupSelf)},
                Users:        []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
            },
        },
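As the comments above note, a username destination now only accepts that exact same user as the source. A sketch of a rule that stays valid under the restriction, assuming the file's `usernamep` helper is accepted in `SSHSrcAliases` the way it is in `SSHDstAliases` (an assumption, not confirmed by the diff):

```go
// Same-user rule: user1's devices may SSH to user1's devices only.
rule := policyv2.SSH{
	Action:       "accept",
	Sources:      policyv2.SSHSrcAliases{usernamep("user1@")},
	Destinations: policyv2.SSHDstAliases{usernamep("user1@")},
	Users:        []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
}
_ = rule
```

For anything broader than same-user access, `autogroup:self` (or `autogroup:member`/`autogroup:tagged`) is the replacement, as the rewritten tests show.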
File diff suppressed because it is too large.
@@ -307,7 +307,18 @@ func New(
        return nil, err
    }

-   hostname := fmt.Sprintf("ts-%s-%s", strings.ReplaceAll(version, ".", "-"), hash)
+   // Include run ID in hostname for easier identification of which test run owns this container
+   runID := dockertestutil.GetIntegrationRunID()
+
+   var hostname string
+
+   if runID != "" {
+       // Use last 6 chars of run ID (the random hash part) for brevity
+       runIDShort := runID[len(runID)-6:]
+       hostname = fmt.Sprintf("ts-%s-%s-%s", runIDShort, strings.ReplaceAll(version, ".", "-"), hash)
+   } else {
+       hostname = fmt.Sprintf("ts-%s-%s", strings.ReplaceAll(version, ".", "-"), hash)
+   }

    tsic := &TailscaleInContainer{
        version: version,
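The run-ID-in-hostname branch above now appears near-verbatim in the DERP, Headscale, and Tailscale container constructors. A hypothetical consolidation (not part of the diff) could unify the three copies; it assumes only `dockertestutil.GetIntegrationRunID` and `strings` as used above:

```go
// containerHostname builds "{prefix}[-{runID6}][-{extra...}]-{hash}", where
// runID6 is the trailing 6-char random part of the integration run ID.
func containerHostname(prefix, hash string, extra ...string) string {
	parts := []string{prefix}
	if runID := dockertestutil.GetIntegrationRunID(); runID != "" {
		parts = append(parts, runID[len(runID)-6:])
	}
	parts = append(parts, extra...)
	parts = append(parts, hash)
	return strings.Join(parts, "-")
}
```

Calling `containerHostname("ts", hash, strings.ReplaceAll(version, ".", "-"))` would reproduce the `ts-{runIDShort}-{version}-{hash}` form, and `containerHostname("hs", hash)` the `hs-{runIDShort}-{hash}` form.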
@@ -11,7 +11,7 @@ repo_name: juanfont/headscale
repo_url: https://github.com/juanfont/headscale

# Copyright
-copyright: Copyright © 2025 Headscale authors
+copyright: Copyright © 2026 Headscale authors

# Configuration
theme:

@@ -111,7 +111,7 @@ extra:
    - icon: fontawesome/brands/discord
      link: https://discord.gg/c84AZQhmpx
  headscale:
-   version: 0.27.1
+   version: 0.28.0-beta.2

# Extensions
markdown_extensions:

@@ -183,6 +183,7 @@ nav:
      - Windows: usage/connect/windows.md
  - Reference:
      - Configuration: ref/configuration.md
+     - Registration methods: ref/registration.md
      - OpenID Connect: ref/oidc.md
      - Routes: ref/routes.md
      - TLS: ref/tls.md

@@ -190,6 +191,7 @@ nav:
      - DNS: ref/dns.md
      - DERP: ref/derp.md
      - API: ref/api.md
+     - Tags: ref/tags.md
      - Debug: ref/debug.md
  - Integration:
      - Reverse proxy: ref/integration/reverse-proxy.md
@@ -16,7 +16,10 @@ message CreateApiKeyRequest { google.protobuf.Timestamp expiration = 1; }

message CreateApiKeyResponse { string api_key = 1; }

-message ExpireApiKeyRequest { string prefix = 1; }
+message ExpireApiKeyRequest {
+  string prefix = 1;
+  uint64 id = 2;
+}

message ExpireApiKeyResponse {}

@@ -24,6 +27,9 @@ message ListApiKeysRequest {}

message ListApiKeysResponse { repeated ApiKey api_keys = 1; }

-message DeleteApiKeyRequest { string prefix = 1; }
+message DeleteApiKeyRequest {
+  string prefix = 1;
+  uint64 id = 2;
+}

message DeleteApiKeyResponse {}
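Because `prefix` keeps field number 1, existing callers continue to work while new callers can address API keys by ID. A sketch over the generated Go client in `gen/go`, assuming the usual RPC names on the headscale gRPC service:

```go
// Expire, then delete, an API key by its numeric ID (42 is a placeholder).
if _, err := client.ExpireApiKey(ctx, &v1.ExpireApiKeyRequest{Id: 42}); err != nil {
	return err
}
if _, err := client.DeleteApiKey(ctx, &v1.DeleteApiKeyRequest{Id: 42}); err != nil {
	return err
}
```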
@@ -35,7 +35,7 @@ message Node {

  RegisterMethod register_method = 13;

- reserved 14 to 17;
+ reserved 14 to 20;
  // google.protobuf.Timestamp updated_at = 14;
  // google.protobuf.Timestamp deleted_at = 15;

@@ -43,14 +43,16 @@ message Node {
  // bytes endpoints = 16;
  // bytes enabled_routes = 17;

- repeated string forced_tags = 18;
- repeated string invalid_tags = 19;
- repeated string valid_tags = 20;
+ // Deprecated
+ // repeated string forced_tags = 18;
+ // repeated string invalid_tags = 19;
+ // repeated string valid_tags = 20;
  string given_name = 21;
  bool online = 22;
  repeated string approved_routes = 23;
  repeated string available_routes = 24;
  repeated string subnet_routes = 25;
+ repeated string tags = 26;
}

message RegisterNodeRequest {

@@ -58,27 +60,39 @@ message RegisterNodeRequest {
  string key = 2;
}

-message RegisterNodeResponse { Node node = 1; }
+message RegisterNodeResponse {
+  Node node = 1;
+}

-message GetNodeRequest { uint64 node_id = 1; }
+message GetNodeRequest {
+  uint64 node_id = 1;
+}

-message GetNodeResponse { Node node = 1; }
+message GetNodeResponse {
+  Node node = 1;
+}

message SetTagsRequest {
  uint64 node_id = 1;
  repeated string tags = 2;
}

-message SetTagsResponse { Node node = 1; }
+message SetTagsResponse {
+  Node node = 1;
+}

message SetApprovedRoutesRequest {
  uint64 node_id = 1;
  repeated string routes = 2;
}

-message SetApprovedRoutesResponse { Node node = 1; }
+message SetApprovedRoutesResponse {
+  Node node = 1;
+}

-message DeleteNodeRequest { uint64 node_id = 1; }
+message DeleteNodeRequest {
+  uint64 node_id = 1;
+}

message DeleteNodeResponse {}

@@ -87,18 +101,26 @@ message ExpireNodeRequest {
  google.protobuf.Timestamp expiry = 2;
}

-message ExpireNodeResponse { Node node = 1; }
+message ExpireNodeResponse {
+  Node node = 1;
+}

message RenameNodeRequest {
  uint64 node_id = 1;
  string new_name = 2;
}

-message RenameNodeResponse { Node node = 1; }
+message RenameNodeResponse {
+  Node node = 1;
+}

-message ListNodesRequest { string user = 1; }
+message ListNodesRequest {
+  string user = 1;
+}

-message ListNodesResponse { repeated Node nodes = 1; }
+message ListNodesResponse {
+  repeated Node nodes = 1;
+}

message DebugCreateNodeRequest {
  string user = 1;

@@ -107,8 +129,14 @@ message DebugCreateNodeRequest {
  repeated string routes = 4;
}

-message DebugCreateNodeResponse { Node node = 1; }
+message DebugCreateNodeResponse {
+  Node node = 1;
+}

-message BackfillNodeIPsRequest { bool confirmed = 1; }
+message BackfillNodeIPsRequest {
+  bool confirmed = 1;
+}

-message BackfillNodeIPsResponse { repeated string changes = 1; }
+message BackfillNodeIPsResponse {
+  repeated string changes = 1;
+}
@@ -1,10 +1,11 @@
syntax = "proto3";
package headscale.v1;
-option go_package = "github.com/juanfont/headscale/gen/go/v1";

import "google/protobuf/timestamp.proto";
import "headscale/v1/user.proto";

+option go_package = "github.com/juanfont/headscale/gen/go/v1";
+
message PreAuthKey {
  User user = 1;
  uint64 id = 2;

@@ -25,22 +26,24 @@ message CreatePreAuthKeyRequest {
  repeated string acl_tags = 5;
}

-message CreatePreAuthKeyResponse { PreAuthKey pre_auth_key = 1; }
+message CreatePreAuthKeyResponse {
+  PreAuthKey pre_auth_key = 1;
+}

message ExpirePreAuthKeyRequest {
-  uint64 user = 1;
-  string key = 2;
+  uint64 id = 1;
}

message ExpirePreAuthKeyResponse {}

message DeletePreAuthKeyRequest {
-  uint64 user = 1;
-  string key = 2;
+  uint64 id = 1;
}

message DeletePreAuthKeyResponse {}

-message ListPreAuthKeysRequest { uint64 user = 1; }
+message ListPreAuthKeysRequest {}

-message ListPreAuthKeysResponse { repeated PreAuthKey pre_auth_keys = 1; }
+message ListPreAuthKeysResponse {
+  repeated PreAuthKey pre_auth_keys = 1;
+}
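Pre-auth keys drop the `(user, key)` pair entirely in favour of the numeric ID, and listing is no longer scoped to a user. A sketch over the generated client, with the same assumption as above about RPC names:

```go
// All keys are listed in one call now; expiry is addressed by ID.
keys, err := client.ListPreAuthKeys(ctx, &v1.ListPreAuthKeysRequest{})
if err != nil {
	return err
}
first := keys.GetPreAuthKeys()[0]
if _, err := client.ExpirePreAuthKey(ctx, &v1.ExpirePreAuthKeyRequest{Id: first.GetId()}); err != nil {
	return err
}
```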