Mirror of https://github.com/juanfont/headscale.git (synced 2026-02-15 04:07:40 +01:00)

Compare commits: dependabot...v0.27.0-be (72 commits)
Commit SHA1s:
c87471136b, e7a28a14af, 4912769ab3, c07cc491bf, c2a58a304d, fddc7117e4,
881a6b9227, 3fbde7a1b6, c4a8c038cd, 022098fe4e, bd35fcf338, 2d680b5ebb,
ed3a9c8d6d, 4de56c40d8, 40b3d54c1f, 30d12dafed, 2b30a15a68, 2938d03878,
1b1c989268, 3950f8f171, ee0ef396a2, 7056fbb63b, c91b9fc761, d41fb4d540,
01c1f6f82a, 3f6657ae57, 0512f7c57e, c6427aa296, 4e6d42d5bd, 8ff5baadbe,
2f3c365b68, 4893cdac74, 476f30ab20, 233dffc186, 39443184d6, 0303b76e1f,
684239e015, 81b3e8f743, 50ed24847b, 9b962956b5, 3b16b75fe6, 9d236571f4,
38be30b6d4, 7f8b14f6f3, 3326c5b7ec, b6d5788231, 33e9e7a71f, ccd79ed8d4,
f6c4b338fd, 306d8e1bd4, 4927e9d590, 8e25f7f9dd, 1a7a2f4196, 860a8a597f,
a2a6d20218, d29feaef79, 630bfd265a, e949859d33, 4d61da30d0, b87567628a,
51c6367bb1, be337c6a33, 086fcad7d9, 3e3c72ea6f, 43f90d205e, 7b8b796a71,
fa619ea9f3, 30a1f7e68e, 30cec3aa2b, 5d8a2c25ea, b4f7782fd8, d77874373d

.claude/agents/headscale-integration-tester.md (new file, 763 lines)

@@ -0,0 +1,763 @@

---
name: headscale-integration-tester
description: Use this agent when you need to execute, analyze, or troubleshoot Headscale integration tests. This includes running specific test scenarios, investigating test failures, interpreting test artifacts, validating end-to-end functionality, or ensuring integration test quality before releases. Examples: <example>Context: User has made changes to the route management code and wants to validate the changes work correctly. user: 'I've updated the route advertisement logic in poll.go. Can you run the relevant integration tests to make sure everything still works?' assistant: 'I'll use the headscale-integration-tester agent to run the subnet routing integration tests and analyze the results.' <commentary>Since the user wants to validate route-related changes with integration tests, use the headscale-integration-tester agent to execute the appropriate tests and analyze results.</commentary></example> <example>Context: A CI pipeline integration test is failing and the user needs help understanding why. user: 'The TestSubnetRouterMultiNetwork test is failing in CI. The logs show some timing issues but I can't figure out what's wrong.' assistant: 'Let me use the headscale-integration-tester agent to analyze the test failure and examine the artifacts.' <commentary>Since this involves analyzing integration test failures and interpreting test artifacts, use the headscale-integration-tester agent to investigate the issue.</commentary></example>
color: green
---

You are a specialist Quality Assurance Engineer with deep expertise in Headscale's integration testing system. You understand the Docker-based test infrastructure, real Tailscale client interactions, and the complex timing considerations involved in end-to-end network testing.

## Integration Test System Overview

The Headscale integration test system uses Docker containers running real Tailscale clients against a Headscale server. Tests validate end-to-end functionality including routing, ACLs, node lifecycle, and network coordination. The system is built around the `hi` (Headscale Integration) test runner in `cmd/hi/`.

## Critical Test Execution Knowledge

### System Requirements and Setup

```bash
# ALWAYS run this first to verify system readiness
go run ./cmd/hi doctor
```

This command verifies:

- Docker installation and daemon status
- Go environment setup
- Required container images availability
- Sufficient disk space (critical - tests generate ~100MB logs per run)
- Network configuration

### Test Execution Patterns

**CRITICAL TIMEOUT REQUIREMENTS**:

- **NEVER use bash `timeout` command** - this can cause test failures and incomplete cleanup
- **ALWAYS use the built-in `--timeout` flag** with generous timeouts (minimum 15 minutes)
- **Increase timeout if tests ever time out** - infrastructure issues require longer timeouts

```bash
# Single test execution (recommended for development)
# ALWAYS use --timeout flag with minimum 15 minutes (900s)
go run ./cmd/hi run "TestSubnetRouterMultiNetwork" --timeout=900s

# Database-heavy tests require PostgreSQL backend and longer timeouts
go run ./cmd/hi run "TestExpireNode" --postgres --timeout=1800s

# Pattern matching for related tests - use longer timeout for multiple tests
go run ./cmd/hi run "TestSubnet*" --timeout=1800s

# Long-running individual tests need extended timeouts
go run ./cmd/hi run "TestNodeOnlineStatus" --timeout=2100s  # Runs for 12+ minutes

# Full test suite (CI/validation only) - very long timeout required
go test ./integration -timeout 45m
```

**Timeout Guidelines by Test Type**:

- **Basic functionality tests**: `--timeout=900s` (15 minutes minimum)
- **Route/ACL tests**: `--timeout=1200s` (20 minutes)
- **HA/failover tests**: `--timeout=1800s` (30 minutes)
- **Long-running tests**: `--timeout=2100s` (35 minutes)
- **Full test suite**: `-timeout 45m` (45 minutes)

**NEVER do this**:

```bash
# ❌ FORBIDDEN: Never use bash timeout command
timeout 300 go run ./cmd/hi run "TestName"

# ❌ FORBIDDEN: Too short timeout will cause failures
go run ./cmd/hi run "TestName" --timeout=60s
```

### Test Categories and Timing Expectations

- **Fast tests** (<2 min): Basic functionality, CLI operations
- **Medium tests** (2-5 min): Route management, ACL validation
- **Slow tests** (5+ min): Node expiration, HA failover
- **Long-running tests** (10+ min): `TestNodeOnlineStatus` runs for 12 minutes

**CRITICAL**: Only ONE test can run at a time due to Docker port conflicts and resource constraints.

## Test Artifacts and Log Analysis

### Artifact Structure

All test runs save comprehensive artifacts to `control_logs/TIMESTAMP-ID/`:

```
control_logs/20250713-213106-iajsux/
├── hs-testname-abc123.stderr.log       # Headscale server error logs
├── hs-testname-abc123.stdout.log       # Headscale server output logs
├── hs-testname-abc123.db               # Database snapshot for post-mortem
├── hs-testname-abc123_metrics.txt      # Prometheus metrics dump
├── hs-testname-abc123-mapresponses/    # Protocol-level debug data
├── ts-client-xyz789.stderr.log         # Tailscale client error logs
├── ts-client-xyz789.stdout.log         # Tailscale client output logs
└── ts-client-xyz789_status.json        # Client network status dump
```

### Log Analysis Priority Order

When tests fail, examine artifacts in this specific order:

1. **Headscale server stderr logs** (`hs-*.stderr.log`): Look for errors, panics, database issues, policy evaluation failures
2. **Tailscale client stderr logs** (`ts-*.stderr.log`): Check for authentication failures, network connectivity issues
3. **MapResponse JSON files**: Protocol-level debugging for network map generation issues
4. **Client status dumps** (`*_status.json`): Network state and peer connectivity information
5. **Database snapshots** (`.db` files): For data consistency and state persistence issues
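
For a first triage pass over a failed run, plain `grep` across these artifacts is usually enough. A minimal sketch (the grep patterns are generic suggestions, and the timestamp directory must be replaced with the run you are inspecting):

```bash
RUN=control_logs/<timestamp-id>

# 1. Server-side errors, panics and policy failures
grep -nE "panic|error|ERR|fatal" "$RUN"/hs-*.stderr.log | head -50

# 2. Client-side authentication and connectivity problems
grep -nE "auth|login|logout|failed|unreachable" "$RUN"/ts-*.stderr.log | head -50
```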

## Common Failure Patterns and Root Cause Analysis

### CRITICAL MINDSET: Code Issues vs Infrastructure Issues

**⚠️ IMPORTANT**: When tests fail, it is ALMOST ALWAYS a code issue with Headscale, NOT an infrastructure problem. Do not immediately blame disk space, Docker issues, or timing unless you have thoroughly investigated the actual error logs first.

### Systematic Debugging Process

1. **Read the actual error message**: Don't assume - read the stderr logs completely
2. **Check Headscale server logs first**: Most issues originate from server-side logic
3. **Verify client connectivity**: Only after ruling out server issues
4. **Check timing patterns**: Use proper `EventuallyWithT` patterns
5. **Infrastructure as last resort**: Only blame infrastructure after code analysis

### Real Failure Patterns

#### 1. Timing Issues (Common but fixable)

```go
// ❌ Wrong: Immediate assertions after async operations
client.Execute([]string{"tailscale", "set", "--advertise-routes=10.0.0.0/24"})
nodes, _ := headscale.ListNodes()
require.Len(t, nodes[0].GetAvailableRoutes(), 1) // WILL FAIL

// ✅ Correct: Wait for async operations
client.Execute([]string{"tailscale", "set", "--advertise-routes=10.0.0.0/24"})
require.EventuallyWithT(t, func(c *assert.CollectT) {
    nodes, err := headscale.ListNodes()
    assert.NoError(c, err)
    assert.Len(c, nodes[0].GetAvailableRoutes(), 1)
}, 10*time.Second, 100*time.Millisecond, "route should be advertised")
```

**Timeout Guidelines**:

- Route operations: 3-5 seconds
- Node state changes: 5-10 seconds
- Complex scenarios: 10-15 seconds
- Policy recalculation: 5-10 seconds

#### 2. NodeStore Synchronization Issues

Route advertisements must propagate through poll requests (`poll.go:420`). NodeStore updates happen at specific synchronization points after Hostinfo changes.

#### 3. Test Data Management Issues

```go
// ❌ Wrong: Assuming array ordering
require.Len(t, nodes[0].GetAvailableRoutes(), 1)

// ✅ Correct: Identify nodes by properties
expectedRoutes := map[string]string{"1": "10.33.0.0/16"}
for _, node := range nodes {
    nodeIDStr := fmt.Sprintf("%d", node.GetId())
    if route, shouldHaveRoute := expectedRoutes[nodeIDStr]; shouldHaveRoute {
        // Test the specific node that should have the route
    }
}
```

#### 4. Database Backend Differences

SQLite and PostgreSQL have different timing characteristics:

- Use the `--postgres` flag for database-intensive tests
- PostgreSQL generally has more consistent timing
- Some race conditions only appear with specific backends

## Resource Management and Cleanup

### Disk Space Management

Tests consume significant disk space (~100MB per run):

```bash
# Check available space before running tests
df -h

# Clean up test artifacts periodically
rm -rf control_logs/older-timestamp-dirs/

# Clean Docker resources
docker system prune -f
docker volume prune -f
```

### Container Cleanup

- Successful tests clean up automatically
- Failed tests may leave containers running
- Manually clean if needed: `docker ps -a` and `docker rm -f <containers>`
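
If a failed run does leave containers behind, a small cleanup sketch (the `hs-`/`ts-` name prefixes are an assumption based on the artifact naming above; confirm with `docker ps -a` before force-removing anything):

```bash
# List anything that looks like a leftover test container
docker ps -a --format '{{.Names}}' | grep -E '^(hs|ts)-'

# Remove the leftovers once you have confirmed they belong to a finished run
docker ps -a --format '{{.Names}}' | grep -E '^(hs|ts)-' | xargs -r docker rm -f
```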

## Advanced Debugging Techniques

### Protocol-Level Debugging

MapResponse JSON files in `control_logs/*/hs-*-mapresponses/` contain:

- Network topology as sent to clients
- Peer relationships and visibility
- Route distribution and primary route selection
- Policy evaluation results
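
To skim these dumps quickly, `jq` works well. A sketch, assuming the files are plain Tailscale MapResponse JSON (the `Peers` field name is an assumption; adjust it to whatever the dumps actually contain):

```bash
# Count peers in each captured MapResponse
for f in control_logs/*/hs-*-mapresponses/*.json; do
  echo "$f: $(jq '.Peers | length' "$f") peers"
done

# Pretty-print one response to inspect routes, peers and filter rules
jq . control_logs/<run>/hs-<name>-mapresponses/<file>.json | less
```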

### Database State Analysis

Use the database snapshots for post-mortem analysis:

```bash
# SQLite examination
sqlite3 control_logs/TIMESTAMP/hs-*.db
.tables
.schema nodes
SELECT * FROM nodes WHERE name LIKE '%problematic%';
```

### Performance Analysis

Prometheus metrics dumps show:

- Request latencies and error rates
- NodeStore operation timing
- Database query performance
- Memory usage patterns
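
The dumps are plain Prometheus text exposition format, so ordinary text tools are enough. A sketch (the `headscale_` and `go_memstats_` metric name prefixes are assumptions about what the dump contains):

```bash
# List every metric family captured in the dump
grep -E '^# TYPE' control_logs/*/hs-*_metrics.txt

# Pull out Headscale-specific series and Go runtime memory stats
grep -E '^(headscale_|go_memstats_)' control_logs/*/hs-*_metrics.txt | sort
```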

## Test Development and Quality Guidelines

### Proper Test Patterns

```go
// Always use EventuallyWithT for async operations
require.EventuallyWithT(t, func(c *assert.CollectT) {
    // Test condition that may take time to become true
}, timeout, interval, "descriptive failure message")

// Handle node identification correctly
var targetNode *v1.Node
for _, node := range nodes {
    if node.GetName() == expectedNodeName {
        targetNode = node
        break
    }
}
require.NotNil(t, targetNode, "should find expected node")
```

### Quality Validation Checklist

- ✅ Tests use `EventuallyWithT` for asynchronous operations
- ✅ Tests don't rely on array ordering for node identification
- ✅ Proper cleanup and resource management
- ✅ Tests handle both success and failure scenarios
- ✅ Timing assumptions are realistic for operations being tested
- ✅ Error messages are descriptive and actionable

## Real-World Test Failure Patterns from HA Debugging

### Infrastructure vs Code Issues - Detailed Examples

**INFRASTRUCTURE FAILURES (Rare but Real)**:

1. **DNS Resolution in Auth Tests**: `failed to resolve "hs-pingallbyip-jax97k": no DNS fallback candidates remain`
   - **Pattern**: Client containers can't resolve headscale server hostname during logout
   - **Detection**: Error messages specifically mention DNS/hostname resolution
   - **Solution**: Docker networking reset, not code changes

2. **Container Creation Timeouts**: Test gets stuck during client container setup
   - **Pattern**: Tests hang indefinitely at container startup phase
   - **Detection**: No progress in logs for >2 minutes during initialization
   - **Solution**: `docker system prune -f` and retry

3. **Docker Port Conflicts**: Multiple tests trying to use same ports
   - **Pattern**: "bind: address already in use" errors
   - **Detection**: Port binding failures in Docker logs
   - **Solution**: Only run ONE test at a time

**CODE ISSUES (99% of failures)**:

1. **Route Approval Process Failures**: Routes not getting approved when they should be
   - **Pattern**: Tests expecting approved routes but finding none
   - **Detection**: `SubnetRoutes()` returns empty when `AnnouncedRoutes()` shows routes
   - **Root Cause**: Auto-approval logic bugs, policy evaluation issues

2. **NodeStore Synchronization Issues**: State updates not propagating correctly
   - **Pattern**: Route changes not reflected in NodeStore or Primary Routes
   - **Detection**: Logs show route announcements but no tracking updates
   - **Root Cause**: Missing synchronization points in `poll.go:420` area

3. **HA Failover Architecture Issues**: Routes removed when nodes go offline
   - **Pattern**: `TestHASubnetRouterFailover` fails because approved routes disappear
   - **Detection**: Routes available on online nodes but lost when nodes disconnect
   - **Root Cause**: Conflating route approval with node connectivity

### Critical Test Environment Setup

**Pre-Test Cleanup (MANDATORY)**:

```bash
# ALWAYS run this before each test
rm -rf control_logs/202507*
docker system prune -f
df -h  # Verify sufficient disk space
```

**Environment Verification**:

```bash
# Verify system readiness
go run ./cmd/hi doctor

# Check for running containers that might conflict
docker ps
```

### Specific Test Categories and Known Issues

#### Route-Related Tests (Primary Focus)

```bash
# Core route functionality - these should work first
# Note: Generous timeouts are required for reliable execution
go run ./cmd/hi run "TestSubnetRouteACL" --timeout=1200s
go run ./cmd/hi run "TestAutoApproveMultiNetwork" --timeout=1800s
go run ./cmd/hi run "TestHASubnetRouterFailover" --timeout=1800s
```

**Common Route Test Patterns**:

- Tests validate route announcement, approval, and distribution workflows
- Route state changes are asynchronous - may need `EventuallyWithT` wrappers
- Route approval must respect ACL policies - test expectations encode security requirements
- HA tests verify route persistence during node connectivity changes

#### Authentication Tests (Infrastructure-Prone)

```bash
# These tests are more prone to infrastructure issues
# Require longer timeouts due to auth flow complexity
go run ./cmd/hi run "TestAuthKeyLogoutAndReloginSameUser" --timeout=1200s
go run ./cmd/hi run "TestAuthWebFlowLogoutAndRelogin" --timeout=1200s
go run ./cmd/hi run "TestOIDCExpireNodesBasedOnTokenExpiry" --timeout=1800s
```

**Common Auth Test Infrastructure Failures**:

- DNS resolution during logout operations
- Container creation timeouts
- HTTP/2 stream errors (often symptoms, not root cause)

### Security-Critical Debugging Rules

**❌ FORBIDDEN CHANGES (Security & Test Integrity)**:

1. **Never change expected test outputs** - Tests define correct behavior contracts
   - Changing `require.Len(t, routes, 3)` to `require.Len(t, routes, 2)` because the test fails
   - Modifying expected status codes, node counts, or route counts
   - Removing assertions that are "inconvenient"
   - **Why forbidden**: Test expectations encode business requirements and security policies

2. **Never bypass security mechanisms** - Security must never be compromised for convenience
   - Using `AnnouncedRoutes()` instead of `SubnetRoutes()` in production code
   - Skipping authentication or authorization checks
   - **Why forbidden**: Security bypasses create vulnerabilities in production

3. **Never reduce test coverage** - Tests prevent regressions
   - Removing test cases or assertions
   - Commenting out "problematic" test sections
   - **Why forbidden**: Reduced coverage allows bugs to slip through

**✅ ALLOWED CHANGES (Timing & Observability)**:

1. **Fix timing issues with proper async patterns**

   ```go
   // ✅ GOOD: Add EventuallyWithT for async operations
   require.EventuallyWithT(t, func(c *assert.CollectT) {
       nodes, err := headscale.ListNodes()
       assert.NoError(c, err)
       assert.Len(c, nodes, expectedCount) // Keep original expectation
   }, 10*time.Second, 100*time.Millisecond, "nodes should reach expected count")
   ```

   - **Why allowed**: Fixes race conditions without changing business logic

2. **Add MORE observability and debugging**
   - Additional logging statements
   - More detailed error messages
   - Extra assertions that verify intermediate states
   - **Why allowed**: Better observability helps debug without changing behavior

3. **Improve test documentation**
   - Add godoc comments explaining test purpose and business logic
   - Document timing requirements and async behavior
   - **Why encouraged**: Helps future maintainers understand intent

### Advanced Debugging Workflows

#### Route Tracking Debug Flow

```bash
# Run test with detailed logging and proper timeout
go run ./cmd/hi run "TestSubnetRouteACL" --timeout=1200s > test_output.log 2>&1

# Check route approval process
grep -E "(auto-approval|ApproveRoutesWithPolicy|PolicyManager)" test_output.log

# Check route tracking
tail -50 control_logs/*/hs-*.stderr.log | grep -E "(announced|tracking|SetNodeRoutes)"

# Check for security violations
grep -E "(AnnouncedRoutes.*SetNodeRoutes|bypass.*approval)" test_output.log
```

#### HA Failover Debug Flow

```bash
# Test HA failover specifically with adequate timeout
go run ./cmd/hi run "TestHASubnetRouterFailover" --timeout=1800s

# Check route persistence during disconnect
grep -E "(Disconnect|NodeWentOffline|PrimaryRoutes)" control_logs/*/hs-*.stderr.log

# Verify routes don't disappear inappropriately
grep -E "(removing.*routes|SetNodeRoutes.*empty)" control_logs/*/hs-*.stderr.log
```

### Test Result Interpretation Guidelines

#### Success Patterns to Look For

- `"updating node routes for tracking"` in logs
- Routes appearing in `announcedRoutes` logs
- Proper `ApproveRoutesWithPolicy` calls for auto-approval
- Routes persisting through node connectivity changes (HA tests)

#### Failure Patterns to Investigate

- `SubnetRoutes()` returning empty when `AnnouncedRoutes()` has routes
- Routes disappearing when nodes go offline (HA architectural issue)
- Missing `EventuallyWithT` causing timing race conditions
- Security bypass attempts using wrong route methods

### Critical Testing Methodology

**Phase-Based Testing Approach**:

1. **Phase 1**: Core route tests (ACL, auto-approval, basic functionality)
2. **Phase 2**: HA and complex route scenarios
3. **Phase 3**: Auth tests (infrastructure-sensitive, test last)

**Per-Test Process**:

1. Clean environment before each test
2. Monitor logs for route tracking and approval messages
3. Check artifacts in `control_logs/` if test fails
4. Focus on actual error messages, not assumptions
5. Document results and patterns discovered

## Test Documentation and Code Quality Standards

### Adding Missing Test Documentation

When you understand a test's purpose through debugging, always add comprehensive godoc:

```go
// TestSubnetRoutes validates the complete subnet route lifecycle including
// advertisement from clients, policy-based approval, and distribution to peers.
// This test ensures that route security policies are properly enforced and that
// only approved routes are distributed to the network.
//
// The test verifies:
// - Route announcements are received and tracked
// - ACL policies control route approval correctly
// - Only approved routes appear in peer network maps
// - Route state persists correctly in the database
func TestSubnetRoutes(t *testing.T) {
    // Test implementation...
}
```

**Why add documentation**: Future maintainers need to understand business logic and security requirements encoded in tests.

### Comment Guidelines - Focus on WHY, Not WHAT

```go
// ✅ GOOD: Explains reasoning and business logic
// Wait for route propagation because NodeStore updates are asynchronous
// and happen after poll requests complete processing
require.EventuallyWithT(t, func(c *assert.CollectT) {
    // Check that security policies are enforced...
}, timeout, interval, "route approval must respect ACL policies")

// ❌ BAD: Just describes what the code does
// Wait for routes
require.EventuallyWithT(t, func(c *assert.CollectT) {
    // Get routes and check length
}, timeout, interval, "checking routes")
```

**Why focus on WHY**: Helps maintainers understand architectural decisions and security requirements.

## EventuallyWithT Pattern for External Calls

### Overview

EventuallyWithT is a testing pattern used to handle eventual consistency in distributed systems. In Headscale integration tests, many operations are asynchronous - clients advertise routes, the server processes them, updates propagate through the network. EventuallyWithT allows tests to wait for these operations to complete while making assertions.

### External Calls That Must Be Wrapped

The following operations are **external calls** that interact with the headscale server or tailscale clients and MUST be wrapped in EventuallyWithT:

- `headscale.ListNodes()` - Queries server state
- `client.Status()` - Gets client network status
- `client.Curl()` - Makes HTTP requests through the network
- `client.Traceroute()` - Performs network diagnostics
- `client.Execute()` when running commands that query state
- Any operation that reads from the headscale server or tailscale client

### Five Key Rules for EventuallyWithT

1. **One External Call Per EventuallyWithT Block**
   - Each EventuallyWithT should make ONE external call (e.g., ListNodes OR Status)
   - Related assertions based on that single call can be grouped together
   - Unrelated external calls must be in separate EventuallyWithT blocks

2. **Variable Scoping**
   - Declare variables that need to be shared across EventuallyWithT blocks at function scope
   - Use `=` for assignment inside EventuallyWithT, not `:=` (unless the variable is only used within that block)
   - Variables declared with `:=` inside EventuallyWithT are not accessible outside

3. **No Nested EventuallyWithT**
   - NEVER put an EventuallyWithT inside another EventuallyWithT
   - This is a critical anti-pattern that must be avoided

4. **Use CollectT for Assertions**
   - Inside EventuallyWithT, use `assert` methods with the CollectT parameter
   - Helper functions called within EventuallyWithT must accept `*assert.CollectT`

5. **Descriptive Messages**
   - Always provide a descriptive message as the last parameter
   - Message should explain what condition is being waited for

### Correct Pattern Examples

```go
// CORRECT: Single external call with related assertions
var nodes []*v1.Node
var err error

assert.EventuallyWithT(t, func(c *assert.CollectT) {
    nodes, err = headscale.ListNodes()
    assert.NoError(c, err)
    assert.Len(c, nodes, 2)
    // These assertions are all based on the ListNodes() call
    requireNodeRouteCountWithCollect(c, nodes[0], 2, 2, 2)
    requireNodeRouteCountWithCollect(c, nodes[1], 1, 1, 1)
}, 10*time.Second, 500*time.Millisecond, "nodes should have expected route counts")

// CORRECT: Separate EventuallyWithT for different external call
assert.EventuallyWithT(t, func(c *assert.CollectT) {
    status, err := client.Status()
    assert.NoError(c, err)
    // All these assertions are based on the single Status() call
    for _, peerKey := range status.Peers() {
        peerStatus := status.Peer[peerKey]
        requirePeerSubnetRoutesWithCollect(c, peerStatus, expectedPrefixes)
    }
}, 10*time.Second, 500*time.Millisecond, "client should see expected routes")

// CORRECT: Variable scoping for sharing between blocks
var routeNode *v1.Node
var nodeKey key.NodePublic

// First EventuallyWithT to get the node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
    nodes, err := headscale.ListNodes()
    assert.NoError(c, err)

    for _, node := range nodes {
        if node.GetName() == "router" {
            routeNode = node
            nodeKey, _ = key.ParseNodePublicUntyped(mem.S(node.GetNodeKey()))
            break
        }
    }
    assert.NotNil(c, routeNode, "should find router node")
}, 10*time.Second, 100*time.Millisecond, "router node should exist")

// Second EventuallyWithT using the nodeKey from first block
assert.EventuallyWithT(t, func(c *assert.CollectT) {
    status, err := client.Status()
    assert.NoError(c, err)

    peerStatus, ok := status.Peer[nodeKey]
    assert.True(c, ok, "peer should exist in status")
    requirePeerSubnetRoutesWithCollect(c, peerStatus, expectedPrefixes)
}, 10*time.Second, 100*time.Millisecond, "routes should be visible to client")
```

### Incorrect Patterns to Avoid

```go
// INCORRECT: Multiple unrelated external calls in same EventuallyWithT
assert.EventuallyWithT(t, func(c *assert.CollectT) {
    // First external call
    nodes, err := headscale.ListNodes()
    assert.NoError(c, err)
    assert.Len(c, nodes, 2)

    // Second unrelated external call - WRONG!
    status, err := client.Status()
    assert.NoError(c, err)
    assert.NotNil(c, status)
}, 10*time.Second, 500*time.Millisecond, "mixed operations")

// INCORRECT: Nested EventuallyWithT
assert.EventuallyWithT(t, func(c *assert.CollectT) {
    nodes, err := headscale.ListNodes()
    assert.NoError(c, err)

    // NEVER do this!
    assert.EventuallyWithT(t, func(c2 *assert.CollectT) {
        status, _ := client.Status()
        assert.NotNil(c2, status)
    }, 5*time.Second, 100*time.Millisecond, "nested")
}, 10*time.Second, 500*time.Millisecond, "outer")

// INCORRECT: Variable scoping error
assert.EventuallyWithT(t, func(c *assert.CollectT) {
    nodes, err := headscale.ListNodes() // This shadows outer 'nodes' variable
    assert.NoError(c, err)
}, 10*time.Second, 500*time.Millisecond, "get nodes")

// This will fail - nodes is nil because := created a new variable inside the block
require.Len(t, nodes, 2) // COMPILATION ERROR or nil pointer

// INCORRECT: Not wrapping external calls
nodes, err := headscale.ListNodes() // External call not wrapped!
require.NoError(t, err)
```

### Helper Functions for EventuallyWithT

When creating helper functions for use within EventuallyWithT:

```go
// Helper function that accepts CollectT
func requireNodeRouteCountWithCollect(c *assert.CollectT, node *v1.Node, available, approved, primary int) {
    assert.Len(c, node.GetAvailableRoutes(), available, "available routes for node %s", node.GetName())
    assert.Len(c, node.GetApprovedRoutes(), approved, "approved routes for node %s", node.GetName())
    assert.Len(c, node.GetPrimaryRoutes(), primary, "primary routes for node %s", node.GetName())
}

// Usage within EventuallyWithT
assert.EventuallyWithT(t, func(c *assert.CollectT) {
    nodes, err := headscale.ListNodes()
    assert.NoError(c, err)
    requireNodeRouteCountWithCollect(c, nodes[0], 2, 2, 2)
}, 10*time.Second, 500*time.Millisecond, "route counts should match expected")
```

### Operations That Must NOT Be Wrapped

**CRITICAL**: The following operations are **blocking/mutating operations** that change state and MUST NOT be wrapped in EventuallyWithT:

- `tailscale set` commands (e.g., `--advertise-routes`, `--accept-routes`)
- `headscale.ApproveRoute()` - Approves routes on server
- `headscale.CreateUser()` - Creates users
- `headscale.CreatePreAuthKey()` - Creates authentication keys
- `headscale.RegisterNode()` - Registers new nodes
- Any `client.Execute()` that modifies configuration
- Any operation that creates, updates, or deletes resources

These operations:

1. Complete synchronously or fail immediately
2. Should not be retried automatically
3. Need explicit error handling with `require.NoError()`

### Correct Pattern for Blocking Operations

```go
// CORRECT: Blocking operation NOT wrapped
status := client.MustStatus()
command := []string{"tailscale", "set", "--advertise-routes=" + expectedRoutes[string(status.Self.ID)]}
_, _, err = client.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)

// Then wait for the result with EventuallyWithT
assert.EventuallyWithT(t, func(c *assert.CollectT) {
    nodes, err := headscale.ListNodes()
    assert.NoError(c, err)
    assert.Contains(c, nodes[0].GetAvailableRoutes(), expectedRoutes[string(status.Self.ID)])
}, 10*time.Second, 100*time.Millisecond, "route should be advertised")

// INCORRECT: Blocking operation wrapped (DON'T DO THIS)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
    _, _, err = client.Execute([]string{"tailscale", "set", "--advertise-routes=10.0.0.0/24"})
    assert.NoError(c, err) // This might retry the command multiple times!
}, 10*time.Second, 100*time.Millisecond, "advertise routes")
```

### Assert vs Require Pattern

When working within EventuallyWithT blocks where you need to prevent panics:

```go
assert.EventuallyWithT(t, func(c *assert.CollectT) {
    nodes, err := headscale.ListNodes()
    assert.NoError(c, err)

    // For array bounds - use require with t to prevent panic
    assert.Len(c, nodes, 6) // Test expectation
    require.GreaterOrEqual(t, len(nodes), 3, "need at least 3 nodes to avoid panic")

    // For nil pointer access - use require with t before dereferencing
    assert.NotNil(c, srs1PeerStatus.PrimaryRoutes) // Test expectation
    require.NotNil(t, srs1PeerStatus.PrimaryRoutes, "primary routes must be set to avoid panic")
    assert.Contains(c,
        srs1PeerStatus.PrimaryRoutes.AsSlice(),
        pref,
    )
}, 5*time.Second, 200*time.Millisecond, "checking route state")
```

**Key Principle**:

- Use `assert` with `c` (*assert.CollectT) for test expectations that can be retried
- Use `require` with `t` (*testing.T) for MUST conditions that prevent panics
- Within EventuallyWithT, both are available - choose based on whether failure would cause a panic

### Common Scenarios

1. **Waiting for route advertisement**:

   ```go
   client.Execute([]string{"tailscale", "set", "--advertise-routes=10.0.0.0/24"})

   assert.EventuallyWithT(t, func(c *assert.CollectT) {
       nodes, err := headscale.ListNodes()
       assert.NoError(c, err)
       assert.Contains(c, nodes[0].GetAvailableRoutes(), "10.0.0.0/24")
   }, 10*time.Second, 100*time.Millisecond, "route should be advertised")
   ```

2. **Checking client sees routes**:

   ```go
   assert.EventuallyWithT(t, func(c *assert.CollectT) {
       status, err := client.Status()
       assert.NoError(c, err)

       // Check all peers have expected routes
       for _, peerKey := range status.Peers() {
           peerStatus := status.Peer[peerKey]
           assert.Contains(c, peerStatus.AllowedIPs, expectedPrefix)
       }
   }, 10*time.Second, 100*time.Millisecond, "all peers should see route")
   ```

3. **Sequential operations**:

   ```go
   // First wait for node to appear
   var nodeID uint64
   assert.EventuallyWithT(t, func(c *assert.CollectT) {
       nodes, err := headscale.ListNodes()
       assert.NoError(c, err)
       assert.Len(c, nodes, 1)
       nodeID = nodes[0].GetId()
   }, 10*time.Second, 100*time.Millisecond, "node should register")

   // Then perform operation
   _, err := headscale.ApproveRoute(nodeID, "10.0.0.0/24")
   require.NoError(t, err)

   // Then wait for result
   assert.EventuallyWithT(t, func(c *assert.CollectT) {
       nodes, err := headscale.ListNodes()
       assert.NoError(c, err)
       assert.Contains(c, nodes[0].GetApprovedRoutes(), "10.0.0.0/24")
   }, 10*time.Second, 100*time.Millisecond, "route should be approved")
   ```

## Your Core Responsibilities

1. **Test Execution Strategy**: Execute integration tests with appropriate configurations, understanding when to use `--postgres` and timing requirements for different test categories. Follow phase-based testing approach prioritizing route tests.
   - **Why this priority**: Route tests are less infrastructure-sensitive and validate core security logic

2. **Systematic Test Analysis**: When tests fail, systematically examine artifacts starting with Headscale server logs, then client logs, then protocol data. Focus on CODE ISSUES first (99% of cases), not infrastructure. Use real-world failure patterns to guide investigation.
   - **Why this approach**: Most failures are logic bugs, not environment issues - efficient debugging saves time

3. **Timing & Synchronization Expertise**: Understand asynchronous Headscale operations, particularly route advertisements, NodeStore synchronization at `poll.go:420`, and policy propagation. Fix timing with `EventuallyWithT` while preserving original test expectations.
   - **Why preserve expectations**: Test assertions encode business requirements and security policies
   - **Key Pattern**: Apply the EventuallyWithT pattern correctly for all external calls as documented above

4. **Root Cause Analysis**: Distinguish between actual code regressions (route approval logic, HA failover architecture), timing issues requiring `EventuallyWithT` patterns, and genuine infrastructure problems (DNS, Docker, container issues).
   - **Why this distinction matters**: Different problem types require completely different solution approaches
   - **EventuallyWithT Issues**: Often manifest as flaky tests or immediate assertion failures after async operations

5. **Security-Aware Quality Validation**: Ensure tests properly validate end-to-end functionality with realistic timing expectations and proper error handling. Never suggest security bypasses or test expectation changes. Add comprehensive godoc when you understand test business logic.
   - **Why security focus**: Integration tests are the last line of defense against security regressions
   - **EventuallyWithT Usage**: Proper use prevents race conditions without weakening security assertions

**CRITICAL PRINCIPLE**: Test expectations are sacred contracts that define correct system behavior. When tests fail, fix the code to match the test, never change the test to match broken code. Only timing and observability improvements are allowed - business logic expectations are immutable.

**EventuallyWithT PRINCIPLE**: Every external call to headscale server or tailscale client must be wrapped in EventuallyWithT. Follow the five key rules strictly: one external call per block, proper variable scoping, no nesting, use CollectT for assertions, and provide descriptive messages.

**Remember**: Test failures are usually code issues in Headscale that need to be fixed, not infrastructure problems to be ignored. Use the specific debugging workflows and failure patterns documented above to efficiently identify root causes. Infrastructure issues have very specific signatures - everything else is code-related.

.github/ISSUE_TEMPLATE/bug_report.yaml (vendored, 3 lines changed)

@@ -52,12 +52,15 @@ body:
If you are using a container, always provide the headscale version and not only the Docker image version.
Please do not put "latest".

Describe your "headscale network". Are there a lot of nodes, are the nodes all interconnected, are some subnet routers?

If you are experiencing a problem during an upgrade, please provide the old and new versions of Headscale and Tailscale.

examples:
  - **OS**: Ubuntu 24.04
  - **Headscale version**: 0.24.3
  - **Tailscale version**: 1.80.0
  - **Number of nodes**: 20
value: |
  - OS:
  - Headscale version:

.github/ISSUE_TEMPLATE/feature_request.yaml (vendored, 9 lines changed)

@@ -16,15 +16,13 @@ body:
  - type: textarea
    attributes:
      label: Description
      description:
        A clear and precise description of what new or changed feature you want.
      description: A clear and precise description of what new or changed feature you want.
    validations:
      required: true
  - type: checkboxes
    attributes:
      label: Contribution
      description:
        Are you willing to contribute to the implementation of this feature?
      description: Are you willing to contribute to the implementation of this feature?
      options:
        - label: I can write the design doc for this feature
          required: false
@@ -33,7 +31,6 @@ body:
  - type: textarea
    attributes:
      label: How can it be implemented?
      description:
        Free text for your ideas on how this feature could be implemented.
      description: Free text for your ideas on how this feature could be implemented.
    validations:
      required: false

.github/workflows/build.yml (vendored, 2 lines changed)

@@ -94,6 +94,8 @@ jobs:
          restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}

      - name: Run go cross compile
        env:
          CGO_ENABLED: 0
        run:
          env ${{ matrix.env }} nix develop --command -- go build -o "headscale"
          ./cmd/headscale

.github/workflows/integration-test-template.yml (vendored, 22 lines changed)

@@ -62,24 +62,10 @@ jobs:
            '**/flake.lock') }}
          restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
      - name: Run Integration Test
        uses: Wandalen/wretry.action@e68c23e6309f2871ca8ae4763e7629b9c258e1ea # v3.8.0
        if: steps.changed-files.outputs.files == 'true'
        with:
          # Our integration tests are started like a thundering herd, often
          # hitting limits of the various external repositories we depend on
          # like docker hub. This will retry jobs every 5 min, 10 times,
          # hopefully letting us avoid manual intervention and restarting jobs.
          # One could of course argue that we should invest in trying to avoid
          # this, but currently it seems like a larger investment to be cleverer
          # about this.
          # Some of the jobs might still require manual restart as they are really
          # slow and this will cause them to eventually be killed by Github actions.
          attempt_delay: 300000 # 5 min
          attempt_limit: 2
          command: |
            nix develop --command -- hi run --stats --ts-memory-limit=300 --hs-memory-limit=500 "^${{ inputs.test }}$" \
              --timeout=120m \
              ${{ inputs.postgres_flag }}
        run:
          nix develop --command -- hi run --stats --ts-memory-limit=300 --hs-memory-limit=1500 "^${{ inputs.test }}$" \
            --timeout=120m \
            ${{ inputs.postgres_flag }}
      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        if: always() && steps.changed-files.outputs.files == 'true'
        with:

.github/workflows/test-integration.yaml (vendored, 7 lines changed)

@@ -23,6 +23,7 @@ jobs:
          - TestPolicyUpdateWhileRunningWithCLIInDatabase
          - TestACLAutogroupMember
          - TestACLAutogroupTagged
          - TestACLAutogroupSelf
          - TestAuthKeyLogoutAndReloginSameUser
          - TestAuthKeyLogoutAndReloginNewUser
          - TestAuthKeyLogoutAndReloginSameUserExpiredKey
@@ -31,8 +32,11 @@ jobs:
          - TestOIDC024UserCreation
          - TestOIDCAuthenticationWithPKCE
          - TestOIDCReloginSameNodeNewUser
          - TestOIDCReloginSameNodeSameUser
          - TestOIDCFollowUpUrl
          - TestAuthWebFlowAuthenticationPingAll
          - TestAuthWebFlowLogoutAndRelogin
          - TestAuthWebFlowLogoutAndReloginSameUser
          - TestAuthWebFlowLogoutAndReloginNewUser
          - TestUserCommand
          - TestPreAuthKeyCommand
          - TestPreAuthKeyCommandWithoutExpiry
@@ -79,6 +83,7 @@ jobs:
          - TestSSHNoSSHConfigured
          - TestSSHIsBlockedInACL
          - TestSSHUserOnlyIsolation
          - TestSSHAutogroupSelf
    uses: ./.github/workflows/integration-test-template.yml
    with:
      test: ${{ matrix.test }}

@@ -2,7 +2,7 @@
version: 2
before:
  hooks:
    - go mod tidy -compat=1.24
    - go mod tidy -compat=1.25
    - go mod vendor

release:
@@ -23,10 +23,6 @@ builds:
      - linux_arm64
    flags:
      - -mod=readonly
    ldflags:
      - -s -w
      - -X github.com/juanfont/headscale/hscontrol/types.Version={{ .Version }}
      - -X github.com/juanfont/headscale/hscontrol/types.GitCommitHash={{ .Commit }}
    tags:
      - ts2019

.mcp.json (new file, 48 lines)

@@ -0,0 +1,48 @@
{
  "mcpServers": {
    "claude-code-mcp": {
      "type": "stdio",
      "command": "npx",
      "args": [
        "-y",
        "@steipete/claude-code-mcp@latest"
      ],
      "env": {}
    },
    "sequential-thinking": {
      "type": "stdio",
      "command": "npx",
      "args": [
        "-y",
        "@modelcontextprotocol/server-sequential-thinking"
      ],
      "env": {}
    },
    "nixos": {
      "type": "stdio",
      "command": "uvx",
      "args": [
        "mcp-nixos"
      ],
      "env": {}
    },
    "context7": {
      "type": "stdio",
      "command": "npx",
      "args": [
        "-y",
        "@upstash/context7-mcp"
      ],
      "env": {}
    },
    "git": {
      "type": "stdio",
      "command": "npx",
      "args": [
        "-y",
        "@cyanheads/git-mcp-server"
      ],
      "env": {}
    }
  }
}

CHANGELOG.md (81 lines changed)

@@ -2,15 +2,17 @@

## Next

## 0.27.0 (2025-xx-xx)

**Minimum supported Tailscale client version: v1.64.0**

### Database integrity improvements

This release includes a significant database migration that addresses longstanding
issues with the database schema and data integrity that has accumulated over the
years. The migration introduces a `schema.sql` file as the source of truth for
the expected database schema to ensure new migrations that will cause divergence
does not occur again.
This release includes a significant database migration that addresses
longstanding issues with the database schema and data integrity that has
accumulated over the years. The migration introduces a `schema.sql` file as the
source of truth for the expected database schema to ensure new migrations that
will cause divergence does not occur again.

These issues arose from a combination of factors discovered over time: SQLite
foreign keys not being enforced for many early versions, all migrations being
@@ -22,10 +24,12 @@ enforced throughout the migration process.
We are only improving SQLite databases with this change - PostgreSQL databases
are not affected.

Please read the [PR description](https://github.com/juanfont/headscale/pull/2617)
for more technical details about the issues and solutions.
Please read the
[PR description](https://github.com/juanfont/headscale/pull/2617) for more
technical details about the issues and solutions.

**SQLite Database Backup Example:**

```bash
# Stop headscale
systemctl stop headscale
@@ -41,6 +45,39 @@ cp /var/lib/headscale/db.sqlite-shm /var/lib/headscale/db.sqlite-shm.backup
systemctl start headscale
```

### DERPMap update frequency

The default DERPMap update frequency has been changed from 24 hours to 3 hours.
If you set the `derp.update_frequency` configuration option, it is recommended
to change it to `3h` to ensure that the headscale instance gets the latest
DERPMap updates when upstream is changed.
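
For reference, a minimal sketch of the relevant snippet in `config.yaml` (layout inferred from the `derp.update_frequency` option named above):

```yaml
derp:
  # Fetch DERPMap updates from upstream every 3 hours (previous default: 24h)
  update_frequency: 3h
```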

### Autogroups

This release adds support for the three missing autogroups: `self`
(experimental), `member`, and `tagged`. Please refer to the
[documentation](https://tailscale.com/kb/1018/autogroups/) for a detailed
explanation.

`autogroup:self` is marked as experimental and should be used with caution, but
we need help testing it. Experimental here means two things: first, generating
the packet filter from policies that use `autogroup:self` is very expensive, and
it might perform poorly, or simply not work, on Headscale installations with a
large number of nodes. Second, the implementation might have bugs or edge cases
we are not aware of, meaning that nodes or users might gain _more_ access than
expected. Please report bugs.
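
As an illustration only, a policy snippet using the new autogroups might look like the following sketch (Tailscale policy syntax; `tag:monitoring` is a made-up example tag, adapt it to your own tailnet):

```jsonc
{
  "acls": [
    // Every human member may reach their own devices on any port.
    { "action": "accept", "src": ["autogroup:member"], "dst": ["autogroup:self:*"] },
    // Tagged (non-interactive) nodes may reach a monitoring service.
    { "action": "accept", "src": ["autogroup:tagged"], "dst": ["tag:monitoring:9090"] }
  ]
}
```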

### Node store (in memory database)

Under the hood, we have added a new datastructure to store nodes in memory. This
datastructure is called `NodeStore` and aims to reduce the reading and writing
of nodes to the database layer. We have not benchmarked it, but expect it to
improve performance for read heavy workloads. We think of it as, "worst case" we
have moved the bottleneck somewhere else, and "best case" we should see a good
improvement in compute resource usage at the expense of memory usage. We are
quite excited for this change and think it will make it easier for us to improve
the code base over time and make it more correct and efficient.

### BREAKING

- Remove support for 32-bit binaries
@@ -55,18 +92,40 @@ systemctl start headscale
- **IMPORTANT: Backup your SQLite database before upgrading**
- Introduces safer table renaming migration strategy
- Addresses longstanding database integrity issues

- Add flag to directly manipulate the policy in the database
  [#2765](https://github.com/juanfont/headscale/pull/2765)
- DERPmap update frequency default changed from 24h to 3h
  [#2741](https://github.com/juanfont/headscale/pull/2741)
- DERPmap update mechanism has been improved with retry, and is now failing
  conservatively, preserving the old map upon failure.
  [#2741](https://github.com/juanfont/headscale/pull/2741)
- Add support for `autogroup:member`, `autogroup:tagged`
  [#2572](https://github.com/juanfont/headscale/pull/2572)
- Fix bug where return routes were being removed by policy
  [#2767](https://github.com/juanfont/headscale/pull/2767)
- Remove policy v1 code [#2600](https://github.com/juanfont/headscale/pull/2600)
- Refactor Debian/Ubuntu packaging and drop support for Ubuntu 20.04.
  [#2614](https://github.com/juanfont/headscale/pull/2614)
- Support client verify for DERP
  [#2046](https://github.com/juanfont/headscale/pull/2046)
- Remove redundant check regarding `noise` config
  [#2658](https://github.com/juanfont/headscale/pull/2658)
- Refactor OpenID Connect documentation
  [#2625](https://github.com/juanfont/headscale/pull/2625)
- Don't crash if config file is missing
  [#2656](https://github.com/juanfont/headscale/pull/2656)
- Adds `/robots.txt` endpoint to avoid crawlers
  [#2643](https://github.com/juanfont/headscale/pull/2643)
- OIDC: Use group claim from UserInfo
  [#2663](https://github.com/juanfont/headscale/pull/2663)
- OIDC: Update user with claims from UserInfo _before_ comparing with allowed
  groups, email and domain
  [#2663](https://github.com/juanfont/headscale/pull/2663)
- Policy will now reject invalid fields, making it easier to spot spelling
  errors [#2764](https://github.com/juanfont/headscale/pull/2764)
- Add FAQ entry on how to recover from an invalid policy in the database
  [#2776](https://github.com/juanfont/headscale/pull/2776)
- EXPERIMENTAL: Add support for `autogroup:self`
  [#2789](https://github.com/juanfont/headscale/pull/2789)
- Add healthcheck command [#2659](https://github.com/juanfont/headscale/pull/2659)

## 0.26.1 (2025-06-06)

@@ -223,8 +282,6 @@ working in v1 and not tested might be broken in v2 (and vice versa).
  [#2438](https://github.com/juanfont/headscale/pull/2438)
- Add documentation for routes
  [#2496](https://github.com/juanfont/headscale/pull/2496)
- Add support for `autogroup:member`, `autogroup:tagged`
  [#2572](https://github.com/juanfont/headscale/pull/2572)

## 0.25.1 (2025-02-25)

CLAUDE.md (405 lines changed)

@@ -205,139 +205,46 @@ The architecture supports incremental development:
- **Policy Tests**: ACL rule evaluation and edge cases
|
||||
- **Performance Tests**: NodeStore and high-frequency operation validation
|
||||
|
||||
## Integration Test System
|
||||
## Integration Testing System
|
||||
|
||||
### Overview
|
||||
Integration tests use Docker containers running real Tailscale clients against a Headscale server. Tests validate end-to-end functionality including routing, ACLs, node lifecycle, and network coordination.
|
||||
Headscale uses Docker-based integration tests with real Tailscale clients to validate end-to-end functionality. The integration test system is complex and requires specialized knowledge for effective execution and debugging.
|
||||
|
||||
### Running Integration Tests
|
||||
### **MANDATORY: Use the headscale-integration-tester Agent**
|
||||
|
||||
**CRITICAL REQUIREMENT**: For ANY integration test execution, analysis, troubleshooting, or validation, you MUST use the `headscale-integration-tester` agent. This agent contains specialized knowledge about:
|
||||
|
||||
- Test execution strategies and timing requirements
|
||||
- Infrastructure vs code issue distinction (99% vs 1% failure patterns)
|
||||
- Security-critical debugging rules and forbidden practices
|
||||
- Comprehensive artifact analysis workflows
|
||||
- Real-world failure patterns from HA debugging experiences
|
||||
|
||||
### Quick Reference Commands
|
||||
|
||||
**System Requirements**
|
||||
```bash
|
||||
# Check if your system is ready
|
||||
# Check system requirements (always run first)
|
||||
go run ./cmd/hi doctor
|
||||
```
|
||||
This verifies Docker, Go, required images, and disk space.
|
||||
|
||||
**Test Execution Patterns**
|
||||
```bash
|
||||
# Run a single test (recommended for development)
|
||||
go run ./cmd/hi run "TestSubnetRouterMultiNetwork"
|
||||
# Run single test (recommended for development)
|
||||
go run ./cmd/hi run "TestName"
|
||||
|
||||
# Run with PostgreSQL backend (for database-heavy tests)
|
||||
go run ./cmd/hi run "TestExpireNode" --postgres
|
||||
# Use PostgreSQL for database-heavy tests
|
||||
go run ./cmd/hi run "TestName" --postgres
|
||||
|
||||
# Run multiple tests with pattern matching
|
||||
go run ./cmd/hi run "TestSubnet*"
|
||||
|
||||
# Run all integration tests (CI/full validation)
|
||||
go test ./integration -timeout 30m
|
||||
# Pattern matching for related tests
|
||||
go run ./cmd/hi run "TestPattern*"
|
||||
```
|
||||
|
||||
**Test Categories & Timing**
|
||||
- **Fast tests** (< 2 min): Basic functionality, CLI operations
|
||||
- **Medium tests** (2-5 min): Route management, ACL validation
|
||||
- **Slow tests** (5+ min): Node expiration, HA failover
|
||||
- **Long-running tests** (10+ min): `TestNodeOnlineStatus` (12 min duration)
|
||||
**Critical Notes**:
|
||||
- Only ONE test can run at a time (Docker port conflicts)
|
||||
- Tests generate ~100MB of logs per run in `control_logs/`
|
||||
- Clean environment before each test: `rm -rf control_logs/202507* && docker system prune -f`
|
||||
|
||||
### Test Infrastructure
|
||||
### Test Artifacts Location
|
||||
All test runs save comprehensive debugging artifacts to `control_logs/TIMESTAMP-ID/` including server logs, client logs, database dumps, MapResponse protocol data, and Prometheus metrics.
|
||||
|
||||
**Docker Setup**
|
||||
- Headscale server container with configurable database backend
|
||||
- Multiple Tailscale client containers with different versions
|
||||
- Isolated networks per test scenario
|
||||
- Automatic cleanup after test completion
|
||||
|
||||
**Test Artifacts**
|
||||
All test runs save artifacts to `control_logs/TIMESTAMP-ID/`:
|
||||
```
|
||||
control_logs/20250713-213106-iajsux/
|
||||
├── hs-testname-abc123.stderr.log # Headscale server logs
|
||||
├── hs-testname-abc123.stdout.log
|
||||
├── hs-testname-abc123.db # Database snapshot
|
||||
├── hs-testname-abc123_metrics.txt # Prometheus metrics
|
||||
├── hs-testname-abc123-mapresponses/ # Protocol debug data
|
||||
├── ts-client-xyz789.stderr.log # Tailscale client logs
|
||||
├── ts-client-xyz789.stdout.log
|
||||
└── ts-client-xyz789_status.json # Client status dump
|
||||
```
|
||||
|
||||
### Test Development Guidelines
|
||||
|
||||
**Timing Considerations**
|
||||
Integration tests involve real network operations and Docker container lifecycle:
|
||||
|
||||
```go
|
||||
// ❌ Wrong: Immediate assertions after async operations
|
||||
client.Execute([]string{"tailscale", "set", "--advertise-routes=10.0.0.0/24"})
|
||||
nodes, _ := headscale.ListNodes()
|
||||
require.Len(t, nodes[0].GetAvailableRoutes(), 1) // May fail due to timing
|
||||
|
||||
// ✅ Correct: Wait for async operations to complete
|
||||
client.Execute([]string{"tailscale", "set", "--advertise-routes=10.0.0.0/24"})
|
||||
require.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, nodes[0].GetAvailableRoutes(), 1)
|
||||
}, 10*time.Second, 100*time.Millisecond, "route should be advertised")
|
||||
```
|
||||
|
||||
**Common Test Patterns**
|
||||
- **Route Advertisement**: Use `EventuallyWithT` for route propagation
|
||||
- **Node State Changes**: Wait for NodeStore synchronization
|
||||
- **ACL Policy Changes**: Allow time for policy recalculation
|
||||
- **Network Connectivity**: Use ping tests with retries
|
||||
|
||||
**Test Data Management**
|
||||
```go
|
||||
// Node identification: Don't assume array ordering
|
||||
expectedRoutes := map[string]string{"1": "10.33.0.0/16"}
|
||||
for _, node := range nodes {
|
||||
nodeIDStr := fmt.Sprintf("%d", node.GetId())
|
||||
if route, shouldHaveRoute := expectedRoutes[nodeIDStr]; shouldHaveRoute {
|
||||
// Test the node that should have the route
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Troubleshooting Integration Tests
|
||||
|
||||
**Common Failure Patterns**
|
||||
1. **Timing Issues**: Test assertions run before async operations complete
|
||||
- **Solution**: Use `EventuallyWithT` with appropriate timeouts
|
||||
- **Timeout Guidelines**: 3-5s for route operations, 10s for complex scenarios
|
||||
|
||||
2. **Infrastructure Problems**: Disk space, Docker issues, network conflicts
|
||||
- **Check**: `go run ./cmd/hi doctor` for system health
|
||||
- **Clean**: Remove old test containers and networks
|
||||
|
||||
3. **NodeStore Synchronization**: Tests expecting immediate data availability
|
||||
- **Key Points**: Route advertisements must propagate through poll requests
|
||||
- **Fix**: Wait for NodeStore updates after Hostinfo changes
|
||||
|
||||
4. **Database Backend Differences**: SQLite vs PostgreSQL behavior differences
|
||||
- **Use**: `--postgres` flag for database-intensive tests
|
||||
- **Note**: Some timing characteristics differ between backends
|
||||
|
||||
**Debugging Failed Tests**
|
||||
1. **Check test artifacts** in `control_logs/` for detailed logs
|
||||
2. **Examine MapResponse JSON** files for protocol-level debugging
|
||||
3. **Review Headscale stderr logs** for server-side error messages
|
||||
4. **Check Tailscale client status** for network-level issues
|
||||
|
||||
**Resource Management**
|
||||
- Tests require significant disk space (each run ~100MB of logs)
|
||||
- Docker containers are cleaned up automatically on success
|
||||
- Failed tests may leave containers running - clean manually if needed
|
||||
- Use `docker system prune` periodically to reclaim space
|
||||
|
||||
### Best Practices for Test Modifications
|
||||
|
||||
1. **Always test locally** before committing integration test changes
|
||||
2. **Use appropriate timeouts** - too short causes flaky tests, too long slows CI
|
||||
3. **Clean up properly** - ensure tests don't leave persistent state
|
||||
4. **Handle both success and failure paths** in test scenarios
|
||||
5. **Document timing requirements** for complex test scenarios
|
||||
**For all integration test work, use the headscale-integration-tester agent - it contains the complete knowledge needed for effective testing and debugging.**
|
||||
|
||||
## NodeStore Implementation Details
|
||||
|
||||
@@ -352,14 +259,108 @@ for _, node := range nodes {
|
||||
## Testing Guidelines
|
||||
|
||||
### Integration Test Patterns
|
||||
|
||||
#### **CRITICAL: EventuallyWithT Pattern for External Calls**
|
||||
|
||||
**All external calls in integration tests MUST be wrapped in EventuallyWithT blocks** to handle eventual consistency in distributed systems. External calls include:
|
||||
- `client.Status()` - Getting Tailscale client status
|
||||
- `client.Curl()` - Making HTTP requests through clients
|
||||
- `client.Traceroute()` - Running network diagnostics
|
||||
- `headscale.ListNodes()` - Querying headscale server state
|
||||
- Any other calls that interact with external systems or network operations
|
||||
|
||||
**Key Rules**:
|
||||
1. **Never use bare `require.NoError(t, err)` with external calls** - Always wrap in EventuallyWithT
|
||||
2. **Keep related assertions together** - If multiple assertions depend on the same external call, keep them in the same EventuallyWithT block
|
||||
3. **Split unrelated external calls** - Different external calls should be in separate EventuallyWithT blocks
|
||||
4. **Never nest EventuallyWithT calls** - Each EventuallyWithT should be at the same level
|
||||
5. **Declare shared variables at function scope** - Variables used across multiple EventuallyWithT blocks must be declared before first use
|
||||
|
||||
**Examples**:
|
||||
|
||||
```go
|
||||
// Use EventuallyWithT for async operations
|
||||
require.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
// CORRECT: External call wrapped in EventuallyWithT
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
status, err := client.Status()
|
||||
assert.NoError(c, err)
|
||||
|
||||
// Related assertions using the same status call
|
||||
for _, peerKey := range status.Peers() {
|
||||
peerStatus := status.Peer[peerKey]
|
||||
assert.NotNil(c, peerStatus.PrimaryRoutes)
|
||||
requirePeerSubnetRoutesWithCollect(c, peerStatus, expectedRoutes)
|
||||
}
|
||||
}, 5*time.Second, 200*time.Millisecond, "Verifying client status and routes")
|
||||
|
||||
// INCORRECT: Bare external call without EventuallyWithT
|
||||
status, err := client.Status() // ❌ Will fail intermittently
|
||||
require.NoError(t, err)
|
||||
|
||||
// CORRECT: Separate EventuallyWithT for different external calls
|
||||
// First external call - headscale.ListNodes()
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
// Check expected state
|
||||
}, 10*time.Second, 100*time.Millisecond, "description")
|
||||
assert.Len(c, nodes, 2)
|
||||
requireNodeRouteCountWithCollect(c, nodes[0], 2, 2, 2)
|
||||
}, 10*time.Second, 500*time.Millisecond, "route state changes should propagate to nodes")
|
||||
|
||||
// Second external call - client.Status()
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
status, err := client.Status()
|
||||
assert.NoError(c, err)
|
||||
|
||||
for _, peerKey := range status.Peers() {
|
||||
peerStatus := status.Peer[peerKey]
|
||||
requirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()})
|
||||
}
|
||||
}, 10*time.Second, 500*time.Millisecond, "routes should be visible to client")
|
||||
|
||||
// INCORRECT: Multiple unrelated external calls in same EventuallyWithT
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes() // ❌ First external call
|
||||
assert.NoError(c, err)
|
||||
|
||||
status, err := client.Status() // ❌ Different external call - should be separate
|
||||
assert.NoError(c, err)
|
||||
}, 10*time.Second, 500*time.Millisecond, "mixed calls")
|
||||
|
||||
// CORRECT: Variable scoping for shared data
|
||||
var (
|
||||
srs1, srs2, srs3 *ipnstate.Status
|
||||
clientStatus *ipnstate.Status
|
||||
srs1PeerStatus *ipnstate.PeerStatus
|
||||
)
|
||||
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
srs1 = subRouter1.MustStatus() // = not :=
|
||||
srs2 = subRouter2.MustStatus()
|
||||
clientStatus = client.MustStatus()
|
||||
|
||||
srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]
|
||||
// assertions...
|
||||
}, 5*time.Second, 200*time.Millisecond, "checking router status")
|
||||
|
||||
// CORRECT: Wrapping client operations
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
result, err := client.Curl(weburl)
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, result, 13)
|
||||
}, 5*time.Second, 200*time.Millisecond, "Verifying HTTP connectivity")
|
||||
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
tr, err := client.Traceroute(webip)
|
||||
assert.NoError(c, err)
|
||||
assertTracerouteViaIPWithCollect(c, tr, expectedRouter.MustIPv4())
|
||||
}, 5*time.Second, 200*time.Millisecond, "Verifying network path")
|
||||
```
|
||||
|
||||
**Helper Functions**:
|
||||
- Use `requirePeerSubnetRoutesWithCollect` instead of `requirePeerSubnetRoutes` inside EventuallyWithT
|
||||
- Use `requireNodeRouteCountWithCollect` instead of `requireNodeRouteCount` inside EventuallyWithT
|
||||
- Use `assertTracerouteViaIPWithCollect` instead of `assertTracerouteViaIP` inside EventuallyWithT
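
As a rough sketch of what such a collector-aware helper can look like (the package name, the route-field getters, and the meaning of the three counts are assumptions for illustration; the real helpers live in the integration test code):

```go
package integration // assumed package name

import (
	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/stretchr/testify/assert"
)

// requireNodeRouteCountWithCollect reports failures through *assert.CollectT so
// that EventuallyWithT can retry the whole block instead of aborting the test.
// The interpretation of the three counts (announced/approved/subnet) is an
// assumption based on how the helper is called in the examples above.
func requireNodeRouteCountWithCollect(c *assert.CollectT, node *v1.Node, announced, approved, subnet int) {
	assert.Len(c, node.GetAvailableRoutes(), announced, "announced routes")
	assert.Len(c, node.GetApprovedRoutes(), approved, "approved routes")
	assert.Len(c, node.GetSubnetRoutes(), subnet, "active subnet routes")
}
```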
|
||||
|
||||
```go
|
||||
// Node route checking by actual node properties, not array position
|
||||
var routeNode *v1.Node
|
||||
for _, node := range nodes {
|
||||
@@ -375,21 +376,155 @@ for _, node := range nodes {
|
||||
- Infrastructure issues like disk space can cause test failures unrelated to code changes
|
||||
- Use `--postgres` flag when testing database-heavy scenarios
|
||||
|
||||
## Quality Assurance and Testing Requirements
|
||||
|
||||
### **MANDATORY: Always Use Specialized Testing Agents**
|
||||
|
||||
**CRITICAL REQUIREMENT**: For ANY task involving testing, quality assurance, review, or validation, you MUST use the appropriate specialized agent at the END of your task list. This ensures comprehensive quality validation and prevents regressions.
|
||||
|
||||
**Required Agents for Different Task Types**:
|
||||
|
||||
1. **Integration Testing**: Use `headscale-integration-tester` agent for:
|
||||
- Running integration tests with `cmd/hi`
|
||||
- Analyzing test failures and artifacts
|
||||
- Troubleshooting Docker-based test infrastructure
|
||||
- Validating end-to-end functionality changes
|
||||
|
||||
2. **Quality Control**: Use `quality-control-enforcer` agent for:
|
||||
- Code review and validation
|
||||
- Ensuring best practices compliance
|
||||
- Preventing common pitfalls and anti-patterns
|
||||
- Validating architectural decisions
|
||||
|
||||
**Agent Usage Pattern**: Always add the appropriate agent as the FINAL step in any task list to ensure quality validation occurs after all work is complete.
|
||||
|
||||
### Integration Test Debugging Reference
|
||||
|
||||
Test artifacts are preserved in `control_logs/TIMESTAMP-ID/` including:
|
||||
- Headscale server logs (stderr/stdout)
|
||||
- Tailscale client logs and status
|
||||
- Database dumps and network captures
|
||||
- MapResponse JSON files for protocol debugging
|
||||
|
||||
**For integration test issues, ALWAYS use the headscale-integration-tester agent - do not attempt manual debugging.**
|
||||
|
||||
## EventuallyWithT Pattern for Integration Tests
|
||||
|
||||
### Overview
|
||||
EventuallyWithT is a testing pattern used to handle eventual consistency in distributed systems. In Headscale integration tests, many operations are asynchronous - clients advertise routes, the server processes them, updates propagate through the network. EventuallyWithT allows tests to wait for these operations to complete while making assertions.
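
The basic call shape is a condition function that receives an `*assert.CollectT`, plus a timeout, a poll interval, and a description; a minimal sketch (values are illustrative only):

```go
assert.EventuallyWithT(t, func(c *assert.CollectT) {
	// One external call per block, with related assertions against c.
	nodes, err := headscale.ListNodes()
	assert.NoError(c, err)
	assert.Len(c, nodes, 2)
}, 10*time.Second, 500*time.Millisecond, "nodes should reach the expected state")
```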
|
||||
|
||||
### External Calls That Must Be Wrapped
|
||||
The following operations are **external calls** that interact with the headscale server or tailscale clients and MUST be wrapped in EventuallyWithT:
|
||||
- `headscale.ListNodes()` - Queries server state
|
||||
- `client.Status()` - Gets client network status
|
||||
- `client.Curl()` - Makes HTTP requests through the network
|
||||
- `client.Traceroute()` - Performs network diagnostics
|
||||
- `client.Execute()` when running commands that query state
|
||||
- Any operation that reads from the headscale server or tailscale client
|
||||
|
||||
### Operations That Must NOT Be Wrapped
|
||||
The following are **blocking operations** that modify state and should NOT be wrapped in EventuallyWithT:
|
||||
- `tailscale set` commands (e.g., `--advertise-routes`, `--exit-node`)
|
||||
- Any command that changes configuration or state
|
||||
- Use `client.MustStatus()` instead of `client.Status()` when you just need the ID for a blocking operation
|
||||
|
||||
### Five Key Rules for EventuallyWithT
|
||||
|
||||
1. **One External Call Per EventuallyWithT Block**
|
||||
- Each EventuallyWithT should make ONE external call (e.g., ListNodes OR Status)
|
||||
- Related assertions based on that single call can be grouped together
|
||||
- Unrelated external calls must be in separate EventuallyWithT blocks
|
||||
|
||||
2. **Variable Scoping**
|
||||
- Declare variables that need to be shared across EventuallyWithT blocks at function scope
|
||||
- Use `=` for assignment inside EventuallyWithT, not `:=` (unless the variable is only used within that block)
|
||||
- Variables declared with `:=` inside EventuallyWithT are not accessible outside
|
||||
|
||||
3. **No Nested EventuallyWithT**
|
||||
- NEVER put an EventuallyWithT inside another EventuallyWithT
|
||||
- This is a critical anti-pattern that must be avoided
|
||||
|
||||
4. **Use CollectT for Assertions**
|
||||
- Inside EventuallyWithT, use `assert` methods with the CollectT parameter
|
||||
- Helper functions called within EventuallyWithT must accept `*assert.CollectT`
|
||||
|
||||
5. **Descriptive Messages**
|
||||
- Always provide a descriptive message as the last parameter
|
||||
- Message should explain what condition is being waited for
|
||||
|
||||
### Correct Pattern Examples
|
||||
|
||||
```go
|
||||
// CORRECT: Blocking operation NOT wrapped
|
||||
for _, client := range allClients {
|
||||
status := client.MustStatus()
|
||||
command := []string{
|
||||
"tailscale",
|
||||
"set",
|
||||
"--advertise-routes=" + expectedRoutes[string(status.Self.ID)],
|
||||
}
|
||||
_, _, err = client.Execute(command)
|
||||
require.NoErrorf(t, err, "failed to advertise route: %s", err)
|
||||
}
|
||||
|
||||
// CORRECT: Single external call with related assertions
|
||||
var nodes []*v1.Node
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodes, err = headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, nodes, 2)
|
||||
requireNodeRouteCountWithCollect(c, nodes[0], 2, 2, 2)
|
||||
}, 10*time.Second, 500*time.Millisecond, "nodes should have expected route counts")
|
||||
|
||||
// CORRECT: Separate EventuallyWithT for different external call
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
status, err := client.Status()
|
||||
assert.NoError(c, err)
|
||||
for _, peerKey := range status.Peers() {
|
||||
peerStatus := status.Peer[peerKey]
|
||||
requirePeerSubnetRoutesWithCollect(c, peerStatus, expectedPrefixes)
|
||||
}
|
||||
}, 10*time.Second, 500*time.Millisecond, "client should see expected routes")
|
||||
```
|
||||
|
||||
### Incorrect Patterns to Avoid
|
||||
|
||||
```go
|
||||
// INCORRECT: Blocking operation wrapped in EventuallyWithT
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
status, err := client.Status()
|
||||
assert.NoError(c, err)
|
||||
|
||||
// This is a blocking operation - should NOT be in EventuallyWithT!
|
||||
command := []string{
|
||||
"tailscale",
|
||||
"set",
|
||||
"--advertise-routes=" + expectedRoutes[string(status.Self.ID)],
|
||||
}
|
||||
_, _, err = client.Execute(command)
|
||||
assert.NoError(c, err)
|
||||
}, 5*time.Second, 200*time.Millisecond, "wrong pattern")
|
||||
|
||||
// INCORRECT: Multiple unrelated external calls in same EventuallyWithT
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
// First external call
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, nodes, 2)
|
||||
|
||||
// Second unrelated external call - WRONG!
|
||||
status, err := client.Status()
|
||||
assert.NoError(c, err)
|
||||
assert.NotNil(c, status)
|
||||
}, 10*time.Second, 500*time.Millisecond, "mixed operations")
|
||||
```
|
||||
|
||||
## Important Notes
|
||||
|
||||
- **Dependencies**: Use `nix develop` for consistent toolchain (Go, buf, protobuf tools, linting)
|
||||
- **Protocol Buffers**: Changes to `proto/` require `make generate` and should be committed separately
|
||||
- **Code Style**: Enforced via golangci-lint with golines (width 88) and gofumpt formatting
|
||||
- **Database**: Supports both SQLite (development) and PostgreSQL (production/testing)
|
||||
- **Integration Tests**: Require Docker and can consume significant disk space
|
||||
- **Integration Tests**: Require Docker and can consume significant disk space - use headscale-integration-tester agent
|
||||
- **Performance**: NodeStore optimizations are critical for scale - be careful with changes to state management
|
||||
|
||||
## Debugging Integration Tests
|
||||
|
||||
Test artifacts are preserved in `control_logs/TIMESTAMP-ID/` including:
|
||||
- Headscale server logs (stderr/stdout)
|
||||
- Tailscale client logs and status
|
||||
- Database dumps and network captures
|
||||
- MapResponse JSON files for protocol debugging
|
||||
|
||||
When tests fail, check these artifacts first before assuming code issues.
|
||||
- **Quality Assurance**: Always use appropriate specialized agents for testing and validation tasks
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# and are in no way endorsed by Headscale's maintainers as an
|
||||
# official nor supported release or distribution.
|
||||
|
||||
FROM docker.io/golang:1.24-bookworm
|
||||
FROM docker.io/golang:1.25-bookworm
|
||||
ARG VERSION=dev
|
||||
ENV GOPATH /go
|
||||
WORKDIR /go/src/headscale
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
# This Dockerfile is more or less lifted from tailscale/tailscale
|
||||
# to ensure a similar build process when testing the HEAD of tailscale.
|
||||
|
||||
FROM golang:1.24-alpine AS build-env
|
||||
FROM golang:1.25-alpine AS build-env
|
||||
|
||||
WORKDIR /go/src
|
||||
|
||||
|
||||
cmd/headscale/cli/health.go (new file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(healthCmd)
|
||||
}
|
||||
|
||||
var healthCmd = &cobra.Command{
|
||||
Use: "health",
|
||||
Short: "Check the health of the Headscale server",
|
||||
Long: "Check the health of the Headscale server. This command will return an exit code of 0 if the server is healthy, or 1 if it is not.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.Health(ctx, &v1.HealthRequest{})
|
||||
if err != nil {
|
||||
ErrorOutput(err, "Error checking health", output)
|
||||
}
|
||||
|
||||
SuccessOutput(response, "", output)
|
||||
},
|
||||
}
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
survey "github.com/AlecAivazis/survey/v2"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/pterm/pterm"
|
||||
@@ -222,8 +221,6 @@ var listNodeRoutesCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
@@ -290,8 +287,6 @@ var expireNodeCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
@@ -312,8 +307,6 @@ var expireNodeCmd = &cobra.Command{
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetNode(), "Node expired", output)
|
||||
@@ -333,8 +326,6 @@ var renameNodeCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
@@ -360,8 +351,6 @@ var renameNodeCmd = &cobra.Command{
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.GetNode(), "Node renamed", output)
|
||||
@@ -382,8 +371,6 @@ var deleteNodeCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
@@ -401,8 +388,6 @@ var deleteNodeCmd = &cobra.Command{
|
||||
"Error getting node node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
deleteRequest := &v1.DeleteNodeRequest{
|
||||
@@ -412,16 +397,10 @@ var deleteNodeCmd = &cobra.Command{
|
||||
confirm := false
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
prompt := &survey.Confirm{
|
||||
Message: fmt.Sprintf(
|
||||
"Do you want to remove the node %s?",
|
||||
getResponse.GetNode().GetName(),
|
||||
),
|
||||
}
|
||||
err = survey.AskOne(prompt, &confirm)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
confirm = util.YesNo(fmt.Sprintf(
|
||||
"Do you want to remove the node %s?",
|
||||
getResponse.GetNode().GetName(),
|
||||
))
|
||||
}
|
||||
|
||||
if confirm || force {
|
||||
@@ -437,8 +416,6 @@ var deleteNodeCmd = &cobra.Command{
|
||||
"Error deleting node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
SuccessOutput(
|
||||
map[string]string{"Result": "Node deleted"},
|
||||
@@ -465,8 +442,6 @@ var moveNodeCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
user, err := cmd.Flags().GetUint64("user")
|
||||
@@ -476,8 +451,6 @@ var moveNodeCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error getting user: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
@@ -495,8 +468,6 @@ var moveNodeCmd = &cobra.Command{
|
||||
"Error getting node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
moveRequest := &v1.MoveNodeRequest{
|
||||
@@ -511,8 +482,6 @@ var moveNodeCmd = &cobra.Command{
|
||||
"Error moving node: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(moveResponse.GetNode(), "Node moved to another user", output)
|
||||
@@ -535,31 +504,27 @@ If you remove IPv4 or IPv6 prefixes from the config,
|
||||
it can be run to remove the IPs that should no longer
|
||||
be assigned to nodes.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
var err error
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
confirm := false
|
||||
prompt := &survey.Confirm{
|
||||
Message: "Are you sure that you want to assign/remove IPs to/from nodes?",
|
||||
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
confirm = util.YesNo("Are you sure that you want to assign/remove IPs to/from nodes?")
|
||||
}
|
||||
err = survey.AskOne(prompt, &confirm)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if confirm {
|
||||
|
||||
if confirm || force {
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
changes, err := client.BackfillNodeIPs(ctx, &v1.BackfillNodeIPsRequest{Confirmed: confirm})
|
||||
changes, err := client.BackfillNodeIPs(ctx, &v1.BackfillNodeIPsRequest{Confirmed: confirm || force})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
"Error backfilling IPs: "+status.Convert(err).Message(),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(changes, "Node IPs backfilled successfully", output)
|
||||
@@ -758,8 +723,6 @@ var tagCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
tagsToSet, err := cmd.Flags().GetStringSlice("tags")
|
||||
if err != nil {
|
||||
@@ -768,8 +731,6 @@ var tagCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error retrieving list of tags to add to node, %v", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Sending tags to node
|
||||
@@ -784,8 +745,6 @@ var tagCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error while sending tags to headscale: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
@@ -815,8 +774,6 @@ var approveRoutesCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
routes, err := cmd.Flags().GetStringSlice("routes")
|
||||
if err != nil {
|
||||
@@ -825,8 +782,6 @@ var approveRoutesCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error retrieving list of routes to add to node, %v", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Sending routes to node
|
||||
@@ -841,8 +796,6 @@ var approveRoutesCmd = &cobra.Command{
|
||||
fmt.Sprintf("Error while sending routes to headscale: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
|
||||
@@ -6,21 +6,30 @@ import (
|
||||
"os"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol/db"
|
||||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"tailscale.com/types/views"
|
||||
)
|
||||
|
||||
const (
|
||||
bypassFlag = "bypass-grpc-and-access-database-directly"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(policyCmd)
|
||||
|
||||
getPolicy.Flags().BoolP(bypassFlag, "", false, "Uses the headscale config to directly access the database, bypassing gRPC and does not require the server to be running")
|
||||
policyCmd.AddCommand(getPolicy)
|
||||
|
||||
setPolicy.Flags().StringP("file", "f", "", "Path to a policy file in HuJSON format")
|
||||
if err := setPolicy.MarkFlagRequired("file"); err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
setPolicy.Flags().BoolP(bypassFlag, "", false, "Uses the headscale config to directly access the database, bypassing gRPC and does not require the server to be running")
|
||||
policyCmd.AddCommand(setPolicy)
|
||||
|
||||
checkPolicy.Flags().StringP("file", "f", "", "Path to a policy file in HuJSON format")
|
||||
@@ -41,21 +50,58 @@ var getPolicy = &cobra.Command{
|
||||
Aliases: []string{"show", "view", "fetch"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
var policy string
|
||||
if bypass, _ := cmd.Flags().GetBool(bypassFlag); bypass {
|
||||
confirm := false
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
confirm = util.YesNo("DO NOT run this command if an instance of headscale is running, are you sure headscale is not running?")
|
||||
}
|
||||
|
||||
request := &v1.GetPolicyRequest{}
|
||||
if !confirm && !force {
|
||||
ErrorOutput(nil, "Aborting command", output)
|
||||
return
|
||||
}
|
||||
|
||||
response, err := client.GetPolicy(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed loading ACL Policy: %s", err), output)
|
||||
cfg, err := types.LoadServerConfig()
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed loading config: %s", err), output)
|
||||
}
|
||||
|
||||
d, err := db.NewHeadscaleDatabase(
|
||||
cfg.Database,
|
||||
cfg.BaseDomain,
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed to open database: %s", err), output)
|
||||
}
|
||||
|
||||
pol, err := d.GetPolicy()
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed loading Policy from database: %s", err), output)
|
||||
}
|
||||
|
||||
policy = pol.Data
|
||||
} else {
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.GetPolicyRequest{}
|
||||
|
||||
response, err := client.GetPolicy(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed loading ACL Policy: %s", err), output)
|
||||
}
|
||||
|
||||
policy = response.GetPolicy()
|
||||
}
|
||||
|
||||
// TODO(pallabpain): Maybe print this better?
|
||||
// This does not pass output as we dont support yaml, json or json-line
|
||||
// output for this command. It is HuJSON already.
|
||||
SuccessOutput("", response.GetPolicy(), "")
|
||||
SuccessOutput("", policy, "")
|
||||
},
|
||||
}
|
||||
|
||||
@@ -81,14 +127,52 @@ var setPolicy = &cobra.Command{
|
||||
ErrorOutput(err, fmt.Sprintf("Error reading the policy file: %s", err), output)
|
||||
}
|
||||
|
||||
request := &v1.SetPolicyRequest{Policy: string(policyBytes)}
|
||||
_, err = policy.NewPolicyManager(policyBytes, nil, views.Slice[types.NodeView]{})
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error parsing the policy file: %s", err), output)
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
if bypass, _ := cmd.Flags().GetBool(bypassFlag); bypass {
|
||||
confirm := false
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
confirm = util.YesNo("DO NOT run this command if an instance of headscale is running, are you sure headscale is not running?")
|
||||
}
|
||||
|
||||
if _, err := client.SetPolicy(ctx, request); err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)
|
||||
if !confirm && !force {
|
||||
ErrorOutput(nil, "Aborting command", output)
|
||||
return
|
||||
}
|
||||
|
||||
cfg, err := types.LoadServerConfig()
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed loading config: %s", err), output)
|
||||
}
|
||||
|
||||
d, err := db.NewHeadscaleDatabase(
|
||||
cfg.Database,
|
||||
cfg.BaseDomain,
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed to open database: %s", err), output)
|
||||
}
|
||||
|
||||
_, err = d.SetPolicy(string(policyBytes))
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)
|
||||
}
|
||||
} else {
|
||||
request := &v1.SetPolicyRequest{Policy: string(policyBytes)}
|
||||
|
||||
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
if _, err := client.SetPolicy(ctx, request); err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)
|
||||
}
|
||||
}
|
||||
|
||||
SuccessOutput(nil, "Policy updated.", "")
|
||||
|
||||
@@ -71,19 +71,20 @@ func initConfig() {
|
||||
|
||||
disableUpdateCheck := viper.GetBool("disable_check_updates")
|
||||
if !disableUpdateCheck && !machineOutput {
|
||||
versionInfo := types.GetVersionInfo()
|
||||
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
|
||||
types.Version != "dev" {
|
||||
!versionInfo.Dirty {
|
||||
githubTag := &latest.GithubTag{
|
||||
Owner: "juanfont",
|
||||
Repository: "headscale",
|
||||
}
|
||||
res, err := latest.Check(githubTag, types.Version)
|
||||
res, err := latest.Check(githubTag, versionInfo.Version)
|
||||
if err == nil && res.Outdated {
|
||||
//nolint
|
||||
log.Warn().Msgf(
|
||||
"An updated version of Headscale has been found (%s vs. your current %s). Check it out https://github.com/juanfont/headscale/releases\n",
|
||||
res.Current,
|
||||
types.Version,
|
||||
versionInfo.Version,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
survey "github.com/AlecAivazis/survey/v2"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -161,16 +161,10 @@ var destroyUserCmd = &cobra.Command{
|
||||
confirm := false
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
prompt := &survey.Confirm{
|
||||
Message: fmt.Sprintf(
|
||||
"Do you want to remove the user %q (%d) and any associated preauthkeys?",
|
||||
user.GetName(), user.GetId(),
|
||||
),
|
||||
}
|
||||
err := survey.AskOne(prompt, &confirm)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
confirm = util.YesNo(fmt.Sprintf(
|
||||
"Do you want to remove the user %q (%d) and any associated preauthkeys?",
|
||||
user.GetName(), user.GetId(),
|
||||
))
|
||||
}
|
||||
|
||||
if confirm || force {
|
||||
|
||||
@@ -169,7 +169,14 @@ func ErrorOutput(errResult error, override string, outputFormat string) {
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%s\n", output(errOutput{errResult.Error()}, override, outputFormat))
|
||||
var errorMessage string
|
||||
if errResult != nil {
|
||||
errorMessage = errResult.Error()
|
||||
} else {
|
||||
errorMessage = override
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%s\n", output(errOutput{errorMessage}, override, outputFormat))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(versionCmd)
|
||||
versionCmd.Flags().StringP("output", "o", "", "Output format. Empty for human-readable, 'json', 'json-line' or 'yaml'")
|
||||
}
|
||||
|
||||
var versionCmd = &cobra.Command{
|
||||
@@ -15,9 +16,9 @@ var versionCmd = &cobra.Command{
|
||||
Long: "The version of headscale.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
SuccessOutput(map[string]string{
|
||||
"version": types.Version,
|
||||
"commit": types.GitCommitHash,
|
||||
}, types.Version, output)
|
||||
|
||||
info := types.GetVersionInfo()
|
||||
|
||||
SuccessOutput(info, info.String(), output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -104,7 +104,7 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
|
||||
if statsCollector != nil {
|
||||
defer statsCollector.Close()
|
||||
|
||||
|
||||
// Start stats collection immediately - no need for complex retry logic
|
||||
// The new implementation monitors Docker events and will catch containers as they start
|
||||
if err := statsCollector.StartCollection(ctx, runID, config.Verbose); err != nil {
|
||||
@@ -138,9 +138,10 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
|
||||
log.Printf("MEMORY LIMIT VIOLATIONS DETECTED:")
|
||||
log.Printf("=================================")
|
||||
for _, violation := range violations {
|
||||
log.Printf("Container %s exceeded memory limit: %.1f MB > %.1f MB",
|
||||
log.Printf("Container %s exceeded memory limit: %.1f MB > %.1f MB",
|
||||
violation.ContainerName, violation.MaxMemoryMB, violation.LimitMB)
|
||||
}
|
||||
|
||||
return fmt.Errorf("test failed: %d container(s) exceeded memory limits", len(violations))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,9 +24,9 @@ type RunConfig struct {
|
||||
KeepOnFailure bool `flag:"keep-on-failure,default=false,Keep containers on test failure"`
|
||||
LogsDir string `flag:"logs-dir,default=control_logs,Control logs directory"`
|
||||
Verbose bool `flag:"verbose,default=false,Verbose output"`
|
||||
Stats bool `flag:"stats,default=false,Collect and display container resource usage statistics"`
|
||||
HSMemoryLimit float64 `flag:"hs-memory-limit,default=0,Fail test if any Headscale container exceeds this memory limit in MB (0 = disabled)"`
|
||||
TSMemoryLimit float64 `flag:"ts-memory-limit,default=0,Fail test if any Tailscale container exceeds this memory limit in MB (0 = disabled)"`
|
||||
Stats bool `flag:"stats,default=false,Collect and display container resource usage statistics"`
|
||||
HSMemoryLimit float64 `flag:"hs-memory-limit,default=0,Fail test if any Headscale container exceeds this memory limit in MB (0 = disabled)"`
|
||||
TSMemoryLimit float64 `flag:"ts-memory-limit,default=0,Fail test if any Tailscale container exceeds this memory limit in MB (0 = disabled)"`
|
||||
}
|
||||
|
||||
// runIntegrationTest executes the integration test workflow.
|
||||
@@ -74,7 +74,7 @@ func detectGoVersion() string {
|
||||
|
||||
content, err := os.ReadFile(goModPath)
|
||||
if err != nil {
|
||||
return "1.24"
|
||||
return "1.25"
|
||||
}
|
||||
|
||||
lines := splitLines(string(content))
|
||||
@@ -89,7 +89,7 @@ func detectGoVersion() string {
|
||||
}
|
||||
}
|
||||
|
||||
return "1.24"
|
||||
return "1.25"
|
||||
}
|
||||
|
||||
// splitLines splits a string into lines without using strings.Split.
|
||||
|
||||
@@ -3,6 +3,7 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
@@ -17,7 +18,7 @@ import (
|
||||
"github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
// ContainerStats represents statistics for a single container
|
||||
// ContainerStats represents statistics for a single container.
|
||||
type ContainerStats struct {
|
||||
ContainerID string
|
||||
ContainerName string
|
||||
@@ -25,14 +26,14 @@ type ContainerStats struct {
|
||||
mutex sync.RWMutex
|
||||
}
|
||||
|
||||
// StatsSample represents a single stats measurement
|
||||
// StatsSample represents a single stats measurement.
|
||||
type StatsSample struct {
|
||||
Timestamp time.Time
|
||||
CPUUsage float64 // CPU usage percentage
|
||||
MemoryMB float64 // Memory usage in MB
|
||||
}
|
||||
|
||||
// StatsCollector manages collection of container statistics
|
||||
// StatsCollector manages collection of container statistics.
|
||||
type StatsCollector struct {
|
||||
client *client.Client
|
||||
containers map[string]*ContainerStats
|
||||
@@ -42,7 +43,7 @@ type StatsCollector struct {
|
||||
collectionStarted bool
|
||||
}
|
||||
|
||||
// NewStatsCollector creates a new stats collector instance
|
||||
// NewStatsCollector creates a new stats collector instance.
|
||||
func NewStatsCollector() (*StatsCollector, error) {
|
||||
cli, err := createDockerClient()
|
||||
if err != nil {
|
||||
@@ -56,13 +57,13 @@ func NewStatsCollector() (*StatsCollector, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// StartCollection begins monitoring all containers and collecting stats for hs- and ts- containers with matching run ID
|
||||
// StartCollection begins monitoring all containers and collecting stats for hs- and ts- containers with matching run ID.
|
||||
func (sc *StatsCollector) StartCollection(ctx context.Context, runID string, verbose bool) error {
|
||||
sc.mutex.Lock()
|
||||
defer sc.mutex.Unlock()
|
||||
|
||||
if sc.collectionStarted {
|
||||
return fmt.Errorf("stats collection already started")
|
||||
return errors.New("stats collection already started")
|
||||
}
|
||||
|
||||
sc.collectionStarted = true
|
||||
@@ -82,7 +83,7 @@ func (sc *StatsCollector) StartCollection(ctx context.Context, runID string, ver
|
||||
return nil
|
||||
}
|
||||
|
||||
// StopCollection stops all stats collection
|
||||
// StopCollection stops all stats collection.
|
||||
func (sc *StatsCollector) StopCollection() {
|
||||
// Check if already stopped without holding lock
|
||||
sc.mutex.RLock()
|
||||
@@ -94,17 +95,17 @@ func (sc *StatsCollector) StopCollection() {
|
||||
|
||||
// Signal stop to all goroutines
|
||||
close(sc.stopChan)
|
||||
|
||||
|
||||
// Wait for all goroutines to finish
|
||||
sc.wg.Wait()
|
||||
|
||||
|
||||
// Mark as stopped
|
||||
sc.mutex.Lock()
|
||||
sc.collectionStarted = false
|
||||
sc.mutex.Unlock()
|
||||
}
|
||||
|
||||
// monitorExistingContainers checks for existing containers that match our criteria
|
||||
// monitorExistingContainers checks for existing containers that match our criteria.
|
||||
func (sc *StatsCollector) monitorExistingContainers(ctx context.Context, runID string, verbose bool) {
|
||||
defer sc.wg.Done()
|
||||
|
||||
@@ -123,14 +124,14 @@ func (sc *StatsCollector) monitorExistingContainers(ctx context.Context, runID s
|
||||
}
|
||||
}
|
||||
|
||||
// monitorDockerEvents listens for container start events and begins monitoring relevant containers
|
||||
// monitorDockerEvents listens for container start events and begins monitoring relevant containers.
|
||||
func (sc *StatsCollector) monitorDockerEvents(ctx context.Context, runID string, verbose bool) {
|
||||
defer sc.wg.Done()
|
||||
|
||||
filter := filters.NewArgs()
|
||||
filter.Add("type", "container")
|
||||
filter.Add("event", "start")
|
||||
|
||||
|
||||
eventOptions := events.ListOptions{
|
||||
Filters: filter,
|
||||
}
|
||||
@@ -171,7 +172,7 @@ func (sc *StatsCollector) monitorDockerEvents(ctx context.Context, runID string,
|
||||
}
|
||||
}
|
||||
|
||||
// shouldMonitorContainer determines if a container should be monitored
|
||||
// shouldMonitorContainer determines if a container should be monitored.
|
||||
func (sc *StatsCollector) shouldMonitorContainer(cont types.Container, runID string) bool {
|
||||
// Check if it has the correct run ID label
|
||||
if cont.Labels == nil || cont.Labels["hi.run-id"] != runID {
|
||||
@@ -189,7 +190,7 @@ func (sc *StatsCollector) shouldMonitorContainer(cont types.Container, runID str
|
||||
return false
|
||||
}
|
||||
|
||||
// startStatsForContainer begins stats collection for a specific container
|
||||
// startStatsForContainer begins stats collection for a specific container.
|
||||
func (sc *StatsCollector) startStatsForContainer(ctx context.Context, containerID, containerName string, verbose bool) {
|
||||
containerName = strings.TrimPrefix(containerName, "/")
|
||||
|
||||
@@ -215,7 +216,7 @@ func (sc *StatsCollector) startStatsForContainer(ctx context.Context, containerI
|
||||
go sc.collectStatsForContainer(ctx, containerID, verbose)
|
||||
}
|
||||
|
||||
// collectStatsForContainer collects stats for a specific container using Docker API streaming
|
||||
// collectStatsForContainer collects stats for a specific container using Docker API streaming.
|
||||
func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containerID string, verbose bool) {
|
||||
defer sc.wg.Done()
|
||||
|
||||
@@ -262,7 +263,7 @@ func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containe
|
||||
// Get container stats reference without holding the main mutex
|
||||
var containerStats *ContainerStats
|
||||
var exists bool
|
||||
|
||||
|
||||
sc.mutex.RLock()
|
||||
containerStats, exists = sc.containers[containerID]
|
||||
sc.mutex.RUnlock()
|
||||
@@ -284,12 +285,12 @@ func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containe
|
||||
}
|
||||
}
|
||||
|
||||
// calculateCPUPercent calculates CPU usage percentage from Docker stats
|
||||
// calculateCPUPercent calculates CPU usage percentage from Docker stats.
|
||||
func calculateCPUPercent(prevStats, stats *container.Stats) float64 {
|
||||
// CPU calculation based on Docker's implementation
|
||||
cpuDelta := float64(stats.CPUStats.CPUUsage.TotalUsage) - float64(prevStats.CPUStats.CPUUsage.TotalUsage)
|
||||
systemDelta := float64(stats.CPUStats.SystemUsage) - float64(prevStats.CPUStats.SystemUsage)
|
||||
|
||||
|
||||
if systemDelta > 0 && cpuDelta >= 0 {
|
||||
// Calculate CPU percentage: (container CPU delta / system CPU delta) * number of CPUs * 100
|
||||
numCPUs := float64(len(stats.CPUStats.CPUUsage.PercpuUsage))
|
||||
@@ -297,12 +298,14 @@ func calculateCPUPercent(prevStats, stats *container.Stats) float64 {
|
||||
// Fallback: if PercpuUsage is not available, assume 1 CPU
|
||||
numCPUs = 1.0
|
||||
}
|
||||
|
||||
return (cpuDelta / systemDelta) * numCPUs * 100.0
|
||||
}
|
||||
|
||||
return 0.0
|
||||
}
|
||||
|
||||
// ContainerStatsSummary represents summary statistics for a container
|
||||
// ContainerStatsSummary represents summary statistics for a container.
|
||||
type ContainerStatsSummary struct {
|
||||
ContainerName string
|
||||
SampleCount int
|
||||
@@ -310,21 +313,21 @@ type ContainerStatsSummary struct {
|
||||
Memory StatsSummary
|
||||
}
|
||||
|
||||
// MemoryViolation represents a container that exceeded the memory limit
|
||||
// MemoryViolation represents a container that exceeded the memory limit.
|
||||
type MemoryViolation struct {
|
||||
ContainerName string
|
||||
MaxMemoryMB float64
|
||||
LimitMB float64
|
||||
}
|
||||
|
||||
// StatsSummary represents min, max, and average for a metric
|
||||
// StatsSummary represents min, max, and average for a metric.
|
||||
type StatsSummary struct {
|
||||
Min float64
|
||||
Max float64
|
||||
Average float64
|
||||
}
|
||||
|
||||
// GetSummary returns a summary of collected statistics
|
||||
// GetSummary returns a summary of collected statistics.
|
||||
func (sc *StatsCollector) GetSummary() []ContainerStatsSummary {
|
||||
// Take snapshot of container references without holding main lock long
|
||||
sc.mutex.RLock()
|
||||
@@ -355,7 +358,7 @@ func (sc *StatsCollector) GetSummary() []ContainerStatsSummary {
|
||||
// Calculate CPU stats
|
||||
cpuValues := make([]float64, len(stats))
|
||||
memoryValues := make([]float64, len(stats))
|
||||
|
||||
|
||||
for i, sample := range stats {
|
||||
cpuValues[i] = sample.CPUUsage
|
||||
memoryValues[i] = sample.MemoryMB
|
||||
@@ -375,7 +378,7 @@ func (sc *StatsCollector) GetSummary() []ContainerStatsSummary {
|
||||
return summaries
|
||||
}
|
||||
|
||||
// calculateStatsSummary calculates min, max, and average for a slice of values
|
||||
// calculateStatsSummary calculates min, max, and average for a slice of values.
|
||||
func calculateStatsSummary(values []float64) StatsSummary {
|
||||
if len(values) == 0 {
|
||||
return StatsSummary{}
|
||||
@@ -402,10 +405,10 @@ func calculateStatsSummary(values []float64) StatsSummary {
|
||||
}
|
||||
}
|
||||
|
||||
// PrintSummary prints the statistics summary to the console
|
||||
// PrintSummary prints the statistics summary to the console.
|
||||
func (sc *StatsCollector) PrintSummary() {
|
||||
summaries := sc.GetSummary()
|
||||
|
||||
|
||||
if len(summaries) == 0 {
|
||||
log.Printf("No container statistics collected")
|
||||
return
|
||||
@@ -413,18 +416,18 @@ func (sc *StatsCollector) PrintSummary() {
|
||||
|
||||
log.Printf("Container Resource Usage Summary:")
|
||||
log.Printf("================================")
|
||||
|
||||
|
||||
for _, summary := range summaries {
|
||||
log.Printf("Container: %s (%d samples)", summary.ContainerName, summary.SampleCount)
|
||||
log.Printf(" CPU Usage: Min: %6.2f%% Max: %6.2f%% Avg: %6.2f%%",
|
||||
log.Printf(" CPU Usage: Min: %6.2f%% Max: %6.2f%% Avg: %6.2f%%",
|
||||
summary.CPU.Min, summary.CPU.Max, summary.CPU.Average)
|
||||
log.Printf(" Memory Usage: Min: %6.1f MB Max: %6.1f MB Avg: %6.1f MB",
|
||||
log.Printf(" Memory Usage: Min: %6.1f MB Max: %6.1f MB Avg: %6.1f MB",
|
||||
summary.Memory.Min, summary.Memory.Max, summary.Memory.Average)
|
||||
log.Printf("")
|
||||
}
|
||||
}
|
||||
|
||||
// CheckMemoryLimits checks if any containers exceeded their memory limits
|
||||
// CheckMemoryLimits checks if any containers exceeded their memory limits.
|
||||
func (sc *StatsCollector) CheckMemoryLimits(hsLimitMB, tsLimitMB float64) []MemoryViolation {
|
||||
if hsLimitMB <= 0 && tsLimitMB <= 0 {
|
||||
return nil
|
||||
@@ -455,14 +458,14 @@ func (sc *StatsCollector) CheckMemoryLimits(hsLimitMB, tsLimitMB float64) []Memo
|
||||
return violations
|
||||
}
|
||||
|
||||
// PrintSummaryAndCheckLimits prints the statistics summary and returns memory violations if any
|
||||
// PrintSummaryAndCheckLimits prints the statistics summary and returns memory violations if any.
|
||||
func (sc *StatsCollector) PrintSummaryAndCheckLimits(hsLimitMB, tsLimitMB float64) []MemoryViolation {
|
||||
sc.PrintSummary()
|
||||
return sc.CheckMemoryLimits(hsLimitMB, tsLimitMB)
|
||||
}
|
||||
|
||||
// Close closes the stats collector and cleans up resources
|
||||
// Close closes the stats collector and cleans up resources.
|
||||
func (sc *StatsCollector) Close() error {
|
||||
sc.StopCollection()
|
||||
return sc.client.Close()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,7 +68,7 @@ func extractDirectoryFromTar(tarReader io.Reader, targetDir string) error {
|
||||
continue // Skip potentially dangerous paths
|
||||
}
|
||||
|
||||
targetPath := filepath.Join(targetDir, filepath.Base(cleanName))
|
||||
targetPath := filepath.Join(targetDir, cleanName)
|
||||
|
||||
switch header.Typeflag {
|
||||
case tar.TypeDir:
|
||||
@@ -77,6 +77,11 @@ func extractDirectoryFromTar(tarReader io.Reader, targetDir string) error {
|
||||
return fmt.Errorf("failed to create directory %s: %w", targetPath, err)
|
||||
}
|
||||
case tar.TypeReg:
|
||||
// Ensure parent directories exist
|
||||
if err := os.MkdirAll(filepath.Dir(targetPath), 0o755); err != nil {
|
||||
return fmt.Errorf("failed to create parent directories for %s: %w", targetPath, err)
|
||||
}
|
||||
|
||||
// Create file
|
||||
outFile, err := os.Create(targetPath)
|
||||
if err != nil {
|
||||
|
||||
cmd/mapresponses/main.go (new file, 61 lines)
@@ -0,0 +1,61 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/creachadair/command"
|
||||
"github.com/creachadair/flax"
|
||||
"github.com/juanfont/headscale/hscontrol/mapper"
|
||||
"github.com/juanfont/headscale/integration/integrationutil"
|
||||
)
|
||||
|
||||
type MapConfig struct {
|
||||
Directory string `flag:"directory,Directory to read map responses from"`
|
||||
}
|
||||
|
||||
var mapConfig MapConfig
|
||||
|
||||
func main() {
|
||||
root := command.C{
|
||||
Name: "mapresponses",
|
||||
Help: "MapResponses is a tool to map and compare map responses from a directory",
|
||||
Commands: []*command.C{
|
||||
{
|
||||
Name: "online",
|
||||
Help: "",
|
||||
Usage: "run [test-pattern] [flags]",
|
||||
SetFlags: command.Flags(flax.MustBind, &mapConfig),
|
||||
Run: runOnline,
|
||||
},
|
||||
command.HelpCommand(nil),
|
||||
},
|
||||
}
|
||||
|
||||
env := root.NewEnv(nil).MergeFlags(true)
|
||||
command.RunOrFail(env, os.Args[1:])
|
||||
}
|
||||
|
||||
// runOnline reads recorded map responses from a directory and prints the expected online map.
|
||||
func runOnline(env *command.Env) error {
|
||||
if mapConfig.Directory == "" {
|
||||
return fmt.Errorf("directory is required")
|
||||
}
|
||||
|
||||
resps, err := mapper.ReadMapResponsesFromDirectory(mapConfig.Directory)
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading map responses from directory: %w", err)
|
||||
}
|
||||
|
||||
expected := integrationutil.BuildExpectedOnlineMap(resps)
|
||||
|
||||
out, err := json.MarshalIndent(expected, "", " ")
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshaling expected online map: %w", err)
|
||||
}
|
||||
|
||||
os.Stderr.Write(out)
|
||||
os.Stderr.Write([]byte("\n"))
|
||||
return nil
|
||||
}
|
||||
@@ -60,7 +60,9 @@ prefixes:
|
||||
v6: fd7a:115c:a1e0::/48
|
||||
|
||||
# Strategy used for allocation of IPs to nodes, available options:
|
||||
# - sequential (default): assigns the next free IP from the previous given IP.
|
||||
# - sequential (default): assigns the next free IP from the previous given
|
||||
# IP. A best-effort approach is used and Headscale might leave holes in the
|
||||
# IP range or fill up existing holes in the IP range.
|
||||
# - random: assigns the next free IP from a pseudo-random IP generator (crypto/rand).
|
||||
allocation: sequential
|
||||
|
||||
@@ -105,7 +107,7 @@ derp:
|
||||
|
||||
# For better connection stability (especially when using an Exit-Node and DNS is not working),
|
||||
# it is possible to optionally add the public IPv4 and IPv6 address to the Derp-Map using:
|
||||
ipv4: 1.2.3.4
|
||||
ipv4: 198.51.100.1
|
||||
ipv6: 2001:db8::1
|
||||
|
||||
# List of externally available DERP maps encoded in JSON
|
||||
@@ -128,7 +130,7 @@ derp:
|
||||
auto_update_enabled: true
|
||||
|
||||
# How often should we check for DERP updates?
|
||||
update_frequency: 24h
|
||||
update_frequency: 3h
|
||||
|
||||
# Disables the automatic check for headscale updates on startup
|
||||
disable_check_updates: false
|
||||
@@ -275,9 +277,9 @@ dns:
|
||||
# `hostname.base_domain` (e.g., _myhost.example.com_).
|
||||
base_domain: example.com
|
||||
|
||||
# Whether to use the local DNS settings of a node (default) or override the
|
||||
# local DNS settings and force the use of Headscale's DNS configuration.
|
||||
override_local_dns: false
|
||||
# Whether to use the local DNS settings of a node or override the local DNS
|
||||
# settings (default) and force the use of Headscale's DNS configuration.
|
||||
override_local_dns: true
|
||||
|
||||
# List of DNS servers to expose to clients.
|
||||
nameservers:
|
||||
@@ -293,8 +295,7 @@ dns:
|
||||
|
||||
# Split DNS (see https://tailscale.com/kb/1054/dns/),
|
||||
# a map of domains and which DNS server to use for each.
|
||||
split:
|
||||
{}
|
||||
split: {}
|
||||
# foo.bar.com:
|
||||
# - 1.1.1.1
|
||||
# darp.headscale.net:
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
# If you plan to somehow use headscale, please deploy your own DERP infra: https://tailscale.com/kb/1118/custom-derp-servers/
|
||||
regions:
|
||||
1: null # Disable DERP region with ID 1
|
||||
900:
|
||||
regionid: 900
|
||||
regioncode: custom
|
||||
@@ -7,9 +8,9 @@ regions:
|
||||
nodes:
|
||||
- name: 900a
|
||||
regionid: 900
|
||||
hostname: myderp.mydomain.no
|
||||
ipv4: 123.123.123.123
|
||||
ipv6: "2604:a880:400:d1::828:b001"
|
||||
hostname: myderp.example.com
|
||||
ipv4: 198.51.100.1
|
||||
ipv6: 2001:db8::1
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derpport: 0
|
||||
|
||||
@@ -44,6 +44,15 @@ For convenience, we also [build container images with headscale](../setup/instal
|
||||
we don't officially support deploying headscale using Docker**. On our [Discord server](https://discord.gg/c84AZQhmpx)
|
||||
we have a "docker-issues" channel where you can ask for Docker-specific help to the community.
|
||||
|
||||
## What is the recommended update path? Can I skip multiple versions while updating?
|
||||
|
||||
Please follow the steps outlined in the [upgrade guide](../setup/upgrade.md) to update your existing Headscale
installation. It's best to update from one stable version to the next (e.g. 0.24.0 → 0.25.1 → 0.26.1) in case
you are multiple releases behind. You should always pick the latest available patch release.

Be sure to check the [changelog](https://github.com/juanfont/headscale/blob/main/CHANGELOG.md) for version-specific
upgrade instructions and breaking changes.
|
||||
|
||||
## Scaling / How many clients does Headscale support?
|
||||
|
||||
It depends. As often stated, Headscale is not enterprise software and our focus
|
||||
@@ -134,3 +143,19 @@ in their output of `tailscale status`. Traffic is still filtered according to th
|
||||
ping` which is always allowed in either direction.
|
||||
|
||||
See also <https://tailscale.com/kb/1087/device-visibility>.
|
||||
|
||||
## My policy is stored in the database and Headscale refuses to start due to an invalid policy. How can I recover?
|
||||
|
||||
Headscale checks if the policy is valid during startup and refuses to start if it detects an error. The error message
|
||||
indicates which part of the policy is invalid. Follow these steps to fix your policy:
|
||||
|
||||
- Dump the policy to a file: `headscale policy get --bypass-grpc-and-access-database-directly > policy.json`
|
||||
- Edit and fixup `policy.json`. Use the command `headscale policy check --file policy.json` to validate the policy.
|
||||
- Load the modified policy: `headscale policy set --bypass-grpc-and-access-database-directly --file policy.json`
|
||||
- Start Headscale as usual (the full sequence is sketched below).
|
||||
|
||||
!!! warning "Full server configuration required"
|
||||
|
||||
The above commands to get/set the policy require a complete server configuration file including database settings. A
|
||||
minimal config to [control Headscale via remote CLI](../ref/remote-cli.md) is not sufficient. You may use `headscale
|
||||
-c /path/to/config.yaml` to specify the path to an alternative configuration file.
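Putting the documented commands together, a minimal recovery session could look like the following sketch. The configuration path and the use of systemd are assumptions; adjust them to your setup.

```shell
# Dump the stored policy, validate it after editing, load it back and start Headscale again.
# Assumes the full server configuration lives at /etc/headscale/config.yaml.
headscale -c /etc/headscale/config.yaml policy get --bypass-grpc-and-access-database-directly > policy.json
$EDITOR policy.json
headscale -c /etc/headscale/config.yaml policy check --file policy.json
headscale -c /etc/headscale/config.yaml policy set --bypass-grpc-and-access-database-directly --file policy.json
sudo systemctl start headscale
```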
|
||||
|
||||
@@ -19,11 +19,11 @@ provides an overview of Headscale's features and compatibility with the Tailscale
|
||||
- [x] [Exit nodes](../ref/routes.md#exit-node)
|
||||
- [x] Dual stack (IPv4 and IPv6)
|
||||
- [x] Ephemeral nodes
|
||||
- [x] Embedded [DERP server](https://tailscale.com/kb/1232/derp-servers)
|
||||
- [x] Embedded [DERP server](../ref/derp.md)
|
||||
- [x] Access control lists ([GitHub label "policy"](https://github.com/juanfont/headscale/labels/policy%20%F0%9F%93%9D))
|
||||
- [x] ACL management via API
|
||||
- [x] Some [Autogroups](https://tailscale.com/kb/1396/targets#autogroups), currently: `autogroup:internet`,
|
||||
`autogroup:nonroot`, `autogroup:member`, `autogroup:tagged`
|
||||
`autogroup:nonroot`, `autogroup:member`, `autogroup:tagged`, `autogroup:self`
|
||||
- [x] [Auto approvers](https://tailscale.com/kb/1337/acl-syntax#auto-approvers) for [subnet
|
||||
routers](../ref/routes.md#automatically-approve-routes-of-a-subnet-router) and [exit
|
||||
nodes](../ref/routes.md#automatically-approve-an-exit-node-with-auto-approvers)
|
||||
|
||||
docs/ref/acls.md | 139
@@ -9,9 +9,38 @@ When using ACLs, the user borders are no longer applied. All machines,
whichever user they belong to, have the ability to communicate with other hosts as
long as the ACLs permit this exchange.
|
||||
|
||||
## ACLs use case example
|
||||
## ACL Setup
|
||||
|
||||
Let's build an example use case for a small business (It may be the place where
|
||||
To enable and configure ACLs in Headscale, you need to specify the path to your ACL policy file in the `policy.path` key in `config.yaml`.
|
||||
|
||||
Your ACL policy file must be formatted using [huJSON](https://github.com/tailscale/hujson).
|
||||
|
||||
Info on how these policies are written can be found
|
||||
[here](https://tailscale.com/kb/1018/acls/).
|
||||
|
||||
Please reload or restart Headscale after updating the ACL file. Headscale may be reloaded either via its systemd service
|
||||
(`sudo systemctl reload headscale`) or by sending a SIGHUP signal (`sudo kill -HUP $(pidof headscale)`) to the main
|
||||
process. Headscale logs the result of ACL policy processing after each reload.
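As a quick sanity check before reloading, the policy file can be validated with the `headscale policy check` command. This is a sketch; the file path is an assumption and should match the file configured in `policy.path`.

```shell
# Validate the policy file, then reload Headscale so it picks up the changes.
headscale policy check --file /etc/headscale/acl.json
sudo systemctl reload headscale
```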
|
||||
|
||||
## Simple Examples
|
||||
|
||||
- [**Allow All**](https://tailscale.com/kb/1192/acl-samples#allow-all-default-acl): If you define an ACL file but completely omit the `"acls"` field from its content, Headscale will default to an "allow all" policy. This means all devices connected to your tailnet will be able to communicate freely with each other.
|
||||
|
||||
```json
|
||||
{}
|
||||
```
|
||||
|
||||
- [**Deny All**](https://tailscale.com/kb/1192/acl-samples#deny-all): To prevent all communication within your tailnet, you can include an empty array for the `"acls"` field in your policy file.
|
||||
|
||||
```json
|
||||
{
|
||||
"acls": []
|
||||
}
|
||||
```
|
||||
|
||||
## Complex Example
|
||||
|
||||
Let's build a more complex example use case for a small business (it may be the place where
ACLs are the most useful).
|
||||
|
||||
We have a small company with a boss, an admin, two developers and an intern.
|
||||
@@ -38,10 +67,6 @@ servers.
|
||||
|
||||

|
||||
|
||||
## ACL setup
|
||||
|
||||
ACLs have to be written in [huJSON](https://github.com/tailscale/hujson).
|
||||
|
||||
When [registering the servers](../usage/getting-started.md#register-a-node) we
|
||||
will need to add the flag `--advertise-tags=tag:<tag1>,tag:<tag2>`, and the user
|
||||
that is registering the server should be allowed to do it. Since anyone can add
|
||||
@@ -49,14 +74,6 @@ tags to a server they can register, the check of the tags is done on headscale
|
||||
server and only valid tags are applied. A tag is valid if the user that is
|
||||
registering it is allowed to do it.
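For example, one of the production application servers from the policy below could be brought up like this. This is a sketch: the login server URL is an assumption, and the tag is taken from the example policy.

```shell
# Register a server and advertise its tag; the tag is only applied if the
# registering user is allowed to use it in the policy.
sudo tailscale up --login-server https://headscale.example.com \
  --advertise-tags=tag:prod-app-servers
```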
|
||||
|
||||
To use ACLs in headscale, you must edit your `config.yaml` file. In there you will find a `policy.path` parameter. This
|
||||
will need to point to your ACL file. More info on how these policies are written can be found
|
||||
[here](https://tailscale.com/kb/1018/acls/).
|
||||
|
||||
Please reload or restart Headscale after updating the ACL file. Headscale may be reloaded either via its systemd service
|
||||
(`sudo systemctl reload headscale`) or by sending a SIGHUP signal (`sudo kill -HUP $(pidof headscale)`) to the main
|
||||
process. Headscale logs the result of ACL policy processing after each reload.
|
||||
|
||||
Here are the ACL's to implement the same permissions as above:
|
||||
|
||||
```json title="acl.json"
|
||||
@@ -177,13 +194,93 @@ Here are the ACL's to implement the same permissions as above:
|
||||
"dst": ["tag:dev-app-servers:80,443"]
|
||||
},
|
||||
|
||||
// We still have to allow internal users communications since nothing guarantees that each user have
|
||||
// their own users.
|
||||
{ "action": "accept", "src": ["boss@"], "dst": ["boss@:*"] },
|
||||
{ "action": "accept", "src": ["dev1@"], "dst": ["dev1@:*"] },
|
||||
{ "action": "accept", "src": ["dev2@"], "dst": ["dev2@:*"] },
|
||||
{ "action": "accept", "src": ["admin1@"], "dst": ["admin1@:*"] },
|
||||
{ "action": "accept", "src": ["intern1@"], "dst": ["intern1@:*"] }
|
||||
// Allow users to access their own devices using autogroup:self (see below for more details about performance impact)
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["autogroup:member"],
|
||||
"dst": ["autogroup:self:*"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Autogroups
|
||||
|
||||
Headscale supports several autogroups that automatically include users, destinations, or devices with specific properties. Autogroups provide a convenient way to write ACL rules without manually listing individual users or devices.
|
||||
|
||||
### `autogroup:internet`
|
||||
|
||||
Allows access to the internet through [exit nodes](routes.md#exit-node). Can only be used in ACL destinations.
|
||||
|
||||
```json
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:users"],
|
||||
"dst": ["autogroup:internet:*"]
|
||||
}
|
||||
```
|
||||
|
||||
### `autogroup:member`
|
||||
|
||||
Includes all users who are direct members of the tailnet. Does not include users from shared devices.
|
||||
|
||||
```json
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["autogroup:member"],
|
||||
"dst": ["tag:prod-app-servers:80,443"]
|
||||
}
|
||||
```
|
||||
|
||||
### `autogroup:tagged`
|
||||
|
||||
Includes all devices that have at least one tag.
|
||||
|
||||
```json
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["autogroup:tagged"],
|
||||
"dst": ["tag:monitoring:9090"]
|
||||
}
|
||||
```
|
||||
|
||||
### `autogroup:self`
|
||||
**(EXPERIMENTAL)**
|
||||
|
||||
!!! warning "The current implementation of `autogroup:self` is inefficient"
|
||||
|
||||
Includes devices where the same user is authenticated on both the source and destination. Does not include tagged devices. Can only be used in ACL destinations.
|
||||
|
||||
```json
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["autogroup:member"],
|
||||
"dst": ["autogroup:self:*"]
|
||||
}
|
||||
```
|
||||
*Using `autogroup:self` may cause performance degradation on the Headscale coordinator server in large deployments, as filter rules must be compiled per-node rather than globally and the current implementation is not very efficient.*
|
||||
|
||||
If you experience performance issues, consider using more specific ACL rules or limiting the use of `autogroup:self`.
|
||||

```json
{
  "acls": [
    // Alternative to autogroup:self: allow each user to reach only their own
    // nodes. Use rules like these if autogroup:self causes performance issues.
    { "action": "accept", "src": ["boss@"], "dst": ["boss@:*"] },
    { "action": "accept", "src": ["dev1@"], "dst": ["dev1@:*"] },
    { "action": "accept", "src": ["dev2@"], "dst": ["dev2@:*"] },
    { "action": "accept", "src": ["admin1@"], "dst": ["admin1@:*"] },
    { "action": "accept", "src": ["intern1@"], "dst": ["intern1@:*"] }
  ]
}
```
|
||||
|
||||
### `autogroup:nonroot`
|
||||
|
||||
Used in Tailscale SSH rules to allow access to any user except root. Can only be used in the `users` field of SSH rules.
|
||||
|
||||
```json
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["autogroup:member"],
|
||||
"dst": ["autogroup:self"],
|
||||
"users": ["autogroup:nonroot"]
|
||||
}
|
||||
```
|
||||
|
||||
docs/ref/derp.md | 175 (new file)
@@ -0,0 +1,175 @@
|
||||
# DERP
|
||||
|
||||
A [DERP (Designated Encrypted Relay for Packets) server](https://tailscale.com/kb/1232/derp-servers) is mainly used to
|
||||
relay traffic between two nodes in case a direct connection can't be established. Headscale provides an embedded DERP
|
||||
server to ensure seamless connectivity between nodes.
|
||||
|
||||
## Configuration
|
||||
|
||||
DERP related settings are configured within the `derp` section of the [configuration file](./configuration.md). The
|
||||
following sections only use a few of the available settings, check the [example configuration](./configuration.md) for
|
||||
all available configuration options.
|
||||
|
||||
### Enable embedded DERP
|
||||
|
||||
Headscale ships with an embedded DERP server which makes it easy to run your own self-hosted DERP server. The embedded
|
||||
DERP server is disabled by default and needs to be enabled. In addition, you should configure the public IPv4 and public
|
||||
IPv6 address of your Headscale server for improved connection stability:
|
||||
|
||||
```yaml title="config.yaml" hl_lines="3-5"
|
||||
derp:
|
||||
server:
|
||||
enabled: true
|
||||
ipv4: 198.51.100.1
|
||||
ipv6: 2001:db8::1
|
||||
```
|
||||
|
||||
Keep in mind that [additional ports are needed to run a DERP server](../setup/requirements.md#ports-in-use). Besides
|
||||
relaying traffic, it also uses STUN (udp/3478) to help clients discover their public IP addresses and perform NAT
|
||||
traversal. [Check DERP server connectivity](#check-derp-server-connectivity) to see if everything works.
|
||||
|
||||
### Remove Tailscale's DERP servers
|
||||
|
||||
Once enabled, Headscale's embedded DERP is added to the list of free-to-use [DERP
|
||||
servers](https://tailscale.com/kb/1232/derp-servers) offered by Tailscale Inc. To only use Headscale's embedded DERP
|
||||
server, disable the loading of the default DERP map:
|
||||
|
||||
```yaml title="config.yaml" hl_lines="6"
|
||||
derp:
|
||||
server:
|
||||
enabled: true
|
||||
ipv4: 198.51.100.1
|
||||
ipv6: 2001:db8::1
|
||||
urls: []
|
||||
```
|
||||
|
||||
!!! warning "Single point of failure"
|
||||
|
||||
Removing Tailscale's DERP servers means that there is now just a single DERP server available for clients. This is a
|
||||
single point of failure and could hamper connectivity.
|
||||
|
||||
[Check DERP server connectivity](#check-derp-server-connectivity) with your embedded DERP server before removing
|
||||
Tailscale's DERP servers.
|
||||
|
||||
### Customize DERP map
|
||||
|
||||
The DERP map offered to clients can be customized with a [dedicated YAML-configuration
|
||||
file](https://github.com/juanfont/headscale/blob/main/derp-example.yaml). This allows you to modify previously loaded DERP
|
||||
maps fetched via URL or to offer your own, custom DERP servers to nodes.
|
||||
|
||||
=== "Remove specific DERP regions"
|
||||
|
||||
The free-to-use [DERP servers](https://tailscale.com/kb/1232/derp-servers) are organized into regions via a region
|
||||
ID. You can explicitly disable a specific region by setting its region ID to `null`. The following sample
|
||||
`derp.yaml` disables the New York DERP region (which has the region ID 1):
|
||||
|
||||
```yaml title="derp.yaml"
|
||||
regions:
|
||||
1: null
|
||||
```
|
||||
|
||||
Use the following configuration to serve the default DERP map (excluding New York) to nodes:
|
||||
|
||||
```yaml title="config.yaml" hl_lines="6 7"
|
||||
derp:
|
||||
server:
|
||||
enabled: false
|
||||
urls:
|
||||
- https://controlplane.tailscale.com/derpmap/default
|
||||
paths:
|
||||
- /etc/headscale/derp.yaml
|
||||
```
|
||||
|
||||
=== "Provide custom DERP servers"
|
||||
|
||||
The following sample `derp.yaml` references two custom regions (`custom-east` with ID 900 and `custom-west` with ID 901)
|
||||
with one custom DERP server in each region. Each DERP server offers DERP relay via HTTPS on tcp/443, support for captive
|
||||
portal checks via HTTP on tcp/80 and STUN on udp/3478. See the definitions of
|
||||
[DERPMap](https://pkg.go.dev/tailscale.com/tailcfg#DERPMap),
|
||||
[DERPRegion](https://pkg.go.dev/tailscale.com/tailcfg#DERPRegion) and
|
||||
[DERPNode](https://pkg.go.dev/tailscale.com/tailcfg#DERPNode) for all available options.
|
||||
|
||||
```yaml title="derp.yaml"
|
||||
regions:
|
||||
900:
|
||||
regionid: 900
|
||||
regioncode: custom-east
|
||||
regionname: My region (east)
|
||||
nodes:
|
||||
- name: 900a
|
||||
regionid: 900
|
||||
hostname: derp900a.example.com
|
||||
ipv4: 198.51.100.1
|
||||
ipv6: 2001:db8::1
|
||||
canport80: true
|
||||
901:
|
||||
regionid: 901
|
||||
regioncode: custom-west
|
||||
regionname: My Region (west)
|
||||
nodes:
|
||||
- name: 901a
|
||||
regionid: 901
|
||||
hostname: derp901a.example.com
|
||||
ipv4: 198.51.100.2
|
||||
ipv6: 2001:db8::2
|
||||
canport80: true
|
||||
```
|
||||
|
||||
Use the following configuration to only serve the two DERP servers from the above `derp.yaml`:
|
||||
|
||||
```yaml title="config.yaml" hl_lines="5 6"
|
||||
derp:
|
||||
server:
|
||||
enabled: false
|
||||
urls: []
|
||||
paths:
|
||||
- /etc/headscale/derp.yaml
|
||||
```
|
||||
|
||||
Independent of the custom DERP map, you may choose to [enable the embedded DERP server and have it automatically added
|
||||
to the custom DERP map](#enable-embedded-derp).
|
||||
|
||||
### Verify clients
|
||||
|
||||
Access to DERP servers can be restricted to nodes that are members of your Tailnet. Relay access is denied for unknown
|
||||
clients.
|
||||
|
||||
=== "Embedded DERP"
|
||||
|
||||
Client verification is enabled by default.
|
||||
|
||||
```yaml title="config.yaml" hl_lines="3"
|
||||
derp:
|
||||
server:
|
||||
verify_clients: true
|
||||
```
|
||||
|
||||
=== "3rd-party DERP"
|
||||
|
||||
Tailscale's `derper` provides two parameters to configure client verification:
|
||||
|
||||
- Use the `-verify-client-url` parameter of the `derper` and point it towards the `/verify` endpoint of your
|
||||
Headscale server (e.g. `https://headscale.example.com/verify`). The DERP server will query your Headscale instance
|
||||
as soon as a client connects with it to ask whether access should be allowed or denied. Access is allowed if
|
||||
Headscale knows about the connecting client and denied otherwise.
|
||||
- The parameter `-verify-client-url-fail-open` controls what should happen when the DERP server can't reach the
|
||||
Headscale instance. By default, it will allow access if Headscale is unreachable. A combined invocation is sketched below.
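Combining the two parameters, a 3rd-party DERP server could be started roughly like this. This is a sketch: the hostname and Headscale URL are assumptions, only the `-verify-client-url*` flags come from the list above.

```shell
# Run Tailscale's derper with client verification against Headscale and
# deny relay access if Headscale cannot be reached.
derper -hostname derp.example.com \
  -verify-client-url https://headscale.example.com/verify \
  -verify-client-url-fail-open=false
```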
|
||||
|
||||
## Check DERP server connectivity
|
||||
|
||||
Any Tailscale client may be used to introspect the DERP map and to check for connectivity issues with DERP servers.
|
||||
|
||||
- Display DERP map: `tailscale debug derp-map`
|
||||
- Check connectivity with the embedded DERP[^1]: `tailscale debug derp headscale`
|
||||
|
||||
Additional DERP-related metrics and information are available via the [metrics and debug
|
||||
endpoint](./debug.md#metrics-and-debug-endpoint).
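For instance, DERP-related metrics can be inspected directly. This is a sketch; `127.0.0.1:9090` is an assumed metrics listen address, see the ports listed in the [requirements](../setup/requirements.md#ports-in-use).

```shell
# Query the metrics endpoint and filter for DERP-related series.
curl -s http://127.0.0.1:9090/metrics | grep -i derp
```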
|
||||
|
||||
[^1]:
|
||||
This assumes that the default region code of the [configuration file](./configuration.md) is used.
|
||||
|
||||
## Limitations
|
||||
|
||||
- The embedded DERP server can't be used for Tailscale's captive portal checks as it doesn't support the `/generate_204`
|
||||
endpoint via HTTP on port tcp/80.
|
||||
- There are no speed or throughput optimisations; the main purpose is to assist in node connectivity.
|
||||
@@ -1,7 +1,7 @@
|
||||
# DNS
|
||||
|
||||
Headscale supports [most DNS features](../about/features.md) from Tailscale. DNS related settings can be configured
|
||||
within `dns` section of the [configuration file](./configuration.md).
|
||||
within the `dns` section of the [configuration file](./configuration.md).
|
||||
|
||||
## Setting extra DNS records
|
||||
|
||||
@@ -23,7 +23,7 @@ hostname and port combination "http://hostname-in-magic-dns.myvpn.example.com:30
|
||||
|
||||
!!! warning "Limitations"
|
||||
|
||||
Currently, [only A and AAAA records are processed by Tailscale](https://github.com/tailscale/tailscale/blob/v1.78.3/ipn/ipnlocal/local.go#L4461-L4479).
|
||||
Currently, [only A and AAAA records are processed by Tailscale](https://github.com/tailscale/tailscale/blob/v1.86.5/ipn/ipnlocal/node_backend.go#L662).
|
||||
|
||||
1. Configure extra DNS records using one of the available configuration options:
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ Running headscale behind a reverse proxy is useful when running multiple applica
|
||||
|
||||
The reverse proxy MUST be configured to support WebSockets to communicate with Tailscale clients.
|
||||
|
||||
WebSockets support is also required when using the headscale embedded DERP server. In this case, you will also need to expose the UDP port used for STUN (by default, udp/3478). Please check our [config-example.yaml](https://github.com/juanfont/headscale/blob/main/config-example.yaml).
|
||||
WebSockets support is also required when using the Headscale [embedded DERP server](../derp.md). In this case, you will also need to expose the UDP port used for STUN (by default, udp/3478). Please check our [config-example.yaml](https://github.com/juanfont/headscale/blob/main/config-example.yaml).
|
||||
|
||||
### Cloudflare
|
||||
|
||||
|
||||
@@ -13,3 +13,4 @@ This page collects third-party tools, client libraries, and scripts related to h
|
||||
| headscalebacktosqlite | [Github](https://github.com/bigbozza/headscalebacktosqlite) | Migrate headscale from PostgreSQL back to SQLite |
|
||||
| headscale-pf | [Github](https://github.com/YouSysAdmin/headscale-pf) | Populates user groups based on user groups in Jumpcloud or Authentik |
|
||||
| headscale-client-go | [Github](https://github.com/hibare/headscale-client-go) | A Go client implementation for the Headscale HTTP API. |
|
||||
| headscale-zabbix | [Github](https://github.com/dblanque/headscale-zabbix) | A Zabbix Monitoring Template for the Headscale Service. |
|
||||
|
||||
@@ -184,7 +184,7 @@ You may refer to users in the Headscale policy via:
|
||||
## Supported OIDC claims
|
||||
|
||||
Headscale uses [the standard OIDC claims](https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims) to
|
||||
populate and update its local user profile on each login. OIDC claims are read from the ID Token or from the UserInfo
|
||||
populate and update its local user profile on each login. OIDC claims are read from the ID Token and from the UserInfo
|
||||
endpoint.
|
||||
|
||||
| Headscale profile | OIDC claim | Notes / examples |
|
||||
@@ -230,19 +230,6 @@ are known to work:
|
||||
|
||||
Authelia is fully supported by Headscale.
|
||||
|
||||
#### Additional configuration to authorize users based on filters
|
||||
|
||||
Authelia (4.39.0 or newer) no longer provides standard OIDC claims such as `email` or `groups` via the ID Token. The
|
||||
OIDC `email` and `groups` claims are used to [authorize users with filters](#authorize-users-with-filters). This extra
|
||||
configuration step is **only** needed if you need to authorize access based on one of the following user properties:
|
||||
|
||||
- domain
|
||||
- email address
|
||||
- group membership
|
||||
|
||||
Please follow the instructions from Authelia's documentation on how to [Restore Functionality Prior to Claims
|
||||
Parameter](https://www.authelia.com/integration/openid-connect/openid-connect-1.0-claims/#restore-functionality-prior-to-claims-parameter).
|
||||
|
||||
### Authentik
|
||||
|
||||
- Authentik is fully supported by Headscale.
|
||||
@@ -297,13 +284,15 @@ you need to [authorize access based on group membership](#authorize-users-with-f
|
||||
|
||||
- Create a new client scope `groups` for OpenID Connect:
|
||||
- Configure a `Group Membership` mapper with name `groups` and the token claim name `groups`.
|
||||
- Enable the mapper for the ID Token, Access Token and UserInfo endpoint.
|
||||
- Add the mapper to at least the UserInfo endpoint.
|
||||
- Configure the new client scope for your Headscale client:
|
||||
- Edit the Headscale client.
|
||||
- Search for the client scope `groups`.
|
||||
- Add it with assigned type `Default`.
|
||||
- [Configure the allowed groups in Headscale](#authorize-users-with-filters). Keep in mind that groups in Keycloak start
|
||||
with a leading `/`.
|
||||
- [Configure the allowed groups in Headscale](#authorize-users-with-filters). How groups need to be specified depends on
|
||||
Keycloak's `Full group path` option:
|
||||
- `Full group path` is enabled: groups contain their full path, e.g. `/top/group1`
|
||||
- `Full group path` is disabled: only the name of the group is used, e.g. `group1`
|
||||
|
||||
### Microsoft Entra ID
|
||||
|
||||
@@ -315,3 +304,6 @@ Entra ID is: `https://login.microsoftonline.com/<tenant-UUID>/v2.0`. The followi
|
||||
|
||||
- `domain_hint: example.com` to use your own domain
|
||||
- `prompt: select_account` to force an account picker during login
|
||||
|
||||
Groups for the [allowed groups filter](#authorize-users-with-filters) need to be specified with their group ID instead
|
||||
of the group name.
|
||||
|
||||
@@ -57,14 +57,14 @@ managed by systemd.
|
||||
1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
|
||||
|
||||
```shell
|
||||
sudo wget --output-document=/usr/local/bin/headscale \
|
||||
sudo wget --output-document=/usr/bin/headscale \
|
||||
https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>
|
||||
```
|
||||
|
||||
1. Make `headscale` executable:
|
||||
|
||||
```shell
|
||||
sudo chmod +x /usr/local/bin/headscale
|
||||
sudo chmod +x /usr/bin/headscale
|
||||
```
|
||||
|
||||
1. Add a dedicated local user to run headscale:
|
||||
|
||||
@@ -4,11 +4,35 @@ Headscale should just work as long as the following requirements are met:
|
||||
|
||||
- A server with a public IP address for headscale. A dual-stack setup with a public IPv4 and a public IPv6 address is
|
||||
recommended.
|
||||
- Headscale is served via HTTPS on port 443[^1].
|
||||
- Headscale is served via HTTPS on port 443[^1] and [may use additional ports](#ports-in-use).
|
||||
- A reasonably modern Linux or BSD based operating system.
|
||||
- A dedicated local user account to run headscale.
|
||||
- A little bit of command line knowledge to configure and operate headscale.
|
||||
|
||||
## Ports in use
|
||||
|
||||
The ports in use vary with the intended scenario and enabled features. Some of the listed ports may be changed via the
|
||||
[configuration file](../ref/configuration.md) but we recommend sticking with the default values. A firewall sketch for the publicly exposed ports follows the list below.
|
||||
|
||||
- tcp/80
|
||||
- Expose publicly: yes
|
||||
- HTTP, used by Let's Encrypt to verify ownership via the HTTP-01 challenge.
|
||||
- Only required if the built-in Let's Encrypt client with the HTTP-01 challenge is used. See [TLS](../ref/tls.md) for
|
||||
details.
|
||||
- tcp/443
|
||||
- Expose publicly: yes
|
||||
- HTTPS, required to make Headscale available to Tailscale clients[^1]
|
||||
- Required if the [embedded DERP server](../ref/derp.md) is enabled
|
||||
- udp/3478
|
||||
- Expose publicly: yes
|
||||
- STUN, required if the [embedded DERP server](../ref/derp.md) is enabled
|
||||
- tcp/50443
|
||||
- Expose publicly: yes
|
||||
- Only required if the gRPC interface is used to [remote-control Headscale](../ref/remote-cli.md).
|
||||
- tcp/9090
|
||||
- Expose publicly: no
|
||||
- [Metrics and debug endpoint](../ref/debug.md#metrics-and-debug-endpoint)
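As an illustration, the publicly exposed ports above could be opened with `ufw`. This is a sketch; the use of `ufw` and a full-featured setup (embedded DERP, Let's Encrypt HTTP-01, gRPC remote control) are assumptions.

```shell
# Open the publicly exposed ports; keep tcp/9090 (metrics) internal.
sudo ufw allow 80/tcp    # Let's Encrypt HTTP-01 challenge
sudo ufw allow 443/tcp   # HTTPS for Tailscale clients
sudo ufw allow 3478/udp  # STUN for the embedded DERP server
sudo ufw allow 50443/tcp # gRPC remote control (only if used)
```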
|
||||
|
||||
## Assumptions
|
||||
|
||||
The headscale documentation and the provided examples are written with a few assumptions in mind:
|
||||
|
||||
@@ -6,9 +6,23 @@ This documentation has the goal of showing how a user can use the official Andro
|
||||
|
||||
Install the official Tailscale Android client from the [Google Play Store](https://play.google.com/store/apps/details?id=com.tailscale.ipn) or [F-Droid](https://f-droid.org/packages/com.tailscale.ipn/).
|
||||
|
||||
## Configuring the headscale URL
|
||||
## Connect via normal, interactive login
|
||||
|
||||
- Open the app and select the settings menu in the upper-right corner
|
||||
- Tap on `Accounts`
|
||||
- In the kebab menu icon (three dots) in the upper-right corner select `Use an alternate server`
|
||||
- Enter your server URL (e.g. `https://headscale.example.com`) and follow the instructions
|
||||
- The client connects automatically as soon as the node registration is complete on headscale (see the sketch below). Until then, nothing is
|
||||
visible in the server logs.
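On the Headscale side, the registration typically has to be confirmed once the client shows its registration URL/key. This is a sketch; the user name and key are placeholders.

```shell
# Approve the new Android node on the server; replace the user name and the
# key shown by the client / the registration web page.
headscale nodes register --user android-user --key <registration-key>
```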
|
||||
|
||||
## Connect using a preauthkey
|
||||
|
||||
- Open the app and select the settings menu in the upper-right corner
|
||||
- Tap on `Accounts`
|
||||
- In the kebab menu icon (three dots) in the upper-right corner select `Use an alternate server`
|
||||
- Enter your server URL (e.g. `https://headscale.example.com`). If a login prompt opens, close it and continue
|
||||
- Open the settings menu in the upper-right corner
|
||||
- Tap on `Accounts`
|
||||
- In the kebab menu icon (three dots) in the upper-right corner select `Use an auth key`
|
||||
- Enter your [preauthkey generated from headscale](../getting-started.md#using-a-preauthkey), created as sketched below
|
||||
- If needed, tap `Log in` on the main screen. You should now be connected to your headscale.
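The preauthkey itself is created on the server beforehand, for example like this. This is a sketch; the user name and expiration are assumptions.

```shell
# Create a preauthkey for the user that the Android device should belong to.
headscale preauthkeys create --user android-user --expiration 1h
```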
|
||||
|
||||
flake.lock | 6 (generated)
@@ -20,11 +20,11 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1752012998,
|
||||
"narHash": "sha256-Q82Ms+FQmgOBkdoSVm+FBpuFoeUAffNerR5yVV7SgT8=",
|
||||
"lastModified": 1760533177,
|
||||
"narHash": "sha256-OwM1sFustLHx+xmTymhucZuNhtq98fHIbfO8Swm5L8A=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "2a2130494ad647f953593c4e84ea4df839fbd68c",
|
||||
"rev": "35f590344ff791e6b1d6d6b8f3523467c9217caf",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
flake.nix | 13
@@ -18,8 +18,8 @@
|
||||
{
|
||||
overlay = _: prev: let
|
||||
pkgs = nixpkgs.legacyPackages.${prev.system};
|
||||
buildGo = pkgs.buildGo124Module;
|
||||
vendorHash = "sha256-83L2NMyOwKCHWqcowStJ7Ze/U9CJYhzleDRLrJNhX2g=";
|
||||
buildGo = pkgs.buildGo125Module;
|
||||
vendorHash = "sha256-GUIzlPRsyEq1uSTzRNds9p1uVu4pTeH5PAxrJ5Njhis=";
|
||||
in {
|
||||
headscale = buildGo {
|
||||
pname = "headscale";
|
||||
@@ -97,9 +97,10 @@
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
|
||||
goreleaser = prev.goreleaser.override {
|
||||
buildGoModule = buildGo;
|
||||
};
|
||||
# The package uses buildGo125Module, not the convention.
|
||||
# goreleaser = prev.goreleaser.override {
|
||||
# buildGoModule = buildGo;
|
||||
# };
|
||||
|
||||
gotestsum = prev.gotestsum.override {
|
||||
buildGoModule = buildGo;
|
||||
@@ -124,7 +125,7 @@
|
||||
overlays = [self.overlay];
|
||||
inherit system;
|
||||
};
|
||||
buildDeps = with pkgs; [git go_1_24 gnumake];
|
||||
buildDeps = with pkgs; [git go_1_25 gnumake];
|
||||
devDeps = with pkgs;
|
||||
buildDeps
|
||||
++ [
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.6
|
||||
// protoc-gen-go v1.36.8
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/apikey.proto
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.6
|
||||
// protoc-gen-go v1.36.8
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/device.proto
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.6
|
||||
// protoc-gen-go v1.36.8
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/headscale.proto
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
unsafe "unsafe"
|
||||
)
|
||||
|
||||
@@ -21,11 +22,94 @@ const (
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type HealthRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *HealthRequest) Reset() {
|
||||
*x = HealthRequest{}
|
||||
mi := &file_headscale_v1_headscale_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *HealthRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HealthRequest) ProtoMessage() {}
|
||||
|
||||
func (x *HealthRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_headscale_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HealthRequest.ProtoReflect.Descriptor instead.
|
||||
func (*HealthRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_headscale_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
type HealthResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
DatabaseConnectivity bool `protobuf:"varint,1,opt,name=database_connectivity,json=databaseConnectivity,proto3" json:"database_connectivity,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *HealthResponse) Reset() {
|
||||
*x = HealthResponse{}
|
||||
mi := &file_headscale_v1_headscale_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *HealthResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HealthResponse) ProtoMessage() {}
|
||||
|
||||
func (x *HealthResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_headscale_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HealthResponse.ProtoReflect.Descriptor instead.
|
||||
func (*HealthResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_headscale_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *HealthResponse) GetDatabaseConnectivity() bool {
|
||||
if x != nil {
|
||||
return x.DatabaseConnectivity
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var File_headscale_v1_headscale_proto protoreflect.FileDescriptor
|
||||
|
||||
const file_headscale_v1_headscale_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x1cheadscale/v1/headscale.proto\x12\fheadscale.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17headscale/v1/user.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/node.proto\x1a\x19headscale/v1/apikey.proto\x1a\x19headscale/v1/policy.proto2\xa3\x16\n" +
|
||||
"\x1cheadscale/v1/headscale.proto\x12\fheadscale.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17headscale/v1/user.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/node.proto\x1a\x19headscale/v1/apikey.proto\x1a\x19headscale/v1/policy.proto\"\x0f\n" +
|
||||
"\rHealthRequest\"E\n" +
|
||||
"\x0eHealthResponse\x123\n" +
|
||||
"\x15database_connectivity\x18\x01 \x01(\bR\x14databaseConnectivity2\x80\x17\n" +
|
||||
"\x10HeadscaleService\x12h\n" +
|
||||
"\n" +
|
||||
"CreateUser\x12\x1f.headscale.v1.CreateUserRequest\x1a .headscale.v1.CreateUserResponse\"\x17\x82\xd3\xe4\x93\x02\x11:\x01*\"\f/api/v1/user\x12\x80\x01\n" +
|
||||
@@ -56,109 +140,127 @@ const file_headscale_v1_headscale_proto_rawDesc = "" +
|
||||
"\vListApiKeys\x12 .headscale.v1.ListApiKeysRequest\x1a!.headscale.v1.ListApiKeysResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/apikey\x12v\n" +
|
||||
"\fDeleteApiKey\x12!.headscale.v1.DeleteApiKeyRequest\x1a\".headscale.v1.DeleteApiKeyResponse\"\x1f\x82\xd3\xe4\x93\x02\x19*\x17/api/v1/apikey/{prefix}\x12d\n" +
|
||||
"\tGetPolicy\x12\x1e.headscale.v1.GetPolicyRequest\x1a\x1f.headscale.v1.GetPolicyResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/policy\x12g\n" +
|
||||
"\tSetPolicy\x12\x1e.headscale.v1.SetPolicyRequest\x1a\x1f.headscale.v1.SetPolicyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\x1a\x0e/api/v1/policyB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
"\tSetPolicy\x12\x1e.headscale.v1.SetPolicyRequest\x1a\x1f.headscale.v1.SetPolicyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\x1a\x0e/api/v1/policy\x12[\n" +
|
||||
"\x06Health\x12\x1b.headscale.v1.HealthRequest\x1a\x1c.headscale.v1.HealthResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/healthB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
|
||||
|
||||
var (
|
||||
file_headscale_v1_headscale_proto_rawDescOnce sync.Once
|
||||
file_headscale_v1_headscale_proto_rawDescData []byte
|
||||
)
|
||||
|
||||
func file_headscale_v1_headscale_proto_rawDescGZIP() []byte {
|
||||
file_headscale_v1_headscale_proto_rawDescOnce.Do(func() {
|
||||
file_headscale_v1_headscale_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_headscale_proto_rawDesc), len(file_headscale_v1_headscale_proto_rawDesc)))
|
||||
})
|
||||
return file_headscale_v1_headscale_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_headscale_v1_headscale_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_headscale_v1_headscale_proto_goTypes = []any{
|
||||
(*CreateUserRequest)(nil), // 0: headscale.v1.CreateUserRequest
|
||||
(*RenameUserRequest)(nil), // 1: headscale.v1.RenameUserRequest
|
||||
(*DeleteUserRequest)(nil), // 2: headscale.v1.DeleteUserRequest
|
||||
(*ListUsersRequest)(nil), // 3: headscale.v1.ListUsersRequest
|
||||
(*CreatePreAuthKeyRequest)(nil), // 4: headscale.v1.CreatePreAuthKeyRequest
|
||||
(*ExpirePreAuthKeyRequest)(nil), // 5: headscale.v1.ExpirePreAuthKeyRequest
|
||||
(*ListPreAuthKeysRequest)(nil), // 6: headscale.v1.ListPreAuthKeysRequest
|
||||
(*DebugCreateNodeRequest)(nil), // 7: headscale.v1.DebugCreateNodeRequest
|
||||
(*GetNodeRequest)(nil), // 8: headscale.v1.GetNodeRequest
|
||||
(*SetTagsRequest)(nil), // 9: headscale.v1.SetTagsRequest
|
||||
(*SetApprovedRoutesRequest)(nil), // 10: headscale.v1.SetApprovedRoutesRequest
|
||||
(*RegisterNodeRequest)(nil), // 11: headscale.v1.RegisterNodeRequest
|
||||
(*DeleteNodeRequest)(nil), // 12: headscale.v1.DeleteNodeRequest
|
||||
(*ExpireNodeRequest)(nil), // 13: headscale.v1.ExpireNodeRequest
|
||||
(*RenameNodeRequest)(nil), // 14: headscale.v1.RenameNodeRequest
|
||||
(*ListNodesRequest)(nil), // 15: headscale.v1.ListNodesRequest
|
||||
(*MoveNodeRequest)(nil), // 16: headscale.v1.MoveNodeRequest
|
||||
(*BackfillNodeIPsRequest)(nil), // 17: headscale.v1.BackfillNodeIPsRequest
|
||||
(*CreateApiKeyRequest)(nil), // 18: headscale.v1.CreateApiKeyRequest
|
||||
(*ExpireApiKeyRequest)(nil), // 19: headscale.v1.ExpireApiKeyRequest
|
||||
(*ListApiKeysRequest)(nil), // 20: headscale.v1.ListApiKeysRequest
|
||||
(*DeleteApiKeyRequest)(nil), // 21: headscale.v1.DeleteApiKeyRequest
|
||||
(*GetPolicyRequest)(nil), // 22: headscale.v1.GetPolicyRequest
|
||||
(*SetPolicyRequest)(nil), // 23: headscale.v1.SetPolicyRequest
|
||||
(*CreateUserResponse)(nil), // 24: headscale.v1.CreateUserResponse
|
||||
(*RenameUserResponse)(nil), // 25: headscale.v1.RenameUserResponse
|
||||
(*DeleteUserResponse)(nil), // 26: headscale.v1.DeleteUserResponse
|
||||
(*ListUsersResponse)(nil), // 27: headscale.v1.ListUsersResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 28: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 29: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 30: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateNodeResponse)(nil), // 31: headscale.v1.DebugCreateNodeResponse
|
||||
(*GetNodeResponse)(nil), // 32: headscale.v1.GetNodeResponse
|
||||
(*SetTagsResponse)(nil), // 33: headscale.v1.SetTagsResponse
|
||||
(*SetApprovedRoutesResponse)(nil), // 34: headscale.v1.SetApprovedRoutesResponse
|
||||
(*RegisterNodeResponse)(nil), // 35: headscale.v1.RegisterNodeResponse
|
||||
(*DeleteNodeResponse)(nil), // 36: headscale.v1.DeleteNodeResponse
|
||||
(*ExpireNodeResponse)(nil), // 37: headscale.v1.ExpireNodeResponse
|
||||
(*RenameNodeResponse)(nil), // 38: headscale.v1.RenameNodeResponse
|
||||
(*ListNodesResponse)(nil), // 39: headscale.v1.ListNodesResponse
|
||||
(*MoveNodeResponse)(nil), // 40: headscale.v1.MoveNodeResponse
|
||||
(*BackfillNodeIPsResponse)(nil), // 41: headscale.v1.BackfillNodeIPsResponse
|
||||
(*CreateApiKeyResponse)(nil), // 42: headscale.v1.CreateApiKeyResponse
|
||||
(*ExpireApiKeyResponse)(nil), // 43: headscale.v1.ExpireApiKeyResponse
|
||||
(*ListApiKeysResponse)(nil), // 44: headscale.v1.ListApiKeysResponse
|
||||
(*DeleteApiKeyResponse)(nil), // 45: headscale.v1.DeleteApiKeyResponse
|
||||
(*GetPolicyResponse)(nil), // 46: headscale.v1.GetPolicyResponse
|
||||
(*SetPolicyResponse)(nil), // 47: headscale.v1.SetPolicyResponse
|
||||
(*HealthRequest)(nil), // 0: headscale.v1.HealthRequest
|
||||
(*HealthResponse)(nil), // 1: headscale.v1.HealthResponse
|
||||
(*CreateUserRequest)(nil), // 2: headscale.v1.CreateUserRequest
|
||||
(*RenameUserRequest)(nil), // 3: headscale.v1.RenameUserRequest
|
||||
(*DeleteUserRequest)(nil), // 4: headscale.v1.DeleteUserRequest
|
||||
(*ListUsersRequest)(nil), // 5: headscale.v1.ListUsersRequest
|
||||
(*CreatePreAuthKeyRequest)(nil), // 6: headscale.v1.CreatePreAuthKeyRequest
|
||||
(*ExpirePreAuthKeyRequest)(nil), // 7: headscale.v1.ExpirePreAuthKeyRequest
|
||||
(*ListPreAuthKeysRequest)(nil), // 8: headscale.v1.ListPreAuthKeysRequest
|
||||
(*DebugCreateNodeRequest)(nil), // 9: headscale.v1.DebugCreateNodeRequest
|
||||
(*GetNodeRequest)(nil), // 10: headscale.v1.GetNodeRequest
|
||||
(*SetTagsRequest)(nil), // 11: headscale.v1.SetTagsRequest
|
||||
(*SetApprovedRoutesRequest)(nil), // 12: headscale.v1.SetApprovedRoutesRequest
|
||||
(*RegisterNodeRequest)(nil), // 13: headscale.v1.RegisterNodeRequest
|
||||
(*DeleteNodeRequest)(nil), // 14: headscale.v1.DeleteNodeRequest
|
||||
(*ExpireNodeRequest)(nil), // 15: headscale.v1.ExpireNodeRequest
|
||||
(*RenameNodeRequest)(nil), // 16: headscale.v1.RenameNodeRequest
|
||||
(*ListNodesRequest)(nil), // 17: headscale.v1.ListNodesRequest
|
||||
(*MoveNodeRequest)(nil), // 18: headscale.v1.MoveNodeRequest
|
||||
(*BackfillNodeIPsRequest)(nil), // 19: headscale.v1.BackfillNodeIPsRequest
|
||||
(*CreateApiKeyRequest)(nil), // 20: headscale.v1.CreateApiKeyRequest
|
||||
(*ExpireApiKeyRequest)(nil), // 21: headscale.v1.ExpireApiKeyRequest
|
||||
(*ListApiKeysRequest)(nil), // 22: headscale.v1.ListApiKeysRequest
|
||||
(*DeleteApiKeyRequest)(nil), // 23: headscale.v1.DeleteApiKeyRequest
|
||||
(*GetPolicyRequest)(nil), // 24: headscale.v1.GetPolicyRequest
|
||||
(*SetPolicyRequest)(nil), // 25: headscale.v1.SetPolicyRequest
|
||||
(*CreateUserResponse)(nil), // 26: headscale.v1.CreateUserResponse
|
||||
(*RenameUserResponse)(nil), // 27: headscale.v1.RenameUserResponse
|
||||
(*DeleteUserResponse)(nil), // 28: headscale.v1.DeleteUserResponse
|
||||
(*ListUsersResponse)(nil), // 29: headscale.v1.ListUsersResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 30: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 31: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 32: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateNodeResponse)(nil), // 33: headscale.v1.DebugCreateNodeResponse
|
||||
(*GetNodeResponse)(nil), // 34: headscale.v1.GetNodeResponse
|
||||
(*SetTagsResponse)(nil), // 35: headscale.v1.SetTagsResponse
|
||||
(*SetApprovedRoutesResponse)(nil), // 36: headscale.v1.SetApprovedRoutesResponse
|
||||
(*RegisterNodeResponse)(nil), // 37: headscale.v1.RegisterNodeResponse
|
||||
(*DeleteNodeResponse)(nil), // 38: headscale.v1.DeleteNodeResponse
|
||||
(*ExpireNodeResponse)(nil), // 39: headscale.v1.ExpireNodeResponse
|
||||
(*RenameNodeResponse)(nil), // 40: headscale.v1.RenameNodeResponse
|
||||
(*ListNodesResponse)(nil), // 41: headscale.v1.ListNodesResponse
|
||||
(*MoveNodeResponse)(nil), // 42: headscale.v1.MoveNodeResponse
|
||||
(*BackfillNodeIPsResponse)(nil), // 43: headscale.v1.BackfillNodeIPsResponse
|
||||
(*CreateApiKeyResponse)(nil), // 44: headscale.v1.CreateApiKeyResponse
|
||||
(*ExpireApiKeyResponse)(nil), // 45: headscale.v1.ExpireApiKeyResponse
|
||||
(*ListApiKeysResponse)(nil), // 46: headscale.v1.ListApiKeysResponse
|
||||
(*DeleteApiKeyResponse)(nil), // 47: headscale.v1.DeleteApiKeyResponse
|
||||
(*GetPolicyResponse)(nil), // 48: headscale.v1.GetPolicyResponse
|
||||
(*SetPolicyResponse)(nil), // 49: headscale.v1.SetPolicyResponse
|
||||
}
|
||||
var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
||||
0, // 0: headscale.v1.HeadscaleService.CreateUser:input_type -> headscale.v1.CreateUserRequest
|
||||
1, // 1: headscale.v1.HeadscaleService.RenameUser:input_type -> headscale.v1.RenameUserRequest
|
||||
2, // 2: headscale.v1.HeadscaleService.DeleteUser:input_type -> headscale.v1.DeleteUserRequest
|
||||
3, // 3: headscale.v1.HeadscaleService.ListUsers:input_type -> headscale.v1.ListUsersRequest
|
||||
4, // 4: headscale.v1.HeadscaleService.CreatePreAuthKey:input_type -> headscale.v1.CreatePreAuthKeyRequest
|
||||
5, // 5: headscale.v1.HeadscaleService.ExpirePreAuthKey:input_type -> headscale.v1.ExpirePreAuthKeyRequest
|
||||
6, // 6: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest
|
||||
7, // 7: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest
|
||||
8, // 8: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest
|
||||
9, // 9: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
|
||||
10, // 10: headscale.v1.HeadscaleService.SetApprovedRoutes:input_type -> headscale.v1.SetApprovedRoutesRequest
|
||||
11, // 11: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
|
||||
12, // 12: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
|
||||
13, // 13: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
|
||||
14, // 14: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
|
||||
15, // 15: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
|
||||
16, // 16: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
|
||||
17, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
|
||||
18, // 18: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
|
||||
19, // 19: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
|
||||
20, // 20: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
|
||||
21, // 21: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
|
||||
22, // 22: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
|
||||
23, // 23: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
|
||||
24, // 24: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
|
||||
25, // 25: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
|
||||
26, // 26: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
|
||||
27, // 27: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
|
||||
28, // 28: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
29, // 29: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
30, // 30: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
31, // 31: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
|
||||
32, // 32: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
|
||||
33, // 33: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
|
||||
34, // 34: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
|
||||
35, // 35: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
|
||||
36, // 36: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
|
||||
37, // 37: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
|
||||
38, // 38: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
|
||||
39, // 39: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
|
||||
40, // 40: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
|
||||
41, // 41: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
|
||||
42, // 42: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
|
||||
43, // 43: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
|
||||
44, // 44: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
|
||||
45, // 45: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
|
||||
46, // 46: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
|
||||
47, // 47: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
|
||||
24, // [24:48] is the sub-list for method output_type
|
||||
0, // [0:24] is the sub-list for method input_type
|
||||
2, // 0: headscale.v1.HeadscaleService.CreateUser:input_type -> headscale.v1.CreateUserRequest
|
||||
3, // 1: headscale.v1.HeadscaleService.RenameUser:input_type -> headscale.v1.RenameUserRequest
|
||||
4, // 2: headscale.v1.HeadscaleService.DeleteUser:input_type -> headscale.v1.DeleteUserRequest
|
||||
5, // 3: headscale.v1.HeadscaleService.ListUsers:input_type -> headscale.v1.ListUsersRequest
|
||||
6, // 4: headscale.v1.HeadscaleService.CreatePreAuthKey:input_type -> headscale.v1.CreatePreAuthKeyRequest
|
||||
7, // 5: headscale.v1.HeadscaleService.ExpirePreAuthKey:input_type -> headscale.v1.ExpirePreAuthKeyRequest
|
||||
8, // 6: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest
|
||||
9, // 7: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest
|
||||
10, // 8: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest
|
||||
11, // 9: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
|
||||
12, // 10: headscale.v1.HeadscaleService.SetApprovedRoutes:input_type -> headscale.v1.SetApprovedRoutesRequest
|
||||
13, // 11: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
|
||||
14, // 12: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
|
||||
15, // 13: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
|
||||
16, // 14: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
|
||||
17, // 15: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
|
||||
18, // 16: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
|
||||
19, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
|
||||
20, // 18: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
|
||||
21, // 19: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
|
||||
22, // 20: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
|
||||
23, // 21: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
|
||||
24, // 22: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
|
||||
25, // 23: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
|
||||
0, // 24: headscale.v1.HeadscaleService.Health:input_type -> headscale.v1.HealthRequest
|
||||
26, // 25: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
|
||||
27, // 26: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
|
||||
28, // 27: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
|
||||
29, // 28: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
|
||||
30, // 29: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
31, // 30: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
32, // 31: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
33, // 32: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
|
||||
34, // 33: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
|
||||
35, // 34: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
|
||||
36, // 35: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
|
||||
37, // 36: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
|
||||
38, // 37: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
|
||||
39, // 38: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
|
||||
40, // 39: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
|
||||
41, // 40: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
|
||||
42, // 41: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
|
||||
43, // 42: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
|
||||
44, // 43: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
|
||||
45, // 44: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
|
||||
46, // 45: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
|
||||
47, // 46: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
|
||||
48, // 47: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
|
||||
49, // 48: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
|
||||
1, // 49: headscale.v1.HeadscaleService.Health:output_type -> headscale.v1.HealthResponse
|
||||
25, // [25:50] is the sub-list for method output_type
|
||||
0, // [0:25] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
@@ -180,12 +282,13 @@ func file_headscale_v1_headscale_proto_init() {
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_headscale_proto_rawDesc), len(file_headscale_v1_headscale_proto_rawDesc)),
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_headscale_v1_headscale_proto_goTypes,
|
||||
DependencyIndexes: file_headscale_v1_headscale_proto_depIdxs,
|
||||
MessageInfos: file_headscale_v1_headscale_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_headscale_proto = out.File
|
||||
file_headscale_v1_headscale_proto_goTypes = nil
|
||||
|
||||
@@ -809,6 +809,24 @@ func local_request_HeadscaleService_SetPolicy_0(ctx context.Context, marshaler r
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_Health_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq HealthRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
msg, err := client.Health(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_Health_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq HealthRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
msg, err := server.Health(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
// RegisterHeadscaleServiceHandlerServer registers the http handlers for service HeadscaleService to "mux".
// UnaryRPC :call HeadscaleServiceServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
@@ -1295,6 +1313,26 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser
}
forward_HeadscaleService_SetPolicy_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodGet, pattern_HeadscaleService_Health_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/Health", runtime.WithHTTPPathPattern("/api/v1/health"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_HeadscaleService_Health_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_Health_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})

return nil
}
@@ -1743,6 +1781,23 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
}
forward_HeadscaleService_SetPolicy_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodGet, pattern_HeadscaleService_Health_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/Health", runtime.WithHTTPPathPattern("/api/v1/health"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_HeadscaleService_Health_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_Health_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}

@@ -1771,6 +1826,7 @@ var (
pattern_HeadscaleService_DeleteApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "apikey", "prefix"}, ""))
pattern_HeadscaleService_GetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
pattern_HeadscaleService_SetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
pattern_HeadscaleService_Health_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "health"}, ""))
)

var (
@@ -1798,4 +1854,5 @@ var (
forward_HeadscaleService_DeleteApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetPolicy_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_SetPolicy_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_Health_0 = runtime.ForwardResponseMessage
)

@@ -43,6 +43,7 @@ const (
HeadscaleService_DeleteApiKey_FullMethodName = "/headscale.v1.HeadscaleService/DeleteApiKey"
HeadscaleService_GetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/GetPolicy"
HeadscaleService_SetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/SetPolicy"
HeadscaleService_Health_FullMethodName = "/headscale.v1.HeadscaleService/Health"
)

// HeadscaleServiceClient is the client API for HeadscaleService service.
@@ -78,6 +79,8 @@ type HeadscaleServiceClient interface {
// --- Policy start ---
GetPolicy(ctx context.Context, in *GetPolicyRequest, opts ...grpc.CallOption) (*GetPolicyResponse, error)
SetPolicy(ctx context.Context, in *SetPolicyRequest, opts ...grpc.CallOption) (*SetPolicyResponse, error)
// --- Health start ---
Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error)
}

type headscaleServiceClient struct {
@@ -328,6 +331,16 @@ func (c *headscaleServiceClient) SetPolicy(ctx context.Context, in *SetPolicyReq
return out, nil
}

func (c *headscaleServiceClient) Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(HealthResponse)
err := c.cc.Invoke(ctx, HeadscaleService_Health_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}

// HeadscaleServiceServer is the server API for HeadscaleService service.
// All implementations must embed UnimplementedHeadscaleServiceServer
// for forward compatibility.
@@ -361,6 +374,8 @@ type HeadscaleServiceServer interface {
// --- Policy start ---
GetPolicy(context.Context, *GetPolicyRequest) (*GetPolicyResponse, error)
SetPolicy(context.Context, *SetPolicyRequest) (*SetPolicyResponse, error)
// --- Health start ---
Health(context.Context, *HealthRequest) (*HealthResponse, error)
mustEmbedUnimplementedHeadscaleServiceServer()
}

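The generated HeadscaleServiceServer interface above now requires a Health method. As a rough illustration only, a minimal sketch of a server type that would satisfy it is shown below; the package name, the injected pingDB probe, the generated import path, and the DatabaseConnectivity field name (inferred from the swagger property) are assumptions, not code from this change.

```go
// Sketch only: the shape of a Health implementation against the generated
// interface. pingDB is a hypothetical database connectivity probe.
package grpcv1

import (
	"context"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
)

type healthServer struct {
	v1.UnimplementedHeadscaleServiceServer
	pingDB func(context.Context) error // hypothetical probe, injected by the caller
}

func (s *healthServer) Health(ctx context.Context, _ *v1.HealthRequest) (*v1.HealthResponse, error) {
	// Report database connectivity as a single boolean, matching HealthResponse.
	return &v1.HealthResponse{DatabaseConnectivity: s.pingDB(ctx) == nil}, nil
}
```

Embedding UnimplementedHeadscaleServiceServer keeps such an implementation forward compatible when further RPCs are added to the service.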
@@ -443,6 +458,9 @@ func (UnimplementedHeadscaleServiceServer) GetPolicy(context.Context, *GetPolicy
func (UnimplementedHeadscaleServiceServer) SetPolicy(context.Context, *SetPolicyRequest) (*SetPolicyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SetPolicy not implemented")
}
func (UnimplementedHeadscaleServiceServer) Health(context.Context, *HealthRequest) (*HealthResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Health not implemented")
}
func (UnimplementedHeadscaleServiceServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
func (UnimplementedHeadscaleServiceServer) testEmbeddedByValue() {}

@@ -896,6 +914,24 @@ func _HeadscaleService_SetPolicy_Handler(srv interface{}, ctx context.Context, d
return interceptor(ctx, in, info, handler)
}

func _HeadscaleService_Health_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(HealthRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).Health(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_Health_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).Health(ctx, req.(*HealthRequest))
}
return interceptor(ctx, in, info, handler)
}

// HeadscaleService_ServiceDesc is the grpc.ServiceDesc for HeadscaleService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -999,6 +1035,10 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
MethodName: "SetPolicy",
Handler: _HeadscaleService_SetPolicy_Handler,
},
{
MethodName: "Health",
Handler: _HeadscaleService_Health_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "headscale/v1/headscale.proto",

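Taken together, the generated client now exposes a Health RPC alongside the existing methods. A minimal sketch of calling it from Go follows; the gRPC address, the insecure transport credentials, and the generated package import path are illustrative assumptions, and any authentication metadata headscale may require is omitted here.

```go
// Sketch only: call the new Health RPC through the generated client.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Address and credentials are placeholders for a locally reachable gRPC listener.
	conn, err := grpc.NewClient("127.0.0.1:50443", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	client := v1.NewHeadscaleServiceClient(conn)
	resp, err := client.Health(ctx, &v1.HealthRequest{})
	if err != nil {
		log.Fatal(err)
	}
	// Field name inferred from the swagger property "databaseConnectivity".
	fmt.Println("database connectivity:", resp.GetDatabaseConnectivity())
}
```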
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/node.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/policy.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/preauthkey.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/user.proto

@@ -164,6 +164,29 @@
]
}
},
"/api/v1/health": {
"get": {
"summary": "--- Health start ---",
"operationId": "HeadscaleService_Health",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1HealthResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"tags": [
"HeadscaleService"
]
}
},
"/api/v1/node": {
"get": {
"operationId": "HeadscaleService_ListNodes",
@@ -1056,6 +1079,14 @@
}
}
},
"v1HealthResponse": {
"type": "object",
"properties": {
"databaseConnectivity": {
"type": "boolean"
}
}
},
"v1ListApiKeysResponse": {
"type": "object",
"properties": {

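The swagger addition above documents the new GET /api/v1/health route and its v1HealthResponse body, which the grpc-gateway handlers shown earlier translate into the Health RPC. A minimal sketch of probing the route over HTTP follows; the base URL is a placeholder, and whether the endpoint requires an API key is not shown in this diff, so no Authorization header is set.

```go
// Sketch only: probe the REST health endpoint exposed by the gateway.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// healthResponse mirrors the v1HealthResponse schema from the swagger document.
type healthResponse struct {
	DatabaseConnectivity bool `json:"databaseConnectivity"`
}

func main() {
	// The listen address is an assumption for a local headscale instance.
	resp, err := http.Get("http://127.0.0.1:8080/api/v1/health")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var hr healthResponse
	if err := json.NewDecoder(resp.Body).Decode(&hr); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("status=%d databaseConnectivity=%v\n", resp.StatusCode, hr.DatabaseConnectivity)
}
```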
go.mod (145 changes)
@@ -1,61 +1,59 @@
|
||||
module github.com/juanfont/headscale
|
||||
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.2
|
||||
go 1.25
|
||||
|
||||
require (
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7
|
||||
github.com/arl/statsviz v0.6.0
|
||||
github.com/cenkalti/backoff/v5 v5.0.2
|
||||
github.com/chasefleming/elem-go v0.30.0
|
||||
github.com/coder/websocket v1.8.13
|
||||
github.com/coreos/go-oidc/v3 v3.14.1
|
||||
github.com/creachadair/command v0.1.22
|
||||
github.com/arl/statsviz v0.7.2
|
||||
github.com/cenkalti/backoff/v5 v5.0.3
|
||||
github.com/chasefleming/elem-go v0.31.0
|
||||
github.com/coder/websocket v1.8.14
|
||||
github.com/coreos/go-oidc/v3 v3.16.0
|
||||
github.com/creachadair/command v0.2.0
|
||||
github.com/creachadair/flax v0.0.5
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
|
||||
github.com/docker/docker v28.3.3+incompatible
|
||||
github.com/docker/docker v28.5.1+incompatible
|
||||
github.com/fsnotify/fsnotify v1.9.0
|
||||
github.com/glebarez/sqlite v1.11.0
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.4
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.5
|
||||
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced
|
||||
github.com/gofrs/uuid/v5 v5.3.2
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3
|
||||
github.com/jagottsicher/termcolor v1.0.2
|
||||
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25
|
||||
github.com/ory/dockertest/v3 v3.12.0
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1
|
||||
github.com/pkg/profile v1.7.0
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/common v0.65.0
|
||||
github.com/pterm/pterm v0.12.81
|
||||
github.com/puzpuzpuz/xsync/v4 v4.1.0
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/prometheus/common v0.66.1
|
||||
github.com/pterm/pterm v0.12.82
|
||||
github.com/puzpuzpuz/xsync/v4 v4.2.0
|
||||
github.com/rs/zerolog v1.34.0
|
||||
github.com/samber/lo v1.51.0
|
||||
github.com/sasha-s/go-deadlock v0.3.5
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/spf13/viper v1.20.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/samber/lo v1.52.0
|
||||
github.com/sasha-s/go-deadlock v0.3.6
|
||||
github.com/spf13/cobra v1.10.1
|
||||
github.com/spf13/viper v1.21.0
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33
|
||||
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694
|
||||
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97
|
||||
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
|
||||
golang.org/x/crypto v0.40.0
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
|
||||
golang.org/x/net v0.42.0
|
||||
golang.org/x/oauth2 v0.30.0
|
||||
golang.org/x/sync v0.16.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822
|
||||
google.golang.org/grpc v1.73.0
|
||||
google.golang.org/protobuf v1.36.6
|
||||
golang.org/x/crypto v0.43.0
|
||||
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b
|
||||
golang.org/x/net v0.46.0
|
||||
golang.org/x/oauth2 v0.32.0
|
||||
golang.org/x/sync v0.17.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4
|
||||
google.golang.org/grpc v1.75.1
|
||||
google.golang.org/protobuf v1.36.10
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gorm.io/driver/postgres v1.6.0
|
||||
gorm.io/gorm v1.30.0
|
||||
tailscale.com v1.84.3
|
||||
zgo.at/zcache/v2 v2.2.0
|
||||
gorm.io/gorm v1.31.0
|
||||
tailscale.com v1.86.5
|
||||
zgo.at/zcache/v2 v2.4.1
|
||||
zombiezen.com/go/postgrestest v1.0.1
|
||||
)
|
||||
|
||||
@@ -77,17 +75,17 @@ require (
|
||||
// together, e.g:
|
||||
// go get modernc.org/libc@v1.55.3 modernc.org/sqlite@v1.33.1
|
||||
require (
|
||||
modernc.org/libc v1.62.1 // indirect
|
||||
modernc.org/libc v1.66.10 // indirect
|
||||
modernc.org/mathutil v1.7.1 // indirect
|
||||
modernc.org/memory v1.10.0 // indirect
|
||||
modernc.org/sqlite v1.37.0
|
||||
modernc.org/memory v1.11.0 // indirect
|
||||
modernc.org/sqlite v1.39.1
|
||||
)
|
||||
|
||||
require (
|
||||
atomicgo.dev/cursor v0.2.0 // indirect
|
||||
atomicgo.dev/keyboard v0.2.9 // indirect
|
||||
atomicgo.dev/schedule v0.1.0 // indirect
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
dario.cat/mergo v1.0.2 // indirect
|
||||
filippo.io/edwards25519 v1.1.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
@@ -111,17 +109,18 @@ require (
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/clipperhouse/uax29/v2 v2.2.0 // indirect
|
||||
github.com/containerd/console v1.0.5 // indirect
|
||||
github.com/containerd/continuity v0.4.5 // indirect
|
||||
github.com/containerd/errdefs v0.3.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
|
||||
github.com/creachadair/mds v0.24.3 // indirect
|
||||
github.com/creachadair/mds v0.25.2 // indirect
|
||||
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 // indirect
|
||||
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/cli v28.1.1+incompatible // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/cli v28.5.1+incompatible // indirect
|
||||
github.com/docker/go-connections v0.6.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/felixge/fgprof v0.9.5 // indirect
|
||||
@@ -130,14 +129,12 @@ require (
|
||||
github.com/gaissmai/bart v0.18.0 // indirect
|
||||
github.com/glebarez/go-sqlite v1.22.0 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.0 // indirect
|
||||
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
@@ -145,12 +142,10 @@ require (
|
||||
github.com/google/go-github v17.0.0+incompatible // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect
|
||||
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a // indirect
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gookit/color v1.5.4 // indirect
|
||||
github.com/gorilla/csrf v1.7.3 // indirect
|
||||
github.com/gorilla/securecookie v1.1.2 // indirect
|
||||
github.com/gookit/color v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
github.com/hashicorp/go-version v1.7.0 // indirect
|
||||
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
|
||||
@@ -158,13 +153,12 @@ require (
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||
github.com/jackc/pgx/v5 v5.7.4 // indirect
|
||||
github.com/jackc/pgx/v5 v5.7.6 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.2 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jsimonetti/rtnetlink v1.4.1 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
@@ -172,12 +166,11 @@ require (
|
||||
github.com/lithammer/fuzzysearch v1.1.8 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.19 // indirect
|
||||
github.com/mdlayher/genetlink v1.3.2 // indirect
|
||||
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 // indirect
|
||||
github.com/mdlayher/sdnotify v1.0.0 // indirect
|
||||
github.com/mdlayher/socket v0.5.0 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||
github.com/miekg/dns v1.1.58 // indirect
|
||||
github.com/mitchellh/go-ps v1.0.0 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
@@ -186,27 +179,25 @@ require (
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/ncruces/go-strftime v0.1.9 // indirect
|
||||
github.com/ncruces/go-strftime v1.0.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/opencontainers/runc v1.3.0 // indirect
|
||||
github.com/opencontainers/runc v1.3.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a // indirect
|
||||
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus-community/pro-bing v0.4.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
||||
github.com/safchain/ethtool v0.3.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.9.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.12.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.14.0 // indirect
|
||||
github.com/spf13/cast v1.8.0 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/spf13/afero v1.15.0 // indirect
|
||||
github.com/spf13/cast v1.10.0 // indirect
|
||||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect
|
||||
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect
|
||||
@@ -215,8 +206,8 @@ require (
|
||||
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc // indirect
|
||||
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d // indirect
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 // indirect
|
||||
github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251 // indirect
|
||||
github.com/vishvananda/netns v0.0.4 // indirect
|
||||
github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da // indirect
|
||||
github.com/vishvananda/netns v0.0.5 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
@@ -224,22 +215,22 @@ require (
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
|
||||
go.opentelemetry.io/otel v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.36.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 // indirect
|
||||
golang.org/x/mod v0.26.0 // indirect
|
||||
golang.org/x/sys v0.34.0 // indirect
|
||||
golang.org/x/term v0.33.0 // indirect
|
||||
golang.org/x/text v0.27.0 // indirect
|
||||
golang.org/x/time v0.10.0 // indirect
|
||||
golang.org/x/tools v0.35.0 // indirect
|
||||
golang.org/x/mod v0.29.0 // indirect
|
||||
golang.org/x/sys v0.37.0 // indirect
|
||||
golang.org/x/term v0.36.0 // indirect
|
||||
golang.org/x/text v0.30.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 // indirect
|
||||
)
|
||||
|
||||
|
||||
go.sum (348 changes)
@@ -8,14 +8,12 @@ atomicgo.dev/keyboard v0.2.9 h1:tOsIid3nlPLZ3lwgG8KZMp/SFmr7P0ssEN5JUsm78K8=
|
||||
atomicgo.dev/keyboard v0.2.9/go.mod h1:BC4w9g00XkxH/f1HXhW2sXmJFOCWbKn9xrOunSFtExQ=
|
||||
atomicgo.dev/schedule v0.1.0 h1:nTthAbhZS5YZmgYbb2+DH8uQIZcTlIrd4eYr3UQxEjs=
|
||||
atomicgo.dev/schedule v0.1.0/go.mod h1:xeUa3oAkiuHYh8bKiQBRojqAMq3PXXbJujjb0hw8pEU=
|
||||
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
|
||||
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
|
||||
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
|
||||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
|
||||
filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc=
|
||||
filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA=
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
|
||||
@@ -31,8 +29,6 @@ github.com/MarvinJWendt/testza v0.5.2 h1:53KDo64C1z/h/d/stCYCPY69bt/OSwjq5KpFNwi
|
||||
github.com/MarvinJWendt/testza v0.5.2/go.mod h1:xu53QFE5sCdjtMCKk8YMQ2MnymimEctc4n3EjyIYvEY=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
|
||||
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A=
|
||||
@@ -41,8 +37,8 @@ github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7V
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/arl/statsviz v0.6.0 h1:jbW1QJkEYQkufd//4NDYRSNBpwJNrdzPahF7ZmoGdyE=
|
||||
github.com/arl/statsviz v0.6.0/go.mod h1:0toboo+YGSUXDaS4g1D5TVS4dXs7S7YYT5J/qnW2h8s=
|
||||
github.com/arl/statsviz v0.7.2 h1:xnuIfRiXE4kvxEcfGL+IE3mKH1BXNHuE+eJELIh7oOA=
|
||||
github.com/arl/statsviz v0.7.2/go.mod h1:XlrbiT7xYT03xaW9JMMfD8KFUhBOESJwfyNJu83PbB0=
|
||||
github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM=
|
||||
@@ -86,12 +82,12 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chasefleming/elem-go v0.30.0 h1:BlhV1ekv1RbFiM8XZUQeln1Ikb4D+bu2eDO4agREvok=
|
||||
github.com/chasefleming/elem-go v0.30.0/go.mod h1:hz73qILBIKnTgOujnSMtEj20/epI+f6vg71RUilJAA4=
|
||||
github.com/chasefleming/elem-go v0.31.0 h1:vZsuKmKdv6idnUbu3awMruxTiFqZ/ertFJFAyBCkVhI=
|
||||
github.com/chasefleming/elem-go v0.31.0/go.mod h1:UBmmZfso2LkXA0HZInbcwsmhE/LXFClEcBPNCGeARtA=
|
||||
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
|
||||
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
|
||||
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
|
||||
@@ -103,8 +99,10 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
|
||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/cilium/ebpf v0.17.3 h1:FnP4r16PWYSE4ux6zN+//jMcW4nMVRvuTLVTvCjyyjg=
|
||||
github.com/cilium/ebpf v0.17.3/go.mod h1:G5EDHij8yiLzaqn0WjyfJHvRa+3aDlReIaLVRMvOyJk=
|
||||
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
|
||||
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
|
||||
github.com/clipperhouse/uax29/v2 v2.2.0 h1:ChwIKnQN3kcZteTXMgb1wztSgaU+ZemkgWdohwgs8tY=
|
||||
github.com/clipperhouse/uax29/v2 v2.2.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM=
|
||||
github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g=
|
||||
github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=
|
||||
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
|
||||
github.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/qqsc=
|
||||
github.com/containerd/console v1.0.5/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
|
||||
@@ -118,20 +116,19 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0=
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
|
||||
github.com/coreos/go-oidc/v3 v3.14.1 h1:9ePWwfdwC4QKRlCXsJGou56adA/owXczOzwKdOumLqk=
|
||||
github.com/coreos/go-oidc/v3 v3.14.1/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU=
|
||||
github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow=
|
||||
github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creachadair/command v0.1.22 h1:WmdrURwZdmPD1jm13SjKooaMoqo7mW1qI2BPCShs154=
|
||||
github.com/creachadair/command v0.1.22/go.mod h1:YFc+OMGucqTpxwQg/iJnNg8BMNmRPDK60rYy8ckgKwE=
|
||||
github.com/creachadair/command v0.2.0 h1:qTA9cMMhZePAxFoNdnk6F6nn94s1qPndIg9hJbqI9cA=
|
||||
github.com/creachadair/command v0.2.0/go.mod h1:j+Ar+uYnFsHpkMeV9kGj6lJ45y9u2xqtg8FYy6cm+0o=
|
||||
github.com/creachadair/flax v0.0.5 h1:zt+CRuXQASxwQ68e9GHAOnEgAU29nF0zYMHOCrL5wzE=
|
||||
github.com/creachadair/flax v0.0.5/go.mod h1:F1PML0JZLXSNDMNiRGK2yjm5f+L9QCHchyHBldFymj8=
|
||||
github.com/creachadair/mds v0.24.3 h1:X7cM2ymZSyl4IVWnfyXLxRXMJ6awhbcWvtLPhfnTaqI=
|
||||
github.com/creachadair/mds v0.24.3/go.mod h1:0oeHt9QWu8VfnmskOL4zi2CumjEvB29ScmtOmdrhFeU=
|
||||
github.com/creachadair/mds v0.25.2 h1:xc0S0AfDq5GX9KUR5sLvi5XjA61/P6S5e0xFs1vA18Q=
|
||||
github.com/creachadair/mds v0.25.2/go.mod h1:+s4CFteFRj4eq2KcGHW8Wei3u9NyzSPzNV32EvjyK/Q=
|
||||
github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc=
|
||||
github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0=
|
||||
github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -146,16 +143,14 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
|
||||
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
|
||||
github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k=
|
||||
github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
|
||||
github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/cli v28.5.1+incompatible h1:ESutzBALAD6qyCLqbQSEf1a/U8Ybms5agw59yGVc+yY=
|
||||
github.com/docker/cli v28.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM=
|
||||
github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
|
||||
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI=
|
||||
github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
|
||||
@@ -177,25 +172,25 @@ github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec
|
||||
github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=
|
||||
github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw=
|
||||
github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.4 h1:KOPEt27qy1cNzHfMZbp9YTmEuzkY4F4wrdsJW9WFk1U=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.4/go.mod h1:y/6gPAH6QGAgP1UfHMiXcqGeJ88/GRQbfCReE1JJD5Y=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.5 h1:1OyorA5LtdQw12cyJDEHuTrEV3GiXiIhS4/QTTa/SM8=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.5/go.mod h1:mj9ekk/7CPF3VjopaFvWKN2v7fN3D9d3eEOAXRhi/+M=
|
||||
github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY=
|
||||
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
|
||||
github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY=
|
||||
github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw=
|
||||
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 h1:F8d1AJ6M9UQCavhwmO6ZsrYLfG8zVFWfEfMS2MXPkSY=
|
||||
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
|
||||
github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
|
||||
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced h1:Q311OHjMh/u5E2TITc++WlTP5We0xNseRMkHDyvhW7I=
|
||||
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
|
||||
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 h1:cf60tHxREO3g1nroKr2osU3JWZsJzkfi7rEg+oAB0Lo=
|
||||
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737/go.mod h1:MIS0jDzbU/vuM9MC4YnBITCv+RYuTRq8dJzmCrFsK9g=
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
@@ -206,8 +201,6 @@ github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU=
|
||||
github.com/gofrs/uuid/v5 v5.3.2 h1:2jfO8j3XgSwlz/wHqemAEugfnTlikAYHhnqQ8Xh4fE0=
|
||||
github.com/gofrs/uuid/v5 v5.3.2/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
@@ -226,38 +219,32 @@ github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD
|
||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||
github.com/google/go-tpm v0.9.4 h1:awZRf9FwOeTunQmHoDYSHJps3ie6f1UlhS1fOdPEt1I=
|
||||
github.com/google/go-tpm v0.9.4/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI=
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4=
|
||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
|
||||
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a h1:rDA3FfmxwXR+BVKKdz55WwMJ1pD2hJQNW31d+l3mPk4=
|
||||
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d h1:KJIErDwbSHjnp/SGzE5ed8Aol7JsKiI5X7yWKAtzhM0=
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gookit/assert v0.1.1 h1:lh3GcawXe/p+cU7ESTZ5Ui3Sm/x8JWpIis4/1aF0mY0=
|
||||
github.com/gookit/assert v0.1.1/go.mod h1:jS5bmIVQZTIwk42uXl4lyj4iaaxx32tqH16CFj0VX2E=
|
||||
github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
|
||||
github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo=
|
||||
github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0=
|
||||
github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w=
|
||||
github.com/gorilla/csrf v1.7.3 h1:BHWt6FTLZAb2HtWT5KDBf6qgpZzvtbp9QWDRKZMXJC0=
|
||||
github.com/gorilla/csrf v1.7.3/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk=
|
||||
github.com/gookit/color v1.6.0 h1:JjJXBTk1ETNyqyilJhkTXJYYigHG24TM9Xa2M1xAhRA=
|
||||
github.com/gookit/color v1.6.0/go.mod h1:9ACFc7/1IpHGBW8RwuDm/0YEnhg3dwwXpoMsmtyHfjs=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA=
|
||||
github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 h1:+epNPbD5EqgpEMm5wrl4Hqts3jZt8+kYaqUisuuIGTk=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
|
||||
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
|
||||
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU=
|
||||
github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo=
|
||||
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
|
||||
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
|
||||
github.com/illarion/gonotify/v3 v3.0.2 h1:O7S6vcopHexutmpObkeWsnzMJt/r1hONIEogeVNmJMk=
|
||||
@@ -270,8 +257,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.7.4 h1:9wKznZrhWa2QiHL+NjTSPP6yjl3451BX3imWDnokYlg=
|
||||
github.com/jackc/pgx/v5 v5.7.4/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ=
|
||||
github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
|
||||
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
|
||||
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
||||
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jagottsicher/termcolor v1.0.2 h1:fo0c51pQSuLBN1+yVX2ZE+hE+P7ULb/TY8eRowJnrsM=
|
||||
@@ -289,10 +276,6 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jsimonetti/rtnetlink v1.4.1 h1:JfD4jthWBqZMEffc5RjgmlzpYttAVw1sdnmiNaPO3hE=
|
||||
github.com/jsimonetti/rtnetlink v1.4.1/go.mod h1:xJjT7t59UIZ62GLZbv6PLLo8VFrostJMPBAheR6OM8w=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
@@ -321,18 +304,16 @@ github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4=
|
||||
github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=
|
||||
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
|
||||
github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw=
|
||||
github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o=
|
||||
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg=
|
||||
@@ -341,9 +322,6 @@ github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ
|
||||
github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE=
|
||||
github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI=
|
||||
github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI=
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
||||
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
|
||||
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
|
||||
github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
|
||||
@@ -362,8 +340,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
|
||||
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
|
||||
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25 h1:9bCMuD3TcnjeqjPT2gSlha4asp8NvgcFRYExCaikCxk=
|
||||
@@ -372,16 +350,16 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
|
||||
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
||||
github.com/opencontainers/runc v1.3.0 h1:cvP7xbEvD0QQAs0nZKLzkVog2OPZhI/V2w3WmTmUSXI=
|
||||
github.com/opencontainers/runc v1.3.0/go.mod h1:9wbWt42gV+KRxKRVVugNP6D5+PQciRbenB4fLVsqGPs=
|
||||
github.com/opencontainers/runc v1.3.2 h1:GUwgo0Fx9M/pl2utaSYlJfdBcXAB/CZXDxe322lvJ3Y=
|
||||
github.com/opencontainers/runc v1.3.2/go.mod h1:F7UQQEsxcjUNnFpT1qPLHZBKYP7yWwk6hq8suLy9cl0=
|
||||
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
|
||||
github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw=
|
||||
github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||
github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a h1:S+AGcmAESQ0pXCUNnRH7V+bOUIgkSX5qVt2cNKCrm0Q=
|
||||
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 h1:QTvNkZ5ylY0PGgA+Lih+GdboMLY/G9SEGLMEGVjTVA4=
|
||||
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1 h1:EMacvLRUd2O1K0eWod27ZP5CY1iTNkhBDLSN+Q4JEvA=
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1/go.mod h1:qXbiq/2X4ZUMMshsqlWyTHOcw7ns+GZmlqZZN05ZHcQ=
|
||||
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
|
||||
@@ -398,14 +376,14 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4=
|
||||
github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
|
||||
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
|
||||
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/pterm/pterm v0.12.27/go.mod h1:PhQ89w4i95rhgE+xedAoqous6K9X+r6aSOI2eFF7DZI=
|
||||
github.com/pterm/pterm v0.12.29/go.mod h1:WI3qxgvoQFFGKGjGnJR849gU0TsEOvKn5Q8LlY1U7lg=
|
||||
github.com/pterm/pterm v0.12.30/go.mod h1:MOqLIyMOgmTDz9yorcYbcw+HsgoZo3BQfg2wtl3HEFE=
|
||||
@@ -413,15 +391,13 @@ github.com/pterm/pterm v0.12.31/go.mod h1:32ZAWZVXD7ZfG0s8qqHXePte42kdz8ECtRyEej
|
||||
github.com/pterm/pterm v0.12.33/go.mod h1:x+h2uL+n7CP/rel9+bImHD5lF3nM9vJj80k9ybiiTTE=
|
||||
github.com/pterm/pterm v0.12.36/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5bUw8T8=
|
||||
github.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkGTYf8s=
|
||||
github.com/pterm/pterm v0.12.81 h1:ju+j5I2++FO1jBKMmscgh5h5DPFDFMB7epEjSoKehKA=
|
||||
github.com/pterm/pterm v0.12.81/go.mod h1:TyuyrPjnxfwP+ccJdBTeWHtd/e0ybQHkOS/TakajZCw=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.1.0 h1:x9eHRl4QhZFIPJ17yl4KKW9xLyVWbb3/Yq4SXpjF71U=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.1.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
|
||||
github.com/pterm/pterm v0.12.82 h1:+D9wYhCaeaK0FIQoZtqbNQuNpe2lB2tajKKsTd5paVQ=
|
||||
github.com/pterm/pterm v0.12.82/go.mod h1:TyuyrPjnxfwP+ccJdBTeWHtd/e0ybQHkOS/TakajZCw=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.2.0 h1:dlxm77dZj2c3rxq0/XNvvUKISAmovoXF4a4qM6Wvkr0=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.2.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
@@ -431,29 +407,28 @@ github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0=
|
||||
github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs=
|
||||
github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k=
|
||||
github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk=
|
||||
github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI=
|
||||
github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
|
||||
github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU=
|
||||
github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U=
|
||||
github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=
|
||||
github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=
|
||||
github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw=
|
||||
github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
|
||||
github.com/sasha-s/go-deadlock v0.3.6 h1:TR7sfOnZ7x00tWPfD397Peodt57KzMDo+9Ae9rMiUmw=
|
||||
github.com/sasha-s/go-deadlock v0.3.6/go.mod h1:CUqNyyvMxTyjFqDT7MRg9mb4Dv/btmGTqSR+rky/UXo=
|
||||
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
|
||||
github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
|
||||
github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk=
|
||||
github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
|
||||
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
|
||||
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
|
||||
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
|
||||
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
|
||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
||||
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
|
||||
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
|
||||
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
@@ -464,8 +439,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ=
|
||||
@@ -492,8 +467,8 @@ github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:U
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
|
||||
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M=
|
||||
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y=
|
||||
github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251 h1:h/41LFTrwMxB9Xvvug0kRdQCU5TlV1+pAMQw0ZtDE3U=
|
||||
github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
|
||||
github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da h1:jVRUZPRs9sqyKlYHHzHjAqKN+6e/Vog6NpHYeNPJqOw=
|
||||
github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
|
||||
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek=
|
||||
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg=
|
||||
github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA=
|
||||
@@ -507,8 +482,8 @@ github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1
|
||||
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM=
|
||||
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA=
|
||||
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
|
||||
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
||||
github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
|
||||
github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
@@ -521,80 +496,70 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ
|
||||
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
|
||||
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
|
||||
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
|
||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=
|
||||
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
|
||||
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
|
||||
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
|
||||
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
|
||||
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
|
||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
||||
go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
|
||||
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek=
|
||||
go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g=
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
|
||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
|
||||
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
|
||||
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
|
||||
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b h1:18qgiDvlvH7kk8Ioa8Ov+K6xCi0GMvmGfGW0sgd/SYA=
|
||||
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
|
||||
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8=
|
||||
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/image v0.24.0 h1:AN7zRgVsbvmTfNyqIbbOraYL8mSwcKncEj8ofjgzcMQ=
|
||||
golang.org/x/image v0.24.0/go.mod h1:4b/ITuLfqYq1hqZcjofwctIhi7sZh2WaCjvsBNjjya8=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w=
|
||||
golang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
|
||||
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
|
||||
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
|
||||
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
|
||||
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
|
||||
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -615,8 +580,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
|
||||
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -624,43 +589,40 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
|
||||
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
|
||||
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
|
||||
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
|
||||
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
||||
golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4=
|
||||
golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
|
||||
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
|
||||
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
|
||||
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 h1:i8QOKZfYg6AbGVZzUAY3LrNWCKF8O6zFisU9Wl9RER4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
|
||||
google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI=
|
||||
google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
|
||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
@@ -676,8 +638,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
|
||||
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
|
||||
gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs=
|
||||
gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
|
||||
gorm.io/gorm v1.31.0 h1:0VlycGreVhK7RF/Bwt51Fk8v0xLiiiFdbGDPIZQ7mJY=
|
||||
gorm.io/gorm v1.31.0/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
|
||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k=
|
||||
@@ -686,35 +648,37 @@ honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
|
||||
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
|
||||
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
|
||||
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
||||
modernc.org/cc/v4 v4.25.2 h1:T2oH7sZdGvTaie0BRNFbIYsabzCxUQg8nLqCdQ2i0ic=
|
||||
modernc.org/cc/v4 v4.25.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.25.1 h1:TFSzPrAGmDsdnhT9X2UrcPMI3N/mJ9/X9ykKXwLhDsU=
|
||||
modernc.org/ccgo/v4 v4.25.1/go.mod h1:njjuAYiPflywOOrm3B7kCB444ONP5pAVr8PIEoE0uDw=
|
||||
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
|
||||
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
|
||||
modernc.org/cc/v4 v4.26.5 h1:xM3bX7Mve6G8K8b+T11ReenJOT+BmVqQj0FY5T4+5Y4=
|
||||
modernc.org/cc/v4 v4.26.5/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.28.1 h1:wPKYn5EC/mYTqBO373jKjvX2n+3+aK7+sICCv4Fjy1A=
|
||||
modernc.org/ccgo/v4 v4.28.1/go.mod h1:uD+4RnfrVgE6ec9NGguUNdhqzNIeeomeXf6CL0GTE5Q=
|
||||
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
|
||||
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||
modernc.org/libc v1.62.1 h1:s0+fv5E3FymN8eJVmnk0llBe6rOxCu/DEU+XygRbS8s=
|
||||
modernc.org/libc v1.62.1/go.mod h1:iXhATfJQLjG3NWy56a6WVU73lWOcdYVxsvwCgoPljuo=
|
||||
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
||||
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
||||
modernc.org/libc v1.66.10 h1:yZkb3YeLx4oynyR+iUsXsybsX4Ubx7MQlSYEw4yj59A=
|
||||
modernc.org/libc v1.66.10/go.mod h1:8vGSEwvoUoltr4dlywvHqjtAqHBaw0j1jI7iFBTAr2I=
|
||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||
modernc.org/memory v1.10.0 h1:fzumd51yQ1DxcOxSO+S6X7+QTuVU+n8/Aj7swYjFfC4=
|
||||
modernc.org/memory v1.10.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
|
||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||
modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
|
||||
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||
modernc.org/sqlite v1.37.0 h1:s1TMe7T3Q3ovQiK2Ouz4Jwh7dw4ZDqbebSDTlSJdfjI=
|
||||
modernc.org/sqlite v1.37.0/go.mod h1:5YiWv+YviqGMuGw4V+PNplcyaJ5v+vQd7TQOgkACoJM=
|
||||
modernc.org/sqlite v1.39.1 h1:H+/wGFzuSCIEVCvXYVHX5RQglwhMOvtHSv+VtidL2r4=
|
||||
modernc.org/sqlite v1.39.1/go.mod h1:9fjQZ0mB1LLP0GYrp39oOJXx/I2sxEnZtzCmEQIKvGE=
|
||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
|
||||
software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
|
||||
tailscale.com v1.84.3 h1:Ur9LMedSgicwbqpy5xn7t49G8490/s6rqAJOk5Q5AYE=
|
||||
tailscale.com v1.84.3/go.mod h1:6/S63NMAhmncYT/1zIPDJkvCuZwMw+JnUuOfSPNazpo=
|
||||
zgo.at/zcache/v2 v2.2.0 h1:K29/IPjMniZfveYE+IRXfrl11tMzHkIPuyGrfVZ2fGo=
|
||||
zgo.at/zcache/v2 v2.2.0/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
|
||||
tailscale.com v1.86.5 h1:yBtWFjuLYDmxVnfnvPbZNZcKADCYgNfMd0rUAOA9XCs=
|
||||
tailscale.com v1.86.5/go.mod h1:Lm8dnzU2i/Emw15r6sl3FRNp/liSQ/nYw6ZSQvIdZ1M=
|
||||
zgo.at/zcache/v2 v2.4.1 h1:Dfjoi8yI0Uq7NCc4lo2kaQJJmp9Mijo21gef+oJstbY=
|
||||
zgo.at/zcache/v2 v2.4.1/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
|
||||
zombiezen.com/go/postgrestest v1.0.1 h1:aXoADQAJmZDU3+xilYVut0pHhgc0sF8ZspPW9gFNwP4=
|
||||
zombiezen.com/go/postgrestest v1.0.1/go.mod h1:marlZezr+k2oSJrvXHnZUs1olHqpE9czlz8ZYkVxliQ=
191 hscontrol/app.go
@@ -17,6 +17,7 @@ import (
	"syscall"
	"time"

	"github.com/cenkalti/backoff/v5"
	"github.com/davecgh/go-spew/spew"
	"github.com/gorilla/mux"
	grpcRuntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
@@ -99,7 +100,7 @@ type Headscale struct {
	authProvider AuthProvider
	mapBatcher   mapper.Batcher

	pollNetMapStreamWG sync.WaitGroup
	clientStreamsOpen  sync.WaitGroup
}

var (
@@ -128,28 +129,29 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
|
||||
}
|
||||
|
||||
app := Headscale{
|
||||
cfg: cfg,
|
||||
noisePrivateKey: noisePrivateKey,
|
||||
pollNetMapStreamWG: sync.WaitGroup{},
|
||||
state: s,
|
||||
cfg: cfg,
|
||||
noisePrivateKey: noisePrivateKey,
|
||||
clientStreamsOpen: sync.WaitGroup{},
|
||||
state: s,
|
||||
}
|
||||
|
||||
// Initialize ephemeral garbage collector
|
||||
ephemeralGC := db.NewEphemeralGarbageCollector(func(ni types.NodeID) {
|
||||
node, err := app.state.GetNodeByID(ni)
|
||||
if err != nil {
|
||||
log.Err(err).Uint64("node.id", ni.Uint64()).Msgf("failed to get ephemeral node for deletion")
|
||||
node, ok := app.state.GetNodeByID(ni)
|
||||
if !ok {
|
||||
log.Error().Uint64("node.id", ni.Uint64()).Msg("Ephemeral node deletion failed")
|
||||
log.Debug().Caller().Uint64("node.id", ni.Uint64()).Msg("Ephemeral node deletion failed because node not found in NodeStore")
|
||||
return
|
||||
}
|
||||
|
||||
policyChanged, err := app.state.DeleteNode(node)
|
||||
if err != nil {
|
||||
log.Err(err).Uint64("node.id", ni.Uint64()).Msgf("failed to delete ephemeral node")
|
||||
log.Error().Err(err).Uint64("node.id", ni.Uint64()).Str("node.name", node.Hostname()).Msg("Ephemeral node deletion failed")
|
||||
return
|
||||
}
|
||||
|
||||
app.Change(policyChanged)
|
||||
log.Debug().Uint64("node.id", ni.Uint64()).Msgf("deleted ephemeral node")
|
||||
log.Debug().Caller().Uint64("node.id", ni.Uint64()).Str("node.name", node.Hostname()).Msg("Ephemeral node deleted because garbage collection timeout reached")
|
||||
})
|
||||
app.ephemeralGC = ephemeralGC
|
||||
|
||||
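The ephemeral garbage collector wired up in NewHeadscale above is, at its core, a per-node timer that can be rearmed or cancelled when the node reconnects. A minimal, self-contained sketch of that schedule-then-cancel pattern (the collector type and names here are illustrative, not headscale's db.EphemeralGarbageCollector):

package main

import (
	"fmt"
	"sync"
	"time"
)

// gc sketches the pattern used for ephemeral nodes: every node gets a
// deletion timer, and a reconnect cancels the pending deletion.
type gc struct {
	mu       sync.Mutex
	timers   map[uint64]*time.Timer
	deleteFn func(id uint64)
}

func newGC(deleteFn func(id uint64)) *gc {
	return &gc{timers: map[uint64]*time.Timer{}, deleteFn: deleteFn}
}

// Schedule (re)arms the deletion timer for a node.
func (g *gc) Schedule(id uint64, after time.Duration) {
	g.mu.Lock()
	defer g.mu.Unlock()
	if t, ok := g.timers[id]; ok {
		t.Stop()
	}
	g.timers[id] = time.AfterFunc(after, func() { g.deleteFn(id) })
}

// Cancel stops a pending deletion, e.g. when the node reconnects.
func (g *gc) Cancel(id uint64) {
	g.mu.Lock()
	defer g.mu.Unlock()
	if t, ok := g.timers[id]; ok {
		t.Stop()
		delete(g.timers, id)
	}
}

func main() {
	g := newGC(func(id uint64) { fmt.Println("deleting ephemeral node", id) })
	g.Schedule(1, 50*time.Millisecond) // scheduled at startup, as Serve() does for existing ephemeral nodes
	g.Schedule(2, 50*time.Millisecond)
	g.Cancel(2) // node 2 "reconnected", so its deletion is cancelled
	time.Sleep(100 * time.Millisecond)
}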
@@ -284,11 +286,23 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {

		case <-derpTickerChan:
			log.Info().Msg("Fetching DERPMap updates")
			derpMap := derp.GetDERPMap(h.cfg.DERP)
			if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
				region, _ := h.DERPServer.GenerateRegion()
				derpMap.Regions[region.RegionID] = &region
			derpMap, err := backoff.Retry(ctx, func() (*tailcfg.DERPMap, error) {
				derpMap, err := derp.GetDERPMap(h.cfg.DERP)
				if err != nil {
					return nil, err
				}
				if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
					region, _ := h.DERPServer.GenerateRegion()
					derpMap.Regions[region.RegionID] = &region
				}

				return derpMap, nil
			}, backoff.WithBackOff(backoff.NewExponentialBackOff()))
			if err != nil {
				log.Error().Err(err).Msg("failed to build new DERPMap, retrying later")
				continue
			}
			h.state.SetDERPMap(derpMap)

			h.Change(change.DERPSet)
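The hunk above wraps the periodic DERP map rebuild in backoff/v5's generic Retry, so a transient fetch failure no longer publishes a broken map. A minimal standalone sketch of that retry pattern, with a hypothetical fetch function standing in for derp.GetDERPMap:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	attempts := 0
	// fetch is a hypothetical stand-in for derp.GetDERPMap: it fails twice
	// before succeeding so the retry behaviour is visible.
	fetch := func() (string, error) {
		attempts++
		if attempts < 3 {
			return "", errors.New("DERP map source not reachable yet")
		}
		return "derpmap (2 regions)", nil
	}

	// backoff/v5: Retry takes the context first, then a typed operation,
	// then options; it returns the operation's value once it succeeds.
	derpMap, err := backoff.Retry(ctx, fetch, backoff.WithBackOff(backoff.NewExponentialBackOff()))
	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	fmt.Printf("got %s after %d attempts\n", derpMap, attempts)
}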
@@ -366,64 +380,53 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg("HTTP authentication invoked")
|
||||
|
||||
authHeader := req.Header.Get("authorization")
|
||||
|
||||
if !strings.HasPrefix(authHeader, AuthPrefix) {
|
||||
log.Error().
|
||||
if err := func() error {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg(`missing "Bearer " prefix in "Authorization" header`)
|
||||
writer.WriteHeader(http.StatusUnauthorized)
|
||||
_, err := writer.Write([]byte("Unauthorized"))
|
||||
Msg("HTTP authentication invoked")
|
||||
|
||||
authHeader := req.Header.Get("Authorization")
|
||||
|
||||
if !strings.HasPrefix(authHeader, AuthPrefix) {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg(`missing "Bearer " prefix in "Authorization" header`)
|
||||
writer.WriteHeader(http.StatusUnauthorized)
|
||||
_, err := writer.Write([]byte("Unauthorized"))
|
||||
return err
|
||||
}
|
||||
|
||||
valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg("failed to validate token")
|
||||
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, err := writer.Write([]byte("Unauthorized"))
|
||||
return err
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
if !valid {
|
||||
log.Info().
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg("invalid token")
|
||||
|
||||
valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
|
||||
if err != nil {
|
||||
writer.WriteHeader(http.StatusUnauthorized)
|
||||
_, err := writer.Write([]byte("Unauthorized"))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}(); err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg("failed to validate token")
|
||||
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, err := writer.Write([]byte("Unauthorized"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if !valid {
|
||||
log.Info().
|
||||
Str("client_address", req.RemoteAddr).
|
||||
Msg("invalid token")
|
||||
|
||||
writer.WriteHeader(http.StatusUnauthorized)
|
||||
_, err := writer.Write([]byte("Unauthorized"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
Msg("Failed to write HTTP response")
|
||||
return
|
||||
}
|
||||
|
||||
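Stripped of headscale's logging and state wiring, the middleware refactored above enforces a simple contract: require a "Bearer " prefix, validate the key, and answer 401 or 500 accordingly. A compact sketch of that contract, where validate is a stand-in for h.state.ValidateAPIKey:

package main

import (
	"log"
	"net/http"
	"strings"
)

const authPrefix = "Bearer "

// bearerAuth is a simplified sketch of the authentication middleware above:
// missing/invalid tokens get 401, validation errors get 500, and only valid
// requests reach the next handler. validate stands in for ValidateAPIKey.
func bearerAuth(validate func(string) (bool, error), next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		token, ok := strings.CutPrefix(r.Header.Get("Authorization"), authPrefix)
		if !ok {
			http.Error(w, "Unauthorized", http.StatusUnauthorized)
			return
		}

		valid, err := validate(token)
		if err != nil {
			log.Printf("validating api key: %v", err)
			http.Error(w, "Unauthorized", http.StatusInternalServerError)
			return
		}
		if !valid {
			http.Error(w, "Unauthorized", http.StatusUnauthorized)
			return
		}

		next.ServeHTTP(w, r)
	})
}

func main() {
	api := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})
	validate := func(token string) (bool, error) { return token == "secret", nil }
	_ = http.ListenAndServe("127.0.0.1:8088", bearerAuth(validate, api))
}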
@@ -449,6 +452,7 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
	router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).
		Methods(http.MethodPost, http.MethodGet)

	router.HandleFunc("/robots.txt", h.RobotsHandler).Methods(http.MethodGet)
	router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
	router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
	router.HandleFunc("/register/{registration_id}", h.authProvider.RegisterHandler).
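The route registration above adds a /robots.txt endpoint, but its handler is not part of this hunk. As an assumption about its intent, a disallow-all handler would look roughly like this:

package main

import "net/http"

// robotsHandler is a hypothetical sketch of what the /robots.txt endpoint
// registered above might serve; the real RobotsHandler is not shown in this
// diff, so treat the disallow-all policy here as an assumption.
func robotsHandler(w http.ResponseWriter, _ *http.Request) {
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	_, _ = w.Write([]byte("User-agent: *\nDisallow: /\n"))
}

func main() {
	http.HandleFunc("/robots.txt", robotsHandler)
	_ = http.ListenAndServe("127.0.0.1:8089", nil)
}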
@@ -487,11 +491,12 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
|
||||
|
||||
// Serve launches the HTTP and gRPC server service Headscale and the API.
|
||||
func (h *Headscale) Serve() error {
|
||||
var err error
|
||||
capver.CanOldCodeBeCleanedUp()
|
||||
|
||||
if profilingEnabled {
|
||||
if profilingPath != "" {
|
||||
err := os.MkdirAll(profilingPath, os.ModePerm)
|
||||
err = os.MkdirAll(profilingPath, os.ModePerm)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("failed to create profiling directory")
|
||||
}
|
||||
@@ -506,7 +511,8 @@ func (h *Headscale) Serve() error {
|
||||
spew.Dump(h.cfg)
|
||||
}
|
||||
|
||||
log.Info().Str("version", types.Version).Str("commit", types.GitCommitHash).Msg("Starting Headscale")
|
||||
versionInfo := types.GetVersionInfo()
|
||||
log.Info().Str("version", versionInfo.Version).Str("commit", versionInfo.Commit).Msg("Starting Headscale")
|
||||
log.Info().
|
||||
Str("minimum_version", capver.TailscaleVersion(capver.MinSupportedCapabilityVersion)).
|
||||
Msg("Clients with a lower minimum version will be rejected")
|
||||
@@ -515,40 +521,39 @@ func (h *Headscale) Serve() error {
|
||||
h.mapBatcher.Start()
|
||||
defer h.mapBatcher.Close()
|
||||
|
||||
// TODO(kradalby): fix state part.
|
||||
if h.cfg.DERP.ServerEnabled {
|
||||
// When embedded DERP is enabled we always need a STUN server
|
||||
if h.cfg.DERP.STUNAddr == "" {
|
||||
return errSTUNAddressNotSet
|
||||
}
|
||||
|
||||
region, err := h.DERPServer.GenerateRegion()
|
||||
if err != nil {
|
||||
return fmt.Errorf("generating DERP region for embedded server: %w", err)
|
||||
}
|
||||
|
||||
if h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
|
||||
h.state.DERPMap().Regions[region.RegionID] = &region
|
||||
}
|
||||
|
||||
go h.DERPServer.ServeSTUN()
|
||||
}
|
||||
|
||||
if len(h.state.DERPMap().Regions) == 0 {
|
||||
derpMap, err := derp.GetDERPMap(h.cfg.DERP)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get DERPMap: %w", err)
|
||||
}
|
||||
|
||||
if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
|
||||
region, _ := h.DERPServer.GenerateRegion()
|
||||
derpMap.Regions[region.RegionID] = &region
|
||||
}
|
||||
|
||||
if len(derpMap.Regions) == 0 {
|
||||
return errEmptyInitialDERPMap
|
||||
}
|
||||
|
||||
h.state.SetDERPMap(derpMap)
|
||||
|
||||
// Start ephemeral node garbage collector and schedule all nodes
|
||||
// that are already in the database and ephemeral. If they are still
|
||||
// around between restarts, they will reconnect and the GC will
|
||||
// be cancelled.
|
||||
go h.ephemeralGC.Start()
|
||||
ephmNodes, err := h.state.ListEphemeralNodes()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list ephemeral nodes: %w", err)
|
||||
}
|
||||
for _, node := range ephmNodes {
|
||||
h.ephemeralGC.Schedule(node.ID, h.cfg.EphemeralNodeInactivityTimeout)
|
||||
ephmNodes := h.state.ListEphemeralNodes()
|
||||
for _, node := range ephmNodes.All() {
|
||||
h.ephemeralGC.Schedule(node.ID(), h.cfg.EphemeralNodeInactivityTimeout)
|
||||
}
|
||||
|
||||
if h.cfg.DNSConfig.ExtraRecordsPath != "" {
|
||||
@@ -778,23 +783,14 @@ func (h *Headscale) Serve() error {
|
||||
continue
|
||||
}
|
||||
|
||||
changed, err := h.state.ReloadPolicy()
|
||||
changes, err := h.state.ReloadPolicy()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("reloading policy")
|
||||
continue
|
||||
}
|
||||
|
||||
if changed {
|
||||
log.Info().
|
||||
Msg("ACL policy successfully reloaded, notifying nodes of change")
|
||||
h.Change(changes...)
|
||||
|
||||
err = h.state.AutoApproveNodes()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("failed to approve routes after new policy")
|
||||
}
|
||||
|
||||
h.Change(change.PolicySet)
|
||||
}
|
||||
default:
|
||||
info := func(msg string) { log.Info().Msg(msg) }
|
||||
log.Info().
|
||||
@@ -818,10 +814,11 @@ func (h *Headscale) Serve() error {
|
||||
log.Error().Err(err).Msg("failed to shutdown http")
|
||||
}
|
||||
|
||||
info("closing node notifier")
|
||||
info("closing batcher")
|
||||
h.mapBatcher.Close()
|
||||
|
||||
info("waiting for netmap stream to close")
|
||||
h.pollNetMapStreamWG.Wait()
|
||||
h.clientStreamsOpen.Wait()
|
||||
|
||||
info("shutting down grpc server (socket)")
|
||||
grpcSocket.GracefulStop()
|
||||
@@ -847,11 +844,11 @@ func (h *Headscale) Serve() error {
|
||||
info("closing socket listener")
|
||||
socketListener.Close()
|
||||
|
||||
// Close db connections
|
||||
info("closing database connection")
|
||||
// Close state connections
|
||||
info("closing state and database")
|
||||
err = h.state.Close()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("failed to close db")
|
||||
log.Error().Err(err).Msg("failed to close state")
|
||||
}
|
||||
|
||||
log.Info().
|
||||
@@ -1004,6 +1001,6 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
// Change is used to send changes to nodes.
// All change should be enqueued here and empty will be automatically
// ignored.
func (h *Headscale) Change(c change.ChangeSet) {
	h.mapBatcher.AddWork(c)
func (h *Headscale) Change(cs ...change.ChangeSet) {
	h.mapBatcher.AddWork(cs...)
}
@@ -11,7 +11,8 @@ import (

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/types/change"

	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/rs/zerolog/log"
	"gorm.io/gorm"
	"tailscale.com/tailcfg"
	"tailscale.com/types/key"
@@ -25,57 +26,98 @@ type AuthProvider interface {
|
||||
|
||||
func (h *Headscale) handleRegister(
|
||||
ctx context.Context,
|
||||
regReq tailcfg.RegisterRequest,
|
||||
req tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
node, err := h.state.GetNodeByNodeKey(regReq.NodeKey)
|
||||
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, fmt.Errorf("looking up node in database: %w", err)
|
||||
}
|
||||
// Check for logout/expiry FIRST, before checking auth key.
|
||||
// Tailscale clients may send logout requests with BOTH a past expiry AND an auth key.
|
||||
// A past expiry takes precedence - it's a logout regardless of other fields.
|
||||
if !req.Expiry.IsZero() && req.Expiry.Before(time.Now()) {
|
||||
log.Debug().
|
||||
Str("node.key", req.NodeKey.ShortString()).
|
||||
Time("expiry", req.Expiry).
|
||||
Bool("has_auth", req.Auth != nil).
|
||||
Msg("Detected logout attempt with past expiry")
|
||||
|
||||
if node != nil {
|
||||
// If an existing node is trying to register with an auth key,
|
||||
// we need to validate the auth key even for existing nodes
|
||||
if regReq.Auth != nil && regReq.Auth.AuthKey != "" {
|
||||
resp, err := h.handleRegisterWithAuthKey(regReq, machineKey)
|
||||
// This is a logout attempt (expiry in the past)
|
||||
if node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok {
|
||||
log.Debug().
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Str("node.name", node.Hostname()).
|
||||
Bool("is_ephemeral", node.IsEphemeral()).
|
||||
Bool("has_authkey", node.AuthKey().Valid()).
|
||||
Msg("Found existing node for logout, calling handleLogout")
|
||||
|
||||
resp, err := h.handleLogout(node, req, machineKey)
|
||||
if err != nil {
|
||||
// Preserve HTTPError types so they can be handled properly by the HTTP layer
|
||||
var httpErr HTTPError
|
||||
if errors.As(err, &httpErr) {
|
||||
return nil, httpErr
|
||||
}
|
||||
return nil, fmt.Errorf("handling register with auth key for existing node: %w", err)
|
||||
return nil, fmt.Errorf("handling logout: %w", err)
|
||||
}
|
||||
return resp, nil
|
||||
if resp != nil {
|
||||
return resp, nil
|
||||
}
|
||||
} else {
|
||||
log.Warn().
|
||||
Str("node.key", req.NodeKey.ShortString()).
|
||||
Msg("Logout attempt but node not found in NodeStore")
|
||||
}
|
||||
|
||||
resp, err := h.handleExistingNode(node, regReq, machineKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("handling existing node: %w", err)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
if regReq.Followup != "" {
|
||||
return h.waitForFollowup(ctx, regReq)
|
||||
// If the register request does not contain a Auth struct, it means we are logging
|
||||
// out an existing node (legacy logout path for clients that send Auth=nil).
|
||||
if req.Auth == nil {
|
||||
// If the register request present a NodeKey that is currently in use, we will
|
||||
// check if the node needs to be sent to re-auth, or if the node is logging out.
|
||||
// We do not look up nodes by [key.MachinePublic] as it might belong to multiple
|
||||
// nodes, separated by users and this path is handling expiring/logout paths.
|
||||
if node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok {
|
||||
resp, err := h.handleLogout(node, req, machineKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("handling existing node: %w", err)
|
||||
}
|
||||
|
||||
// If resp is not nil, we have a response to return to the node.
|
||||
// If resp is nil, we should proceed and see if the node is trying to re-auth.
|
||||
if resp != nil {
|
||||
return resp, nil
|
||||
}
|
||||
} else {
|
||||
// If the register request is not attempting to register a node, and
|
||||
// we cannot match it with an existing node, we consider that unexpected
|
||||
// as only register nodes should attempt to log out.
|
||||
log.Debug().
|
||||
Str("node.key", req.NodeKey.ShortString()).
|
||||
Str("machine.key", machineKey.ShortString()).
|
||||
Bool("unexpected", true).
|
||||
Msg("received register request with no auth, and no existing node")
|
||||
}
|
||||
}
|
||||
|
||||
if regReq.Auth != nil && regReq.Auth.AuthKey != "" {
|
||||
resp, err := h.handleRegisterWithAuthKey(regReq, machineKey)
|
||||
// If the [tailcfg.RegisterRequest] has a Followup URL, it means that the
|
||||
// node has already started the registration process and we should wait for
|
||||
// it to finish the original registration.
|
||||
if req.Followup != "" {
|
||||
return h.waitForFollowup(ctx, req, machineKey)
|
||||
}
|
||||
|
||||
// Pre authenticated keys are handled slightly different than interactive
|
||||
// logins as they can be done fully sync and we can respond to the node with
|
||||
// the result as it is waiting.
|
||||
if isAuthKey(req) {
|
||||
resp, err := h.handleRegisterWithAuthKey(req, machineKey)
|
||||
if err != nil {
|
||||
// Preserve HTTPError types so they can be handled properly by the HTTP layer
|
||||
var httpErr HTTPError
|
||||
if errors.As(err, &httpErr) {
|
||||
return nil, httpErr
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("handling register with auth key: %w", err)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
resp, err := h.handleRegisterInteractive(regReq, machineKey)
|
||||
resp, err := h.handleRegisterInteractive(req, machineKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("handling register interactive: %w", err)
|
||||
}
|
||||
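The comments in the hunk above spell out the ordering rule: a register request whose expiry is set and already in the past is a logout, and that check runs before any auth-key handling. A small sketch of that predicate, extracted for clarity (the helper name is illustrative; handleRegister inlines this condition):

package main

import (
	"fmt"
	"time"

	"tailscale.com/tailcfg"
)

// isLogoutRequest mirrors the check handleRegister performs first: a non-zero
// expiry in the past marks a logout, even if the request also carries an auth
// key.
func isLogoutRequest(req tailcfg.RegisterRequest) bool {
	return !req.Expiry.IsZero() && req.Expiry.Before(time.Now())
}

func main() {
	logout := tailcfg.RegisterRequest{Expiry: time.Now().Add(-time.Hour)}
	reauth := tailcfg.RegisterRequest{Expiry: time.Now().Add(24 * time.Hour)}

	fmt.Println(isLogoutRequest(logout)) // true: past expiry wins over any Auth field
	fmt.Println(isLogoutRequest(reauth)) // false: future expiry is a normal registration
}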
@@ -83,57 +125,111 @@ func (h *Headscale) handleRegister(
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) handleExistingNode(
|
||||
node *types.Node,
|
||||
regReq tailcfg.RegisterRequest,
|
||||
// handleLogout checks if the [tailcfg.RegisterRequest] is a
|
||||
// logout attempt from a node. If the node is not attempting to
|
||||
func (h *Headscale) handleLogout(
|
||||
node types.NodeView,
|
||||
req tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
|
||||
if node.MachineKey != machineKey {
|
||||
// Fail closed if it looks like this is an attempt to modify a node where
|
||||
// the node key and the machine key the noise session was started with does
|
||||
// not align.
|
||||
if node.MachineKey() != machineKey {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "node exist with different machine key", nil)
|
||||
}
|
||||
|
||||
expired := node.IsExpired()
|
||||
// Note: We do NOT return early if req.Auth is set, because Tailscale clients
|
||||
// may send logout requests with BOTH a past expiry AND an auth key.
|
||||
// A past expiry indicates logout, regardless of whether Auth is present.
|
||||
// The expiry check below will handle the logout logic.
|
||||
|
||||
if !expired && !regReq.Expiry.IsZero() {
|
||||
requestExpiry := regReq.Expiry
|
||||
// If the node is expired and this is not a re-authentication attempt,
|
||||
// force the client to re-authenticate.
|
||||
// TODO(kradalby): I wonder if this is a path we ever hit?
|
||||
if node.IsExpired() {
|
||||
log.Trace().Str("node.name", node.Hostname()).
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Interface("reg.req", req).
|
||||
Bool("unexpected", true).
|
||||
Msg("Node key expired, forcing re-authentication")
|
||||
return &tailcfg.RegisterResponse{
|
||||
NodeKeyExpired: true,
|
||||
MachineAuthorized: false,
|
||||
AuthURL: "", // Client will need to re-authenticate
|
||||
}, nil
|
||||
}
|
||||
|
||||
// The client is trying to extend their key, this is not allowed.
|
||||
if requestExpiry.After(time.Now()) {
|
||||
return nil, NewHTTPError(http.StatusBadRequest, "extending key is not allowed", nil)
|
||||
}
|
||||
// If we get here, the node is not currently expired, and not trying to
|
||||
// do an auth.
|
||||
// The node is likely logging out, but before we run that logic, we will validate
|
||||
// that the node is not attempting to tamper/extend their expiry.
|
||||
// If it is not, we will expire the node or in the case of an ephemeral node, delete it.
|
||||
|
||||
// If the request expiry is in the past, we consider it a logout.
|
||||
if requestExpiry.Before(time.Now()) {
|
||||
if node.IsEphemeral() {
|
||||
c, err := h.state.DeleteNode(node)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("deleting ephemeral node: %w", err)
|
||||
}
|
||||
// The client is trying to extend their key, this is not allowed.
|
||||
if req.Expiry.After(time.Now()) {
|
||||
return nil, NewHTTPError(http.StatusBadRequest, "extending key is not allowed", nil)
|
||||
}
|
||||
|
||||
h.Change(c)
|
||||
// If the request expiry is in the past, we consider it a logout.
|
||||
if req.Expiry.Before(time.Now()) {
|
||||
log.Debug().
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Str("node.name", node.Hostname()).
|
||||
Bool("is_ephemeral", node.IsEphemeral()).
|
||||
Bool("has_authkey", node.AuthKey().Valid()).
|
||||
Time("req.expiry", req.Expiry).
|
||||
Msg("Processing logout request with past expiry")
|
||||
|
||||
return nil, nil
|
||||
if node.IsEphemeral() {
|
||||
log.Info().
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Str("node.name", node.Hostname()).
|
||||
Msg("Deleting ephemeral node during logout")
|
||||
|
||||
c, err := h.state.DeleteNode(node)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("deleting ephemeral node: %w", err)
|
||||
}
|
||||
|
||||
h.Change(c)
|
||||
|
||||
return &tailcfg.RegisterResponse{
|
||||
NodeKeyExpired: true,
|
||||
MachineAuthorized: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
_, c, err := h.state.SetNodeExpiry(node.ID, requestExpiry)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting node expiry: %w", err)
|
||||
}
|
||||
log.Debug().
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Str("node.name", node.Hostname()).
|
||||
Msg("Node is not ephemeral, setting expiry instead of deleting")
|
||||
}
|
||||
|
||||
h.Change(c)
|
||||
}
|
||||
// Update the internal state with the nodes new expiry, meaning it is
|
||||
// logged out.
|
||||
updatedNode, c, err := h.state.SetNodeExpiry(node.ID(), req.Expiry)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting node expiry: %w", err)
|
||||
}
|
||||
|
||||
return nodeToRegisterResponse(node), nil
|
||||
h.Change(c)
|
||||
|
||||
return nodeToRegisterResponse(updatedNode), nil
|
||||
}
|
||||
|
||||
func nodeToRegisterResponse(node *types.Node) *tailcfg.RegisterResponse {
|
||||
// isAuthKey reports if the register request is a registration request
|
||||
// using an pre auth key.
|
||||
func isAuthKey(req tailcfg.RegisterRequest) bool {
|
||||
return req.Auth != nil && req.Auth.AuthKey != ""
|
||||
}
|
||||
|
||||
func nodeToRegisterResponse(node types.NodeView) *tailcfg.RegisterResponse {
|
||||
return &tailcfg.RegisterResponse{
|
||||
// TODO(kradalby): Only send for user-owned nodes
|
||||
// and not tagged nodes when tags is working.
|
||||
User: *node.User.TailscaleUser(),
|
||||
Login: *node.User.TailscaleLogin(),
|
||||
User: node.UserView().TailscaleUser(),
|
||||
Login: node.UserView().TailscaleLogin(),
|
||||
NodeKeyExpired: node.IsExpired(),
|
||||
|
||||
// Headscale does not implement the concept of machine authorization
|
||||
@@ -145,9 +241,10 @@ func nodeToRegisterResponse(node *types.Node) *tailcfg.RegisterResponse {
|
||||
|
||||
func (h *Headscale) waitForFollowup(
|
||||
ctx context.Context,
|
||||
regReq tailcfg.RegisterRequest,
|
||||
req tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
fu, err := url.Parse(regReq.Followup)
|
||||
fu, err := url.Parse(req.Followup)
|
||||
if err != nil {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "invalid followup URL", err)
|
||||
}
|
||||
@@ -163,21 +260,64 @@ func (h *Headscale) waitForFollowup(
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "registration timed out", err)
|
||||
case node := <-reg.Registered:
|
||||
if node == nil {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "node not found", nil)
|
||||
// registration is expired in the cache, instruct the client to try a new registration
|
||||
return h.reqToNewRegisterResponse(req, machineKey)
|
||||
}
|
||||
return nodeToRegisterResponse(node), nil
|
||||
return nodeToRegisterResponse(node.View()), nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, NewHTTPError(http.StatusNotFound, "followup registration not found", nil)
|
||||
// if the follow-up registration isn't found anymore, instruct the client to try a new registration
|
||||
return h.reqToNewRegisterResponse(req, machineKey)
|
||||
}
|
||||
|
||||
// reqToNewRegisterResponse refreshes the registration flow by creating a new
// registration ID and returning the corresponding AuthURL so the client can
// restart the authentication process.
func (h *Headscale) reqToNewRegisterResponse(
    req tailcfg.RegisterRequest,
    machineKey key.MachinePublic,
) (*tailcfg.RegisterResponse, error) {
    newRegID, err := types.NewRegistrationID()
    if err != nil {
        return nil, NewHTTPError(http.StatusInternalServerError, "failed to generate registration ID", err)
    }

    // Ensure we have valid hostinfo and hostname
    validHostinfo, hostname := util.EnsureValidHostinfo(
        req.Hostinfo,
        machineKey.String(),
        req.NodeKey.String(),
    )

    nodeToRegister := types.NewRegisterNode(
        types.Node{
            Hostname:   hostname,
            MachineKey: machineKey,
            NodeKey:    req.NodeKey,
            Hostinfo:   validHostinfo,
            LastSeen:   ptr.To(time.Now()),
        },
    )

    if !req.Expiry.IsZero() {
        nodeToRegister.Node.Expiry = &req.Expiry
    }

    log.Info().Msgf("New followup node registration using key: %s", newRegID)
    h.state.SetRegistrationCacheEntry(newRegID, nodeToRegister)

    return &tailcfg.RegisterResponse{
        AuthURL: h.authProvider.AuthURL(newRegID),
    }, nil
}
|
||||
|
||||
func (h *Headscale) handleRegisterWithAuthKey(
|
||||
regReq tailcfg.RegisterRequest,
|
||||
req tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
node, changed, policyChanged, err := h.state.HandleNodeFromPreAuthKey(
|
||||
regReq,
|
||||
node, changed, err := h.state.HandleNodeFromPreAuthKey(
|
||||
req,
|
||||
machineKey,
|
||||
)
|
||||
if err != nil {
|
||||
@@ -192,8 +332,8 @@ func (h *Headscale) handleRegisterWithAuthKey(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If node is nil, it means an ephemeral node was deleted during logout
|
||||
if node == nil {
|
||||
// If node is not valid, it means an ephemeral node was deleted during logout
|
||||
if !node.Valid() {
|
||||
h.Change(changed)
|
||||
return nil, nil
|
||||
}
|
||||
@@ -212,31 +352,43 @@ func (h *Headscale) handleRegisterWithAuthKey(
|
||||
// TODO(kradalby): This needs to be ran as part of the batcher maybe?
|
||||
// now since we dont update the node/pol here anymore
|
||||
routeChange := h.state.AutoApproveRoutes(node)
|
||||
|
||||
if _, _, err := h.state.SaveNode(node); err != nil {
|
||||
return nil, fmt.Errorf("saving auto approved routes to node: %w", err)
|
||||
}
|
||||
|
||||
if routeChange && changed.Empty() {
|
||||
changed = change.NodeAdded(node.ID)
|
||||
changed = change.NodeAdded(node.ID())
|
||||
}
|
||||
h.Change(changed)
|
||||
|
||||
// If policy changed due to node registration, send a separate policy change
|
||||
if policyChanged {
|
||||
policyChange := change.PolicyChange()
|
||||
h.Change(policyChange)
|
||||
}
|
||||
// TODO(kradalby): I think this is covered above, but we need to validate that.
|
||||
// // If policy changed due to node registration, send a separate policy change
|
||||
// if policyChanged {
|
||||
// policyChange := change.PolicyChange()
|
||||
// h.Change(policyChange)
|
||||
// }
|
||||
|
||||
return &tailcfg.RegisterResponse{
|
||||
resp := &tailcfg.RegisterResponse{
|
||||
MachineAuthorized: true,
|
||||
NodeKeyExpired: node.IsExpired(),
|
||||
User: *node.User.TailscaleUser(),
|
||||
Login: *node.User.TailscaleLogin(),
|
||||
}, nil
|
||||
User: node.UserView().TailscaleUser(),
|
||||
Login: node.UserView().TailscaleLogin(),
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Caller().
|
||||
Interface("reg.resp", resp).
|
||||
Interface("reg.req", req).
|
||||
Str("node.name", node.Hostname()).
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Msg("RegisterResponse")
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) handleRegisterInteractive(
|
||||
regReq tailcfg.RegisterRequest,
|
||||
req tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
registrationId, err := types.NewRegistrationID()
|
||||
@@ -244,19 +396,39 @@ func (h *Headscale) handleRegisterInteractive(
|
||||
return nil, fmt.Errorf("generating registration ID: %w", err)
|
||||
}
|
||||
|
||||
nodeToRegister := types.RegisterNode{
|
||||
Node: types.Node{
|
||||
Hostname: regReq.Hostinfo.Hostname,
|
||||
MachineKey: machineKey,
|
||||
NodeKey: regReq.NodeKey,
|
||||
Hostinfo: regReq.Hostinfo,
|
||||
LastSeen: ptr.To(time.Now()),
|
||||
},
|
||||
Registered: make(chan *types.Node),
|
||||
// Ensure we have valid hostinfo and hostname
|
||||
validHostinfo, hostname := util.EnsureValidHostinfo(
|
||||
req.Hostinfo,
|
||||
machineKey.String(),
|
||||
req.NodeKey.String(),
|
||||
)
|
||||
|
||||
if req.Hostinfo == nil {
|
||||
log.Warn().
|
||||
Str("machine.key", machineKey.ShortString()).
|
||||
Str("node.key", req.NodeKey.ShortString()).
|
||||
Str("generated.hostname", hostname).
|
||||
Msg("Received registration request with nil hostinfo, generated default hostname")
|
||||
} else if req.Hostinfo.Hostname == "" {
|
||||
log.Warn().
|
||||
Str("machine.key", machineKey.ShortString()).
|
||||
Str("node.key", req.NodeKey.ShortString()).
|
||||
Str("generated.hostname", hostname).
|
||||
Msg("Received registration request with empty hostname, generated default")
|
||||
}
|
||||
|
||||
if !regReq.Expiry.IsZero() {
|
||||
nodeToRegister.Node.Expiry = ®Req.Expiry
|
||||
nodeToRegister := types.NewRegisterNode(
|
||||
types.Node{
|
||||
Hostname: hostname,
|
||||
MachineKey: machineKey,
|
||||
NodeKey: req.NodeKey,
|
||||
Hostinfo: validHostinfo,
|
||||
LastSeen: ptr.To(time.Now()),
|
||||
},
|
||||
)
|
||||
|
||||
if !req.Expiry.IsZero() {
|
||||
nodeToRegister.Node.Expiry = &req.Expiry
|
||||
}
|
||||
|
||||
h.state.SetRegistrationCacheEntry(
|
||||
@@ -264,6 +436,8 @@ func (h *Headscale) handleRegisterInteractive(
|
||||
nodeToRegister,
|
||||
)
|
||||
|
||||
log.Info().Msgf("Starting node registration using key: %s", registrationId)
|
||||
|
||||
return &tailcfg.RegisterResponse{
|
||||
AuthURL: h.authProvider.AuthURL(registrationId),
|
||||
}, nil
|
||||
|
||||
3006  hscontrol/auth_test.go  (new file; diff suppressed because it is too large)
@@ -260,7 +260,7 @@ func NewHeadscaleDatabase(
|
||||
log.Error().Err(err).Msg("Error creating route")
|
||||
} else {
|
||||
log.Info().
|
||||
Uint64("node_id", route.NodeID).
|
||||
Uint64("node.id", route.NodeID).
|
||||
Str("prefix", prefix.String()).
|
||||
Msg("Route migrated")
|
||||
}
|
||||
@@ -870,23 +870,23 @@ AND auth_key_id NOT IN (
|
||||
// Copy data directly using SQL
|
||||
dataCopySQL := []string{
|
||||
`INSERT INTO users (id, name, display_name, email, provider_identifier, provider, profile_pic_url, created_at, updated_at, deleted_at)
|
||||
SELECT id, name, display_name, email, provider_identifier, provider, profile_pic_url, created_at, updated_at, deleted_at
|
||||
SELECT id, name, display_name, email, provider_identifier, provider, profile_pic_url, created_at, updated_at, deleted_at
|
||||
FROM users_old`,
|
||||
|
||||
`INSERT INTO pre_auth_keys (id, key, user_id, reusable, ephemeral, used, tags, expiration, created_at)
|
||||
SELECT id, key, user_id, reusable, ephemeral, used, tags, expiration, created_at
|
||||
SELECT id, key, user_id, reusable, ephemeral, used, tags, expiration, created_at
|
||||
FROM pre_auth_keys_old`,
|
||||
|
||||
`INSERT INTO api_keys (id, prefix, hash, expiration, last_seen, created_at)
|
||||
SELECT id, prefix, hash, expiration, last_seen, created_at
|
||||
SELECT id, prefix, hash, expiration, last_seen, created_at
|
||||
FROM api_keys_old`,
|
||||
|
||||
`INSERT INTO nodes (id, machine_key, node_key, disco_key, endpoints, host_info, ipv4, ipv6, hostname, given_name, user_id, register_method, forced_tags, auth_key_id, last_seen, expiry, approved_routes, created_at, updated_at, deleted_at)
|
||||
SELECT id, machine_key, node_key, disco_key, endpoints, host_info, ipv4, ipv6, hostname, given_name, user_id, register_method, forced_tags, auth_key_id, last_seen, expiry, approved_routes, created_at, updated_at, deleted_at
|
||||
SELECT id, machine_key, node_key, disco_key, endpoints, host_info, ipv4, ipv6, hostname, given_name, user_id, register_method, forced_tags, auth_key_id, last_seen, expiry, approved_routes, created_at, updated_at, deleted_at
|
||||
FROM nodes_old`,
|
||||
|
||||
`INSERT INTO policies (id, data, created_at, updated_at, deleted_at)
|
||||
SELECT id, data, created_at, updated_at, deleted_at
|
||||
SELECT id, data, created_at, updated_at, deleted_at
|
||||
FROM policies_old`,
|
||||
}
|
||||
|
||||
@@ -1131,7 +1131,7 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
|
||||
}
|
||||
|
||||
for _, migrationID := range migrationIDs {
|
||||
log.Trace().Str("migration_id", migrationID).Msg("Running migration")
|
||||
log.Trace().Caller().Str("migration_id", migrationID).Msg("Running migration")
|
||||
needsFKDisabled := migrationsRequiringFKDisabled[migrationID]
|
||||
|
||||
if needsFKDisabled {
|
||||
|
||||
@@ -275,7 +275,7 @@ func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) {
|
||||
return errors.New("backfilling IPs: ip allocator was nil")
|
||||
}
|
||||
|
||||
log.Trace().Msgf("starting to backfill IPs")
|
||||
log.Trace().Caller().Msgf("starting to backfill IPs")
|
||||
|
||||
nodes, err := ListNodes(tx)
|
||||
if err != nil {
|
||||
@@ -283,7 +283,7 @@ func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) {
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
log.Trace().Uint64("node.id", node.ID.Uint64()).Msg("checking if need backfill")
|
||||
log.Trace().Caller().Uint64("node.id", node.ID.Uint64()).Str("node.name", node.Hostname).Msg("IP backfill check started because node found in database")
|
||||
|
||||
changed := false
|
||||
// IPv4 prefix is set, but node ip is missing, alloc
|
||||
|
||||
@@ -13,11 +13,9 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/types/change"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
@@ -34,9 +32,6 @@ var (
|
||||
"node not found in registration cache",
|
||||
)
|
||||
ErrCouldNotConvertNodeInterface = errors.New("failed to convert node interface")
|
||||
ErrDifferentRegisteredUser = errors.New(
|
||||
"node was previously registered with a different user",
|
||||
)
|
||||
)
|
||||
|
||||
// ListPeers returns peers of node, regardless of any Policy or if the node is expired.
|
||||
@@ -260,24 +255,18 @@ func SetLastSeen(tx *gorm.DB, nodeID types.NodeID, lastSeen time.Time) error {
|
||||
}
|
||||
|
||||
// RenameNode takes a Node struct and a new GivenName for the nodes
|
||||
// and renames it. If the name is not unique, it will return an error.
|
||||
// and renames it. Validation should be done in the state layer before calling this function.
|
||||
func RenameNode(tx *gorm.DB,
|
||||
nodeID types.NodeID, newName string,
|
||||
) error {
|
||||
err := util.CheckForFQDNRules(
|
||||
newName,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("renaming node: %w", err)
|
||||
// Check if the new name is unique
|
||||
var count int64
|
||||
if err := tx.Model(&types.Node{}).Where("given_name = ? AND id != ?", newName, nodeID).Count(&count).Error; err != nil {
|
||||
return fmt.Errorf("failed to check name uniqueness: %w", err)
|
||||
}
|
||||
|
||||
uniq, err := isUniqueName(tx, newName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking if name is unique: %w", err)
|
||||
}
|
||||
|
||||
if !uniq {
|
||||
return fmt.Errorf("name is not unique: %s", newName)
|
||||
if count > 0 {
|
||||
return errors.New("name is not unique")
|
||||
}
|
||||
|
||||
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("given_name", newName).Error; err != nil {
|
||||
@@ -333,108 +322,19 @@ func (hsdb *HSDatabase) DeleteEphemeralNode(
|
||||
})
|
||||
}
|
||||
|
||||
// HandleNodeFromAuthPath is called from the OIDC or CLI auth path
|
||||
// with a registrationID to register or reauthenticate a node.
|
||||
// If the node found in the registration cache is not already registered,
|
||||
// it will be registered with the user and the node will be removed from the cache.
|
||||
// If the node is already registered, the expiry will be updated.
|
||||
// The node, and a boolean indicating if it was a new node or not, will be returned.
|
||||
func (hsdb *HSDatabase) HandleNodeFromAuthPath(
|
||||
registrationID types.RegistrationID,
|
||||
userID types.UserID,
|
||||
nodeExpiry *time.Time,
|
||||
registrationMethod string,
|
||||
ipv4 *netip.Addr,
|
||||
ipv6 *netip.Addr,
|
||||
) (*types.Node, change.ChangeSet, error) {
|
||||
var nodeChange change.ChangeSet
|
||||
node, err := Write(hsdb.DB, func(tx *gorm.DB) (*types.Node, error) {
|
||||
if reg, ok := hsdb.regCache.Get(registrationID); ok {
|
||||
if node, _ := GetNodeByNodeKey(tx, reg.Node.NodeKey); node == nil {
|
||||
user, err := GetUserByID(tx, userID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to find user in register node from auth callback, %w",
|
||||
err,
|
||||
)
|
||||
}
|
||||
// RegisterNodeForTest is used only for testing purposes to register a node directly in the database.
|
||||
// Production code should use state.HandleNodeFromAuthPath or state.HandleNodeFromPreAuthKey.
|
||||
func RegisterNodeForTest(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *netip.Addr) (*types.Node, error) {
|
||||
if !testing.Testing() {
|
||||
panic("RegisterNodeForTest can only be called during tests")
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Str("registration_id", registrationID.String()).
|
||||
Str("username", user.Username()).
|
||||
Str("registrationMethod", registrationMethod).
|
||||
Str("expiresAt", fmt.Sprintf("%v", nodeExpiry)).
|
||||
Msg("Registering node from API/CLI or auth callback")
|
||||
|
||||
// TODO(kradalby): This looks quite wrong? why ID 0?
|
||||
// Why not always?
|
||||
// Registration of expired node with different user
|
||||
if reg.Node.ID != 0 &&
|
||||
reg.Node.UserID != user.ID {
|
||||
return nil, ErrDifferentRegisteredUser
|
||||
}
|
||||
|
||||
reg.Node.UserID = user.ID
|
||||
reg.Node.User = *user
|
||||
reg.Node.RegisterMethod = registrationMethod
|
||||
|
||||
if nodeExpiry != nil {
|
||||
reg.Node.Expiry = nodeExpiry
|
||||
}
|
||||
|
||||
node, err := RegisterNode(
|
||||
tx,
|
||||
reg.Node,
|
||||
ipv4, ipv6,
|
||||
)
|
||||
|
||||
if err == nil {
|
||||
hsdb.regCache.Delete(registrationID)
|
||||
}
|
||||
|
||||
// Signal to waiting clients that the machine has been registered.
|
||||
select {
|
||||
case reg.Registered <- node:
|
||||
default:
|
||||
}
|
||||
close(reg.Registered)
|
||||
|
||||
nodeChange = change.NodeAdded(node.ID)
|
||||
|
||||
return node, err
|
||||
} else {
|
||||
// If the node is already registered, this is a refresh.
|
||||
err := NodeSetExpiry(tx, node.ID, *nodeExpiry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nodeChange = change.KeyExpiry(node.ID)
|
||||
|
||||
return node, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, ErrNodeNotFoundRegistrationCache
|
||||
})
|
||||
|
||||
return node, nodeChange, err
|
||||
}
|
||||
|
||||
func (hsdb *HSDatabase) RegisterNode(node types.Node, ipv4 *netip.Addr, ipv6 *netip.Addr) (*types.Node, error) {
|
||||
return Write(hsdb.DB, func(tx *gorm.DB) (*types.Node, error) {
|
||||
return RegisterNode(tx, node, ipv4, ipv6)
|
||||
})
|
||||
}
|
||||
|
||||
// RegisterNode is executed from the CLI to register a new Node using its MachineKey.
|
||||
func RegisterNode(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *netip.Addr) (*types.Node, error) {
|
||||
log.Debug().
|
||||
Str("node", node.Hostname).
|
||||
Str("machine_key", node.MachineKey.ShortString()).
|
||||
Str("node_key", node.NodeKey.ShortString()).
|
||||
Str("user", node.User.Username()).
|
||||
Msg("Registering node")
|
||||
Msg("Registering test node")
|
||||
|
||||
// If a new node is registered with the same machine key, to the same user,
|
||||
// update the existing node.
|
||||
@@ -445,8 +345,13 @@ func RegisterNode(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *netip.Ad
|
||||
node.ID = oldNode.ID
|
||||
node.GivenName = oldNode.GivenName
|
||||
node.ApprovedRoutes = oldNode.ApprovedRoutes
|
||||
ipv4 = oldNode.IPv4
|
||||
ipv6 = oldNode.IPv6
|
||||
// Don't overwrite the provided IPs with old ones when they exist
|
||||
if ipv4 == nil {
|
||||
ipv4 = oldNode.IPv4
|
||||
}
|
||||
if ipv6 == nil {
|
||||
ipv6 = oldNode.IPv6
|
||||
}
|
||||
}
|
||||
|
||||
// If the node exists and it already has IP(s), we just save it
|
||||
@@ -463,7 +368,7 @@ func RegisterNode(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *netip.Ad
|
||||
Str("machine_key", node.MachineKey.ShortString()).
|
||||
Str("node_key", node.NodeKey.ShortString()).
|
||||
Str("user", node.User.Username()).
|
||||
Msg("Node authorized again")
|
||||
Msg("Test node authorized again")
|
||||
|
||||
return &node, nil
|
||||
}
|
||||
@@ -472,7 +377,7 @@ func RegisterNode(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *netip.Ad
|
||||
node.IPv6 = ipv6
|
||||
|
||||
if node.GivenName == "" {
|
||||
givenName, err := ensureUniqueGivenName(tx, node.Hostname)
|
||||
givenName, err := EnsureUniqueGivenName(tx, node.Hostname)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to ensure unique given name: %w", err)
|
||||
}
|
||||
@@ -487,7 +392,7 @@ func RegisterNode(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *netip.Ad
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("node", node.Hostname).
|
||||
Msg("Node registered with the database")
|
||||
Msg("Test node registered with the database")
|
||||
|
||||
return &node, nil
|
||||
}
|
||||
@@ -560,7 +465,8 @@ func isUniqueName(tx *gorm.DB, name string) (bool, error) {
|
||||
return len(nodes) == 0, nil
|
||||
}
|
||||
|
||||
func ensureUniqueGivenName(
|
||||
// EnsureUniqueGivenName generates a unique given name for a node based on its hostname.
|
||||
func EnsureUniqueGivenName(
|
||||
tx *gorm.DB,
|
||||
name string,
|
||||
) (string, error) {
|
||||
@@ -586,41 +492,6 @@ func ensureUniqueGivenName(
|
||||
return givenName, nil
|
||||
}
|
||||
|
||||
// ExpireExpiredNodes checks for nodes that have expired since the last check
|
||||
// and returns a time to be used for the next check, a StateUpdate
|
||||
// containing the expired nodes, and a boolean indicating if any nodes were found.
|
||||
func ExpireExpiredNodes(tx *gorm.DB,
|
||||
lastCheck time.Time,
|
||||
) (time.Time, []change.ChangeSet, bool) {
|
||||
// use the time of the start of the function to ensure we
|
||||
// dont miss some nodes by returning it _after_ we have
|
||||
// checked everything.
|
||||
started := time.Now()
|
||||
|
||||
expired := make([]*tailcfg.PeerChange, 0)
|
||||
var updates []change.ChangeSet
|
||||
|
||||
nodes, err := ListNodes(tx)
|
||||
if err != nil {
|
||||
return time.Unix(0, 0), nil, false
|
||||
}
|
||||
for _, node := range nodes {
|
||||
if node.IsExpired() && node.Expiry.After(lastCheck) {
|
||||
expired = append(expired, &tailcfg.PeerChange{
|
||||
NodeID: tailcfg.NodeID(node.ID),
|
||||
KeyExpiry: node.Expiry,
|
||||
})
|
||||
updates = append(updates, change.KeyExpiry(node.ID))
|
||||
}
|
||||
}
|
||||
|
||||
if len(expired) > 0 {
|
||||
return started, updates, true
|
||||
}
|
||||
|
||||
return started, nil, false
|
||||
}
|
||||
|
||||
// EphemeralGarbageCollector is a garbage collector that will delete nodes after
|
||||
// a certain amount of time.
|
||||
// It is used to delete ephemeral nodes that have disconnected and should be
|
||||
@@ -781,19 +652,23 @@ func (hsdb *HSDatabase) CreateRegisteredNodeForTest(user *types.User, hostname .
|
||||
|
||||
node := hsdb.CreateNodeForTest(user, hostname...)
|
||||
|
||||
err := hsdb.DB.Transaction(func(tx *gorm.DB) error {
|
||||
_, err := RegisterNode(tx, *node, nil, nil)
|
||||
// Allocate IPs for the test node using the database's IP allocator
|
||||
// This is a simplified allocation for testing - in production this would use State.ipAlloc
|
||||
ipv4, ipv6, err := hsdb.allocateTestIPs(node.ID)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to allocate IPs for test node: %v", err))
|
||||
}
|
||||
|
||||
var registeredNode *types.Node
|
||||
err = hsdb.DB.Transaction(func(tx *gorm.DB) error {
|
||||
var err error
|
||||
registeredNode, err = RegisterNodeForTest(tx, *node, ipv4, ipv6)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to register test node: %v", err))
|
||||
}
|
||||
|
||||
registeredNode, err := hsdb.GetNodeByID(node.ID)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to get registered test node: %v", err))
|
||||
}
|
||||
|
||||
return registeredNode
|
||||
}
|
||||
|
||||
@@ -842,3 +717,23 @@ func (hsdb *HSDatabase) CreateRegisteredNodesForTest(user *types.User, count int
|
||||
|
||||
return nodes
|
||||
}
|
||||
|
||||
// allocateTestIPs allocates sequential test IPs for nodes during testing.
func (hsdb *HSDatabase) allocateTestIPs(nodeID types.NodeID) (*netip.Addr, *netip.Addr, error) {
    if !testing.Testing() {
        panic("allocateTestIPs can only be called during tests")
    }

    // Use simple sequential allocation for tests
    // IPv4: 100.64.0.x (where x is nodeID)
    // IPv6: fd7a:115c:a1e0::x (where x is nodeID)

    if nodeID > 254 {
        return nil, nil, fmt.Errorf("test node ID %d too large for simple IP allocation", nodeID)
    }

    ipv4 := netip.AddrFrom4([4]byte{100, 64, 0, byte(nodeID)})
    ipv6 := netip.AddrFrom16([16]byte{0xfd, 0x7a, 0x11, 0x5c, 0xa1, 0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, byte(nodeID)})

    return &ipv4, &ipv6, nil
}
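For reference, a minimal sketch (not part of the diff) of what this fixed allocation scheme yields for a small node ID; the 100.64.0.x and fd7a:115c:a1e0::x byte patterns are taken directly from allocateTestIPs above, and the node ID is illustrative:

package main

import (
    "fmt"
    "net/netip"
)

func main() {
    nodeID := byte(5) // illustrative node ID
    ipv4 := netip.AddrFrom4([4]byte{100, 64, 0, nodeID})
    ipv6 := netip.AddrFrom16([16]byte{0xfd, 0x7a, 0x11, 0x5c, 0xa1, 0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, nodeID})
    fmt.Println(ipv4) // 100.64.0.5
    fmt.Println(ipv6) // fd7a:115c:a1e0::5
}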
|
||||
|
||||
@@ -292,12 +292,57 @@ func TestHeadscale_generateGivenName(t *testing.T) {
|
||||
|
||||
func TestAutoApproveRoutes(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
acl string
|
||||
routes []netip.Prefix
|
||||
want []netip.Prefix
|
||||
want2 []netip.Prefix
|
||||
name string
|
||||
acl string
|
||||
routes []netip.Prefix
|
||||
want []netip.Prefix
|
||||
want2 []netip.Prefix
|
||||
expectChange bool // whether to expect route changes
|
||||
}{
|
||||
{
|
||||
name: "no-auto-approvers-empty-policy",
|
||||
acl: `
|
||||
{
|
||||
"groups": {
|
||||
"group:admins": ["test@"]
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:admins"],
|
||||
"dst": ["group:admins:*"]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
routes: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")},
|
||||
want: []netip.Prefix{}, // Should be empty - no auto-approvers
|
||||
want2: []netip.Prefix{}, // Should be empty - no auto-approvers
|
||||
expectChange: false, // No changes expected
|
||||
},
|
||||
{
|
||||
name: "no-auto-approvers-explicit-empty",
|
||||
acl: `
|
||||
{
|
||||
"groups": {
|
||||
"group:admins": ["test@"]
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:admins"],
|
||||
"dst": ["group:admins:*"]
|
||||
}
|
||||
],
|
||||
"autoApprovers": {
|
||||
"routes": {},
|
||||
"exitNode": []
|
||||
}
|
||||
}`,
|
||||
routes: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")},
|
||||
want: []netip.Prefix{}, // Should be empty - explicitly empty auto-approvers
|
||||
want2: []netip.Prefix{}, // Should be empty - explicitly empty auto-approvers
|
||||
expectChange: false, // No changes expected
|
||||
},
|
||||
{
|
||||
name: "2068-approve-issue-sub-kube",
|
||||
acl: `
|
||||
@@ -316,8 +361,9 @@ func TestAutoApproveRoutes(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}`,
|
||||
routes: []netip.Prefix{netip.MustParsePrefix("10.42.7.0/24")},
|
||||
want: []netip.Prefix{netip.MustParsePrefix("10.42.7.0/24")},
|
||||
routes: []netip.Prefix{netip.MustParsePrefix("10.42.7.0/24")},
|
||||
want: []netip.Prefix{netip.MustParsePrefix("10.42.7.0/24")},
|
||||
expectChange: true, // Routes should be approved
|
||||
},
|
||||
{
|
||||
name: "2068-approve-issue-sub-exit-tag",
|
||||
@@ -361,6 +407,7 @@ func TestAutoApproveRoutes(t *testing.T) {
|
||||
tsaddr.AllIPv4(),
|
||||
tsaddr.AllIPv6(),
|
||||
},
|
||||
expectChange: true, // Routes should be approved
|
||||
},
|
||||
}
|
||||
|
||||
@@ -421,28 +468,40 @@ func TestAutoApproveRoutes(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, pm)
|
||||
|
||||
changed1 := policy.AutoApproveRoutes(pm, &node)
|
||||
assert.True(t, changed1)
|
||||
newRoutes1, changed1 := policy.ApproveRoutesWithPolicy(pm, node.View(), node.ApprovedRoutes, tt.routes)
|
||||
assert.Equal(t, tt.expectChange, changed1)
|
||||
|
||||
err = adb.DB.Save(&node).Error
|
||||
require.NoError(t, err)
|
||||
if changed1 {
|
||||
err = SetApprovedRoutes(adb.DB, node.ID, newRoutes1)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
_ = policy.AutoApproveRoutes(pm, &nodeTagged)
|
||||
|
||||
err = adb.DB.Save(&nodeTagged).Error
|
||||
require.NoError(t, err)
|
||||
newRoutes2, changed2 := policy.ApproveRoutesWithPolicy(pm, nodeTagged.View(), node.ApprovedRoutes, tt.routes)
|
||||
if changed2 {
|
||||
err = SetApprovedRoutes(adb.DB, nodeTagged.ID, newRoutes2)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
node1ByID, err := adb.GetNodeByID(1)
|
||||
require.NoError(t, err)
|
||||
|
||||
if diff := cmp.Diff(tt.want, node1ByID.SubnetRoutes(), util.Comparers...); diff != "" {
|
||||
// For empty auto-approvers tests, handle nil vs empty slice comparison
|
||||
expectedRoutes1 := tt.want
|
||||
if len(expectedRoutes1) == 0 {
|
||||
expectedRoutes1 = nil
|
||||
}
|
||||
if diff := cmp.Diff(expectedRoutes1, node1ByID.SubnetRoutes(), util.Comparers...); diff != "" {
|
||||
t.Errorf("unexpected enabled routes (-want +got):\n%s", diff)
|
||||
}
|
||||
|
||||
node2ByID, err := adb.GetNodeByID(2)
|
||||
require.NoError(t, err)
|
||||
|
||||
if diff := cmp.Diff(tt.want2, node2ByID.SubnetRoutes(), util.Comparers...); diff != "" {
|
||||
expectedRoutes2 := tt.want2
|
||||
if len(expectedRoutes2) == 0 {
|
||||
expectedRoutes2 = nil
|
||||
}
|
||||
if diff := cmp.Diff(expectedRoutes2, node2ByID.SubnetRoutes(), util.Comparers...); diff != "" {
|
||||
t.Errorf("unexpected enabled routes (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
@@ -620,11 +679,11 @@ func TestRenameNode(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.DB.Transaction(func(tx *gorm.DB) error {
|
||||
_, err := RegisterNode(tx, node, nil, nil)
|
||||
_, err := RegisterNodeForTest(tx, node, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = RegisterNode(tx, node2, nil, nil)
|
||||
_, err = RegisterNodeForTest(tx, node2, nil, nil)
|
||||
|
||||
return err
|
||||
})
|
||||
@@ -721,11 +780,11 @@ func TestListPeers(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.DB.Transaction(func(tx *gorm.DB) error {
|
||||
_, err := RegisterNode(tx, node1, nil, nil)
|
||||
_, err := RegisterNodeForTest(tx, node1, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = RegisterNode(tx, node2, nil, nil)
|
||||
_, err = RegisterNodeForTest(tx, node2, nil, nil)
|
||||
|
||||
return err
|
||||
})
|
||||
@@ -739,13 +798,13 @@ func TestListPeers(t *testing.T) {
|
||||
// No parameter means no filter, should return all peers
|
||||
nodes, err = db.ListPeers(1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, len(nodes))
|
||||
assert.Len(t, nodes, 1)
|
||||
assert.Equal(t, "test2", nodes[0].Hostname)
|
||||
|
||||
// Empty node list should return all peers
|
||||
nodes, err = db.ListPeers(1, types.NodeIDs{}...)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, len(nodes))
|
||||
assert.Len(t, nodes, 1)
|
||||
assert.Equal(t, "test2", nodes[0].Hostname)
|
||||
|
||||
// No match in IDs should return empty list and no error
|
||||
@@ -756,13 +815,13 @@ func TestListPeers(t *testing.T) {
|
||||
// Partial match in IDs
|
||||
nodes, err = db.ListPeers(1, types.NodeIDs{2, 3}...)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, len(nodes))
|
||||
assert.Len(t, nodes, 1)
|
||||
assert.Equal(t, "test2", nodes[0].Hostname)
|
||||
|
||||
// Several matched IDs, but node ID is still filtered out
|
||||
nodes, err = db.ListPeers(1, types.NodeIDs{1, 2, 3}...)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, len(nodes))
|
||||
assert.Len(t, nodes, 1)
|
||||
assert.Equal(t, "test2", nodes[0].Hostname)
|
||||
}
|
||||
|
||||
@@ -806,11 +865,11 @@ func TestListNodes(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.DB.Transaction(func(tx *gorm.DB) error {
|
||||
_, err := RegisterNode(tx, node1, nil, nil)
|
||||
_, err := RegisterNodeForTest(tx, node1, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = RegisterNode(tx, node2, nil, nil)
|
||||
_, err = RegisterNodeForTest(tx, node2, nil, nil)
|
||||
|
||||
return err
|
||||
})
|
||||
@@ -824,14 +883,14 @@ func TestListNodes(t *testing.T) {
|
||||
// No parameter means no filter, should return all nodes
|
||||
nodes, err = db.ListNodes()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, len(nodes))
|
||||
assert.Len(t, nodes, 2)
|
||||
assert.Equal(t, "test1", nodes[0].Hostname)
|
||||
assert.Equal(t, "test2", nodes[1].Hostname)
|
||||
|
||||
// Empty node list should return all nodes
|
||||
nodes, err = db.ListNodes(types.NodeIDs{}...)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, len(nodes))
|
||||
assert.Len(t, nodes, 2)
|
||||
assert.Equal(t, "test1", nodes[0].Hostname)
|
||||
assert.Equal(t, "test2", nodes[1].Hostname)
|
||||
|
||||
@@ -843,13 +902,13 @@ func TestListNodes(t *testing.T) {
|
||||
// Partial match in IDs
|
||||
nodes, err = db.ListNodes(types.NodeIDs{2, 3}...)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, len(nodes))
|
||||
assert.Len(t, nodes, 1)
|
||||
assert.Equal(t, "test2", nodes[0].Hostname)
|
||||
|
||||
// Several matched IDs
|
||||
nodes, err = db.ListNodes(types.NodeIDs{1, 2, 3}...)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, len(nodes))
|
||||
assert.Len(t, nodes, 2)
|
||||
assert.Equal(t, "test1", nodes[0].Hostname)
|
||||
assert.Equal(t, "test2", nodes[1].Hostname)
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -47,8 +48,9 @@ func CreatePreAuthKey(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remove duplicates
|
||||
// Remove duplicates and sort for consistency
|
||||
aclTags = set.SetOf(aclTags).Slice()
|
||||
slices.Sort(aclTags)
|
||||
|
||||
// TODO(kradalby): factor out and create a reusable tag validation,
|
||||
// check if there is one in Tailscale's lib.
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
)
|
||||
|
||||
// Got from https://github.com/xdg-go/strum/blob/main/types.go
|
||||
var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
|
||||
var textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]()
|
||||
|
||||
func isTextUnmarshaler(rv reflect.Value) bool {
|
||||
return rv.Type().Implements(textUnmarshalerType)
|
||||
|
||||
@@ -198,19 +198,20 @@ func ListNodesByUser(tx *gorm.DB, uid types.UserID) (types.Nodes, error) {
|
||||
}
|
||||
|
||||
// AssignNodeToUser assigns a Node to a user.
|
||||
// Note: Validation should be done in the state layer before calling this function.
|
||||
func AssignNodeToUser(tx *gorm.DB, nodeID types.NodeID, uid types.UserID) error {
|
||||
node, err := GetNodeByID(tx, nodeID)
|
||||
if err != nil {
|
||||
return err
|
||||
// Check if the user exists
|
||||
var userExists bool
|
||||
if err := tx.Model(&types.User{}).Select("count(*) > 0").Where("id = ?", uid).Find(&userExists).Error; err != nil {
|
||||
return fmt.Errorf("failed to check if user exists: %w", err)
|
||||
}
|
||||
user, err := GetUserByID(tx, uid)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
if !userExists {
|
||||
return ErrUserNotFound
|
||||
}
|
||||
node.User = *user
|
||||
node.UserID = user.ID
|
||||
if result := tx.Save(&node); result.Error != nil {
|
||||
return result.Error
|
||||
|
||||
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("user_id", uid).Error; err != nil {
|
||||
return fmt.Errorf("failed to assign node to user: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -4,58 +4,83 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/arl/statsviz"
|
||||
"github.com/juanfont/headscale/hscontrol/mapper"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tsweb"
|
||||
)
|
||||
|
||||
func (h *Headscale) debugHTTPServer() *http.Server {
|
||||
debugMux := http.NewServeMux()
|
||||
debug := tsweb.Debugger(debugMux)
|
||||
|
||||
// State overview endpoint
|
||||
debug.Handle("overview", "State overview", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check Accept header to determine response format
|
||||
acceptHeader := r.Header.Get("Accept")
|
||||
wantsJSON := strings.Contains(acceptHeader, "application/json")
|
||||
|
||||
if wantsJSON {
|
||||
overview := h.state.DebugOverviewJSON()
|
||||
overviewJSON, err := json.MarshalIndent(overview, "", " ")
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(overviewJSON)
|
||||
} else {
|
||||
// Default to text/plain for backward compatibility
|
||||
overview := h.state.DebugOverview()
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(overview))
|
||||
}
|
||||
}))
|
||||
|
||||
// Configuration endpoint
|
||||
debug.Handle("config", "Current configuration", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
config, err := json.MarshalIndent(h.cfg, "", " ")
|
||||
config := h.state.DebugConfig()
|
||||
configJSON, err := json.MarshalIndent(config, "", " ")
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(config)
|
||||
w.Write(configJSON)
|
||||
}))
|
||||
debug.Handle("policy", "Current policy", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
switch h.cfg.Policy.Mode {
|
||||
case types.PolicyModeDB:
|
||||
p, err := h.state.GetPolicy()
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(p.Data))
|
||||
case types.PolicyModeFile:
|
||||
// Read the file directly for debug purposes
|
||||
absPath := util.AbsolutePathFromConfigPath(h.cfg.Policy.Path)
|
||||
pol, err := os.ReadFile(absPath)
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(pol)
|
||||
default:
|
||||
httpError(w, fmt.Errorf("unsupported policy mode: %s", h.cfg.Policy.Mode))
|
||||
}
|
||||
}))
|
||||
debug.Handle("filter", "Current filter", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
filter, _ := h.state.Filter()
|
||||
|
||||
// Policy endpoint
|
||||
debug.Handle("policy", "Current policy", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
policy, err := h.state.DebugPolicy()
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
// Policy data is HuJSON, which is a superset of JSON
|
||||
// Set content type based on Accept header preference
|
||||
acceptHeader := r.Header.Get("Accept")
|
||||
if strings.Contains(acceptHeader, "application/json") {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
} else {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(policy))
|
||||
}))
|
||||
|
||||
// Filter rules endpoint
|
||||
debug.Handle("filter", "Current filter rules", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
filter, err := h.state.DebugFilter()
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
filterJSON, err := json.MarshalIndent(filter, "", " ")
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
@@ -65,25 +90,11 @@ func (h *Headscale) debugHTTPServer() *http.Server {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(filterJSON)
|
||||
}))
|
||||
debug.Handle("ssh", "SSH Policy per node", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
nodes, err := h.state.ListNodes()
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
sshPol := make(map[string]*tailcfg.SSHPolicy)
|
||||
for _, node := range nodes {
|
||||
pol, err := h.state.SSHPolicy(node.View())
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
sshPol[fmt.Sprintf("id:%d hostname:%s givenname:%s", node.ID, node.Hostname, node.GivenName)] = pol
|
||||
}
|
||||
|
||||
sshJSON, err := json.MarshalIndent(sshPol, "", " ")
|
||||
// SSH policies endpoint
|
||||
debug.Handle("ssh", "SSH policies per node", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
sshPolicies := h.state.DebugSSHPolicies()
|
||||
sshJSON, err := json.MarshalIndent(sshPolicies, "", " ")
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
@@ -92,33 +103,169 @@ func (h *Headscale) debugHTTPServer() *http.Server {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(sshJSON)
|
||||
}))
|
||||
debug.Handle("derpmap", "Current DERPMap", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
dm := h.state.DERPMap()
|
||||
|
||||
dmJSON, err := json.MarshalIndent(dm, "", " ")
|
||||
// DERP map endpoint
|
||||
debug.Handle("derp", "DERP map configuration", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check Accept header to determine response format
|
||||
acceptHeader := r.Header.Get("Accept")
|
||||
wantsJSON := strings.Contains(acceptHeader, "application/json")
|
||||
|
||||
if wantsJSON {
|
||||
derpInfo := h.state.DebugDERPJSON()
|
||||
derpJSON, err := json.MarshalIndent(derpInfo, "", " ")
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(derpJSON)
|
||||
} else {
|
||||
// Default to text/plain for backward compatibility
|
||||
derpInfo := h.state.DebugDERPMap()
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(derpInfo))
|
||||
}
|
||||
}))
|
||||
|
||||
// NodeStore endpoint
|
||||
debug.Handle("nodestore", "NodeStore information", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check Accept header to determine response format
|
||||
acceptHeader := r.Header.Get("Accept")
|
||||
wantsJSON := strings.Contains(acceptHeader, "application/json")
|
||||
|
||||
if wantsJSON {
|
||||
nodeStoreNodes := h.state.DebugNodeStoreJSON()
|
||||
nodeStoreJSON, err := json.MarshalIndent(nodeStoreNodes, "", " ")
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(nodeStoreJSON)
|
||||
} else {
|
||||
// Default to text/plain for backward compatibility
|
||||
nodeStoreInfo := h.state.DebugNodeStore()
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(nodeStoreInfo))
|
||||
}
|
||||
}))
|
||||
|
||||
// Registration cache endpoint
|
||||
debug.Handle("registration-cache", "Registration cache information", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
cacheInfo := h.state.DebugRegistrationCache()
|
||||
cacheJSON, err := json.MarshalIndent(cacheInfo, "", " ")
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(dmJSON)
|
||||
w.Write(cacheJSON)
|
||||
}))
|
||||
debug.Handle("registration-cache", "Pending registrations", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// TODO(kradalby): This should be replaced with a proper state method that returns registration info
|
||||
|
||||
// Routes endpoint
|
||||
debug.Handle("routes", "Primary routes", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check Accept header to determine response format
|
||||
acceptHeader := r.Header.Get("Accept")
|
||||
wantsJSON := strings.Contains(acceptHeader, "application/json")
|
||||
|
||||
if wantsJSON {
|
||||
routes := h.state.DebugRoutes()
|
||||
routesJSON, err := json.MarshalIndent(routes, "", " ")
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(routesJSON)
|
||||
} else {
|
||||
// Default to text/plain for backward compatibility
|
||||
routes := h.state.DebugRoutesString()
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(routes))
|
||||
}
|
||||
}))
|
||||
|
||||
// Policy manager endpoint
|
||||
debug.Handle("policy-manager", "Policy manager state", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check Accept header to determine response format
|
||||
acceptHeader := r.Header.Get("Accept")
|
||||
wantsJSON := strings.Contains(acceptHeader, "application/json")
|
||||
|
||||
if wantsJSON {
|
||||
policyManagerInfo := h.state.DebugPolicyManagerJSON()
|
||||
policyManagerJSON, err := json.MarshalIndent(policyManagerInfo, "", " ")
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(policyManagerJSON)
|
||||
} else {
|
||||
// Default to text/plain for backward compatibility
|
||||
policyManagerInfo := h.state.DebugPolicyManager()
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(policyManagerInfo))
|
||||
}
|
||||
}))
|
||||
|
||||
debug.Handle("mapresponses", "Map responses for all nodes", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
res, err := h.mapBatcher.DebugMapResponses()
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
if res == nil {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH not set"))
|
||||
return
|
||||
}
|
||||
|
||||
resJSON, err := json.MarshalIndent(res, "", " ")
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte("{}")) // For now, return empty object
|
||||
w.Write(resJSON)
|
||||
}))
|
||||
debug.Handle("routes", "Routes", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(h.state.PrimaryRoutesString()))
|
||||
}))
|
||||
debug.Handle("policy-manager", "Policy Manager", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(h.state.PolicyDebugString()))
|
||||
|
||||
// Batcher endpoint
|
||||
debug.Handle("batcher", "Batcher connected nodes", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check Accept header to determine response format
|
||||
acceptHeader := r.Header.Get("Accept")
|
||||
wantsJSON := strings.Contains(acceptHeader, "application/json")
|
||||
|
||||
if wantsJSON {
|
||||
batcherInfo := h.debugBatcherJSON()
|
||||
|
||||
batcherJSON, err := json.MarshalIndent(batcherInfo, "", " ")
|
||||
if err != nil {
|
||||
httpError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(batcherJSON)
|
||||
} else {
|
||||
// Default to text/plain for backward compatibility
|
||||
batcherInfo := h.debugBatcher()
|
||||
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(batcherInfo))
|
||||
}
|
||||
}))
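All of the handlers above negotiate their output format on the Accept header. A minimal sketch of calling one of them from Go; the /debug/overview path follows from the handler name registered above, while the 127.0.0.1:9090 listener address is an assumption and depends on how the debug server is configured in your deployment:

package main

import (
    "fmt"
    "io"
    "net/http"
)

func main() {
    // Assumed debug listener address; adjust to your deployment.
    req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:9090/debug/overview", nil)
    if err != nil {
        panic(err)
    }
    req.Header.Set("Accept", "application/json") // drop this header for the plain-text view

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    fmt.Println(string(body))
}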
|
||||
|
||||
err := statsviz.Register(debugMux)
|
||||
@@ -138,3 +285,124 @@ func (h *Headscale) debugHTTPServer() *http.Server {
|
||||
|
||||
return debugHTTPServer
|
||||
}
|
||||
|
||||
// debugBatcher returns debug information about the batcher's connected nodes.
func (h *Headscale) debugBatcher() string {
    var sb strings.Builder
    sb.WriteString("=== Batcher Connected Nodes ===\n\n")

    totalNodes := 0
    connectedCount := 0

    // Collect nodes and sort them by ID
    type nodeStatus struct {
        id                types.NodeID
        connected         bool
        activeConnections int
    }

    var nodes []nodeStatus

    // Try to get detailed debug info if we have a LockFreeBatcher
    if batcher, ok := h.mapBatcher.(*mapper.LockFreeBatcher); ok {
        debugInfo := batcher.Debug()
        for nodeID, info := range debugInfo {
            nodes = append(nodes, nodeStatus{
                id:                nodeID,
                connected:         info.Connected,
                activeConnections: info.ActiveConnections,
            })
            totalNodes++
            if info.Connected {
                connectedCount++
            }
        }
    } else {
        // Fallback to basic connection info
        connectedMap := h.mapBatcher.ConnectedMap()
        connectedMap.Range(func(nodeID types.NodeID, connected bool) bool {
            nodes = append(nodes, nodeStatus{
                id:                nodeID,
                connected:         connected,
                activeConnections: 0,
            })
            totalNodes++
            if connected {
                connectedCount++
            }
            return true
        })
    }

    // Sort by node ID
    for i := 0; i < len(nodes); i++ {
        for j := i + 1; j < len(nodes); j++ {
            if nodes[i].id > nodes[j].id {
                nodes[i], nodes[j] = nodes[j], nodes[i]
            }
        }
    }

    // Output sorted nodes
    for _, node := range nodes {
        status := "disconnected"
        if node.connected {
            status = "connected"
        }

        if node.activeConnections > 0 {
            sb.WriteString(fmt.Sprintf("Node %d:\t%s (%d connections)\n", node.id, status, node.activeConnections))
        } else {
            sb.WriteString(fmt.Sprintf("Node %d:\t%s\n", node.id, status))
        }
    }

    sb.WriteString(fmt.Sprintf("\nSummary: %d connected, %d total\n", connectedCount, totalNodes))

    return sb.String()
}
|
||||
|
||||
// DebugBatcherInfo represents batcher connection information in a structured format.
type DebugBatcherInfo struct {
    ConnectedNodes map[string]DebugBatcherNodeInfo `json:"connected_nodes"` // NodeID -> node connection info
    TotalNodes     int                             `json:"total_nodes"`
}

// DebugBatcherNodeInfo represents connection information for a single node.
type DebugBatcherNodeInfo struct {
    Connected         bool `json:"connected"`
    ActiveConnections int  `json:"active_connections"`
}

// debugBatcherJSON returns structured debug information about the batcher's connected nodes.
func (h *Headscale) debugBatcherJSON() DebugBatcherInfo {
    info := DebugBatcherInfo{
        ConnectedNodes: make(map[string]DebugBatcherNodeInfo),
        TotalNodes:     0,
    }

    // Try to get detailed debug info if we have a LockFreeBatcher
    if batcher, ok := h.mapBatcher.(*mapper.LockFreeBatcher); ok {
        debugInfo := batcher.Debug()
        for nodeID, debugData := range debugInfo {
            info.ConnectedNodes[fmt.Sprintf("%d", nodeID)] = DebugBatcherNodeInfo{
                Connected:         debugData.Connected,
                ActiveConnections: debugData.ActiveConnections,
            }
            info.TotalNodes++
        }
    } else {
        // Fallback to basic connection info
        connectedMap := h.mapBatcher.ConnectedMap()
        connectedMap.Range(func(nodeID types.NodeID, connected bool) bool {
            info.ConnectedNodes[fmt.Sprintf("%d", nodeID)] = DebugBatcherNodeInfo{
                Connected:         connected,
                ActiveConnections: 0,
            }
            info.TotalNodes++
            return true
        })
    }

    return info
}
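A minimal sketch (illustrative node IDs and counts, same file so encoding/json and the types above are in scope) of the JSON shape this produces, derived from the struct tags above:

info := DebugBatcherInfo{
    ConnectedNodes: map[string]DebugBatcherNodeInfo{
        "1": {Connected: true, ActiveConnections: 2},
        "2": {Connected: false, ActiveConnections: 0},
    },
    TotalNodes: 2,
}
out, _ := json.MarshalIndent(info, "", "  ")
// string(out) is along the lines of:
// {
//   "connected_nodes": {
//     "1": {"connected": true, "active_connections": 2},
//     "2": {"connected": false, "active_connections": 0}
//   },
//   "total_nodes": 2
// }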
|
||||
|
||||
@@ -1,16 +1,22 @@
|
||||
package derp
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"maps"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
"gopkg.in/yaml.v3"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
@@ -76,56 +82,88 @@ func mergeDERPMaps(derpMaps []*tailcfg.DERPMap) *tailcfg.DERPMap {
|
||||
maps.Copy(result.Regions, derpMap.Regions)
|
||||
}
|
||||
|
||||
for id, region := range result.Regions {
|
||||
if region == nil {
|
||||
delete(result.Regions, id)
|
||||
}
|
||||
}
|
||||
|
||||
return &result
|
||||
}
|
||||
|
||||
func GetDERPMap(cfg types.DERPConfig) *tailcfg.DERPMap {
|
||||
func GetDERPMap(cfg types.DERPConfig) (*tailcfg.DERPMap, error) {
|
||||
var derpMaps []*tailcfg.DERPMap
|
||||
if cfg.DERPMap != nil {
|
||||
derpMaps = append(derpMaps, cfg.DERPMap)
|
||||
}
|
||||
|
||||
for _, path := range cfg.Paths {
|
||||
log.Debug().
|
||||
Str("func", "GetDERPMap").
|
||||
Str("path", path).
|
||||
Msg("Loading DERPMap from path")
|
||||
derpMap, err := loadDERPMapFromPath(path)
|
||||
for _, addr := range cfg.URLs {
|
||||
derpMap, err := loadDERPMapFromURL(addr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "GetDERPMap").
|
||||
Str("path", path).
|
||||
Err(err).
|
||||
Msg("Could not load DERP map from path")
|
||||
|
||||
break
|
||||
return nil, err
|
||||
}
|
||||
|
||||
derpMaps = append(derpMaps, derpMap)
|
||||
}
|
||||
|
||||
for _, addr := range cfg.URLs {
|
||||
derpMap, err := loadDERPMapFromURL(addr)
|
||||
log.Debug().
|
||||
Str("func", "GetDERPMap").
|
||||
Str("url", addr.String()).
|
||||
Msg("Loading DERPMap from path")
|
||||
for _, path := range cfg.Paths {
|
||||
derpMap, err := loadDERPMapFromPath(path)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "GetDERPMap").
|
||||
Str("url", addr.String()).
|
||||
Err(err).
|
||||
Msg("Could not load DERP map from path")
|
||||
|
||||
break
|
||||
return nil, err
|
||||
}
|
||||
|
||||
derpMaps = append(derpMaps, derpMap)
|
||||
}
|
||||
|
||||
derpMap := mergeDERPMaps(derpMaps)
|
||||
shuffleDERPMap(derpMap)
|
||||
|
||||
log.Trace().Interface("derpMap", derpMap).Msg("DERPMap loaded")
|
||||
|
||||
return derpMap
|
||||
return derpMap, nil
|
||||
}
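A minimal sketch of the call-site shape implied by the signature change above (GetDERPMap now returns an error instead of logging and skipping bad sources); cfg.DERP as the field name is an assumption about the surrounding config struct:

derpMap, err := derp.GetDERPMap(cfg.DERP) // assumed field name on the server config
if err != nil {
    log.Fatal().Err(err).Msg("failed to load DERP map")
}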
|
||||
|
||||
func shuffleDERPMap(dm *tailcfg.DERPMap) {
    if dm == nil || len(dm.Regions) == 0 {
        return
    }

    for id, region := range dm.Regions {
        if len(region.Nodes) == 0 {
            continue
        }

        dm.Regions[id] = shuffleRegionNoClone(region)
    }
}

var crc64Table = crc64.MakeTable(crc64.ISO)

var (
    derpRandomOnce sync.Once
    derpRandomInst *rand.Rand
    derpRandomMu   sync.Mutex
)

func derpRandom() *rand.Rand {
    derpRandomMu.Lock()
    defer derpRandomMu.Unlock()

    derpRandomOnce.Do(func() {
        seed := cmp.Or(viper.GetString("dns.base_domain"), time.Now().String())
        rnd := rand.New(rand.NewSource(0))
        rnd.Seed(int64(crc64.Checksum([]byte(seed), crc64Table)))
        derpRandomInst = rnd
    })
    return derpRandomInst
}

func resetDerpRandomForTesting() {
    derpRandomMu.Lock()
    defer derpRandomMu.Unlock()
    derpRandomOnce = sync.Once{}
    derpRandomInst = nil
}

func shuffleRegionNoClone(r *tailcfg.DERPRegion) *tailcfg.DERPRegion {
    derpRandom().Shuffle(len(r.Nodes), reflect.Swapper(r.Nodes))
    return r
}
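A minimal, self-contained sketch of the seeding behaviour above: the shuffle order is a pure function of dns.base_domain, so two servers that share that setting see DERP nodes in the same order; "example.com" is an illustrative value:

package main

import (
    "fmt"
    "hash/crc64"
    "math/rand"
)

func main() {
    table := crc64.MakeTable(crc64.ISO)
    seed := int64(crc64.Checksum([]byte("example.com"), table))

    a := rand.New(rand.NewSource(seed))
    b := rand.New(rand.NewSource(seed))

    fmt.Println(a.Perm(4)) // both sources yield the identical permutation,
    fmt.Println(b.Perm(4)) // mirroring how node order within a region is fixed per base domain
}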
|
||||
|
||||
283  hscontrol/derp/derp_test.go  (new file)
@@ -0,0 +1,283 @@
|
||||
package derp
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/spf13/viper"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
func TestShuffleDERPMapDeterministic(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
baseDomain string
|
||||
derpMap *tailcfg.DERPMap
|
||||
expected *tailcfg.DERPMap
|
||||
}{
|
||||
{
|
||||
name: "single region with 4 nodes",
|
||||
baseDomain: "test1.example.com",
|
||||
derpMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
1: {
|
||||
RegionID: 1,
|
||||
RegionCode: "nyc",
|
||||
RegionName: "New York City",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "1f", RegionID: 1, HostName: "derp1f.tailscale.com"},
|
||||
{Name: "1g", RegionID: 1, HostName: "derp1g.tailscale.com"},
|
||||
{Name: "1h", RegionID: 1, HostName: "derp1h.tailscale.com"},
|
||||
{Name: "1i", RegionID: 1, HostName: "derp1i.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
1: {
|
||||
RegionID: 1,
|
||||
RegionCode: "nyc",
|
||||
RegionName: "New York City",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "1g", RegionID: 1, HostName: "derp1g.tailscale.com"},
|
||||
{Name: "1f", RegionID: 1, HostName: "derp1f.tailscale.com"},
|
||||
{Name: "1i", RegionID: 1, HostName: "derp1i.tailscale.com"},
|
||||
{Name: "1h", RegionID: 1, HostName: "derp1h.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple regions with nodes",
|
||||
baseDomain: "test2.example.com",
|
||||
derpMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
10: {
|
||||
RegionID: 10,
|
||||
RegionCode: "sea",
|
||||
RegionName: "Seattle",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "10b", RegionID: 10, HostName: "derp10b.tailscale.com"},
|
||||
{Name: "10c", RegionID: 10, HostName: "derp10c.tailscale.com"},
|
||||
{Name: "10d", RegionID: 10, HostName: "derp10d.tailscale.com"},
|
||||
},
|
||||
},
|
||||
2: {
|
||||
RegionID: 2,
|
||||
RegionCode: "sfo",
|
||||
RegionName: "San Francisco",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "2d", RegionID: 2, HostName: "derp2d.tailscale.com"},
|
||||
{Name: "2e", RegionID: 2, HostName: "derp2e.tailscale.com"},
|
||||
{Name: "2f", RegionID: 2, HostName: "derp2f.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
10: {
|
||||
RegionID: 10,
|
||||
RegionCode: "sea",
|
||||
RegionName: "Seattle",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "10b", RegionID: 10, HostName: "derp10b.tailscale.com"},
|
||||
{Name: "10c", RegionID: 10, HostName: "derp10c.tailscale.com"},
|
||||
{Name: "10d", RegionID: 10, HostName: "derp10d.tailscale.com"},
|
||||
},
|
||||
},
|
||||
2: {
|
||||
RegionID: 2,
|
||||
RegionCode: "sfo",
|
||||
RegionName: "San Francisco",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "2f", RegionID: 2, HostName: "derp2f.tailscale.com"},
|
||||
{Name: "2e", RegionID: 2, HostName: "derp2e.tailscale.com"},
|
||||
{Name: "2d", RegionID: 2, HostName: "derp2d.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "large region with many nodes",
|
||||
baseDomain: "test3.example.com",
|
||||
derpMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
4: {
|
||||
RegionID: 4,
|
||||
RegionCode: "fra",
|
||||
RegionName: "Frankfurt",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
|
||||
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
|
||||
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
|
||||
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
4: {
|
||||
RegionID: 4,
|
||||
RegionCode: "fra",
|
||||
RegionName: "Frankfurt",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
|
||||
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
|
||||
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
|
||||
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "same region different base domain",
|
||||
baseDomain: "different.example.com",
|
||||
derpMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
4: {
|
||||
RegionID: 4,
|
||||
RegionCode: "fra",
|
||||
RegionName: "Frankfurt",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
|
||||
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
|
||||
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
|
||||
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
4: {
|
||||
RegionID: 4,
|
||||
RegionCode: "fra",
|
||||
RegionName: "Frankfurt",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
|
||||
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
|
||||
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
|
||||
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
viper.Set("dns.base_domain", tt.baseDomain)
|
||||
defer viper.Reset()
|
||||
resetDerpRandomForTesting()
|
||||
|
||||
testMap := tt.derpMap.View().AsStruct()
|
||||
shuffleDERPMap(testMap)
|
||||
|
||||
if diff := cmp.Diff(tt.expected, testMap); diff != "" {
|
||||
t.Errorf("Shuffled DERP map doesn't match expected (-expected +actual):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestShuffleDERPMapEdgeCases(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
derpMap *tailcfg.DERPMap
|
||||
}{
|
||||
{
|
||||
name: "nil derp map",
|
||||
derpMap: nil,
|
||||
},
|
||||
{
|
||||
name: "empty derp map",
|
||||
derpMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "region with no nodes",
|
||||
derpMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
1: {
|
||||
RegionID: 1,
|
||||
RegionCode: "empty",
|
||||
RegionName: "Empty Region",
|
||||
Nodes: []*tailcfg.DERPNode{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "region with single node",
|
||||
derpMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
1: {
|
||||
RegionID: 1,
|
||||
RegionCode: "single",
|
||||
RegionName: "Single Node Region",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "1a", RegionID: 1, HostName: "derp1a.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
shuffleDERPMap(tt.derpMap)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestShuffleDERPMapWithoutBaseDomain(t *testing.T) {
|
||||
viper.Reset()
|
||||
resetDerpRandomForTesting()
|
||||
|
||||
derpMap := &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
1: {
|
||||
RegionID: 1,
|
||||
RegionCode: "test",
|
||||
RegionName: "Test Region",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "1a", RegionID: 1, HostName: "derp1a.test.com"},
|
||||
{Name: "1b", RegionID: 1, HostName: "derp1b.test.com"},
|
||||
{Name: "1c", RegionID: 1, HostName: "derp1c.test.com"},
|
||||
{Name: "1d", RegionID: 1, HostName: "derp1d.test.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
original := derpMap.View().AsStruct()
|
||||
shuffleDERPMap(derpMap)
|
||||
|
||||
if len(derpMap.Regions) != 1 || len(derpMap.Regions[1].Nodes) != 4 {
|
||||
t.Error("Shuffle corrupted DERP map structure")
|
||||
}
|
||||
|
||||
originalNodes := make(map[string]bool)
|
||||
for _, node := range original.Regions[1].Nodes {
|
||||
originalNodes[node.Name] = true
|
||||
}
|
||||
|
||||
shuffledNodes := make(map[string]bool)
|
||||
for _, node := range derpMap.Regions[1].Nodes {
|
||||
shuffledNodes[node.Name] = true
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(originalNodes, shuffledNodes); diff != "" {
|
||||
t.Errorf("Shuffle changed node set (-original +shuffled):\n%s", diff)
|
||||
}
|
||||
}
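The tests above depend on resetDerpRandomForTesting to clear the package-level singleton between cases. A small hedged sketch of that reset-for-testing pattern (illustrative names, locking omitted, not Headscale's types):

	package derpexample

	import "sync"

	var (
		initOnce sync.Once
		cached   string
	)

	// get lazily derives a value from the first seed it sees; later calls
	// reuse the cached result.
	func get(seed string) string {
		initOnce.Do(func() { cached = "derived-from-" + seed })
		return cached
	}

	// resetForTesting replaces the sync.Once so the next get call re-runs
	// the lazy initialisation with whatever configuration the test set up.
	func resetForTesting() {
		initOnce = sync.Once{}
		cached = ""
	}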
|
||||
@@ -20,6 +20,7 @@ import (
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/rs/zerolog/log"
	"tailscale.com/derp"
	"tailscale.com/envknob"
	"tailscale.com/net/stun"
	"tailscale.com/net/wsconn"
	"tailscale.com/tailcfg"
@@ -35,6 +36,11 @@ const (
	DerpVerifyScheme = "headscale-derp-verify"
)

// debugUseDERPIP is a debug-only flag that causes the DERP server to resolve
// hostnames to IP addresses when generating the DERP region configuration.
// This is useful for integration testing where DNS resolution may be unreliable.
var debugUseDERPIP = envknob.Bool("HEADSCALE_DEBUG_DERP_USE_IP")

type DERPServer struct {
	serverURL string
	key       key.NodePrivate
@@ -70,7 +76,10 @@ func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {
	}
	var host string
	var port int
	host, portStr, err := net.SplitHostPort(serverURL.Host)
	var portStr string

	// Extract hostname and port from URL
	host, portStr, err = net.SplitHostPort(serverURL.Host)
	if err != nil {
		if serverURL.Scheme == "https" {
			host = serverURL.Host
@@ -86,6 +95,19 @@ func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {
		}
	}

	// If debug flag is set, resolve hostname to IP address
	if debugUseDERPIP {
		ips, err := net.LookupIP(host)
		if err != nil {
			log.Error().Caller().Err(err).Msgf("Failed to resolve DERP hostname %s to IP, using hostname", host)
		} else if len(ips) > 0 {
			// Use the first IP address
			ipStr := ips[0].String()
			log.Info().Caller().Msgf("HEADSCALE_DEBUG_DERP_USE_IP: Resolved %s to %s", host, ipStr)
			host = ipStr
		}
	}

	localDERPregion := tailcfg.DERPRegion{
		RegionID:   d.cfg.ServerRegionID,
		RegionCode: d.cfg.ServerRegionCode,
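A compact sketch of the resolve-or-fall-back behaviour added above (the helper name is hypothetical, not part of Headscale): when the debug knob is on, the host is replaced by its first resolved IP, and any lookup failure quietly keeps the hostname.

	package main

	import (
		"fmt"
		"net"
	)

	// hostForRegion returns an IP string for host when useIP is set and DNS
	// resolution succeeds; otherwise it returns the hostname unchanged.
	func hostForRegion(host string, useIP bool) string {
		if !useIP {
			return host
		}
		ips, err := net.LookupIP(host)
		if err != nil || len(ips) == 0 {
			return host // resolution failed: fall back to the hostname
		}
		return ips[0].String()
	}

	func main() {
		fmt.Println(hostForRegion("localhost", true)) // typically "127.0.0.1" or "::1"
	}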
@@ -139,7 +161,7 @@ func (d *DERPServer) DERPHandler(
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
Msg("Failed to write HTTP response")
|
||||
}
|
||||
|
||||
return
|
||||
@@ -177,7 +199,7 @@ func (d *DERPServer) serveWebsocket(writer http.ResponseWriter, req *http.Reques
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
Msg("Failed to write HTTP response")
|
||||
}
|
||||
|
||||
return
|
||||
@@ -207,7 +229,7 @@ func (d *DERPServer) servePlain(writer http.ResponseWriter, req *http.Request) {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
Msg("Failed to write HTTP response")
|
||||
}
|
||||
|
||||
return
|
||||
@@ -223,7 +245,7 @@ func (d *DERPServer) servePlain(writer http.ResponseWriter, req *http.Request) {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
Msg("Failed to write HTTP response")
|
||||
}
|
||||
|
||||
return
|
||||
@@ -262,7 +284,7 @@ func DERPProbeHandler(
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
Msg("Failed to write HTTP response")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -276,7 +298,7 @@ func DERPProbeHandler(
|
||||
// An example implementation is found here https://derp.tailscale.com/bootstrap-dns
|
||||
// Coordination server is included automatically, since local DERP is using the same DNS Name in d.serverURL.
|
||||
func DERPBootstrapDNSHandler(
|
||||
derpMap *tailcfg.DERPMap,
|
||||
derpMap tailcfg.DERPMapView,
|
||||
) func(http.ResponseWriter, *http.Request) {
|
||||
return func(
|
||||
writer http.ResponseWriter,
|
||||
@@ -287,18 +309,18 @@ func DERPBootstrapDNSHandler(
|
||||
resolvCtx, cancel := context.WithTimeout(req.Context(), time.Minute)
|
||||
defer cancel()
|
||||
var resolver net.Resolver
|
||||
for _, region := range derpMap.Regions {
|
||||
for _, node := range region.Nodes { // we don't care if we override some nodes
|
||||
addrs, err := resolver.LookupIP(resolvCtx, "ip", node.HostName)
|
||||
for _, region := range derpMap.Regions().All() {
|
||||
for _, node := range region.Nodes().All() { // we don't care if we override some nodes
|
||||
addrs, err := resolver.LookupIP(resolvCtx, "ip", node.HostName())
|
||||
if err != nil {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msgf("bootstrap DNS lookup failed %q", node.HostName)
|
||||
Msgf("bootstrap DNS lookup failed %q", node.HostName())
|
||||
|
||||
continue
|
||||
}
|
||||
dnsEntries[node.HostName] = addrs
|
||||
dnsEntries[node.HostName()] = addrs
|
||||
}
|
||||
}
|
||||
writer.Header().Set("Content-Type", "application/json")
|
||||
@@ -308,7 +330,7 @@ func DERPBootstrapDNSHandler(
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
Msg("Failed to write HTTP response")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/puzpuzpuz/xsync/v4"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/samber/lo"
|
||||
"google.golang.org/grpc/codes"
|
||||
@@ -25,6 +24,7 @@ import (
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/views"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol/state"
|
||||
@@ -59,9 +59,10 @@ func (api headscaleV1APIServer) CreateUser(
|
||||
return nil, status.Errorf(codes.Internal, "failed to create user: %s", err)
|
||||
}
|
||||
|
||||
|
||||
c := change.UserAdded(types.UserID(user.ID))
|
||||
if policyChanged {
|
||||
|
||||
// TODO(kradalby): Both of these might be policy changes, find a better way to merge.
|
||||
if !policyChanged.Empty() {
|
||||
c.Change = change.Policy
|
||||
}
|
||||
|
||||
@@ -79,15 +80,13 @@ func (api headscaleV1APIServer) RenameUser(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, policyChanged, err := api.h.state.RenameUser(types.UserID(oldUser.ID), request.GetNewName())
|
||||
_, c, err := api.h.state.RenameUser(types.UserID(oldUser.ID), request.GetNewName())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Send policy update notifications if needed
|
||||
if policyChanged {
|
||||
api.h.Change(change.PolicyChange())
|
||||
}
|
||||
api.h.Change(c)
|
||||
|
||||
newUser, err := api.h.state.GetUserByName(request.GetNewName())
|
||||
if err != nil {
|
||||
@@ -238,6 +237,7 @@ func (api headscaleV1APIServer) RegisterNode(
|
||||
request *v1.RegisterNodeRequest,
|
||||
) (*v1.RegisterNodeResponse, error) {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("user", request.GetUser()).
|
||||
Str("registration_id", request.GetKey()).
|
||||
Msg("Registering node")
|
||||
@@ -288,17 +288,13 @@ func (api headscaleV1APIServer) GetNode(
|
||||
ctx context.Context,
|
||||
request *v1.GetNodeRequest,
|
||||
) (*v1.GetNodeResponse, error) {
|
||||
node, err := api.h.state.GetNodeByID(types.NodeID(request.GetNodeId()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
node, ok := api.h.state.GetNodeByID(types.NodeID(request.GetNodeId()))
|
||||
if !ok {
|
||||
return nil, status.Errorf(codes.NotFound, "node not found")
|
||||
}
|
||||
|
||||
resp := node.Proto()
|
||||
|
||||
// Populate the online field based on
|
||||
// currently connected nodes.
|
||||
resp.Online = api.h.mapBatcher.IsConnected(node.ID)
|
||||
|
||||
return &v1.GetNodeResponse{Node: resp}, nil
|
||||
}
|
||||
|
||||
@@ -323,7 +319,8 @@ func (api headscaleV1APIServer) SetTags(
|
||||
api.h.Change(nodeChange)
|
||||
|
||||
log.Trace().
|
||||
Str("node", node.Hostname).
|
||||
Caller().
|
||||
Str("node", node.Hostname()).
|
||||
Strs("tags", request.GetTags()).
|
||||
Msg("Changing tags of node")
|
||||
|
||||
@@ -334,7 +331,13 @@ func (api headscaleV1APIServer) SetApprovedRoutes(
|
||||
ctx context.Context,
|
||||
request *v1.SetApprovedRoutesRequest,
|
||||
) (*v1.SetApprovedRoutesResponse, error) {
|
||||
var routes []netip.Prefix
|
||||
log.Debug().
|
||||
Caller().
|
||||
Uint64("node.id", request.GetNodeId()).
|
||||
Strs("requestedRoutes", request.GetRoutes()).
|
||||
Msg("gRPC SetApprovedRoutes called")
|
||||
|
||||
var newApproved []netip.Prefix
|
||||
for _, route := range request.GetRoutes() {
|
||||
prefix, err := netip.ParsePrefix(route)
|
||||
if err != nil {
|
||||
@@ -344,31 +347,35 @@ func (api headscaleV1APIServer) SetApprovedRoutes(
|
||||
// If the prefix is an exit route, add both. The client expect both
|
||||
// to annotate the node as an exit node.
|
||||
if prefix == tsaddr.AllIPv4() || prefix == tsaddr.AllIPv6() {
|
||||
routes = append(routes, tsaddr.AllIPv4(), tsaddr.AllIPv6())
|
||||
newApproved = append(newApproved, tsaddr.AllIPv4(), tsaddr.AllIPv6())
|
||||
} else {
|
||||
routes = append(routes, prefix)
|
||||
newApproved = append(newApproved, prefix)
|
||||
}
|
||||
}
|
||||
tsaddr.SortPrefixes(routes)
|
||||
routes = slices.Compact(routes)
|
||||
tsaddr.SortPrefixes(newApproved)
|
||||
newApproved = slices.Compact(newApproved)
|
||||
|
||||
node, nodeChange, err := api.h.state.SetApprovedRoutes(types.NodeID(request.GetNodeId()), routes)
|
||||
node, nodeChange, err := api.h.state.SetApprovedRoutes(types.NodeID(request.GetNodeId()), newApproved)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
|
||||
routeChange := api.h.state.SetNodeRoutes(node.ID, node.SubnetRoutes()...)
|
||||
|
||||
// Always propagate node changes from SetApprovedRoutes
|
||||
api.h.Change(nodeChange)
|
||||
|
||||
// If routes changed, propagate those changes too
|
||||
if !routeChange.Empty() {
|
||||
api.h.Change(routeChange)
|
||||
}
|
||||
|
||||
proto := node.Proto()
|
||||
proto.SubnetRoutes = util.PrefixesToString(api.h.state.GetNodePrimaryRoutes(node.ID))
|
||||
// Populate SubnetRoutes with PrimaryRoutes to ensure it includes only the
|
||||
// routes that are actively served from the node (per architectural requirement in types/node.go)
|
||||
primaryRoutes := api.h.state.GetNodePrimaryRoutes(node.ID())
|
||||
proto.SubnetRoutes = util.PrefixesToString(primaryRoutes)
|
||||
|
||||
log.Debug().
|
||||
Caller().
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Strs("approvedRoutes", util.PrefixesToString(node.ApprovedRoutes().AsSlice())).
|
||||
Strs("primaryRoutes", util.PrefixesToString(primaryRoutes)).
|
||||
Strs("finalSubnetRoutes", proto.SubnetRoutes).
|
||||
Msg("gRPC SetApprovedRoutes completed")
|
||||
|
||||
return &v1.SetApprovedRoutesResponse{Node: proto}, nil
|
||||
}
|
||||
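The route handling above expands an exit-node prefix to both address families before sorting and deduplicating. A hedged, self-contained sketch of that normalisation (standalone function, not the gRPC handler, and using net/netip directly instead of tsaddr):

	package main

	import (
		"fmt"
		"net/netip"
		"slices"
		"strings"
	)

	// normalizeApprovedRoutes mirrors the idea above: an exit prefix in either
	// family is expanded to both 0.0.0.0/0 and ::/0 so clients mark the node
	// as an exit node, then the list is sorted and deduplicated.
	func normalizeApprovedRoutes(raw []string) ([]netip.Prefix, error) {
		allV4 := netip.MustParsePrefix("0.0.0.0/0")
		allV6 := netip.MustParsePrefix("::/0")

		var out []netip.Prefix
		for _, r := range raw {
			p, err := netip.ParsePrefix(r)
			if err != nil {
				return nil, fmt.Errorf("parsing route %q: %w", r, err)
			}
			if p == allV4 || p == allV6 {
				out = append(out, allV4, allV6)
			} else {
				out = append(out, p)
			}
		}

		slices.SortFunc(out, func(a, b netip.Prefix) int {
			return strings.Compare(a.String(), b.String())
		})

		return slices.Compact(out), nil
	}

	func main() {
		routes, _ := normalizeApprovedRoutes([]string{"0.0.0.0/0", "10.0.0.0/24", "::/0"})
		fmt.Println(routes) // [0.0.0.0/0 10.0.0.0/24 ::/0]
	}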
@@ -390,9 +397,9 @@ func (api headscaleV1APIServer) DeleteNode(
|
||||
ctx context.Context,
|
||||
request *v1.DeleteNodeRequest,
|
||||
) (*v1.DeleteNodeResponse, error) {
|
||||
node, err := api.h.state.GetNodeByID(types.NodeID(request.GetNodeId()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
node, ok := api.h.state.GetNodeByID(types.NodeID(request.GetNodeId()))
|
||||
if !ok {
|
||||
return nil, status.Errorf(codes.NotFound, "node not found")
|
||||
}
|
||||
|
||||
nodeChange, err := api.h.state.DeleteNode(node)
|
||||
@@ -420,8 +427,9 @@ func (api headscaleV1APIServer) ExpireNode(
|
||||
api.h.Change(nodeChange)
|
||||
|
||||
log.Trace().
|
||||
Str("node", node.Hostname).
|
||||
Time("expiry", *node.Expiry).
|
||||
Caller().
|
||||
Str("node", node.Hostname()).
|
||||
Time("expiry", *node.AsStruct().Expiry).
|
||||
Msg("node expired")
|
||||
|
||||
return &v1.ExpireNodeResponse{Node: node.Proto()}, nil
|
||||
@@ -440,7 +448,8 @@ func (api headscaleV1APIServer) RenameNode(
|
||||
api.h.Change(nodeChange)
|
||||
|
||||
log.Trace().
|
||||
Str("node", node.Hostname).
|
||||
Caller().
|
||||
Str("node", node.Hostname()).
|
||||
Str("new_name", request.GetNewName()).
|
||||
Msg("node renamed")
|
||||
|
||||
@@ -455,58 +464,45 @@ func (api headscaleV1APIServer) ListNodes(
|
||||
// the filtering of nodes by user, vs nodes as a whole can
|
||||
// probably be done once.
|
||||
// TODO(kradalby): This should be done in one tx.
|
||||
|
||||
IsConnected := api.h.mapBatcher.ConnectedMap()
|
||||
if request.GetUser() != "" {
|
||||
user, err := api.h.state.GetUserByName(request.GetUser())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nodes, err := api.h.state.ListNodesByUser(types.UserID(user.ID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodes := api.h.state.ListNodesByUser(types.UserID(user.ID))
|
||||
|
||||
response := nodesToProto(api.h.state, IsConnected, nodes)
|
||||
response := nodesToProto(api.h.state, nodes)
|
||||
return &v1.ListNodesResponse{Nodes: response}, nil
|
||||
}
|
||||
|
||||
nodes, err := api.h.state.ListNodes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodes := api.h.state.ListNodes()
|
||||
|
||||
sort.Slice(nodes, func(i, j int) bool {
|
||||
return nodes[i].ID < nodes[j].ID
|
||||
})
|
||||
|
||||
response := nodesToProto(api.h.state, IsConnected, nodes)
|
||||
response := nodesToProto(api.h.state, nodes)
|
||||
return &v1.ListNodesResponse{Nodes: response}, nil
|
||||
}
|
||||
|
||||
func nodesToProto(state *state.State, IsConnected *xsync.MapOf[types.NodeID, bool], nodes types.Nodes) []*v1.Node {
|
||||
response := make([]*v1.Node, len(nodes))
|
||||
for index, node := range nodes {
|
||||
func nodesToProto(state *state.State, nodes views.Slice[types.NodeView]) []*v1.Node {
|
||||
response := make([]*v1.Node, nodes.Len())
|
||||
for index, node := range nodes.All() {
|
||||
resp := node.Proto()
|
||||
|
||||
// Populate the online field based on
|
||||
// currently connected nodes.
|
||||
if val, ok := IsConnected.Load(node.ID); ok && val {
|
||||
resp.Online = true
|
||||
}
|
||||
|
||||
var tags []string
|
||||
for _, tag := range node.RequestTags() {
|
||||
if state.NodeCanHaveTag(node.View(), tag) {
|
||||
if state.NodeCanHaveTag(node, tag) {
|
||||
tags = append(tags, tag)
|
||||
}
|
||||
}
|
||||
resp.ValidTags = lo.Uniq(append(tags, node.ForcedTags...))
|
||||
resp.SubnetRoutes = util.PrefixesToString(append(state.GetNodePrimaryRoutes(node.ID), node.ExitRoutes()...))
|
||||
resp.ValidTags = lo.Uniq(append(tags, node.ForcedTags().AsSlice()...))
|
||||
|
||||
resp.SubnetRoutes = util.PrefixesToString(append(state.GetNodePrimaryRoutes(node.ID()), node.ExitRoutes()...))
|
||||
response[index] = resp
|
||||
}
|
||||
|
||||
sort.Slice(response, func(i, j int) bool {
|
||||
return response[i].Id < response[j].Id
|
||||
})
|
||||
|
||||
return response
|
||||
}
|
||||
|
||||
@@ -530,7 +526,7 @@ func (api headscaleV1APIServer) BackfillNodeIPs(
|
||||
ctx context.Context,
|
||||
request *v1.BackfillNodeIPsRequest,
|
||||
) (*v1.BackfillNodeIPsResponse, error) {
|
||||
log.Trace().Msg("Backfill called")
|
||||
log.Trace().Caller().Msg("Backfill called")
|
||||
|
||||
if !request.Confirmed {
|
||||
return nil, errors.New("not confirmed, aborting")
|
||||
@@ -674,17 +670,15 @@ func (api headscaleV1APIServer) SetPolicy(
|
||||
// a scenario where they might be allowed if the server has no nodes
|
||||
// yet, but it should help for the general case and for hot reloading
|
||||
// configurations.
|
||||
nodes, err := api.h.state.ListNodes()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("loading nodes from database to validate policy: %w", err)
|
||||
}
|
||||
changed, err := api.h.state.SetPolicy([]byte(p))
|
||||
nodes := api.h.state.ListNodes()
|
||||
|
||||
_, err := api.h.state.SetPolicy([]byte(p))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting policy: %w", err)
|
||||
}
|
||||
|
||||
if len(nodes) > 0 {
|
||||
_, err = api.h.state.SSHPolicy(nodes[0].View())
|
||||
if nodes.Len() > 0 {
|
||||
_, err = api.h.state.SSHPolicy(nodes.At(0))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("verifying SSH rules: %w", err)
|
||||
}
|
||||
@@ -695,14 +689,20 @@ func (api headscaleV1APIServer) SetPolicy(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Only send update if the packet filter has changed.
|
||||
if changed {
|
||||
err = api.h.state.AutoApproveNodes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Always reload policy to ensure route re-evaluation, even if policy content hasn't changed.
|
||||
// This ensures that routes are re-evaluated for auto-approval in cases where routes
|
||||
// were manually disabled but could now be auto-approved with the current policy.
|
||||
cs, err := api.h.state.ReloadPolicy()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reloading policy: %w", err)
|
||||
}
|
||||
|
||||
api.h.Change(change.PolicyChange())
|
||||
if len(cs) > 0 {
|
||||
api.h.Change(cs...)
|
||||
} else {
|
||||
log.Debug().
|
||||
Caller().
|
||||
Msg("No policy changes to distribute because ReloadPolicy returned empty changeset")
|
||||
}
|
||||
|
||||
response := &v1.SetPolicyResponse{
|
||||
@@ -710,6 +710,10 @@ func (api headscaleV1APIServer) SetPolicy(
|
||||
UpdatedAt: timestamppb.New(updated.UpdatedAt),
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Caller().
|
||||
Msg("gRPC SetPolicy completed successfully because response prepared")
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
@@ -732,12 +736,12 @@ func (api headscaleV1APIServer) DebugCreateNode(
|
||||
Caller().
|
||||
Interface("route-prefix", routes).
|
||||
Interface("route-str", request.GetRoutes()).
|
||||
Msg("")
|
||||
Msg("Creating routes for node")
|
||||
|
||||
hostinfo := tailcfg.Hostinfo{
|
||||
RoutableIPs: routes,
|
||||
OS: "TestOS",
|
||||
Hostname: "DebugTestNode",
|
||||
Hostname: request.GetName(),
|
||||
}
|
||||
|
||||
registrationId, err := types.RegistrationIDFromString(request.GetKey())
|
||||
@@ -745,8 +749,8 @@ func (api headscaleV1APIServer) DebugCreateNode(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newNode := types.RegisterNode{
|
||||
Node: types.Node{
|
||||
newNode := types.NewRegisterNode(
|
||||
types.Node{
|
||||
NodeKey: key.NewNode().Public(),
|
||||
MachineKey: key.NewMachine().Public(),
|
||||
Hostname: request.GetName(),
|
||||
@@ -757,10 +761,10 @@ func (api headscaleV1APIServer) DebugCreateNode(
|
||||
|
||||
Hostinfo: &hostinfo,
|
||||
},
|
||||
Registered: make(chan *types.Node),
|
||||
}
|
||||
)
|
||||
|
||||
log.Debug().
|
||||
Caller().
|
||||
Str("registration_id", registrationId.String()).
|
||||
Msg("adding debug machine via CLI, appending to registration cache")
|
||||
|
||||
@@ -769,4 +773,24 @@ func (api headscaleV1APIServer) DebugCreateNode(
|
||||
return &v1.DebugCreateNodeResponse{Node: newNode.Node.Proto()}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) Health(
|
||||
ctx context.Context,
|
||||
request *v1.HealthRequest,
|
||||
) (*v1.HealthResponse, error) {
|
||||
var healthErr error
|
||||
response := &v1.HealthResponse{}
|
||||
|
||||
if err := api.h.state.PingDB(ctx); err != nil {
|
||||
healthErr = fmt.Errorf("database ping failed: %w", err)
|
||||
} else {
|
||||
response.DatabaseConnectivity = true
|
||||
}
|
||||
|
||||
if healthErr != nil {
|
||||
log.Error().Err(healthErr).Msg("Health check failed")
|
||||
}
|
||||
|
||||
return response, healthErr
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
|
||||
|
||||
@@ -91,16 +91,22 @@ func (h *Headscale) handleVerifyRequest(
|
||||
|
||||
var derpAdmitClientRequest tailcfg.DERPAdmitClientRequest
|
||||
if err := json.Unmarshal(body, &derpAdmitClientRequest); err != nil {
|
||||
return fmt.Errorf("cannot parse derpAdmitClientRequest: %w", err)
|
||||
return NewHTTPError(http.StatusBadRequest, "Bad Request: invalid JSON", fmt.Errorf("cannot parse derpAdmitClientRequest: %w", err))
|
||||
}
|
||||
|
||||
nodes, err := h.state.ListNodes()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot list nodes: %w", err)
|
||||
nodes := h.state.ListNodes()
|
||||
|
||||
// Check if any node has the requested NodeKey
|
||||
var nodeKeyFound bool
|
||||
for _, node := range nodes.All() {
|
||||
if node.NodeKey() == derpAdmitClientRequest.NodePublic {
|
||||
nodeKeyFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
resp := &tailcfg.DERPAdmitClientResponse{
|
||||
Allow: nodes.ContainsNodeKey(derpAdmitClientRequest.NodePublic),
|
||||
Allow: nodeKeyFound,
|
||||
}
|
||||
|
||||
return json.NewEncoder(writer).Encode(resp)
|
||||
@@ -180,6 +186,21 @@ func (h *Headscale) HealthHandler(
|
||||
respond(nil)
|
||||
}
|
||||
|
||||
func (h *Headscale) RobotsHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
writer.Header().Set("Content-Type", "text/plain")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err := writer.Write([]byte("User-agent: *\nDisallow: /"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write HTTP response")
|
||||
}
|
||||
}
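A hedged, self-contained example of exercising a handler like the one above with httptest; the handler here is a local stand-in that mirrors the response body, not the Headscale method itself.

	package main

	import (
		"net/http"
		"net/http/httptest"
		"testing"
	)

	// robots mirrors the behaviour of the handler above: a plain-text
	// response telling crawlers to stay away from every path.
	func robots(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "text/plain")
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("User-agent: *\nDisallow: /"))
	}

	func TestRobots(t *testing.T) {
		rec := httptest.NewRecorder()
		robots(rec, httptest.NewRequest(http.MethodGet, "/robots.txt", nil))

		if got := rec.Body.String(); got != "User-agent: *\nDisallow: /" {
			t.Fatalf("unexpected robots.txt body: %q", got)
		}
	}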
|
||||
|
||||
var codeStyleRegisterWebAPI = styles.Props{
|
||||
styles.Display: "block",
|
||||
styles.Padding: "20px",
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package mapper
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
@@ -8,6 +9,7 @@ import (
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/types/change"
|
||||
"github.com/puzpuzpuz/xsync/v4"
|
||||
"github.com/rs/zerolog/log"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
@@ -18,12 +20,13 @@ type batcherFunc func(cfg *types.Config, state *state.State) Batcher
|
||||
type Batcher interface {
|
||||
Start()
|
||||
Close()
|
||||
AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse, isRouter bool, version tailcfg.CapabilityVersion) error
|
||||
RemoveNode(id types.NodeID, c chan<- *tailcfg.MapResponse, isRouter bool)
|
||||
AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse, version tailcfg.CapabilityVersion) error
|
||||
RemoveNode(id types.NodeID, c chan<- *tailcfg.MapResponse) bool
|
||||
IsConnected(id types.NodeID) bool
|
||||
ConnectedMap() *xsync.Map[types.NodeID, bool]
|
||||
AddWork(c change.ChangeSet)
|
||||
AddWork(c ...change.ChangeSet)
|
||||
MapResponseFromChange(id types.NodeID, c change.ChangeSet) (*tailcfg.MapResponse, error)
|
||||
DebugMapResponses() (map[types.NodeID][]tailcfg.MapResponse, error)
|
||||
}
|
||||
|
||||
func NewBatcher(batchTime time.Duration, workers int, mapper *mapper) *LockFreeBatcher {
|
||||
@@ -34,7 +37,7 @@ func NewBatcher(batchTime time.Duration, workers int, mapper *mapper) *LockFreeB
|
||||
|
||||
// The size of this channel is arbitrary chosen, the sizing should be revisited.
|
||||
workCh: make(chan work, workers*200),
|
||||
nodes: xsync.NewMap[types.NodeID, *nodeConn](),
|
||||
nodes: xsync.NewMap[types.NodeID, *multiChannelNodeConn](),
|
||||
connected: xsync.NewMap[types.NodeID, *time.Time](),
|
||||
pendingChanges: xsync.NewMap[types.NodeID, []change.ChangeSet](),
|
||||
}
|
||||
@@ -45,6 +48,7 @@ func NewBatcherAndMapper(cfg *types.Config, state *state.State) Batcher {
|
||||
m := newMapper(cfg, state)
|
||||
b := NewBatcher(cfg.Tuning.BatchChangeDelay, cfg.Tuning.BatcherWorkers, m)
|
||||
m.batcher = b
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
@@ -70,8 +74,10 @@ func generateMapResponse(nodeID types.NodeID, version tailcfg.CapabilityVersion,
|
||||
return nil, fmt.Errorf("mapper is nil for nodeID %d", nodeID)
|
||||
}
|
||||
|
||||
var mapResp *tailcfg.MapResponse
|
||||
var err error
|
||||
var (
|
||||
mapResp *tailcfg.MapResponse
|
||||
err error
|
||||
)
|
||||
|
||||
switch c.Change {
|
||||
case change.DERP:
|
||||
@@ -82,20 +88,46 @@ func generateMapResponse(nodeID types.NodeID, version tailcfg.CapabilityVersion,
|
||||
// TODO(kradalby): This can potentially be a peer update of the old and new subnet router.
|
||||
mapResp, err = mapper.fullMapResponse(nodeID, version)
|
||||
} else {
|
||||
// Trust the change type for online/offline status to avoid race conditions
|
||||
// between NodeStore updates and change processing
|
||||
onlineStatus := c.Change == change.NodeCameOnline
|
||||
|
||||
mapResp, err = mapper.peerChangedPatchResponse(nodeID, []*tailcfg.PeerChange{
|
||||
{
|
||||
NodeID: c.NodeID.NodeID(),
|
||||
Online: ptr.To(c.Change == change.NodeCameOnline),
|
||||
Online: ptr.To(onlineStatus),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
case change.NodeNewOrUpdate:
|
||||
mapResp, err = mapper.fullMapResponse(nodeID, version)
|
||||
// If the node is the one being updated, we send a self update that preserves peer information
|
||||
// to ensure the node sees changes to its own properties (e.g., hostname/DNS name changes)
|
||||
// without losing its view of peer status during rapid reconnection cycles
|
||||
if c.IsSelfUpdate(nodeID) {
|
||||
mapResp, err = mapper.selfMapResponse(nodeID, version)
|
||||
} else {
|
||||
mapResp, err = mapper.peerChangeResponse(nodeID, version, c.NodeID)
|
||||
}
|
||||
|
||||
case change.NodeRemove:
|
||||
mapResp, err = mapper.peerRemovedResponse(nodeID, c.NodeID)
|
||||
|
||||
case change.NodeKeyExpiry:
|
||||
// If the node is the one whose key is expiring, we send a "full" self update
|
||||
// as nodes will ignore patch updates about themselves (?).
|
||||
if c.IsSelfUpdate(nodeID) {
|
||||
mapResp, err = mapper.selfMapResponse(nodeID, version)
|
||||
// mapResp, err = mapper.fullMapResponse(nodeID, version)
|
||||
} else {
|
||||
mapResp, err = mapper.peerChangedPatchResponse(nodeID, []*tailcfg.PeerChange{
|
||||
{
|
||||
NodeID: c.NodeID.NodeID(),
|
||||
KeyExpiry: c.NodeExpiry,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
default:
|
||||
// The following will always hit this:
|
||||
// change.Full, change.Policy
|
||||
@@ -119,11 +151,16 @@ func generateMapResponse(nodeID types.NodeID, version tailcfg.CapabilityVersion,
|
||||
// handleNodeChange generates and sends a [tailcfg.MapResponse] for a given node and [change.ChangeSet].
|
||||
func handleNodeChange(nc nodeConnection, mapper *mapper, c change.ChangeSet) error {
|
||||
if nc == nil {
|
||||
return fmt.Errorf("nodeConnection is nil")
|
||||
return errors.New("nodeConnection is nil")
|
||||
}
|
||||
|
||||
nodeID := nc.nodeID()
|
||||
data, err := generateMapResponse(nodeID, nc.version(), mapper, c)
|
||||
|
||||
log.Debug().Caller().Uint64("node.id", nodeID.Uint64()).Str("change.type", c.Change.String()).Msg("Node change processing started because change notification received")
|
||||
|
||||
var data *tailcfg.MapResponse
|
||||
var err error
|
||||
data, err = generateMapResponse(nodeID, nc.version(), mapper, c)
|
||||
if err != nil {
|
||||
return fmt.Errorf("generating map response for node %d: %w", nodeID, err)
|
||||
}
|
||||
@@ -134,7 +171,8 @@ func handleNodeChange(nc nodeConnection, mapper *mapper, c change.ChangeSet) err
|
||||
}
|
||||
|
||||
// Send the map response
|
||||
if err := nc.send(data); err != nil {
|
||||
err = nc.send(data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("sending map response to node %d: %w", nodeID, err)
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package mapper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@@ -21,8 +22,7 @@ type LockFreeBatcher struct {
|
||||
mapper *mapper
|
||||
workers int
|
||||
|
||||
// Lock-free concurrent maps
|
||||
nodes *xsync.Map[types.NodeID, *nodeConn]
|
||||
nodes *xsync.Map[types.NodeID, *multiChannelNodeConn]
|
||||
connected *xsync.Map[types.NodeID, *time.Time]
|
||||
|
||||
// Work queue channel
|
||||
@@ -32,7 +32,6 @@ type LockFreeBatcher struct {
|
||||
|
||||
// Batching state
|
||||
pendingChanges *xsync.Map[types.NodeID, []change.ChangeSet]
|
||||
batchMutex sync.RWMutex
|
||||
|
||||
// Metrics
|
||||
totalNodes atomic.Int64
|
||||
@@ -45,87 +44,105 @@ type LockFreeBatcher struct {
|
||||
// AddNode registers a new node connection with the batcher and sends an initial map response.
|
||||
// It creates or updates the node's connection data, validates the initial map generation,
|
||||
// and notifies other nodes that this node has come online.
|
||||
// TODO(kradalby): See if we can move the isRouter argument somewhere else.
|
||||
func (b *LockFreeBatcher) AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse, isRouter bool, version tailcfg.CapabilityVersion) error {
|
||||
// First validate that we can generate initial map before doing anything else
|
||||
fullSelfChange := change.FullSelf(id)
|
||||
func (b *LockFreeBatcher) AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse, version tailcfg.CapabilityVersion) error {
|
||||
addNodeStart := time.Now()
|
||||
|
||||
// Generate connection ID
|
||||
connID := generateConnectionID()
|
||||
|
||||
// Create new connection entry
|
||||
now := time.Now()
|
||||
newEntry := &connectionEntry{
|
||||
id: connID,
|
||||
c: c,
|
||||
version: version,
|
||||
created: now,
|
||||
}
|
||||
// Initialize last used timestamp
|
||||
newEntry.lastUsed.Store(now.Unix())
|
||||
|
||||
// Get or create multiChannelNodeConn - this reuses existing offline nodes for rapid reconnection
|
||||
nodeConn, loaded := b.nodes.LoadOrStore(id, newMultiChannelNodeConn(id, b.mapper))
|
||||
|
||||
if !loaded {
|
||||
b.totalNodes.Add(1)
|
||||
}
|
||||
|
||||
// Add connection to the list (lock-free)
|
||||
nodeConn.addConnection(newEntry)
|
||||
|
||||
// Use the worker pool for controlled concurrency instead of direct generation
|
||||
initialMap, err := b.MapResponseFromChange(id, change.FullSelf(id))
|
||||
|
||||
// TODO(kradalby): This should not be generated here, but rather in MapResponseFromChange.
|
||||
// This currently means that the goroutine for the node connection will do the processing
|
||||
// which means that we might have uncontrolled concurrency.
|
||||
// When we use MapResponseFromChange, it will be processed by the same worker pool, causing
|
||||
// it to be processed in a more controlled manner.
|
||||
initialMap, err := generateMapResponse(id, version, b.mapper, fullSelfChange)
|
||||
if err != nil {
|
||||
log.Error().Uint64("node.id", id.Uint64()).Err(err).Msg("Initial map generation failed")
|
||||
nodeConn.removeConnectionByChannel(c)
|
||||
return fmt.Errorf("failed to generate initial map for node %d: %w", id, err)
|
||||
}
|
||||
|
||||
// Only after validation succeeds, create or update node connection
|
||||
newConn := newNodeConn(id, c, version, b.mapper)
|
||||
|
||||
var conn *nodeConn
|
||||
if existing, loaded := b.nodes.LoadOrStore(id, newConn); loaded {
|
||||
// Update existing connection
|
||||
existing.updateConnection(c, version)
|
||||
conn = existing
|
||||
} else {
|
||||
b.totalNodes.Add(1)
|
||||
conn = newConn
|
||||
// Use a blocking send with timeout for initial map since the channel should be ready
|
||||
// and we want to avoid the race condition where the receiver isn't ready yet
|
||||
select {
|
||||
case c <- initialMap:
|
||||
// Success
|
||||
case <-time.After(5 * time.Second):
|
||||
log.Error().Uint64("node.id", id.Uint64()).Err(fmt.Errorf("timeout")).Msg("Initial map send timeout")
|
||||
log.Debug().Caller().Uint64("node.id", id.Uint64()).Dur("timeout.duration", 5*time.Second).
|
||||
Msg("Initial map send timed out because channel was blocked or receiver not ready")
|
||||
nodeConn.removeConnectionByChannel(c)
|
||||
return fmt.Errorf("failed to send initial map to node %d: timeout", id)
|
||||
}
|
||||
|
||||
// Mark as connected only after validation succeeds
|
||||
// Update connection status
|
||||
b.connected.Store(id, nil) // nil = connected
|
||||
|
||||
log.Info().Uint64("node.id", id.Uint64()).Bool("isRouter", isRouter).Msg("Node connected to batcher")
|
||||
// Node will automatically receive updates through the normal flow
|
||||
// The initial full map already contains all current state
|
||||
|
||||
// Send the validated initial map
|
||||
if initialMap != nil {
|
||||
if err := conn.send(initialMap); err != nil {
|
||||
// Clean up the connection state on send failure
|
||||
b.nodes.Delete(id)
|
||||
b.connected.Delete(id)
|
||||
return fmt.Errorf("failed to send initial map to node %d: %w", id, err)
|
||||
}
|
||||
|
||||
// Notify other nodes that this node came online
|
||||
b.addWork(change.ChangeSet{NodeID: id, Change: change.NodeCameOnline, IsSubnetRouter: isRouter})
|
||||
}
|
||||
log.Debug().Caller().Uint64("node.id", id.Uint64()).Dur("total.duration", time.Since(addNodeStart)).
|
||||
Int("active.connections", nodeConn.getActiveConnectionCount()).
|
||||
Msg("Node connection established in batcher because AddNode completed successfully")
|
||||
|
||||
return nil
|
||||
}
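The initial map above is delivered with a bounded blocking send so a receiver that never drains its channel cannot wedge registration. A minimal sketch of that select-with-timeout pattern (generic types, not the batcher's own):

	package main

	import (
		"errors"
		"fmt"
		"time"
	)

	// sendWithTimeout blocks until the receiver accepts the value or the
	// deadline passes, in which case the caller can clean up the connection.
	func sendWithTimeout[T any](c chan<- T, v T, d time.Duration) error {
		select {
		case c <- v:
			return nil
		case <-time.After(d):
			return errors.New("send timed out: receiver not ready")
		}
	}

	func main() {
		ch := make(chan string) // unbuffered and undrained: the send times out
		err := sendWithTimeout(ch, "initial map", 100*time.Millisecond)
		fmt.Println(err)
	}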
|
||||
|
||||
// RemoveNode disconnects a node from the batcher, marking it as offline and cleaning up its state.
|
||||
// It validates the connection channel matches the current one, closes the connection,
|
||||
// and notifies other nodes that this node has gone offline.
|
||||
func (b *LockFreeBatcher) RemoveNode(id types.NodeID, c chan<- *tailcfg.MapResponse, isRouter bool) {
|
||||
// Check if this is the current connection and mark it as closed
|
||||
if existing, ok := b.nodes.Load(id); ok {
|
||||
if !existing.matchesChannel(c) {
|
||||
log.Debug().Uint64("node.id", id.Uint64()).Msg("RemoveNode called for non-current connection, ignoring")
|
||||
return // Not the current connection, not an error
|
||||
}
|
||||
|
||||
// Mark the connection as closed to prevent further sends
|
||||
if connData := existing.connData.Load(); connData != nil {
|
||||
connData.closed.Store(true)
|
||||
}
|
||||
// It validates the connection channel matches one of the current connections, closes that specific connection,
|
||||
// and keeps the node entry alive for rapid reconnections instead of aggressive deletion.
|
||||
// Reports if the node still has active connections after removal.
|
||||
func (b *LockFreeBatcher) RemoveNode(id types.NodeID, c chan<- *tailcfg.MapResponse) bool {
|
||||
nodeConn, exists := b.nodes.Load(id)
|
||||
if !exists {
|
||||
log.Debug().Caller().Uint64("node.id", id.Uint64()).Msg("RemoveNode called for non-existent node because node not found in batcher")
|
||||
return false
|
||||
}
|
||||
|
||||
log.Info().Uint64("node.id", id.Uint64()).Bool("isRouter", isRouter).Msg("Node disconnected from batcher, marking as offline")
|
||||
// Remove specific connection
|
||||
removed := nodeConn.removeConnectionByChannel(c)
|
||||
if !removed {
|
||||
log.Debug().Caller().Uint64("node.id", id.Uint64()).Msg("RemoveNode: channel not found because connection already removed or invalid")
|
||||
return false
|
||||
}
|
||||
|
||||
// Remove node and mark disconnected atomically
|
||||
b.nodes.Delete(id)
|
||||
// Check if node has any remaining active connections
|
||||
if nodeConn.hasActiveConnections() {
|
||||
log.Debug().Caller().Uint64("node.id", id.Uint64()).
|
||||
Int("active.connections", nodeConn.getActiveConnectionCount()).
|
||||
Msg("Node connection removed but keeping online because other connections remain")
|
||||
return true // Node still has active connections
|
||||
}
|
||||
|
||||
// No active connections - keep the node entry alive for rapid reconnections
|
||||
// The node will get a fresh full map when it reconnects
|
||||
log.Debug().Caller().Uint64("node.id", id.Uint64()).Msg("Node disconnected from batcher because all connections removed, keeping entry for rapid reconnection")
|
||||
b.connected.Store(id, ptr.To(time.Now()))
|
||||
b.totalNodes.Add(-1)
|
||||
|
||||
// Notify other nodes that this node went offline
|
||||
b.addWork(change.ChangeSet{NodeID: id, Change: change.NodeWentOffline, IsSubnetRouter: isRouter})
|
||||
return false
|
||||
}
|
||||
|
||||
// AddWork queues a change to be processed by the batcher.
|
||||
// Critical changes are processed immediately, while others are batched for efficiency.
|
||||
func (b *LockFreeBatcher) AddWork(c change.ChangeSet) {
|
||||
b.addWork(c)
|
||||
func (b *LockFreeBatcher) AddWork(c ...change.ChangeSet) {
|
||||
b.addWork(c...)
|
||||
}
|
||||
|
||||
func (b *LockFreeBatcher) Start() {
|
||||
@@ -136,41 +153,57 @@ func (b *LockFreeBatcher) Start() {
|
||||
func (b *LockFreeBatcher) Close() {
|
||||
if b.cancel != nil {
|
||||
b.cancel()
|
||||
b.cancel = nil
|
||||
}
|
||||
close(b.workCh)
|
||||
|
||||
// Only close workCh once
|
||||
select {
|
||||
case <-b.workCh:
|
||||
// Channel is already closed
|
||||
default:
|
||||
close(b.workCh)
|
||||
}
|
||||
|
||||
// Close the underlying channels supplying the data to the clients.
|
||||
b.nodes.Range(func(nodeID types.NodeID, conn *multiChannelNodeConn) bool {
|
||||
conn.close()
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (b *LockFreeBatcher) doWork() {
|
||||
log.Debug().Msg("batcher doWork loop started")
|
||||
defer log.Debug().Msg("batcher doWork loop stopped")
|
||||
|
||||
for i := range b.workers {
|
||||
go b.worker(i + 1)
|
||||
}
|
||||
|
||||
// Create a cleanup ticker for removing truly disconnected nodes
|
||||
cleanupTicker := time.NewTicker(5 * time.Minute)
|
||||
defer cleanupTicker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-b.tick.C:
|
||||
// Process batched changes
|
||||
b.processBatchedChanges()
|
||||
case <-cleanupTicker.C:
|
||||
// Clean up nodes that have been offline for too long
|
||||
b.cleanupOfflineNodes()
|
||||
case <-b.ctx.Done():
|
||||
log.Info().Msg("batcher context done, stopping to feed workers")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *LockFreeBatcher) worker(workerID int) {
|
||||
log.Debug().Int("workerID", workerID).Msg("batcher worker started")
|
||||
defer log.Debug().Int("workerID", workerID).Msg("batcher worker stopped")
|
||||
|
||||
for {
|
||||
select {
|
||||
case w, ok := <-b.workCh:
|
||||
if !ok {
|
||||
log.Debug().Int("worker.id", workerID).Msgf("worker channel closing, shutting down worker %d", workerID)
|
||||
return
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
b.workProcessed.Add(1)
|
||||
|
||||
// If the resultCh is set, it means that this is a work request
|
||||
@@ -180,20 +213,23 @@ func (b *LockFreeBatcher) worker(workerID int) {
|
||||
if w.resultCh != nil {
|
||||
var result workResult
|
||||
if nc, exists := b.nodes.Load(w.nodeID); exists {
|
||||
result.mapResponse, result.err = generateMapResponse(nc.nodeID(), nc.version(), b.mapper, w.c)
|
||||
var err error
|
||||
result.mapResponse, err = generateMapResponse(nc.nodeID(), nc.version(), b.mapper, w.c)
|
||||
result.err = err
|
||||
if result.err != nil {
|
||||
b.workErrors.Add(1)
|
||||
log.Error().Err(result.err).
|
||||
Int("workerID", workerID).
|
||||
Int("worker.id", workerID).
|
||||
Uint64("node.id", w.nodeID.Uint64()).
|
||||
Str("change", w.c.Change.String()).
|
||||
Msg("failed to generate map response for synchronous work")
|
||||
}
|
||||
} else {
|
||||
result.err = fmt.Errorf("node %d not found", w.nodeID)
|
||||
|
||||
b.workErrors.Add(1)
|
||||
log.Error().Err(result.err).
|
||||
Int("workerID", workerID).
|
||||
Int("worker.id", workerID).
|
||||
Uint64("node.id", w.nodeID.Uint64()).
|
||||
Msg("node not found for synchronous work")
|
||||
}
|
||||
@@ -205,15 +241,6 @@ func (b *LockFreeBatcher) worker(workerID int) {
|
||||
return
|
||||
}
|
||||
|
||||
duration := time.Since(startTime)
|
||||
if duration > 100*time.Millisecond {
|
||||
log.Warn().
|
||||
Int("workerID", workerID).
|
||||
Uint64("node.id", w.nodeID.Uint64()).
|
||||
Str("change", w.c.Change.String()).
|
||||
Dur("duration", duration).
|
||||
Msg("slow synchronous work processing")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -221,71 +248,30 @@ func (b *LockFreeBatcher) worker(workerID int) {
|
||||
// that should be processed and sent to the node instead of
|
||||
// returned to the caller.
|
||||
if nc, exists := b.nodes.Load(w.nodeID); exists {
|
||||
// Check if this connection is still active before processing
|
||||
if connData := nc.connData.Load(); connData != nil && connData.closed.Load() {
|
||||
log.Debug().
|
||||
Int("workerID", workerID).
|
||||
Uint64("node.id", w.nodeID.Uint64()).
|
||||
Str("change", w.c.Change.String()).
|
||||
Msg("skipping work for closed connection")
|
||||
continue
|
||||
}
|
||||
|
||||
// Apply change to node - this will handle offline nodes gracefully
|
||||
// and queue work for when they reconnect
|
||||
err := nc.change(w.c)
|
||||
if err != nil {
|
||||
b.workErrors.Add(1)
|
||||
log.Error().Err(err).
|
||||
Int("workerID", workerID).
|
||||
Int("worker.id", workerID).
|
||||
Uint64("node.id", w.c.NodeID.Uint64()).
|
||||
Str("change", w.c.Change.String()).
|
||||
Msg("failed to apply change")
|
||||
}
|
||||
} else {
|
||||
log.Debug().
|
||||
Int("workerID", workerID).
|
||||
Uint64("node.id", w.nodeID.Uint64()).
|
||||
Str("change", w.c.Change.String()).
|
||||
Msg("node not found for asynchronous work - node may have disconnected")
|
||||
}
|
||||
|
||||
duration := time.Since(startTime)
|
||||
if duration > 100*time.Millisecond {
|
||||
log.Warn().
|
||||
Int("workerID", workerID).
|
||||
Uint64("node.id", w.nodeID.Uint64()).
|
||||
Str("change", w.c.Change.String()).
|
||||
Dur("duration", duration).
|
||||
Msg("slow asynchronous work processing")
|
||||
}
|
||||
|
||||
case <-b.ctx.Done():
|
||||
log.Debug().Int("workder.id", workerID).Msg("batcher context is done, exiting worker")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *LockFreeBatcher) addWork(c change.ChangeSet) {
|
||||
// For critical changes that need immediate processing, send directly
|
||||
if b.shouldProcessImmediately(c) {
|
||||
if c.SelfUpdateOnly {
|
||||
b.queueWork(work{c: c, nodeID: c.NodeID, resultCh: nil})
|
||||
return
|
||||
}
|
||||
b.nodes.Range(func(nodeID types.NodeID, _ *nodeConn) bool {
|
||||
if c.NodeID == nodeID && !c.AlsoSelf() {
|
||||
return true
|
||||
}
|
||||
b.queueWork(work{c: c, nodeID: nodeID, resultCh: nil})
|
||||
return true
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// For non-critical changes, add to batch
|
||||
b.addToBatch(c)
|
||||
func (b *LockFreeBatcher) addWork(c ...change.ChangeSet) {
|
||||
b.addToBatch(c...)
|
||||
}
|
||||
|
||||
// queueWork safely queues work
|
||||
// queueWork safely queues work.
|
||||
func (b *LockFreeBatcher) queueWork(w work) {
|
||||
b.workQueuedCount.Add(1)
|
||||
|
||||
@@ -298,46 +284,42 @@ func (b *LockFreeBatcher) queueWork(w work) {
|
||||
}
|
||||
}
|
||||
|
||||
// shouldProcessImmediately determines if a change should bypass batching
|
||||
func (b *LockFreeBatcher) shouldProcessImmediately(c change.ChangeSet) bool {
|
||||
// Process these changes immediately to avoid delaying critical functionality
|
||||
switch c.Change {
|
||||
case change.Full, change.NodeRemove, change.NodeCameOnline, change.NodeWentOffline, change.Policy:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
// addToBatch adds a change to the pending batch.
|
||||
func (b *LockFreeBatcher) addToBatch(c ...change.ChangeSet) {
|
||||
// Short circuit if any of the changes is a full update, which
|
||||
// means we can skip sending individual changes.
|
||||
if change.HasFull(c) {
|
||||
b.nodes.Range(func(nodeID types.NodeID, _ *multiChannelNodeConn) bool {
|
||||
b.pendingChanges.Store(nodeID, []change.ChangeSet{{Change: change.Full}})
|
||||
|
||||
// addToBatch adds a change to the pending batch
|
||||
func (b *LockFreeBatcher) addToBatch(c change.ChangeSet) {
|
||||
b.batchMutex.Lock()
|
||||
defer b.batchMutex.Unlock()
|
||||
|
||||
if c.SelfUpdateOnly {
|
||||
changes, _ := b.pendingChanges.LoadOrStore(c.NodeID, []change.ChangeSet{})
|
||||
changes = append(changes, c)
|
||||
b.pendingChanges.Store(c.NodeID, changes)
|
||||
return true
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
b.nodes.Range(func(nodeID types.NodeID, _ *nodeConn) bool {
|
||||
if c.NodeID == nodeID && !c.AlsoSelf() {
|
||||
return true
|
||||
}
|
||||
all, self := change.SplitAllAndSelf(c)
|
||||
|
||||
for _, changeSet := range self {
|
||||
changes, _ := b.pendingChanges.LoadOrStore(changeSet.NodeID, []change.ChangeSet{})
|
||||
changes = append(changes, changeSet)
|
||||
b.pendingChanges.Store(changeSet.NodeID, changes)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
b.nodes.Range(func(nodeID types.NodeID, _ *multiChannelNodeConn) bool {
|
||||
rel := change.RemoveUpdatesForSelf(nodeID, all)
|
||||
|
||||
changes, _ := b.pendingChanges.LoadOrStore(nodeID, []change.ChangeSet{})
|
||||
changes = append(changes, c)
|
||||
changes = append(changes, rel...)
|
||||
b.pendingChanges.Store(nodeID, changes)
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// processBatchedChanges processes all pending batched changes
|
||||
// processBatchedChanges processes all pending batched changes.
|
||||
func (b *LockFreeBatcher) processBatchedChanges() {
|
||||
b.batchMutex.Lock()
|
||||
defer b.batchMutex.Unlock()
|
||||
|
||||
if b.pendingChanges == nil {
|
||||
return
|
||||
}
|
||||
@@ -355,16 +337,69 @@ func (b *LockFreeBatcher) processBatchedChanges() {
|
||||
|
||||
// Clear the pending changes for this node
|
||||
b.pendingChanges.Delete(nodeID)
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// IsConnected is lock-free read.
|
||||
func (b *LockFreeBatcher) IsConnected(id types.NodeID) bool {
|
||||
if val, ok := b.connected.Load(id); ok {
|
||||
// nil means connected
|
||||
return val == nil
|
||||
// cleanupOfflineNodes removes nodes that have been offline for too long to prevent memory leaks.
|
||||
// TODO(kradalby): reevaluate if we want to keep this.
|
||||
func (b *LockFreeBatcher) cleanupOfflineNodes() {
|
||||
cleanupThreshold := 15 * time.Minute
|
||||
now := time.Now()
|
||||
|
||||
var nodesToCleanup []types.NodeID
|
||||
|
||||
// Find nodes that have been offline for too long
|
||||
b.connected.Range(func(nodeID types.NodeID, disconnectTime *time.Time) bool {
|
||||
if disconnectTime != nil && now.Sub(*disconnectTime) > cleanupThreshold {
|
||||
// Double-check the node doesn't have active connections
|
||||
if nodeConn, exists := b.nodes.Load(nodeID); exists {
|
||||
if !nodeConn.hasActiveConnections() {
|
||||
nodesToCleanup = append(nodesToCleanup, nodeID)
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
// Clean up the identified nodes
|
||||
for _, nodeID := range nodesToCleanup {
|
||||
log.Info().Uint64("node.id", nodeID.Uint64()).
|
||||
Dur("offline_duration", cleanupThreshold).
|
||||
Msg("Cleaning up node that has been offline for too long")
|
||||
|
||||
b.nodes.Delete(nodeID)
|
||||
b.connected.Delete(nodeID)
|
||||
b.totalNodes.Add(-1)
|
||||
}
|
||||
|
||||
if len(nodesToCleanup) > 0 {
|
||||
log.Info().Int("cleaned_nodes", len(nodesToCleanup)).
|
||||
Msg("Completed cleanup of long-offline nodes")
|
||||
}
|
||||
}
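A small sketch of the bookkeeping used above (assumed field names, not the batcher's real types): a nil timestamp means connected, a non-nil timestamp records when the node went offline, and entries older than the threshold are pruned.

	package main

	import (
		"fmt"
		"sync"
		"time"
	)

	type offlineTracker struct {
		mu        sync.Mutex
		lastSeen  map[uint64]*time.Time // nil = connected, non-nil = disconnect time
		threshold time.Duration
	}

	// prune removes nodes that have been offline longer than the threshold
	// and returns their IDs so callers can drop any related state.
	func (t *offlineTracker) prune(now time.Time) []uint64 {
		t.mu.Lock()
		defer t.mu.Unlock()

		var removed []uint64
		for id, disconnected := range t.lastSeen {
			if disconnected != nil && now.Sub(*disconnected) > t.threshold {
				delete(t.lastSeen, id)
				removed = append(removed, id)
			}
		}

		return removed
	}

	func main() {
		past := time.Now().Add(-30 * time.Minute)
		tr := &offlineTracker{
			lastSeen:  map[uint64]*time.Time{1: nil, 2: &past},
			threshold: 15 * time.Minute,
		}
		fmt.Println(tr.prune(time.Now())) // [2]
	}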
|
||||
|
||||
// IsConnected is lock-free read that checks if a node has any active connections.
|
||||
func (b *LockFreeBatcher) IsConnected(id types.NodeID) bool {
|
||||
// First check if we have active connections for this node
|
||||
if nodeConn, exists := b.nodes.Load(id); exists {
|
||||
if nodeConn.hasActiveConnections() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Check disconnected timestamp with grace period
|
||||
val, ok := b.connected.Load(id)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// nil means connected
|
||||
if val == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -372,9 +407,26 @@ func (b *LockFreeBatcher) IsConnected(id types.NodeID) bool {
|
||||
func (b *LockFreeBatcher) ConnectedMap() *xsync.Map[types.NodeID, bool] {
|
||||
ret := xsync.NewMap[types.NodeID, bool]()
|
||||
|
||||
// First, add all nodes with active connections
|
||||
b.nodes.Range(func(id types.NodeID, nodeConn *multiChannelNodeConn) bool {
|
||||
if nodeConn.hasActiveConnections() {
|
||||
ret.Store(id, true)
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
// Then add all entries from the connected map
|
||||
b.connected.Range(func(id types.NodeID, val *time.Time) bool {
|
||||
// nil means connected
|
||||
ret.Store(id, val == nil)
|
||||
// Only add if not already added as connected above
|
||||
if _, exists := ret.Load(id); !exists {
|
||||
if val == nil {
|
||||
// nil means connected
|
||||
ret.Store(id, true)
|
||||
} else {
|
||||
// timestamp means disconnected
|
||||
ret.Store(id, false)
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
@@ -398,94 +450,269 @@ func (b *LockFreeBatcher) MapResponseFromChange(id types.NodeID, c change.Change
|
||||
}
|
||||
}
|
||||
|
||||
// connectionData holds the channel and connection parameters.
|
||||
type connectionData struct {
|
||||
c chan<- *tailcfg.MapResponse
|
||||
version tailcfg.CapabilityVersion
|
||||
closed atomic.Bool // Track if this connection has been closed
|
||||
// connectionEntry represents a single connection to a node.
|
||||
type connectionEntry struct {
|
||||
id string // unique connection ID
|
||||
c chan<- *tailcfg.MapResponse
|
||||
version tailcfg.CapabilityVersion
|
||||
created time.Time
|
||||
lastUsed atomic.Int64 // Unix timestamp of last successful send
|
||||
}
|
||||
|
||||
// nodeConn described the node connection and its associated data.
|
||||
type nodeConn struct {
|
||||
// multiChannelNodeConn manages multiple concurrent connections for a single node.
|
||||
type multiChannelNodeConn struct {
|
||||
id types.NodeID
|
||||
mapper *mapper
|
||||
|
||||
// Atomic pointer to connection data - allows lock-free updates
|
||||
connData atomic.Pointer[connectionData]
|
||||
mutex sync.RWMutex
|
||||
connections []*connectionEntry
|
||||
|
||||
updateCount atomic.Int64
|
||||
}
|
||||
|
||||
func newNodeConn(id types.NodeID, c chan<- *tailcfg.MapResponse, version tailcfg.CapabilityVersion, mapper *mapper) *nodeConn {
|
||||
nc := &nodeConn{
|
||||
// generateConnectionID generates a unique connection identifier.
|
||||
func generateConnectionID() string {
|
||||
bytes := make([]byte, 8)
|
||||
rand.Read(bytes)
|
||||
return fmt.Sprintf("%x", bytes)
|
||||
}
|
||||
|
||||
// newMultiChannelNodeConn creates a new multi-channel node connection.
|
||||
func newMultiChannelNodeConn(id types.NodeID, mapper *mapper) *multiChannelNodeConn {
|
||||
return &multiChannelNodeConn{
|
||||
id: id,
|
||||
mapper: mapper,
|
||||
}
|
||||
|
||||
// Initialize connection data
|
||||
data := &connectionData{
|
||||
c: c,
|
||||
version: version,
|
||||
}
|
||||
nc.connData.Store(data)
|
||||
|
||||
return nc
|
||||
}
|
||||
|
||||
// updateConnection atomically updates connection parameters.
|
||||
func (nc *nodeConn) updateConnection(c chan<- *tailcfg.MapResponse, version tailcfg.CapabilityVersion) {
|
||||
newData := &connectionData{
|
||||
c: c,
|
||||
version: version,
|
||||
func (mc *multiChannelNodeConn) close() {
|
||||
mc.mutex.Lock()
|
||||
defer mc.mutex.Unlock()
|
||||
|
||||
for _, conn := range mc.connections {
|
||||
close(conn.c)
|
||||
}
|
||||
nc.connData.Store(newData)
|
||||
}
|
||||
|
||||
// matchesChannel checks if the given channel matches current connection.
|
||||
func (nc *nodeConn) matchesChannel(c chan<- *tailcfg.MapResponse) bool {
|
||||
data := nc.connData.Load()
|
||||
// addConnection adds a new connection.
|
||||
func (mc *multiChannelNodeConn) addConnection(entry *connectionEntry) {
|
||||
mutexWaitStart := time.Now()
|
||||
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", entry.c)).Str("conn.id", entry.id).
|
||||
Msg("addConnection: waiting for mutex - POTENTIAL CONTENTION POINT")
|
||||
|
||||
mc.mutex.Lock()
|
||||
mutexWaitDur := time.Since(mutexWaitStart)
|
||||
defer mc.mutex.Unlock()
|
||||
|
||||
mc.connections = append(mc.connections, entry)
|
||||
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", entry.c)).Str("conn.id", entry.id).
|
||||
Int("total_connections", len(mc.connections)).
|
||||
Dur("mutex_wait_time", mutexWaitDur).
|
||||
Msg("Successfully added connection after mutex wait")
|
||||
}
|
||||
|
||||
// removeConnectionByChannel removes a connection by matching channel pointer.
|
||||
func (mc *multiChannelNodeConn) removeConnectionByChannel(c chan<- *tailcfg.MapResponse) bool {
|
||||
mc.mutex.Lock()
|
||||
defer mc.mutex.Unlock()
|
||||
|
||||
for i, entry := range mc.connections {
|
||||
if entry.c == c {
|
||||
// Remove this connection
|
||||
mc.connections = append(mc.connections[:i], mc.connections[i+1:]...)
|
||||
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", c)).
|
||||
Int("remaining_connections", len(mc.connections)).
|
||||
Msg("Successfully removed connection")
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// hasActiveConnections checks if the node has any active connections.
|
||||
func (mc *multiChannelNodeConn) hasActiveConnections() bool {
|
||||
mc.mutex.RLock()
|
||||
defer mc.mutex.RUnlock()
|
||||
|
||||
return len(mc.connections) > 0
|
||||
}
|
||||
|
||||
// getActiveConnectionCount returns the number of active connections.
|
||||
func (mc *multiChannelNodeConn) getActiveConnectionCount() int {
|
||||
mc.mutex.RLock()
|
||||
defer mc.mutex.RUnlock()
|
||||
|
||||
return len(mc.connections)
|
||||
}
|
||||
|
||||
// send broadcasts data to all active connections for the node.
|
||||
func (mc *multiChannelNodeConn) send(data *tailcfg.MapResponse) error {
|
||||
if data == nil {
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
// Compare channel pointers directly
|
||||
return data.c == c
|
||||
|
||||
mc.mutex.Lock()
|
||||
defer mc.mutex.Unlock()
|
||||
|
||||
if len(mc.connections) == 0 {
|
||||
// During rapid reconnection, nodes may temporarily have no active connections
|
||||
// This is not an error - the node will receive a full map when it reconnects
|
||||
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).
|
||||
Msg("send: skipping send to node with no active connections (likely rapid reconnection)")
|
||||
return nil // Return success instead of error
|
||||
}
|
||||
|
||||
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).
|
||||
Int("total_connections", len(mc.connections)).
|
||||
Msg("send: broadcasting to all connections")
|
||||
|
||||
var lastErr error
|
||||
successCount := 0
|
||||
var failedConnections []int // Track failed connections for removal
|
||||
|
||||
// Send to all connections
|
||||
for i, conn := range mc.connections {
|
||||
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", conn.c)).
|
||||
Str("conn.id", conn.id).Int("connection_index", i).
|
||||
Msg("send: attempting to send to connection")
|
||||
|
||||
if err := conn.send(data); err != nil {
|
||||
lastErr = err
|
||||
failedConnections = append(failedConnections, i)
|
||||
log.Warn().Err(err).
|
||||
Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", conn.c)).
|
||||
Str("conn.id", conn.id).Int("connection_index", i).
|
||||
Msg("send: connection send failed")
|
||||
} else {
|
||||
successCount++
|
||||
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", conn.c)).
|
||||
Str("conn.id", conn.id).Int("connection_index", i).
|
||||
Msg("send: successfully sent to connection")
|
||||
}
|
||||
}
|
||||
|
||||
// Remove failed connections (in reverse order to maintain indices)
|
||||
for i := len(failedConnections) - 1; i >= 0; i-- {
|
||||
idx := failedConnections[i]
|
||||
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).
|
||||
Str("conn.id", mc.connections[idx].id).
|
||||
Msg("send: removing failed connection")
|
||||
mc.connections = append(mc.connections[:idx], mc.connections[idx+1:]...)
|
||||
}
|
||||
|
||||
mc.updateCount.Add(1)
|
||||
|
||||
log.Info().Uint64("node.id", mc.id.Uint64()).
|
||||
Int("successful_sends", successCount).
|
||||
Int("failed_connections", len(failedConnections)).
|
||||
Int("remaining_connections", len(mc.connections)).
|
||||
Msg("send: completed broadcast")
|
||||
|
||||
// Success if at least one send succeeded
|
||||
if successCount > 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("node %d: all connections failed, last error: %w", mc.id, lastErr)
|
||||
}
|
||||
|
||||
// compressAndVersion atomically reads connection settings.
|
||||
func (nc *nodeConn) version() tailcfg.CapabilityVersion {
|
||||
data := nc.connData.Load()
|
||||
// send sends data to a single connection entry with timeout-based stale connection detection.
|
||||
func (entry *connectionEntry) send(data *tailcfg.MapResponse) error {
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Use a short timeout to detect stale connections where the client isn't reading the channel.
|
||||
// This is critical for detecting Docker containers that are forcefully terminated
|
||||
// but still have channels that appear open.
|
||||
select {
|
||||
case entry.c <- data:
|
||||
// Update last used timestamp on successful send
|
||||
entry.lastUsed.Store(time.Now().Unix())
|
||||
return nil
|
||||
case <-time.After(50 * time.Millisecond):
|
||||
// Connection is likely stale - client isn't reading from channel
|
||||
// This catches the case where Docker containers are killed but channels remain open
|
||||
return fmt.Errorf("connection %s: timeout sending to channel (likely stale connection)", entry.id)
|
||||
}
|
||||
}
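// A self-contained sketch of the stale-connection pattern used by
// connectionEntry.send above: a plain channel send would block forever if the
// reader is gone but the channel is still open, so the send races a short
// timer instead. The 50ms value mirrors the code above; the helper name and
// everything else here is illustrative.
package main

import (
	"errors"
	"fmt"
	"time"
)

var errStale = errors.New("timeout sending to channel (likely stale connection)")

func trySend[T any](ch chan<- T, v T, timeout time.Duration) error {
	select {
	case ch <- v:
		return nil // a live reader (or a free buffer slot) accepted the value
	case <-time.After(timeout):
		return errStale // nobody is draining the channel
	}
}

func main() {
	live := make(chan string, 1) // buffered, so the send succeeds immediately
	stale := make(chan string)   // never read from, simulating a dead client

	fmt.Println(trySend(live, "update", 50*time.Millisecond))  // <nil>
	fmt.Println(trySend(stale, "update", 50*time.Millisecond)) // timeout error
}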
|
||||
|
||||
// nodeID returns the node ID.
|
||||
func (mc *multiChannelNodeConn) nodeID() types.NodeID {
|
||||
return mc.id
|
||||
}
|
||||
|
||||
// version returns the capability version from the first active connection.
|
||||
// All connections for a node should have the same version in practice.
|
||||
func (mc *multiChannelNodeConn) version() tailcfg.CapabilityVersion {
|
||||
mc.mutex.RLock()
|
||||
defer mc.mutex.RUnlock()
|
||||
|
||||
if len(mc.connections) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
return data.version
|
||||
return mc.connections[0].version
|
||||
}
|
||||
|
||||
func (nc *nodeConn) nodeID() types.NodeID {
|
||||
return nc.id
|
||||
// change applies a change to all active connections for the node.
|
||||
func (mc *multiChannelNodeConn) change(c change.ChangeSet) error {
|
||||
return handleNodeChange(mc, mc.mapper, c)
|
||||
}
|
||||
|
||||
func (nc *nodeConn) change(c change.ChangeSet) error {
|
||||
return handleNodeChange(nc, nc.mapper, c)
|
||||
// DebugNodeInfo contains debug information about a node's connections.
|
||||
type DebugNodeInfo struct {
|
||||
Connected bool `json:"connected"`
|
||||
ActiveConnections int `json:"active_connections"`
|
||||
}
|
||||
|
||||
// send sends data to the node's channel.
|
||||
// The node will pick it up and send it to the HTTP handler.
|
||||
func (nc *nodeConn) send(data *tailcfg.MapResponse) error {
|
||||
connData := nc.connData.Load()
|
||||
if connData == nil {
|
||||
return fmt.Errorf("node %d: no connection data", nc.id)
|
||||
}
|
||||
// Debug returns a pre-baked map of node debug information for the debug interface.
|
||||
func (b *LockFreeBatcher) Debug() map[types.NodeID]DebugNodeInfo {
|
||||
result := make(map[types.NodeID]DebugNodeInfo)
|
||||
|
||||
// Check if connection has been closed
|
||||
if connData.closed.Load() {
|
||||
return fmt.Errorf("node %d: connection closed", nc.id)
|
||||
}
|
||||
// Get all nodes with their connection status using immediate connection logic
|
||||
// (no grace period) for debug purposes
|
||||
b.nodes.Range(func(id types.NodeID, nodeConn *multiChannelNodeConn) bool {
|
||||
nodeConn.mutex.RLock()
|
||||
activeConnCount := len(nodeConn.connections)
|
||||
nodeConn.mutex.RUnlock()
|
||||
|
||||
// TODO(kradalby): We might need some sort of timeout here if the client is not reading
|
||||
// the channel. That might mean that we are sending to a node that has gone offline, but
|
||||
// the channel is still open.
|
||||
connData.c <- data
|
||||
nc.updateCount.Add(1)
|
||||
return nil
|
||||
// Use immediate connection status: if active connections exist, node is connected
|
||||
// If not, check the connected map for nil (connected) vs timestamp (disconnected)
|
||||
connected := false
|
||||
if activeConnCount > 0 {
|
||||
connected = true
|
||||
} else {
|
||||
// Check connected map for immediate status
|
||||
if val, ok := b.connected.Load(id); ok && val == nil {
|
||||
connected = true
|
||||
}
|
||||
}
|
||||
|
||||
result[id] = DebugNodeInfo{
|
||||
Connected: connected,
|
||||
ActiveConnections: activeConnCount,
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
// Add all entries from the connected map to capture both connected and disconnected nodes
|
||||
b.connected.Range(func(id types.NodeID, val *time.Time) bool {
|
||||
// Only add if not already processed above
|
||||
if _, exists := result[id]; !exists {
|
||||
// Use immediate connection status for debug (no grace period)
|
||||
connected := (val == nil) // nil means connected, timestamp means disconnected
|
||||
result[id] = DebugNodeInfo{
|
||||
Connected: connected,
|
||||
ActiveConnections: 0,
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (b *LockFreeBatcher) DebugMapResponses() (map[types.NodeID][]tailcfg.MapResponse, error) {
|
||||
return b.mapper.debugMapResponses()
|
||||
}
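// Debug() returns a plain map keyed by node ID, which makes it easy to expose
// as JSON. The sketch below shows one hedged way to wire such a snapshot into
// an HTTP handler; the route, handler name and snapshot callback are
// assumptions for illustration and are not part of this diff.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// debugNodeInfo mirrors the DebugNodeInfo shape defined above.
type debugNodeInfo struct {
	Connected         bool `json:"connected"`
	ActiveConnections int  `json:"active_connections"`
}

func debugHandler(snapshot func() map[uint64]debugNodeInfo) http.HandlerFunc {
	return func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		if err := json.NewEncoder(w).Encode(snapshot()); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
}

func main() {
	http.HandleFunc("/debug/batcher", debugHandler(func() map[uint64]debugNodeInfo {
		return map[uint64]debugNodeInfo{1: {Connected: true, ActiveConnections: 2}}
	}))
	fmt.Println(http.ListenAndServe("127.0.0.1:8080", nil))
}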
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,27 +1,42 @@
|
||||
package mapper
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/netip"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/policy/matcher"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/views"
|
||||
"tailscale.com/util/multierr"
|
||||
)
|
||||
|
||||
// MapResponseBuilder provides a fluent interface for building tailcfg.MapResponse
|
||||
// MapResponseBuilder provides a fluent interface for building tailcfg.MapResponse.
|
||||
type MapResponseBuilder struct {
|
||||
resp *tailcfg.MapResponse
|
||||
mapper *mapper
|
||||
nodeID types.NodeID
|
||||
capVer tailcfg.CapabilityVersion
|
||||
errs []error
|
||||
|
||||
debugType debugType
|
||||
}
|
||||
|
||||
// NewMapResponseBuilder creates a new builder with basic fields set
|
||||
type debugType string
|
||||
|
||||
const (
|
||||
fullResponseDebug debugType = "full"
|
||||
selfResponseDebug debugType = "self"
|
||||
patchResponseDebug debugType = "patch"
|
||||
removeResponseDebug debugType = "remove"
|
||||
changeResponseDebug debugType = "change"
|
||||
derpResponseDebug debugType = "derp"
|
||||
)
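// The builder records failures in errs instead of returning an error from
// every With* call, so callers check a single error at Build(). A reduced,
// self-contained illustration of that error-accumulation pattern follows;
// this is not the real MapResponseBuilder, just the shape of the idea, and
// the real code aggregates with multierr.New rather than errors.Join.
package main

import (
	"errors"
	"fmt"
)

type miniBuilder struct {
	domain string
	errs   []error
}

func (b *miniBuilder) WithDomain(d string) *miniBuilder {
	if d == "" {
		b.errs = append(b.errs, errors.New("empty domain"))
		return b
	}
	b.domain = d
	return b
}

func (b *miniBuilder) Build() (string, error) {
	if len(b.errs) > 0 {
		return "", errors.Join(b.errs...)
	}
	return b.domain, nil
}

func main() {
	_, err := new(miniBuilder).WithDomain("").Build()
	fmt.Println(err) // empty domain

	out, _ := new(miniBuilder).WithDomain("example.ts.net").Build()
	fmt.Println(out) // example.ts.net
}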
|
||||
|
||||
// NewMapResponseBuilder creates a new builder with basic fields set.
|
||||
func (m *mapper) NewMapResponseBuilder(nodeID types.NodeID) *MapResponseBuilder {
|
||||
now := time.Now()
|
||||
return &MapResponseBuilder{
|
||||
@@ -35,37 +50,37 @@ func (m *mapper) NewMapResponseBuilder(nodeID types.NodeID) *MapResponseBuilder
|
||||
}
|
||||
}
|
||||
|
||||
// addError adds an error to the builder's error list
|
||||
// addError adds an error to the builder's error list.
|
||||
func (b *MapResponseBuilder) addError(err error) {
|
||||
if err != nil {
|
||||
b.errs = append(b.errs, err)
|
||||
}
|
||||
}
|
||||
|
||||
// hasErrors returns true if the builder has accumulated any errors
|
||||
// hasErrors returns true if the builder has accumulated any errors.
|
||||
func (b *MapResponseBuilder) hasErrors() bool {
|
||||
return len(b.errs) > 0
|
||||
}
|
||||
|
||||
// WithCapabilityVersion sets the capability version for the response
|
||||
// WithCapabilityVersion sets the capability version for the response.
|
||||
func (b *MapResponseBuilder) WithCapabilityVersion(capVer tailcfg.CapabilityVersion) *MapResponseBuilder {
|
||||
b.capVer = capVer
|
||||
return b
|
||||
}
|
||||
|
||||
// WithSelfNode adds the requesting node to the response
|
||||
// WithSelfNode adds the requesting node to the response.
|
||||
func (b *MapResponseBuilder) WithSelfNode() *MapResponseBuilder {
|
||||
node, err := b.mapper.state.GetNodeByID(b.nodeID)
|
||||
if err != nil {
|
||||
b.addError(err)
|
||||
nv, ok := b.mapper.state.GetNodeByID(b.nodeID)
|
||||
if !ok {
|
||||
b.addError(errors.New("node not found"))
|
||||
return b
|
||||
}
|
||||
|
||||
_, matchers := b.mapper.state.Filter()
|
||||
tailnode, err := tailNode(
|
||||
node.View(), b.capVer, b.mapper.state,
|
||||
nv, b.capVer, b.mapper.state,
|
||||
func(id types.NodeID) []netip.Prefix {
|
||||
return policy.ReduceRoutes(node.View(), b.mapper.state.GetNodePrimaryRoutes(id), matchers)
|
||||
return policy.ReduceRoutes(nv, b.mapper.state.GetNodePrimaryRoutes(id), matchers)
|
||||
},
|
||||
b.mapper.cfg)
|
||||
if err != nil {
|
||||
@@ -74,29 +89,38 @@ func (b *MapResponseBuilder) WithSelfNode() *MapResponseBuilder {
|
||||
}
|
||||
|
||||
b.resp.Node = tailnode
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// WithDERPMap adds the DERP map to the response
|
||||
func (b *MapResponseBuilder) WithDebugType(t debugType) *MapResponseBuilder {
|
||||
if debugDumpMapResponsePath != "" {
|
||||
b.debugType = t
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// WithDERPMap adds the DERP map to the response.
|
||||
func (b *MapResponseBuilder) WithDERPMap() *MapResponseBuilder {
|
||||
b.resp.DERPMap = b.mapper.state.DERPMap()
|
||||
b.resp.DERPMap = b.mapper.state.DERPMap().AsStruct()
|
||||
return b
|
||||
}
|
||||
|
||||
// WithDomain adds the domain configuration
|
||||
// WithDomain adds the domain configuration.
|
||||
func (b *MapResponseBuilder) WithDomain() *MapResponseBuilder {
|
||||
b.resp.Domain = b.mapper.cfg.Domain()
|
||||
return b
|
||||
}
|
||||
|
||||
// WithCollectServicesDisabled sets the collect services flag to false
|
||||
// WithCollectServicesDisabled sets the collect services flag to false.
|
||||
func (b *MapResponseBuilder) WithCollectServicesDisabled() *MapResponseBuilder {
|
||||
b.resp.CollectServices.Set(false)
|
||||
return b
|
||||
}
|
||||
|
||||
// WithDebugConfig adds debug configuration
|
||||
// It disables log tailing if the mapper's LogTail is not enabled
|
||||
// It disables log tailing if the mapper's LogTail is not enabled.
|
||||
func (b *MapResponseBuilder) WithDebugConfig() *MapResponseBuilder {
|
||||
b.resp.Debug = &tailcfg.Debug{
|
||||
DisableLogTail: !b.mapper.cfg.LogTail.Enabled,
|
||||
@@ -104,72 +128,78 @@ func (b *MapResponseBuilder) WithDebugConfig() *MapResponseBuilder {
|
||||
return b
|
||||
}
|
||||
|
||||
// WithSSHPolicy adds SSH policy configuration for the requesting node
|
||||
// WithSSHPolicy adds SSH policy configuration for the requesting node.
|
||||
func (b *MapResponseBuilder) WithSSHPolicy() *MapResponseBuilder {
|
||||
node, err := b.mapper.state.GetNodeByID(b.nodeID)
|
||||
if err != nil {
|
||||
b.addError(err)
|
||||
node, ok := b.mapper.state.GetNodeByID(b.nodeID)
|
||||
if !ok {
|
||||
b.addError(errors.New("node not found"))
|
||||
return b
|
||||
}
|
||||
|
||||
sshPolicy, err := b.mapper.state.SSHPolicy(node.View())
|
||||
sshPolicy, err := b.mapper.state.SSHPolicy(node)
|
||||
if err != nil {
|
||||
b.addError(err)
|
||||
return b
|
||||
}
|
||||
|
||||
b.resp.SSHPolicy = sshPolicy
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// WithDNSConfig adds DNS configuration for the requesting node
|
||||
// WithDNSConfig adds DNS configuration for the requesting node.
|
||||
func (b *MapResponseBuilder) WithDNSConfig() *MapResponseBuilder {
|
||||
node, err := b.mapper.state.GetNodeByID(b.nodeID)
|
||||
if err != nil {
|
||||
b.addError(err)
|
||||
node, ok := b.mapper.state.GetNodeByID(b.nodeID)
|
||||
if !ok {
|
||||
b.addError(errors.New("node not found"))
|
||||
return b
|
||||
}
|
||||
|
||||
b.resp.DNSConfig = generateDNSConfig(b.mapper.cfg, node)
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// WithUserProfiles adds user profiles for the requesting node and given peers
|
||||
func (b *MapResponseBuilder) WithUserProfiles(peers types.Nodes) *MapResponseBuilder {
|
||||
node, err := b.mapper.state.GetNodeByID(b.nodeID)
|
||||
if err != nil {
|
||||
b.addError(err)
|
||||
// WithUserProfiles adds user profiles for the requesting node and given peers.
|
||||
func (b *MapResponseBuilder) WithUserProfiles(peers views.Slice[types.NodeView]) *MapResponseBuilder {
|
||||
node, ok := b.mapper.state.GetNodeByID(b.nodeID)
|
||||
if !ok {
|
||||
b.addError(errors.New("node not found"))
|
||||
return b
|
||||
}
|
||||
|
||||
b.resp.UserProfiles = generateUserProfiles(node, peers)
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// WithPacketFilters adds packet filter rules based on policy
|
||||
// WithPacketFilters adds packet filter rules based on policy.
|
||||
func (b *MapResponseBuilder) WithPacketFilters() *MapResponseBuilder {
|
||||
node, err := b.mapper.state.GetNodeByID(b.nodeID)
|
||||
node, ok := b.mapper.state.GetNodeByID(b.nodeID)
|
||||
if !ok {
|
||||
b.addError(errors.New("node not found"))
|
||||
return b
|
||||
}
|
||||
|
||||
filter, err := b.mapper.state.FilterForNode(node)
|
||||
if err != nil {
|
||||
b.addError(err)
|
||||
return b
|
||||
}
|
||||
|
||||
filter, _ := b.mapper.state.Filter()
|
||||
|
||||
// CapVer 81: 2023-11-17: MapResponse.PacketFilters (incremental packet filter updates)
|
||||
// Currently, we do not send incremental package filters, however using the
|
||||
// new PacketFilters field and "base" allows us to send a full update when we
|
||||
// have to send an empty list, avoiding the hack in the else block.
|
||||
b.resp.PacketFilters = map[string][]tailcfg.FilterRule{
|
||||
"base": policy.ReduceFilterRules(node.View(), filter),
|
||||
"base": policy.ReduceFilterRules(node, filter),
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// WithPeers adds full peer list with policy filtering (for full map response)
|
||||
func (b *MapResponseBuilder) WithPeers(peers types.Nodes) *MapResponseBuilder {
|
||||
|
||||
// WithPeers adds full peer list with policy filtering (for full map response).
|
||||
func (b *MapResponseBuilder) WithPeers(peers views.Slice[types.NodeView]) *MapResponseBuilder {
|
||||
tailPeers, err := b.buildTailPeers(peers)
|
||||
if err != nil {
|
||||
b.addError(err)
|
||||
@@ -177,12 +207,12 @@ func (b *MapResponseBuilder) WithPeers(peers types.Nodes) *MapResponseBuilder {
|
||||
}
|
||||
|
||||
b.resp.Peers = tailPeers
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// WithPeerChanges adds changed peers with policy filtering (for incremental updates)
|
||||
func (b *MapResponseBuilder) WithPeerChanges(peers types.Nodes) *MapResponseBuilder {
|
||||
|
||||
// WithPeerChanges adds changed peers with policy filtering (for incremental updates).
|
||||
func (b *MapResponseBuilder) WithPeerChanges(peers views.Slice[types.NodeView]) *MapResponseBuilder {
|
||||
tailPeers, err := b.buildTailPeers(peers)
|
||||
if err != nil {
|
||||
b.addError(err)
|
||||
@@ -190,31 +220,38 @@ func (b *MapResponseBuilder) WithPeerChanges(peers types.Nodes) *MapResponseBuil
|
||||
}
|
||||
|
||||
b.resp.PeersChanged = tailPeers
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// buildTailPeers converts types.Nodes to []tailcfg.Node with policy filtering and sorting
|
||||
func (b *MapResponseBuilder) buildTailPeers(peers types.Nodes) ([]*tailcfg.Node, error) {
|
||||
node, err := b.mapper.state.GetNodeByID(b.nodeID)
|
||||
// buildTailPeers converts views.Slice[types.NodeView] to []tailcfg.Node with policy filtering and sorting.
|
||||
func (b *MapResponseBuilder) buildTailPeers(peers views.Slice[types.NodeView]) ([]*tailcfg.Node, error) {
|
||||
node, ok := b.mapper.state.GetNodeByID(b.nodeID)
|
||||
if !ok {
|
||||
return nil, errors.New("node not found")
|
||||
}
|
||||
|
||||
// Use per-node filter to handle autogroup:self
|
||||
filter, err := b.mapper.state.FilterForNode(node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filter, matchers := b.mapper.state.Filter()
|
||||
matchers := matcher.MatchesFromFilterRules(filter)
|
||||
|
||||
// If there are filter rules present, see if there are any nodes that cannot
|
||||
// access each-other at all and remove them from the peers.
|
||||
var changedViews views.Slice[types.NodeView]
|
||||
if len(filter) > 0 {
|
||||
changedViews = policy.ReduceNodes(node.View(), peers.ViewSlice(), matchers)
|
||||
changedViews = policy.ReduceNodes(node, peers, matchers)
|
||||
} else {
|
||||
changedViews = peers.ViewSlice()
|
||||
changedViews = peers
|
||||
}
|
||||
|
||||
tailPeers, err := tailNodes(
|
||||
changedViews, b.capVer, b.mapper.state,
|
||||
func(id types.NodeID) []netip.Prefix {
|
||||
return policy.ReduceRoutes(node.View(), b.mapper.state.GetNodePrimaryRoutes(id), matchers)
|
||||
return policy.ReduceRoutes(node, b.mapper.state.GetNodePrimaryRoutes(id), matchers)
|
||||
},
|
||||
b.mapper.cfg)
|
||||
if err != nil {
|
||||
@@ -229,30 +266,30 @@ func (b *MapResponseBuilder) buildTailPeers(peers types.Nodes) ([]*tailcfg.Node,
|
||||
return tailPeers, nil
|
||||
}
|
||||
|
||||
// WithPeerChangedPatch adds peer change patches
|
||||
// WithPeerChangedPatch adds peer change patches.
|
||||
func (b *MapResponseBuilder) WithPeerChangedPatch(changes []*tailcfg.PeerChange) *MapResponseBuilder {
|
||||
b.resp.PeersChangedPatch = changes
|
||||
return b
|
||||
}
|
||||
|
||||
// WithPeersRemoved adds removed peer IDs
|
||||
// WithPeersRemoved adds removed peer IDs.
|
||||
func (b *MapResponseBuilder) WithPeersRemoved(removedIDs ...types.NodeID) *MapResponseBuilder {
|
||||
|
||||
var tailscaleIDs []tailcfg.NodeID
|
||||
for _, id := range removedIDs {
|
||||
tailscaleIDs = append(tailscaleIDs, id.NodeID())
|
||||
}
|
||||
b.resp.PeersRemoved = tailscaleIDs
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// Build finalizes the response and returns marshaled bytes
|
||||
func (b *MapResponseBuilder) Build(messages ...string) (*tailcfg.MapResponse, error) {
|
||||
func (b *MapResponseBuilder) Build() (*tailcfg.MapResponse, error) {
|
||||
if len(b.errs) > 0 {
|
||||
return nil, multierr.New(b.errs...)
|
||||
}
|
||||
if debugDumpMapResponsePath != "" {
|
||||
writeDebugMapResponse(b.resp, b.nodeID)
|
||||
writeDebugMapResponse(b.resp, b.debugType, b.nodeID)
|
||||
}
|
||||
|
||||
return b.resp, nil
|
||||
|
||||
@@ -18,17 +18,17 @@ func TestMapResponseBuilder_Basic(t *testing.T) {
|
||||
Enabled: true,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
mockState := &state.State{}
|
||||
m := &mapper{
|
||||
cfg: cfg,
|
||||
state: mockState,
|
||||
}
|
||||
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
|
||||
|
||||
builder := m.NewMapResponseBuilder(nodeID)
|
||||
|
||||
|
||||
// Test basic builder creation
|
||||
assert.NotNil(t, builder)
|
||||
assert.Equal(t, nodeID, builder.nodeID)
|
||||
@@ -45,13 +45,13 @@ func TestMapResponseBuilder_WithCapabilityVersion(t *testing.T) {
|
||||
cfg: cfg,
|
||||
state: mockState,
|
||||
}
|
||||
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
capVer := tailcfg.CapabilityVersion(42)
|
||||
|
||||
|
||||
builder := m.NewMapResponseBuilder(nodeID).
|
||||
WithCapabilityVersion(capVer)
|
||||
|
||||
|
||||
assert.Equal(t, capVer, builder.capVer)
|
||||
assert.False(t, builder.hasErrors())
|
||||
}
|
||||
@@ -62,18 +62,18 @@ func TestMapResponseBuilder_WithDomain(t *testing.T) {
|
||||
ServerURL: "https://test.example.com",
|
||||
BaseDomain: domain,
|
||||
}
|
||||
|
||||
|
||||
mockState := &state.State{}
|
||||
m := &mapper{
|
||||
cfg: cfg,
|
||||
state: mockState,
|
||||
}
|
||||
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
|
||||
|
||||
builder := m.NewMapResponseBuilder(nodeID).
|
||||
WithDomain()
|
||||
|
||||
|
||||
assert.Equal(t, domain, builder.resp.Domain)
|
||||
assert.False(t, builder.hasErrors())
|
||||
}
|
||||
@@ -85,12 +85,12 @@ func TestMapResponseBuilder_WithCollectServicesDisabled(t *testing.T) {
|
||||
cfg: cfg,
|
||||
state: mockState,
|
||||
}
|
||||
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
|
||||
|
||||
builder := m.NewMapResponseBuilder(nodeID).
|
||||
WithCollectServicesDisabled()
|
||||
|
||||
|
||||
value, isSet := builder.resp.CollectServices.Get()
|
||||
assert.True(t, isSet)
|
||||
assert.False(t, value)
|
||||
@@ -99,22 +99,22 @@ func TestMapResponseBuilder_WithCollectServicesDisabled(t *testing.T) {
|
||||
|
||||
func TestMapResponseBuilder_WithDebugConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
name string
|
||||
logTailEnabled bool
|
||||
expected bool
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "LogTail enabled",
|
||||
name: "LogTail enabled",
|
||||
logTailEnabled: true,
|
||||
expected: false, // DisableLogTail should be false when LogTail is enabled
|
||||
expected: false, // DisableLogTail should be false when LogTail is enabled
|
||||
},
|
||||
{
|
||||
name: "LogTail disabled",
|
||||
name: "LogTail disabled",
|
||||
logTailEnabled: false,
|
||||
expected: true, // DisableLogTail should be true when LogTail is disabled
|
||||
expected: true, // DisableLogTail should be true when LogTail is disabled
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &types.Config{
|
||||
@@ -127,12 +127,12 @@ func TestMapResponseBuilder_WithDebugConfig(t *testing.T) {
|
||||
cfg: cfg,
|
||||
state: mockState,
|
||||
}
|
||||
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
|
||||
|
||||
builder := m.NewMapResponseBuilder(nodeID).
|
||||
WithDebugConfig()
|
||||
|
||||
|
||||
require.NotNil(t, builder.resp.Debug)
|
||||
assert.Equal(t, tt.expected, builder.resp.Debug.DisableLogTail)
|
||||
assert.False(t, builder.hasErrors())
|
||||
@@ -147,22 +147,22 @@ func TestMapResponseBuilder_WithPeerChangedPatch(t *testing.T) {
|
||||
cfg: cfg,
|
||||
state: mockState,
|
||||
}
|
||||
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
changes := []*tailcfg.PeerChange{
|
||||
{
|
||||
NodeID: 123,
|
||||
NodeID: 123,
|
||||
DERPRegion: 1,
|
||||
},
|
||||
{
|
||||
NodeID: 456,
|
||||
NodeID: 456,
|
||||
DERPRegion: 2,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
builder := m.NewMapResponseBuilder(nodeID).
|
||||
WithPeerChangedPatch(changes)
|
||||
|
||||
|
||||
assert.Equal(t, changes, builder.resp.PeersChangedPatch)
|
||||
assert.False(t, builder.hasErrors())
|
||||
}
|
||||
@@ -174,14 +174,14 @@ func TestMapResponseBuilder_WithPeersRemoved(t *testing.T) {
|
||||
cfg: cfg,
|
||||
state: mockState,
|
||||
}
|
||||
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
removedID1 := types.NodeID(123)
|
||||
removedID2 := types.NodeID(456)
|
||||
|
||||
|
||||
builder := m.NewMapResponseBuilder(nodeID).
|
||||
WithPeersRemoved(removedID1, removedID2)
|
||||
|
||||
|
||||
expected := []tailcfg.NodeID{
|
||||
removedID1.NodeID(),
|
||||
removedID2.NodeID(),
|
||||
@@ -197,25 +197,25 @@ func TestMapResponseBuilder_ErrorHandling(t *testing.T) {
|
||||
cfg: cfg,
|
||||
state: mockState,
|
||||
}
|
||||
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
|
||||
|
||||
// Simulate an error in the builder
|
||||
builder := m.NewMapResponseBuilder(nodeID)
|
||||
builder.addError(assert.AnError)
|
||||
|
||||
|
||||
// All subsequent calls should continue to work and accumulate errors
|
||||
result := builder.
|
||||
WithDomain().
|
||||
WithCollectServicesDisabled().
|
||||
WithDebugConfig()
|
||||
|
||||
|
||||
assert.True(t, result.hasErrors())
|
||||
assert.Len(t, result.errs, 1)
|
||||
assert.Equal(t, assert.AnError, result.errs[0])
|
||||
|
||||
|
||||
// Build should return the error
|
||||
data, err := result.Build("none")
|
||||
data, err := result.Build()
|
||||
assert.Nil(t, data)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
@@ -229,22 +229,22 @@ func TestMapResponseBuilder_ChainedCalls(t *testing.T) {
|
||||
Enabled: false,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
mockState := &state.State{}
|
||||
m := &mapper{
|
||||
cfg: cfg,
|
||||
state: mockState,
|
||||
}
|
||||
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
capVer := tailcfg.CapabilityVersion(99)
|
||||
|
||||
|
||||
builder := m.NewMapResponseBuilder(nodeID).
|
||||
WithCapabilityVersion(capVer).
|
||||
WithDomain().
|
||||
WithCollectServicesDisabled().
|
||||
WithDebugConfig()
|
||||
|
||||
|
||||
// Verify all fields are set correctly
|
||||
assert.Equal(t, capVer, builder.capVer)
|
||||
assert.Equal(t, domain, builder.resp.Domain)
|
||||
@@ -263,16 +263,16 @@ func TestMapResponseBuilder_MultipleWithPeersRemoved(t *testing.T) {
|
||||
cfg: cfg,
|
||||
state: mockState,
|
||||
}
|
||||
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
removedID1 := types.NodeID(100)
|
||||
removedID2 := types.NodeID(200)
|
||||
|
||||
|
||||
// Test calling WithPeersRemoved multiple times
|
||||
builder := m.NewMapResponseBuilder(nodeID).
|
||||
WithPeersRemoved(removedID1).
|
||||
WithPeersRemoved(removedID2)
|
||||
|
||||
|
||||
// Second call should overwrite the first
|
||||
expected := []tailcfg.NodeID{removedID2.NodeID()}
|
||||
assert.Equal(t, expected, builder.resp.PeersRemoved)
|
||||
@@ -286,12 +286,12 @@ func TestMapResponseBuilder_EmptyPeerChangedPatch(t *testing.T) {
|
||||
cfg: cfg,
|
||||
state: mockState,
|
||||
}
|
||||
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
|
||||
|
||||
builder := m.NewMapResponseBuilder(nodeID).
|
||||
WithPeerChangedPatch([]*tailcfg.PeerChange{})
|
||||
|
||||
|
||||
assert.Empty(t, builder.resp.PeersChangedPatch)
|
||||
assert.False(t, builder.hasErrors())
|
||||
}
|
||||
@@ -303,12 +303,12 @@ func TestMapResponseBuilder_NilPeerChangedPatch(t *testing.T) {
|
||||
cfg: cfg,
|
||||
state: mockState,
|
||||
}
|
||||
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
|
||||
|
||||
builder := m.NewMapResponseBuilder(nodeID).
|
||||
WithPeerChangedPatch(nil)
|
||||
|
||||
|
||||
assert.Nil(t, builder.resp.PeersChangedPatch)
|
||||
assert.False(t, builder.hasErrors())
|
||||
}
|
||||
@@ -320,28 +320,28 @@ func TestMapResponseBuilder_MultipleErrors(t *testing.T) {
|
||||
cfg: cfg,
|
||||
state: mockState,
|
||||
}
|
||||
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
|
||||
|
||||
// Create a builder and add multiple errors
|
||||
builder := m.NewMapResponseBuilder(nodeID)
|
||||
builder.addError(assert.AnError)
|
||||
builder.addError(assert.AnError)
|
||||
builder.addError(nil) // This should be ignored
|
||||
|
||||
|
||||
// All subsequent calls should continue to work
|
||||
result := builder.
|
||||
WithDomain().
|
||||
WithCollectServicesDisabled()
|
||||
|
||||
|
||||
assert.True(t, result.hasErrors())
|
||||
assert.Len(t, result.errs, 2) // nil error should be ignored
|
||||
|
||||
|
||||
// Build should return a multierr
|
||||
data, err := result.Build("none")
|
||||
data, err := result.Build()
|
||||
assert.Nil(t, data)
|
||||
assert.Error(t, err)
|
||||
|
||||
|
||||
// The error should contain information about multiple errors
|
||||
assert.Contains(t, err.Error(), "multiple errors")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -18,6 +19,7 @@ import (
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
"tailscale.com/types/views"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -68,16 +70,18 @@ func newMapper(
|
||||
}
|
||||
|
||||
func generateUserProfiles(
|
||||
node *types.Node,
|
||||
peers types.Nodes,
|
||||
node types.NodeView,
|
||||
peers views.Slice[types.NodeView],
|
||||
) []tailcfg.UserProfile {
|
||||
userMap := make(map[uint]*types.User)
|
||||
ids := make([]uint, 0, len(userMap))
|
||||
userMap[node.User.ID] = &node.User
|
||||
ids = append(ids, node.User.ID)
|
||||
for _, peer := range peers {
|
||||
userMap[peer.User.ID] = &peer.User
|
||||
ids = append(ids, peer.User.ID)
|
||||
user := node.User()
|
||||
userMap[user.ID] = &user
|
||||
ids = append(ids, user.ID)
|
||||
for _, peer := range peers.All() {
|
||||
peerUser := peer.User()
|
||||
userMap[peerUser.ID] = &peerUser
|
||||
ids = append(ids, peerUser.ID)
|
||||
}
|
||||
|
||||
slices.Sort(ids)
|
||||
@@ -94,7 +98,7 @@ func generateUserProfiles(
|
||||
|
||||
func generateDNSConfig(
|
||||
cfg *types.Config,
|
||||
node *types.Node,
|
||||
node types.NodeView,
|
||||
) *tailcfg.DNSConfig {
|
||||
if cfg.TailcfgDNSConfig == nil {
|
||||
return nil
|
||||
@@ -114,12 +118,12 @@ func generateDNSConfig(
|
||||
//
|
||||
// This will produce a resolver like:
|
||||
// `https://dns.nextdns.io/<nextdns-id>?device_name=node-name&device_model=linux&device_ip=100.64.0.1`
|
||||
func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
|
||||
func addNextDNSMetadata(resolvers []*dnstype.Resolver, node types.NodeView) {
|
||||
for _, resolver := range resolvers {
|
||||
if strings.HasPrefix(resolver.Addr, nextDNSDoHPrefix) {
|
||||
attrs := url.Values{
|
||||
"device_name": []string{node.Hostname},
|
||||
"device_model": []string{node.Hostinfo.OS},
|
||||
"device_name": []string{node.Hostname()},
|
||||
"device_model": []string{node.Hostinfo().OS()},
|
||||
}
|
||||
|
||||
if len(node.IPs()) > 0 {
|
||||
@@ -135,14 +139,11 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
|
||||
func (m *mapper) fullMapResponse(
|
||||
nodeID types.NodeID,
|
||||
capVer tailcfg.CapabilityVersion,
|
||||
messages ...string,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
peers, err := m.listPeers(nodeID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
peers := m.state.ListPeers(nodeID)
|
||||
|
||||
return m.NewMapResponseBuilder(nodeID).
|
||||
WithDebugType(fullResponseDebug).
|
||||
WithCapabilityVersion(capVer).
|
||||
WithSelfNode().
|
||||
WithDERPMap().
|
||||
@@ -154,13 +155,34 @@ func (m *mapper) fullMapResponse(
|
||||
WithUserProfiles(peers).
|
||||
WithPacketFilters().
|
||||
WithPeers(peers).
|
||||
Build(messages...)
|
||||
Build()
|
||||
}
|
||||
|
||||
func (m *mapper) selfMapResponse(
|
||||
nodeID types.NodeID,
|
||||
capVer tailcfg.CapabilityVersion,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
ma, err := m.NewMapResponseBuilder(nodeID).
|
||||
WithDebugType(selfResponseDebug).
|
||||
WithCapabilityVersion(capVer).
|
||||
WithSelfNode().
|
||||
Build()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set the peers to nil, to ensure the node does not think
|
||||
// it's getting a new list.
|
||||
ma.Peers = nil
|
||||
|
||||
return ma, err
|
||||
}
|
||||
|
||||
func (m *mapper) derpMapResponse(
|
||||
nodeID types.NodeID,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
return m.NewMapResponseBuilder(nodeID).
|
||||
WithDebugType(derpResponseDebug).
|
||||
WithDERPMap().
|
||||
Build()
|
||||
}
|
||||
@@ -172,6 +194,7 @@ func (m *mapper) peerChangedPatchResponse(
|
||||
changed []*tailcfg.PeerChange,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
return m.NewMapResponseBuilder(nodeID).
|
||||
WithDebugType(patchResponseDebug).
|
||||
WithPeerChangedPatch(changed).
|
||||
Build()
|
||||
}
|
||||
@@ -182,14 +205,11 @@ func (m *mapper) peerChangeResponse(
|
||||
capVer tailcfg.CapabilityVersion,
|
||||
changedNodeID types.NodeID,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
peers, err := m.listPeers(nodeID, changedNodeID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
peers := m.state.ListPeers(nodeID, changedNodeID)
|
||||
|
||||
return m.NewMapResponseBuilder(nodeID).
|
||||
WithDebugType(changeResponseDebug).
|
||||
WithCapabilityVersion(capVer).
|
||||
WithSelfNode().
|
||||
WithUserProfiles(peers).
|
||||
WithPeerChanges(peers).
|
||||
Build()
|
||||
@@ -201,42 +221,23 @@ func (m *mapper) peerRemovedResponse(
|
||||
removedNodeID types.NodeID,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
return m.NewMapResponseBuilder(nodeID).
|
||||
WithDebugType(removeResponseDebug).
|
||||
WithPeersRemoved(removedNodeID).
|
||||
Build()
|
||||
}
|
||||
|
||||
func writeDebugMapResponse(
|
||||
resp *tailcfg.MapResponse,
|
||||
t debugType,
|
||||
nodeID types.NodeID,
|
||||
messages ...string,
|
||||
) {
|
||||
data := map[string]any{
|
||||
"Messages": messages,
|
||||
"MapResponse": resp,
|
||||
}
|
||||
|
||||
responseType := "keepalive"
|
||||
|
||||
switch {
|
||||
case len(resp.Peers) > 0:
|
||||
responseType = "full"
|
||||
case resp.Peers == nil && resp.PeersChanged == nil && resp.PeersChangedPatch == nil && resp.DERPMap == nil && !resp.KeepAlive:
|
||||
responseType = "self"
|
||||
case len(resp.PeersChanged) > 0:
|
||||
responseType = "changed"
|
||||
case len(resp.PeersChangedPatch) > 0:
|
||||
responseType = "patch"
|
||||
case len(resp.PeersRemoved) > 0:
|
||||
responseType = "removed"
|
||||
}
|
||||
|
||||
body, err := json.MarshalIndent(data, "", " ")
|
||||
body, err := json.MarshalIndent(resp, "", " ")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
perms := fs.FileMode(debugMapResponsePerm)
|
||||
mPath := path.Join(debugDumpMapResponsePath, nodeID.String())
|
||||
mPath := path.Join(debugDumpMapResponsePath, fmt.Sprintf("%d", nodeID))
|
||||
err = os.MkdirAll(mPath, perms)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -246,7 +247,7 @@ func writeDebugMapResponse(
|
||||
|
||||
mapResponsePath := path.Join(
|
||||
mPath,
|
||||
fmt.Sprintf("%s-%s.json", now, responseType),
|
||||
fmt.Sprintf("%s-%s.json", now, t),
|
||||
)
|
||||
|
||||
log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath)
|
||||
@@ -256,26 +257,70 @@ func writeDebugMapResponse(
|
||||
}
|
||||
}
|
||||
|
||||
// listPeers returns peers of node, regardless of any Policy or if the node is expired.
|
||||
// If no peer IDs are given, all peers are returned.
|
||||
// If at least one peer ID is given, only these peer nodes will be returned.
|
||||
func (m *mapper) listPeers(nodeID types.NodeID, peerIDs ...types.NodeID) (types.Nodes, error) {
|
||||
peers, err := m.state.ListPeers(nodeID, peerIDs...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO(kradalby): Add back online via batcher. This was removed
|
||||
// to avoid a circular dependency between the mapper and the notification.
|
||||
for _, peer := range peers {
|
||||
online := m.batcher.IsConnected(peer.ID)
|
||||
peer.IsOnline = &online
|
||||
}
|
||||
|
||||
return peers, nil
|
||||
}
|
||||
|
||||
// routeFilterFunc is a function that takes a node ID and returns a list of
|
||||
// netip.Prefixes that are allowed for that node. It is used to filter routes
|
||||
// from the primary route manager to the node.
|
||||
type routeFilterFunc func(id types.NodeID) []netip.Prefix
|
||||
|
||||
func (m *mapper) debugMapResponses() (map[types.NodeID][]tailcfg.MapResponse, error) {
|
||||
if debugDumpMapResponsePath == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return ReadMapResponsesFromDirectory(debugDumpMapResponsePath)
|
||||
}
|
||||
|
||||
func ReadMapResponsesFromDirectory(dir string) (map[types.NodeID][]tailcfg.MapResponse, error) {
|
||||
nodes, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := make(map[types.NodeID][]tailcfg.MapResponse)
|
||||
for _, node := range nodes {
|
||||
if !node.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
nodeIDu, err := strconv.ParseUint(node.Name(), 10, 64)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("Parsing node ID from dir %s", node.Name())
|
||||
continue
|
||||
}
|
||||
|
||||
nodeID := types.NodeID(nodeIDu)
|
||||
|
||||
files, err := os.ReadDir(path.Join(dir, node.Name()))
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("Reading dir %s", node.Name())
|
||||
continue
|
||||
}
|
||||
|
||||
slices.SortStableFunc(files, func(a, b fs.DirEntry) int {
|
||||
return strings.Compare(a.Name(), b.Name())
|
||||
})
|
||||
|
||||
for _, file := range files {
|
||||
if file.IsDir() || !strings.HasSuffix(file.Name(), ".json") {
|
||||
continue
|
||||
}
|
||||
|
||||
body, err := os.ReadFile(path.Join(dir, node.Name(), file.Name()))
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("Reading file %s", file.Name())
|
||||
continue
|
||||
}
|
||||
|
||||
var resp tailcfg.MapResponse
|
||||
err = json.Unmarshal(body, &resp)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("Unmarshalling file %s", file.Name())
|
||||
continue
|
||||
}
|
||||
|
||||
result[nodeID] = append(result[nodeID], resp)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
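// ReadMapResponsesFromDirectory expects the layout that writeDebugMapResponse
// produces: <dir>/<node-id>/<timestamp>-<type>.json. A short usage sketch;
// the import path and the dump directory are assumptions for illustration.
package main

import (
	"fmt"
	"log"

	"github.com/juanfont/headscale/hscontrol/mapper"
)

func main() {
	// Point this at the directory configured as the debug map-response dump path.
	responses, err := mapper.ReadMapResponsesFromDirectory("/tmp/mapresponses")
	if err != nil {
		log.Fatal(err)
	}
	for nodeID, resps := range responses {
		fmt.Printf("node %d: %d map responses recorded\n", nodeID, len(resps))
	}
}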
|
||||
|
||||
@@ -71,7 +71,7 @@ func TestDNSConfigMapResponse(t *testing.T) {
|
||||
&types.Config{
|
||||
TailcfgDNSConfig: &dnsConfigOrig,
|
||||
},
|
||||
nodeInShared1,
|
||||
nodeInShared1.View(),
|
||||
)
|
||||
|
||||
if diff := cmp.Diff(tt.want, got, cmpopts.EquateEmpty()); diff != "" {
|
||||
|
||||
@@ -133,13 +133,12 @@ func tailNode(
|
||||
tNode.CapMap[tailcfg.NodeAttrRandomizeClientPort] = []tailcfg.RawMessage{}
|
||||
}
|
||||
|
||||
if !node.IsOnline().Valid() || !node.IsOnline().Get() {
|
||||
// LastSeen is only set when node is
|
||||
// not connected to the control server.
|
||||
if node.LastSeen().Valid() {
|
||||
lastSeen := node.LastSeen().Get()
|
||||
tNode.LastSeen = &lastSeen
|
||||
}
|
||||
// Set LastSeen only for offline nodes to avoid confusing Tailscale clients
|
||||
// during rapid reconnection cycles. Online nodes should not have LastSeen set
|
||||
// as this can make clients interpret them as "not online" despite Online=true.
|
||||
if node.LastSeen().Valid() && node.IsOnline().Valid() && !node.IsOnline().Get() {
|
||||
lastSeen := node.LastSeen().Get()
|
||||
tNode.LastSeen = &lastSeen
|
||||
}
|
||||
|
||||
return &tNode, nil
|
||||
|
||||
@@ -158,7 +158,6 @@ func TestTailNode(t *testing.T) {
|
||||
|
||||
Tags: []string{},
|
||||
|
||||
LastSeen: &lastSeen,
|
||||
MachineAuthorized: true,
|
||||
|
||||
CapMap: tailcfg.NodeCapMap{
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/rs/zerolog/log"
|
||||
"golang.org/x/net/http2"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/control/controlbase"
|
||||
"tailscale.com/control/controlhttp/controlhttpserver"
|
||||
"tailscale.com/tailcfg"
|
||||
@@ -176,8 +175,8 @@ func rejectUnsupported(
|
||||
Int("client_cap_ver", int(version)).
|
||||
Str("minimum_version", capver.TailscaleVersion(capver.MinSupportedCapabilityVersion)).
|
||||
Str("client_version", capver.TailscaleVersion(version)).
|
||||
Str("node_key", nkey.ShortString()).
|
||||
Str("machine_key", mkey.ShortString()).
|
||||
Str("node.key", nkey.ShortString()).
|
||||
Str("machine.key", mkey.ShortString()).
|
||||
Msg("unsupported client connected")
|
||||
http.Error(writer, unsupportedClientError(version).Error(), http.StatusBadRequest)
|
||||
|
||||
@@ -283,7 +282,7 @@ func (ns *noiseServer) NoiseRegistrationHandler(
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
|
||||
if err := json.NewEncoder(writer).Encode(registerResponse); err != nil {
|
||||
log.Error().Err(err).Msg("NoiseRegistrationHandler: failed to encode RegisterResponse")
|
||||
log.Error().Caller().Err(err).Msg("NoiseRegistrationHandler: failed to encode RegisterResponse")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -296,16 +295,11 @@ func (ns *noiseServer) NoiseRegistrationHandler(
|
||||
// getAndValidateNode retrieves the node from the database using the NodeKey
|
||||
// and validates that it matches the MachineKey from the Noise session.
|
||||
func (ns *noiseServer) getAndValidateNode(mapRequest tailcfg.MapRequest) (types.NodeView, error) {
|
||||
node, err := ns.headscale.state.GetNodeByNodeKey(mapRequest.NodeKey)
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return types.NodeView{}, NewHTTPError(http.StatusNotFound, "node not found", nil)
|
||||
}
|
||||
return types.NodeView{}, NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("lookup node: %s", err), nil)
|
||||
nv, ok := ns.headscale.state.GetNodeByNodeKey(mapRequest.NodeKey)
|
||||
if !ok {
|
||||
return types.NodeView{}, NewHTTPError(http.StatusNotFound, "node not found", nil)
|
||||
}
|
||||
|
||||
nv := node.View()
|
||||
|
||||
// Validate that the MachineKey in the Noise session matches the one associated with the NodeKey.
|
||||
if ns.machineKey != nv.MachineKey() {
|
||||
return types.NodeView{}, NewHTTPError(http.StatusNotFound, "node key in request does not match the one associated with this machine key", nil)
|
||||
|
||||
@@ -181,7 +181,7 @@ func (a *AuthProviderOIDC) RegisterHandler(
|
||||
a.registrationCache.Set(state, registrationInfo)
|
||||
|
||||
authURL := a.oauth2Config.AuthCodeURL(state, extras...)
|
||||
log.Debug().Msgf("Redirecting to %s for authentication", authURL)
|
||||
log.Debug().Caller().Msgf("Redirecting to %s for authentication", authURL)
|
||||
|
||||
http.Redirect(writer, req, authURL, http.StatusFound)
|
||||
}
|
||||
@@ -254,6 +254,35 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
return
|
||||
}
|
||||
|
||||
// Fetch user information (email, groups, name, etc) from the userinfo endpoint
|
||||
// https://openid.net/specs/openid-connect-core-1_0.html#UserInfo
|
||||
var userinfo *oidc.UserInfo
|
||||
userinfo, err = a.oidcProvider.UserInfo(req.Context(), oauth2.StaticTokenSource(oauth2Token))
|
||||
if err != nil {
|
||||
util.LogErr(err, "could not get userinfo; only using claims from id token")
|
||||
}
|
||||
|
||||
// The oidc.UserInfo type only decodes some fields (Subject, Profile, Email, EmailVerified).
|
||||
// We are interested in other fields too (e.g. groups are required for allowedGroups) so we
|
||||
// decode into our own OIDCUserInfo type using the underlying claims struct.
|
||||
var userinfo2 types.OIDCUserInfo
|
||||
if userinfo != nil && userinfo.Claims(&userinfo2) == nil && userinfo2.Sub == claims.Sub {
|
||||
// Update the user with the userinfo claims (with id token claims as fallback).
|
||||
// TODO(kradalby): there might be more interesting fields here that we have not found yet.
|
||||
claims.Email = cmp.Or(userinfo2.Email, claims.Email)
|
||||
claims.EmailVerified = cmp.Or(userinfo2.EmailVerified, claims.EmailVerified)
|
||||
claims.Username = cmp.Or(userinfo2.PreferredUsername, claims.Username)
|
||||
claims.Name = cmp.Or(userinfo2.Name, claims.Name)
|
||||
claims.ProfilePictureURL = cmp.Or(userinfo2.Picture, claims.ProfilePictureURL)
|
||||
if userinfo2.Groups != nil {
|
||||
claims.Groups = userinfo2.Groups
|
||||
}
|
||||
} else {
|
||||
util.LogErr(err, "could not get userinfo; only using claims from id token")
|
||||
}
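// The claim merging above relies on cmp.Or from the Go standard library,
// which returns the first non-zero value among its arguments: userinfo fields
// win, and the ID-token claims serve as the fallback. A minimal illustration
// with made-up values:
package main

import (
	"cmp"
	"fmt"
)

func main() {
	idTokenEmail := "user@example.com" // from the ID token claims
	userinfoEmail := ""                // userinfo endpoint returned no email

	fmt.Println(cmp.Or(userinfoEmail, idTokenEmail))    // user@example.com (fallback used)
	fmt.Println(cmp.Or("ui@example.com", idTokenEmail)) // ui@example.com (userinfo preferred)
}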
|
||||
|
||||
// The user claims are now updated from the userinfo endpoint so we can verify the user
|
||||
// against allowed emails, email domains, and groups.
|
||||
if err := validateOIDCAllowedDomains(a.cfg.AllowedDomains, &claims); err != nil {
|
||||
httpError(writer, err)
|
||||
return
|
||||
@@ -269,31 +298,7 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
return
|
||||
}
|
||||
|
||||
var userinfo *oidc.UserInfo
|
||||
userinfo, err = a.oidcProvider.UserInfo(req.Context(), oauth2.StaticTokenSource(oauth2Token))
|
||||
if err != nil {
|
||||
util.LogErr(err, "could not get userinfo; only checking claim")
|
||||
}
|
||||
|
||||
// If the userinfo is available, we can check if the subject matches the
|
||||
// claims, then use some of the userinfo fields to update the user.
|
||||
// https://openid.net/specs/openid-connect-core-1_0.html#UserInfo
|
||||
if userinfo != nil && userinfo.Subject == claims.Sub {
|
||||
claims.Email = cmp.Or(claims.Email, userinfo.Email)
|
||||
claims.EmailVerified = cmp.Or(claims.EmailVerified, types.FlexibleBoolean(userinfo.EmailVerified))
|
||||
|
||||
// The userinfo has some extra fields that we can use to update the user but they are only
|
||||
// available in the underlying claims struct.
|
||||
// TODO(kradalby): there might be more interesting fields here that we have not found yet.
|
||||
var userinfo2 types.OIDCUserInfo
|
||||
if err := userinfo.Claims(&userinfo2); err == nil {
|
||||
claims.Username = cmp.Or(claims.Username, userinfo2.PreferredUsername)
|
||||
claims.Name = cmp.Or(claims.Name, userinfo2.Name)
|
||||
claims.ProfilePictureURL = cmp.Or(claims.ProfilePictureURL, userinfo2.Picture)
|
||||
}
|
||||
}
|
||||
|
||||
user, policyChanged, err := a.createOrUpdateUserFromClaim(&claims)
|
||||
user, c, err := a.createOrUpdateUserFromClaim(&claims)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
@@ -306,16 +311,14 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(werr).
|
||||
Msg("Failed to write response")
|
||||
Msg("Failed to write HTTP response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Send policy update notifications if needed
|
||||
if policyChanged {
|
||||
a.h.Change(change.PolicyChange())
|
||||
}
|
||||
a.h.Change(c)
|
||||
|
||||
// TODO(kradalby): Is this comment right?
|
||||
// If the node exists, then the node should be reauthenticated,
|
||||
@@ -328,6 +331,12 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
verb := "Reauthenticated"
|
||||
newNode, err := a.handleRegistration(user, *registrationId, nodeExpiry)
|
||||
if err != nil {
|
||||
if errors.Is(err, db.ErrNodeNotFoundRegistrationCache) {
|
||||
log.Debug().Caller().Str("registration_id", registrationId.String()).Msg("registration session expired before authorization completed")
|
||||
httpError(writer, NewHTTPError(http.StatusGone, "login session expired, try again", err))
|
||||
|
||||
return
|
||||
}
|
||||
httpError(writer, err)
|
||||
return
|
||||
}
|
||||
@@ -346,7 +355,7 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
writer.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
if _, err := writer.Write(content.Bytes()); err != nil {
|
||||
util.LogErr(err, "Failed to write response")
|
||||
util.LogErr(err, "Failed to write HTTP response")
|
||||
}
|
||||
|
||||
return
|
||||
@@ -478,14 +487,14 @@ func (a *AuthProviderOIDC) getRegistrationIDFromState(state string) *types.Regis
|
||||
|
||||
func (a *AuthProviderOIDC) createOrUpdateUserFromClaim(
|
||||
claims *types.OIDCClaims,
|
||||
) (*types.User, bool, error) {
|
||||
) (*types.User, change.ChangeSet, error) {
|
||||
var user *types.User
|
||||
var err error
|
||||
var newUser bool
|
||||
var policyChanged bool
|
||||
var c change.ChangeSet
|
||||
user, err = a.h.state.GetUserByOIDCIdentifier(claims.Identifier())
|
||||
if err != nil && !errors.Is(err, db.ErrUserNotFound) {
|
||||
return nil, false, fmt.Errorf("creating or updating user: %w", err)
|
||||
return nil, change.EmptySet, fmt.Errorf("creating or updating user: %w", err)
|
||||
}
|
||||
|
||||
// if the user is still not found, create a new empty user.
|
||||
@@ -499,21 +508,21 @@ func (a *AuthProviderOIDC) createOrUpdateUserFromClaim(
|
||||
user.FromClaim(claims)
|
||||
|
||||
if newUser {
|
||||
user, policyChanged, err = a.h.state.CreateUser(*user)
|
||||
user, c, err = a.h.state.CreateUser(*user)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("creating user: %w", err)
|
||||
return nil, change.EmptySet, fmt.Errorf("creating user: %w", err)
|
||||
}
|
||||
} else {
|
||||
_, policyChanged, err = a.h.state.UpdateUser(types.UserID(user.ID), func(u *types.User) error {
|
||||
_, c, err = a.h.state.UpdateUser(types.UserID(user.ID), func(u *types.User) error {
|
||||
*u = *user
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("updating user: %w", err)
|
||||
return nil, change.EmptySet, fmt.Errorf("updating user: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return user, policyChanged, nil
|
||||
return user, c, nil
|
||||
}
|
||||
|
||||
func (a *AuthProviderOIDC) handleRegistration(
|
||||
|
||||
@@ -13,6 +13,8 @@ import (
|
||||
type PolicyManager interface {
|
||||
// Filter returns the current filter rules for the entire tailnet and the associated matchers.
|
||||
Filter() ([]tailcfg.FilterRule, []matcher.Match)
|
||||
// FilterForNode returns filter rules for a specific node, handling autogroup:self
|
||||
FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error)
|
||||
SSHPolicy(types.NodeView) (*tailcfg.SSHPolicy, error)
|
||||
SetPolicy([]byte) (bool, error)
|
||||
SetUsers(users []types.User) (bool, error)
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/juanfont/headscale/hscontrol/policy/matcher"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/samber/lo"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
@@ -138,39 +139,74 @@ func ReduceFilterRules(node types.NodeView, rules []tailcfg.FilterRule) []tailcf
|
||||
return ret
|
||||
}
|
||||
|
||||
// AutoApproveRoutes approves any route that can be autoapproved from
|
||||
// the nodes perspective according to the given policy.
|
||||
// It reports true if any routes were approved.
|
||||
// Note: This function now takes a pointer to the actual node to modify ApprovedRoutes.
|
||||
func AutoApproveRoutes(pm PolicyManager, node *types.Node) bool {
|
||||
// ApproveRoutesWithPolicy checks if the node can approve the announced routes
|
||||
// and returns the new list of approved routes.
|
||||
// The approved routes will include:
|
||||
// 1. ALL previously approved routes (regardless of whether they're still advertised)
|
||||
// 2. New routes from announcedRoutes that can be auto-approved by policy
|
||||
// This ensures that:
|
||||
// - Previously approved routes are ALWAYS preserved (auto-approval never removes routes)
|
||||
// - New routes can be auto-approved according to policy
|
||||
// - Routes can only be removed by explicit admin action (not by auto-approval).
|
||||
func ApproveRoutesWithPolicy(pm PolicyManager, nv types.NodeView, currentApproved, announcedRoutes []netip.Prefix) ([]netip.Prefix, bool) {
|
||||
if pm == nil {
|
||||
return false
|
||||
return currentApproved, false
|
||||
}
|
||||
nodeView := node.View()
|
||||
var newApproved []netip.Prefix
|
||||
for _, route := range nodeView.AnnouncedRoutes() {
|
||||
if pm.NodeCanApproveRoute(nodeView, route) {
|
||||
|
||||
// Start with ALL currently approved routes - we never remove approved routes
|
||||
newApproved := make([]netip.Prefix, len(currentApproved))
|
||||
copy(newApproved, currentApproved)
|
||||
|
||||
// Then, check for new routes that can be auto-approved
|
||||
for _, route := range announcedRoutes {
|
||||
// Skip if already approved
|
||||
if slices.Contains(newApproved, route) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if this new route can be auto-approved by policy
|
||||
canApprove := pm.NodeCanApproveRoute(nv, route)
|
||||
if canApprove {
|
||||
newApproved = append(newApproved, route)
|
||||
}
|
||||
}
|
||||
|
||||
// Only modify ApprovedRoutes if we have new routes to approve.
|
||||
// This prevents clearing existing approved routes when nodes
|
||||
// temporarily don't have announced routes during policy changes.
|
||||
if len(newApproved) > 0 {
|
||||
combined := append(newApproved, node.ApprovedRoutes...)
|
||||
tsaddr.SortPrefixes(combined)
|
||||
combined = slices.Compact(combined)
|
||||
combined = lo.Filter(combined, func(route netip.Prefix, index int) bool {
|
||||
return route.IsValid()
|
||||
})
|
||||
// Sort and deduplicate
|
||||
tsaddr.SortPrefixes(newApproved)
|
||||
newApproved = slices.Compact(newApproved)
|
||||
newApproved = lo.Filter(newApproved, func(route netip.Prefix, index int) bool {
|
||||
return route.IsValid()
|
||||
})
|
||||
|
||||
// Only update if the routes actually changed
|
||||
if !slices.Equal(node.ApprovedRoutes, combined) {
|
||||
node.ApprovedRoutes = combined
|
||||
return true
|
||||
// Sort the current approved for comparison
|
||||
sortedCurrent := make([]netip.Prefix, len(currentApproved))
|
||||
copy(sortedCurrent, currentApproved)
|
||||
tsaddr.SortPrefixes(sortedCurrent)
|
||||
|
||||
// Only update if the routes actually changed
|
||||
if !slices.Equal(sortedCurrent, newApproved) {
|
||||
// Log what changed
|
||||
var added, kept []netip.Prefix
|
||||
for _, route := range newApproved {
|
||||
if !slices.Contains(sortedCurrent, route) {
|
||||
added = append(added, route)
|
||||
} else {
|
||||
kept = append(kept, route)
|
||||
}
|
||||
}
|
||||
|
||||
if len(added) > 0 {
|
||||
log.Debug().
|
||||
Uint64("node.id", nv.ID().Uint64()).
|
||||
Str("node.name", nv.Hostname()).
|
||||
Strs("routes.added", util.PrefixesToString(added)).
|
||||
Strs("routes.kept", util.PrefixesToString(kept)).
|
||||
Int("routes.total", len(newApproved)).
|
||||
Msg("Routes auto-approved by policy")
|
||||
}
|
||||
|
||||
return newApproved, true
|
||||
}
|
||||
|
||||
return false
|
||||
return newApproved, false
|
||||
}
|
||||
|
||||
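For orientation, a minimal sketch of how a caller could use ApproveRoutesWithPolicy. The reconcileApprovedRoutes wrapper and the persistApprovedRoutes helper are hypothetical and only illustrate the contract shown above (current approved routes in, merged approved routes plus a changed flag out); it assumes NodeView still exposes AnnouncedRoutes() as in the removed code, and imports matching the surrounding file.

// Sketch only: the caller owns persistence; persistApprovedRoutes is a
// hypothetical helper, not part of headscale.
func reconcileApprovedRoutes(pm PolicyManager, node *types.Node) error {
	nv := node.View()

	// Merge previously approved routes with any newly announced routes
	// that the policy allows this node to self-approve.
	approved, changed := ApproveRoutesWithPolicy(pm, nv, node.ApprovedRoutes, nv.AnnouncedRoutes())
	if !changed {
		return nil
	}

	// Auto-approval only ever adds routes; removing a route stays an
	// explicit admin action, so persisting the merged slice is safe.
	node.ApprovedRoutes = approved

	return persistApprovedRoutes(node)
}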
339
hscontrol/policy/policy_autoapprove_test.go
Normal file
@@ -0,0 +1,339 @@
|
||||
package policy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"testing"
|
||||
|
||||
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/ptr"
|
||||
"tailscale.com/types/views"
|
||||
)
|
||||
|
||||
func TestApproveRoutesWithPolicy_NeverRemovesApprovedRoutes(t *testing.T) {
|
||||
user1 := types.User{
|
||||
Model: gorm.Model{ID: 1},
|
||||
Name: "testuser@",
|
||||
}
|
||||
user2 := types.User{
|
||||
Model: gorm.Model{ID: 2},
|
||||
Name: "otheruser@",
|
||||
}
|
||||
users := []types.User{user1, user2}
|
||||
|
||||
node1 := &types.Node{
|
||||
ID: 1,
|
||||
MachineKey: key.NewMachine().Public(),
|
||||
NodeKey: key.NewNode().Public(),
|
||||
Hostname: "test-node",
|
||||
UserID: user1.ID,
|
||||
User: user1,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
|
||||
ForcedTags: []string{"tag:test"},
|
||||
}
|
||||
|
||||
node2 := &types.Node{
|
||||
ID: 2,
|
||||
MachineKey: key.NewMachine().Public(),
|
||||
NodeKey: key.NewNode().Public(),
|
||||
Hostname: "other-node",
|
||||
UserID: user2.ID,
|
||||
User: user2,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
IPv4: ptr.To(netip.MustParseAddr("100.64.0.2")),
|
||||
}
|
||||
|
||||
// Create a policy that auto-approves specific routes
|
||||
policyJSON := `{
|
||||
"groups": {
|
||||
"group:test": ["testuser@"]
|
||||
},
|
||||
"tagOwners": {
|
||||
"tag:test": ["testuser@"]
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["*"],
|
||||
"dst": ["*:*"]
|
||||
}
|
||||
],
|
||||
"autoApprovers": {
|
||||
"routes": {
|
||||
"10.0.0.0/8": ["testuser@", "tag:test"],
|
||||
"10.1.0.0/24": ["testuser@"],
|
||||
"10.2.0.0/24": ["testuser@"],
|
||||
"192.168.0.0/24": ["tag:test"]
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
pm, err := policyv2.NewPolicyManager([]byte(policyJSON), users, views.SliceOf([]types.NodeView{node1.View(), node2.View()}))
|
||||
assert.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
node *types.Node
|
||||
currentApproved []netip.Prefix
|
||||
announcedRoutes []netip.Prefix
|
||||
wantApproved []netip.Prefix
|
||||
wantChanged bool
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "previously_approved_route_no_longer_advertised_should_remain",
|
||||
node: node1,
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
netip.MustParsePrefix("192.168.0.0/24"),
|
||||
},
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"), // Only this one is still advertised
|
||||
},
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
netip.MustParsePrefix("192.168.0.0/24"), // Should still be here!
|
||||
},
|
||||
wantChanged: false,
|
||||
description: "Previously approved routes should never be removed even when no longer advertised",
|
||||
},
|
||||
{
|
||||
name: "add_new_auto_approved_route_keeps_old_approved",
|
||||
node: node1,
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.5.0.0/24"), // This was manually approved
|
||||
},
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.1.0.0/24"), // New route that should be auto-approved
|
||||
},
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.1.0.0/24"), // New auto-approved route (subset of 10.0.0.0/8)
|
||||
netip.MustParsePrefix("10.5.0.0/24"), // Old approved route kept
|
||||
},
|
||||
wantChanged: true,
|
||||
description: "New auto-approved routes should be added while keeping old approved routes",
|
||||
},
|
||||
{
|
||||
name: "no_announced_routes_keeps_all_approved",
|
||||
node: node1,
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
netip.MustParsePrefix("192.168.0.0/24"),
|
||||
netip.MustParsePrefix("172.16.0.0/16"),
|
||||
},
|
||||
announcedRoutes: []netip.Prefix{}, // No routes announced
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
netip.MustParsePrefix("172.16.0.0/16"),
|
||||
netip.MustParsePrefix("192.168.0.0/24"),
|
||||
},
|
||||
wantChanged: false,
|
||||
description: "All approved routes should remain when no routes are announced",
|
||||
},
|
||||
{
|
||||
name: "no_changes_when_announced_equals_approved",
|
||||
node: node1,
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
wantChanged: false,
|
||||
description: "No changes should occur when announced routes match approved routes",
|
||||
},
|
||||
{
|
||||
name: "auto_approve_multiple_new_routes",
|
||||
node: node1,
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("172.16.0.0/24"), // This was manually approved
|
||||
},
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.2.0.0/24"), // Should be auto-approved (subset of 10.0.0.0/8)
|
||||
netip.MustParsePrefix("192.168.0.0/24"), // Should be auto-approved for tag:test
|
||||
},
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.2.0.0/24"), // New auto-approved
|
||||
netip.MustParsePrefix("172.16.0.0/24"), // Original kept
|
||||
netip.MustParsePrefix("192.168.0.0/24"), // New auto-approved
|
||||
},
|
||||
wantChanged: true,
|
||||
description: "Multiple new routes should be auto-approved while keeping existing approved routes",
|
||||
},
|
||||
{
|
||||
name: "node_without_permission_no_auto_approval",
|
||||
node: node2, // Different node without the tag
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("192.168.0.0/24"), // This requires tag:test
|
||||
},
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"), // Only the original approved route
|
||||
},
|
||||
wantChanged: false,
|
||||
description: "Routes should not be auto-approved for nodes without proper permissions",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotApproved, gotChanged := ApproveRoutesWithPolicy(pm, tt.node.View(), tt.currentApproved, tt.announcedRoutes)
|
||||
|
||||
assert.Equal(t, tt.wantChanged, gotChanged, "changed flag mismatch: %s", tt.description)
|
||||
|
||||
// Sort for comparison since ApproveRoutesWithPolicy sorts the results
|
||||
tsaddr.SortPrefixes(tt.wantApproved)
|
||||
assert.Equal(t, tt.wantApproved, gotApproved, "approved routes mismatch: %s", tt.description)
|
||||
|
||||
// Verify that all previously approved routes are still present
|
||||
for _, prevRoute := range tt.currentApproved {
|
||||
assert.Contains(t, gotApproved, prevRoute,
|
||||
"previously approved route %s was removed - this should never happen", prevRoute)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestApproveRoutesWithPolicy_NilAndEmptyCases(t *testing.T) {
|
||||
// Create a basic policy for edge case testing
|
||||
aclPolicy := `
|
||||
{
|
||||
"acls": [
|
||||
{"action": "accept", "src": ["*"], "dst": ["*:*"]},
|
||||
],
|
||||
"autoApprovers": {
|
||||
"routes": {
|
||||
"10.1.0.0/24": ["test@"],
|
||||
},
|
||||
},
|
||||
}`
|
||||
|
||||
pmfs := PolicyManagerFuncsForTest([]byte(aclPolicy))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
currentApproved []netip.Prefix
|
||||
announcedRoutes []netip.Prefix
|
||||
wantApproved []netip.Prefix
|
||||
wantChanged bool
|
||||
}{
|
||||
{
|
||||
name: "nil_policy_manager",
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("192.168.0.0/24"),
|
||||
},
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
wantChanged: false,
|
||||
},
|
||||
{
|
||||
name: "nil_current_approved",
|
||||
currentApproved: nil,
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.1.0.0/24"),
|
||||
},
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.1.0.0/24"),
|
||||
},
|
||||
wantChanged: true,
|
||||
},
|
||||
{
|
||||
name: "nil_announced_routes",
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
announcedRoutes: nil,
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
wantChanged: false,
|
||||
},
|
||||
{
|
||||
name: "duplicate_approved_routes",
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
netip.MustParsePrefix("10.0.0.0/24"), // Duplicate
|
||||
},
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.1.0.0/24"),
|
||||
},
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
netip.MustParsePrefix("10.1.0.0/24"),
|
||||
},
|
||||
wantChanged: true,
|
||||
},
|
||||
{
|
||||
name: "empty_slices",
|
||||
currentApproved: []netip.Prefix{},
|
||||
announcedRoutes: []netip.Prefix{},
|
||||
wantApproved: []netip.Prefix{},
|
||||
wantChanged: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
for i, pmf := range pmfs {
|
||||
t.Run(fmt.Sprintf("%s-policy-index%d", tt.name, i), func(t *testing.T) {
|
||||
// Create test user
|
||||
user := types.User{
|
||||
Model: gorm.Model{ID: 1},
|
||||
Name: "test",
|
||||
}
|
||||
users := []types.User{user}
|
||||
|
||||
// Create test node
|
||||
node := types.Node{
|
||||
ID: 1,
|
||||
MachineKey: key.NewMachine().Public(),
|
||||
NodeKey: key.NewNode().Public(),
|
||||
Hostname: "testnode",
|
||||
UserID: user.ID,
|
||||
User: user,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
|
||||
ApprovedRoutes: tt.currentApproved,
|
||||
}
|
||||
nodes := types.Nodes{&node}
|
||||
|
||||
// Create policy manager or use nil if specified
|
||||
var pm PolicyManager
|
||||
var err error
|
||||
if tt.name != "nil_policy_manager" {
|
||||
pm, err = pmf(users, nodes.ViewSlice())
|
||||
assert.NoError(t, err)
|
||||
} else {
|
||||
pm = nil
|
||||
}
|
||||
|
||||
gotApproved, gotChanged := ApproveRoutesWithPolicy(pm, node.View(), tt.currentApproved, tt.announcedRoutes)
|
||||
|
||||
assert.Equal(t, tt.wantChanged, gotChanged, "changed flag mismatch")
|
||||
|
||||
// Handle nil vs empty slice comparison
|
||||
if tt.wantApproved == nil {
|
||||
assert.Nil(t, gotApproved, "expected nil approved routes")
|
||||
} else {
|
||||
tsaddr.SortPrefixes(tt.wantApproved)
|
||||
assert.Equal(t, tt.wantApproved, gotApproved, "approved routes mismatch")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
361
hscontrol/policy/policy_route_approval_test.go
Normal file
@@ -0,0 +1,361 @@
|
||||
package policy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
func TestApproveRoutesWithPolicy_NeverRemovesRoutes(t *testing.T) {
|
||||
// Test policy that allows specific routes to be auto-approved
|
||||
aclPolicy := `
|
||||
{
|
||||
"groups": {
|
||||
"group:admins": ["test@"],
|
||||
},
|
||||
"acls": [
|
||||
{"action": "accept", "src": ["*"], "dst": ["*:*"]},
|
||||
],
|
||||
"autoApprovers": {
|
||||
"routes": {
|
||||
"10.0.0.0/24": ["test@"],
|
||||
"192.168.0.0/24": ["group:admins"],
|
||||
"172.16.0.0/16": ["tag:approved"],
|
||||
},
|
||||
},
|
||||
"tagOwners": {
|
||||
"tag:approved": ["test@"],
|
||||
},
|
||||
}`
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
currentApproved []netip.Prefix
|
||||
announcedRoutes []netip.Prefix
|
||||
nodeHostname string
|
||||
nodeUser string
|
||||
nodeTags []string
|
||||
wantApproved []netip.Prefix
|
||||
wantChanged bool
|
||||
wantRemovedRoutes []netip.Prefix // Routes that should NOT be in the result
|
||||
}{
|
||||
{
|
||||
name: "previously_approved_route_no_longer_advertised_remains",
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
netip.MustParsePrefix("192.168.0.0/24"),
|
||||
},
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("192.168.0.0/24"), // Only this one still advertised
|
||||
},
|
||||
nodeUser: "test",
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"), // Should remain!
|
||||
netip.MustParsePrefix("192.168.0.0/24"),
|
||||
},
|
||||
wantChanged: false,
|
||||
wantRemovedRoutes: []netip.Prefix{}, // Nothing should be removed
|
||||
},
|
||||
{
|
||||
name: "add_new_auto_approved_route_keeps_existing",
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"), // Still advertised
|
||||
netip.MustParsePrefix("192.168.0.0/24"), // New route
|
||||
},
|
||||
nodeUser: "test",
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
netip.MustParsePrefix("192.168.0.0/24"), // Auto-approved via group
|
||||
},
|
||||
wantChanged: true,
|
||||
},
|
||||
{
|
||||
name: "no_announced_routes_keeps_all_approved",
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
netip.MustParsePrefix("192.168.0.0/24"),
|
||||
netip.MustParsePrefix("172.16.0.0/16"),
|
||||
},
|
||||
announcedRoutes: []netip.Prefix{}, // No routes announced anymore
|
||||
nodeUser: "test",
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("172.16.0.0/16"),
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
netip.MustParsePrefix("192.168.0.0/24"),
|
||||
},
|
||||
wantChanged: false,
|
||||
},
|
||||
{
|
||||
name: "manually_approved_route_not_in_policy_remains",
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("203.0.113.0/24"), // Not in auto-approvers
|
||||
},
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"), // Can be auto-approved
|
||||
},
|
||||
nodeUser: "test",
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"), // New auto-approved
|
||||
netip.MustParsePrefix("203.0.113.0/24"), // Manual approval preserved
|
||||
},
|
||||
wantChanged: true,
|
||||
},
|
||||
{
|
||||
name: "tagged_node_gets_tag_approved_routes",
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("172.16.0.0/16"), // Tag-approved route
|
||||
},
|
||||
nodeUser: "test",
|
||||
nodeTags: []string{"tag:approved"},
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("172.16.0.0/16"), // New tag-approved
|
||||
netip.MustParsePrefix("10.0.0.0/24"), // Previous approval preserved
|
||||
},
|
||||
wantChanged: true,
|
||||
},
|
||||
{
|
||||
name: "complex_scenario_multiple_changes",
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"), // Will not be advertised
|
||||
netip.MustParsePrefix("203.0.113.0/24"), // Manual, not advertised
|
||||
},
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("192.168.0.0/24"), // New, auto-approvable
|
||||
netip.MustParsePrefix("172.16.0.0/16"), // New, not approvable (no tag)
|
||||
netip.MustParsePrefix("198.51.100.0/24"), // New, not in policy
|
||||
},
|
||||
nodeUser: "test",
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"), // Kept despite not advertised
|
||||
netip.MustParsePrefix("192.168.0.0/24"), // New auto-approved
|
||||
netip.MustParsePrefix("203.0.113.0/24"), // Kept despite not advertised
|
||||
},
|
||||
wantChanged: true,
|
||||
},
|
||||
}
|
||||
|
||||
pmfs := PolicyManagerFuncsForTest([]byte(aclPolicy))
|
||||
|
||||
for _, tt := range tests {
|
||||
for i, pmf := range pmfs {
|
||||
t.Run(fmt.Sprintf("%s-policy-index%d", tt.name, i), func(t *testing.T) {
|
||||
// Create test user
|
||||
user := types.User{
|
||||
Model: gorm.Model{ID: 1},
|
||||
Name: tt.nodeUser,
|
||||
}
|
||||
users := []types.User{user}
|
||||
|
||||
// Create test node
|
||||
node := types.Node{
|
||||
ID: 1,
|
||||
MachineKey: key.NewMachine().Public(),
|
||||
NodeKey: key.NewNode().Public(),
|
||||
Hostname: tt.nodeHostname,
|
||||
UserID: user.ID,
|
||||
User: user,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: tt.announcedRoutes,
|
||||
},
|
||||
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
|
||||
ApprovedRoutes: tt.currentApproved,
|
||||
ForcedTags: tt.nodeTags,
|
||||
}
|
||||
nodes := types.Nodes{&node}
|
||||
|
||||
// Create policy manager
|
||||
pm, err := pmf(users, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, pm)
|
||||
|
||||
// Test ApproveRoutesWithPolicy
|
||||
gotApproved, gotChanged := ApproveRoutesWithPolicy(
|
||||
pm,
|
||||
node.View(),
|
||||
tt.currentApproved,
|
||||
tt.announcedRoutes,
|
||||
)
|
||||
|
||||
// Check change flag
|
||||
assert.Equal(t, tt.wantChanged, gotChanged, "change flag mismatch")
|
||||
|
||||
// Check approved routes match expected
|
||||
if diff := cmp.Diff(tt.wantApproved, gotApproved, util.Comparers...); diff != "" {
|
||||
t.Logf("Want: %v", tt.wantApproved)
|
||||
t.Logf("Got: %v", gotApproved)
|
||||
t.Errorf("unexpected approved routes (-want +got):\n%s", diff)
|
||||
}
|
||||
|
||||
// Verify all previously approved routes are still present
|
||||
for _, prevRoute := range tt.currentApproved {
|
||||
assert.Contains(t, gotApproved, prevRoute,
|
||||
"previously approved route %s was removed - this should NEVER happen", prevRoute)
|
||||
}
|
||||
|
||||
// Verify no routes were incorrectly removed
|
||||
for _, removedRoute := range tt.wantRemovedRoutes {
|
||||
assert.NotContains(t, gotApproved, removedRoute,
|
||||
"route %s should have been removed but wasn't", removedRoute)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestApproveRoutesWithPolicy_EdgeCases(t *testing.T) {
|
||||
aclPolicy := `
|
||||
{
|
||||
"acls": [
|
||||
{"action": "accept", "src": ["*"], "dst": ["*:*"]},
|
||||
],
|
||||
"autoApprovers": {
|
||||
"routes": {
|
||||
"10.0.0.0/8": ["test@"],
|
||||
},
|
||||
},
|
||||
}`
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
currentApproved []netip.Prefix
|
||||
announcedRoutes []netip.Prefix
|
||||
wantApproved []netip.Prefix
|
||||
wantChanged bool
|
||||
}{
|
||||
{
|
||||
name: "nil_current_approved",
|
||||
currentApproved: nil,
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
wantChanged: true,
|
||||
},
|
||||
{
|
||||
name: "empty_current_approved",
|
||||
currentApproved: []netip.Prefix{},
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
wantChanged: true,
|
||||
},
|
||||
{
|
||||
name: "duplicate_routes_handled",
|
||||
currentApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
netip.MustParsePrefix("10.0.0.0/24"), // Duplicate
|
||||
},
|
||||
announcedRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
wantApproved: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
},
|
||||
wantChanged: true, // Duplicates are removed, so it's a change
|
||||
},
|
||||
}
|
||||
|
||||
pmfs := PolicyManagerFuncsForTest([]byte(aclPolicy))
|
||||
|
||||
for _, tt := range tests {
|
||||
for i, pmf := range pmfs {
|
||||
t.Run(fmt.Sprintf("%s-policy-index%d", tt.name, i), func(t *testing.T) {
|
||||
// Create test user
|
||||
user := types.User{
|
||||
Model: gorm.Model{ID: 1},
|
||||
Name: "test",
|
||||
}
|
||||
users := []types.User{user}
|
||||
|
||||
node := types.Node{
|
||||
ID: 1,
|
||||
MachineKey: key.NewMachine().Public(),
|
||||
NodeKey: key.NewNode().Public(),
|
||||
Hostname: "testnode",
|
||||
UserID: user.ID,
|
||||
User: user,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: tt.announcedRoutes,
|
||||
},
|
||||
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
|
||||
ApprovedRoutes: tt.currentApproved,
|
||||
}
|
||||
nodes := types.Nodes{&node}
|
||||
|
||||
pm, err := pmf(users, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
|
||||
gotApproved, gotChanged := ApproveRoutesWithPolicy(
|
||||
pm,
|
||||
node.View(),
|
||||
tt.currentApproved,
|
||||
tt.announcedRoutes,
|
||||
)
|
||||
|
||||
assert.Equal(t, tt.wantChanged, gotChanged)
|
||||
|
||||
if diff := cmp.Diff(tt.wantApproved, gotApproved, util.Comparers...); diff != "" {
|
||||
t.Errorf("unexpected approved routes (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestApproveRoutesWithPolicy_NilPolicyManagerCase(t *testing.T) {
|
||||
user := types.User{
|
||||
Model: gorm.Model{ID: 1},
|
||||
Name: "test",
|
||||
}
|
||||
|
||||
currentApproved := []netip.Prefix{
|
||||
netip.MustParsePrefix("10.0.0.0/24"),
|
||||
}
|
||||
announcedRoutes := []netip.Prefix{
|
||||
netip.MustParsePrefix("192.168.0.0/24"),
|
||||
}
|
||||
|
||||
node := types.Node{
|
||||
ID: 1,
|
||||
MachineKey: key.NewMachine().Public(),
|
||||
NodeKey: key.NewNode().Public(),
|
||||
Hostname: "testnode",
|
||||
UserID: user.ID,
|
||||
User: user,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: announcedRoutes,
|
||||
},
|
||||
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
|
||||
ApprovedRoutes: currentApproved,
|
||||
}
|
||||
|
||||
// With nil policy manager, should return current approved unchanged
|
||||
gotApproved, gotChanged := ApproveRoutesWithPolicy(nil, node.View(), currentApproved, announcedRoutes)
|
||||
|
||||
assert.False(t, gotChanged)
|
||||
assert.Equal(t, currentApproved, gotApproved)
|
||||
}
|
||||
@@ -222,6 +222,7 @@ func TestReduceFilterRules(t *testing.T) {
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
{
|
||||
SrcIPs: []string{
|
||||
@@ -236,6 +237,7 @@ func TestReduceFilterRules(t *testing.T) {
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -371,10 +373,12 @@ func TestReduceFilterRules(t *testing.T) {
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: hsExitNodeDestForTest,
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -478,6 +482,7 @@ func TestReduceFilterRules(t *testing.T) {
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
@@ -513,6 +518,7 @@ func TestReduceFilterRules(t *testing.T) {
|
||||
{IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -588,6 +594,7 @@ func TestReduceFilterRules(t *testing.T) {
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
@@ -601,6 +608,7 @@ func TestReduceFilterRules(t *testing.T) {
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -676,6 +684,7 @@ func TestReduceFilterRules(t *testing.T) {
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
@@ -689,6 +698,7 @@ func TestReduceFilterRules(t *testing.T) {
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -756,6 +766,7 @@ func TestReduceFilterRules(t *testing.T) {
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1612,13 +1623,7 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||
UserID: 2,
|
||||
User: users[1],
|
||||
}
|
||||
taggedServer := types.Node{
|
||||
Hostname: "tagged-server",
|
||||
IPv4: ap("100.64.0.3"),
|
||||
UserID: 3,
|
||||
User: users[2],
|
||||
ForcedTags: []string{"tag:server"},
|
||||
}
|
||||
|
||||
taggedClient := types.Node{
|
||||
Hostname: "tagged-client",
|
||||
IPv4: ap("100.64.0.4"),
|
||||
@@ -1659,149 +1664,14 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||
{NodeIP: "100.64.0.2"},
|
||||
},
|
||||
SSHUsers: map[string]string{
|
||||
"autogroup:nonroot": "=",
|
||||
"*": "=",
|
||||
"root": "",
|
||||
},
|
||||
Action: &tailcfg.SSHAction{
|
||||
Accept: true,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "group-to-tag",
|
||||
targetNode: taggedServer,
|
||||
peers: types.Nodes{&nodeUser1, &nodeUser2},
|
||||
policy: `{
|
||||
"tagOwners": {
|
||||
"tag:server": ["user3@"],
|
||||
},
|
||||
"groups": {
|
||||
"group:users": ["user1@", "user2@"]
|
||||
},
|
||||
"ssh": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:users"],
|
||||
"dst": ["tag:server"],
|
||||
"users": ["autogroup:nonroot"]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
|
||||
{
|
||||
Principals: []*tailcfg.SSHPrincipal{
|
||||
{NodeIP: "100.64.0.1"},
|
||||
{NodeIP: "100.64.0.2"},
|
||||
},
|
||||
SSHUsers: map[string]string{
|
||||
"autogroup:nonroot": "=",
|
||||
},
|
||||
Action: &tailcfg.SSHAction{
|
||||
Accept: true,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "tag-to-user",
|
||||
targetNode: nodeUser1,
|
||||
peers: types.Nodes{&taggedClient},
|
||||
policy: `{
|
||||
"tagOwners": {
|
||||
"tag:client": ["user1@"],
|
||||
},
|
||||
"ssh": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["tag:client"],
|
||||
"dst": ["user1@"],
|
||||
"users": ["autogroup:nonroot"]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
|
||||
{
|
||||
Principals: []*tailcfg.SSHPrincipal{
|
||||
{NodeIP: "100.64.0.4"},
|
||||
},
|
||||
SSHUsers: map[string]string{
|
||||
"autogroup:nonroot": "=",
|
||||
},
|
||||
Action: &tailcfg.SSHAction{
|
||||
Accept: true,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "tag-to-tag",
|
||||
targetNode: taggedServer,
|
||||
peers: types.Nodes{&taggedClient},
|
||||
policy: `{
|
||||
"tagOwners": {
|
||||
"tag:client": ["user2@"],
|
||||
"tag:server": ["user3@"],
|
||||
},
|
||||
"ssh": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["tag:client"],
|
||||
"dst": ["tag:server"],
|
||||
"users": ["autogroup:nonroot"]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
|
||||
{
|
||||
Principals: []*tailcfg.SSHPrincipal{
|
||||
{NodeIP: "100.64.0.4"},
|
||||
},
|
||||
SSHUsers: map[string]string{
|
||||
"autogroup:nonroot": "=",
|
||||
},
|
||||
Action: &tailcfg.SSHAction{
|
||||
Accept: true,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "group-to-wildcard",
|
||||
targetNode: nodeUser1,
|
||||
peers: types.Nodes{&nodeUser2, &taggedClient},
|
||||
policy: `{
|
||||
"groups": {
|
||||
"group:admins": ["user2@"]
|
||||
},
|
||||
"ssh": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:admins"],
|
||||
"dst": ["*"],
|
||||
"users": ["autogroup:nonroot"]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
|
||||
{
|
||||
Principals: []*tailcfg.SSHPrincipal{
|
||||
{NodeIP: "100.64.0.2"},
|
||||
},
|
||||
SSHUsers: map[string]string{
|
||||
"autogroup:nonroot": "=",
|
||||
},
|
||||
Action: &tailcfg.SSHAction{
|
||||
Accept: true,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
Accept: true,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
AllowRemotePortForwarding: true,
|
||||
},
|
||||
},
|
||||
}},
|
||||
@@ -1830,13 +1700,15 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||
{NodeIP: "100.64.0.4"},
|
||||
},
|
||||
SSHUsers: map[string]string{
|
||||
"autogroup:nonroot": "=",
|
||||
"*": "=",
|
||||
"root": "",
|
||||
},
|
||||
Action: &tailcfg.SSHAction{
|
||||
Accept: true,
|
||||
SessionDuration: 24 * time.Hour,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
Accept: true,
|
||||
SessionDuration: 24 * time.Hour,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
AllowRemotePortForwarding: true,
|
||||
},
|
||||
},
|
||||
}},
|
||||
@@ -1875,7 +1747,7 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||
]
|
||||
}`,
|
||||
expectErr: true,
|
||||
errorMessage: `SSH action "invalid" is not valid, must be accept or check`,
|
||||
errorMessage: `invalid SSH action "invalid", must be one of: accept, check`,
|
||||
},
|
||||
{
|
||||
name: "invalid-check-period",
|
||||
@@ -1895,40 +1767,6 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||
expectErr: true,
|
||||
errorMessage: "not a valid duration string",
|
||||
},
|
||||
{
|
||||
name: "multiple-ssh-users-with-autogroup",
|
||||
targetNode: nodeUser1,
|
||||
peers: types.Nodes{&taggedClient},
|
||||
policy: `{
|
||||
"tagOwners": {
|
||||
"tag:client": ["user1@"],
|
||||
},
|
||||
"ssh": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["tag:client"],
|
||||
"dst": ["user1@"],
|
||||
"users": ["alice", "bob"]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
|
||||
{
|
||||
Principals: []*tailcfg.SSHPrincipal{
|
||||
{NodeIP: "100.64.0.4"},
|
||||
},
|
||||
SSHUsers: map[string]string{
|
||||
"alice": "=",
|
||||
"bob": "=",
|
||||
},
|
||||
Action: &tailcfg.SSHAction{
|
||||
Accept: true,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "unsupported-autogroup",
|
||||
targetNode: nodeUser1,
|
||||
@@ -1946,6 +1784,114 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||
expectErr: true,
|
||||
errorMessage: "autogroup \"autogroup:invalid\" is not supported",
|
||||
},
|
||||
{
|
||||
name: "autogroup-nonroot-should-use-wildcard-with-root-excluded",
|
||||
targetNode: nodeUser1,
|
||||
peers: types.Nodes{&nodeUser2},
|
||||
policy: `{
|
||||
"groups": {
|
||||
"group:admins": ["user2@"]
|
||||
},
|
||||
"ssh": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:admins"],
|
||||
"dst": ["user1@"],
|
||||
"users": ["autogroup:nonroot"]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
// autogroup:nonroot should map to wildcard "*" with root excluded
|
||||
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
|
||||
{
|
||||
Principals: []*tailcfg.SSHPrincipal{
|
||||
{NodeIP: "100.64.0.2"},
|
||||
},
|
||||
SSHUsers: map[string]string{
|
||||
"*": "=",
|
||||
"root": "",
|
||||
},
|
||||
Action: &tailcfg.SSHAction{
|
||||
Accept: true,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
AllowRemotePortForwarding: true,
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "autogroup-nonroot-plus-root-should-use-wildcard-with-root-mapped",
|
||||
targetNode: nodeUser1,
|
||||
peers: types.Nodes{&nodeUser2},
|
||||
policy: `{
|
||||
"groups": {
|
||||
"group:admins": ["user2@"]
|
||||
},
|
||||
"ssh": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:admins"],
|
||||
"dst": ["user1@"],
|
||||
"users": ["autogroup:nonroot", "root"]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
// autogroup:nonroot + root should map to wildcard "*" with root mapped to itself
|
||||
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
|
||||
{
|
||||
Principals: []*tailcfg.SSHPrincipal{
|
||||
{NodeIP: "100.64.0.2"},
|
||||
},
|
||||
SSHUsers: map[string]string{
|
||||
"*": "=",
|
||||
"root": "root",
|
||||
},
|
||||
Action: &tailcfg.SSHAction{
|
||||
Accept: true,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
AllowRemotePortForwarding: true,
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "specific-users-should-map-to-themselves-not-equals",
|
||||
targetNode: nodeUser1,
|
||||
peers: types.Nodes{&nodeUser2},
|
||||
policy: `{
|
||||
"groups": {
|
||||
"group:admins": ["user2@"]
|
||||
},
|
||||
"ssh": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:admins"],
|
||||
"dst": ["user1@"],
|
||||
"users": ["ubuntu", "root"]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
// specific usernames should map to themselves, not "="
|
||||
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
|
||||
{
|
||||
Principals: []*tailcfg.SSHPrincipal{
|
||||
{NodeIP: "100.64.0.2"},
|
||||
},
|
||||
SSHUsers: map[string]string{
|
||||
"root": "root",
|
||||
"ubuntu": "ubuntu",
|
||||
},
|
||||
Action: &tailcfg.SSHAction{
|
||||
Accept: true,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
AllowRemotePortForwarding: true,
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
@@ -2425,6 +2371,177 @@ func TestReduceRoutes(t *testing.T) {
|
||||
netip.MustParsePrefix("10.10.12.0/24"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "return-path-subnet-router-to-regular-node-issue-2608",
|
||||
args: args{
|
||||
node: &types.Node{
|
||||
ID: 2,
|
||||
IPv4: ap("100.123.45.89"), // Node B - regular node
|
||||
User: types.User{Name: "node-b"},
|
||||
},
|
||||
routes: []netip.Prefix{
|
||||
netip.MustParsePrefix("192.168.1.0/24"), // Subnet connected to Node A
|
||||
},
|
||||
rules: []tailcfg.FilterRule{
|
||||
{
|
||||
// Policy allows 192.168.1.0/24 and group:routers to access *:*
|
||||
SrcIPs: []string{
|
||||
"192.168.1.0/24", // Subnet behind router
|
||||
"100.123.45.67", // Node A (router, part of group:routers)
|
||||
},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{IP: "*", Ports: tailcfg.PortRangeAny}, // Access to everything
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// Node B should receive the 192.168.1.0/24 route for return traffic
|
||||
// even though Node B cannot initiate connections to that network
|
||||
want: []netip.Prefix{
|
||||
netip.MustParsePrefix("192.168.1.0/24"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "return-path-router-perspective-2608",
|
||||
args: args{
|
||||
node: &types.Node{
|
||||
ID: 1,
|
||||
IPv4: ap("100.123.45.67"), // Node A - router node
|
||||
User: types.User{Name: "router"},
|
||||
},
|
||||
routes: []netip.Prefix{
|
||||
netip.MustParsePrefix("192.168.1.0/24"), // Subnet connected to this router
|
||||
},
|
||||
rules: []tailcfg.FilterRule{
|
||||
{
|
||||
// Policy allows 192.168.1.0/24 and group:routers to access *:*
|
||||
SrcIPs: []string{
|
||||
"192.168.1.0/24", // Subnet behind router
|
||||
"100.123.45.67", // Node A (router, part of group:routers)
|
||||
},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{IP: "*", Ports: tailcfg.PortRangeAny}, // Access to everything
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// Router should have access to its own routes
|
||||
want: []netip.Prefix{
|
||||
netip.MustParsePrefix("192.168.1.0/24"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "subnet-behind-router-bidirectional-connectivity-issue-2608",
|
||||
args: args{
|
||||
node: &types.Node{
|
||||
ID: 2,
|
||||
IPv4: ap("100.123.45.89"), // Node B - regular node that should be reachable
|
||||
User: types.User{Name: "node-b"},
|
||||
},
|
||||
routes: []netip.Prefix{
|
||||
netip.MustParsePrefix("192.168.1.0/24"), // Subnet behind router
|
||||
netip.MustParsePrefix("10.0.0.0/24"), // Another subnet
|
||||
},
|
||||
rules: []tailcfg.FilterRule{
|
||||
{
|
||||
// Only 192.168.1.0/24 and routers can access everything
|
||||
SrcIPs: []string{
|
||||
"192.168.1.0/24", // Subnet that can connect to Node B
|
||||
"100.123.45.67", // Router node
|
||||
},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{IP: "*", Ports: tailcfg.PortRangeAny},
|
||||
},
|
||||
},
|
||||
{
|
||||
// Node B cannot access anything (no rules with Node B as source)
|
||||
SrcIPs: []string{"100.123.45.89"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
// No destinations - Node B cannot initiate connections
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// Node B should still get the 192.168.1.0/24 route for return traffic
|
||||
// but should NOT get 10.0.0.0/24 since nothing allows that subnet to connect to Node B
|
||||
want: []netip.Prefix{
|
||||
netip.MustParsePrefix("192.168.1.0/24"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no-route-leakage-when-no-connection-allowed-2608",
|
||||
args: args{
|
||||
node: &types.Node{
|
||||
ID: 3,
|
||||
IPv4: ap("100.123.45.99"), // Node C - isolated node
|
||||
User: types.User{Name: "isolated-node"},
|
||||
},
|
||||
routes: []netip.Prefix{
|
||||
netip.MustParsePrefix("192.168.1.0/24"), // Subnet behind router
|
||||
netip.MustParsePrefix("10.0.0.0/24"), // Another private subnet
|
||||
netip.MustParsePrefix("172.16.0.0/24"), // Yet another subnet
|
||||
},
|
||||
rules: []tailcfg.FilterRule{
|
||||
{
|
||||
// Only specific subnets and routers can access specific destinations
|
||||
SrcIPs: []string{
|
||||
"192.168.1.0/24", // This subnet can access everything
|
||||
"100.123.45.67", // Router node can access everything
|
||||
},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{IP: "100.123.45.89", Ports: tailcfg.PortRangeAny}, // Only to Node B
|
||||
},
|
||||
},
|
||||
{
|
||||
// 10.0.0.0/24 can only access router
|
||||
SrcIPs: []string{"10.0.0.0/24"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{IP: "100.123.45.67", Ports: tailcfg.PortRangeAny}, // Only to router
|
||||
},
|
||||
},
|
||||
{
|
||||
// 172.16.0.0/24 has no access rules at all
|
||||
},
|
||||
},
|
||||
},
|
||||
// Node C should get NO routes because:
|
||||
// - 192.168.1.0/24 can only connect to Node B (not Node C)
|
||||
// - 10.0.0.0/24 can only connect to router (not Node C)
|
||||
// - 172.16.0.0/24 has no rules allowing it to connect anywhere
|
||||
// - Node C is not in any rules as a destination
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "original-issue-2608-with-slash14-network",
|
||||
args: args{
|
||||
node: &types.Node{
|
||||
ID: 2,
|
||||
IPv4: ap("100.123.45.89"), // Node B - regular node
|
||||
User: types.User{Name: "node-b"},
|
||||
},
|
||||
routes: []netip.Prefix{
|
||||
netip.MustParsePrefix("192.168.1.0/14"), // Network 192.168.1.0/14 as mentioned in original issue
|
||||
},
|
||||
rules: []tailcfg.FilterRule{
|
||||
{
|
||||
// Policy allows 192.168.1.0/24 (part of /14) and group:routers to access *:*
|
||||
SrcIPs: []string{
|
||||
"192.168.1.0/24", // Subnet behind router (part of the larger /14 network)
|
||||
"100.123.45.67", // Node A (router, part of group:routers)
|
||||
},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{IP: "*", Ports: tailcfg.PortRangeAny}, // Access to everything
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// Node B should receive the 192.168.1.0/14 route for return traffic
|
||||
// even though only 192.168.1.0/24 (part of /14) can connect to Node B
|
||||
// This is the exact scenario from the original issue
|
||||
want: []netip.Prefix{
|
||||
netip.MustParsePrefix("192.168.1.0/14"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
||||
@@ -771,6 +771,29 @@ func TestNodeCanApproveRoute(t *testing.T) {
|
||||
policy: `{"acls":[{"action":"accept","src":["*"],"dst":["*:*"]}]}`,
|
||||
canApprove: false,
|
||||
},
|
||||
{
|
||||
name: "policy-without-autoApprovers-section",
|
||||
node: normalNode,
|
||||
route: p("10.33.0.0/16"),
|
||||
policy: `{
|
||||
"groups": {
|
||||
"group:admin": ["user1@"]
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:admin"],
|
||||
"dst": ["group:admin:*"]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:admin"],
|
||||
"dst": ["10.33.0.0/16:*"]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
canApprove: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
||||
@@ -21,42 +21,37 @@ func (pol *Policy) compileFilterRules(
|
||||
users types.Users,
|
||||
nodes views.Slice[types.NodeView],
|
||||
) ([]tailcfg.FilterRule, error) {
|
||||
if pol == nil {
|
||||
if pol == nil || pol.ACLs == nil {
|
||||
return tailcfg.FilterAllowAll, nil
|
||||
}
|
||||
|
||||
var rules []tailcfg.FilterRule
|
||||
|
||||
for _, acl := range pol.ACLs {
|
||||
if acl.Action != "accept" {
|
||||
if acl.Action != ActionAccept {
|
||||
return nil, ErrInvalidAction
|
||||
}
|
||||
|
||||
srcIPs, err := acl.Sources.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("resolving source ips")
|
||||
log.Trace().Caller().Err(err).Msgf("resolving source ips")
|
||||
}
|
||||
|
||||
if srcIPs == nil || len(srcIPs.Prefixes()) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// TODO(kradalby): integrate type into schema
|
||||
// TODO(kradalby): figure out the _ is wildcard stuff
|
||||
protocols, _, err := parseProtocol(acl.Protocol)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing policy, protocol err: %w ", err)
|
||||
}
|
||||
protocols, _ := acl.Protocol.parseProtocol()
|
||||
|
||||
var destPorts []tailcfg.NetPortRange
|
||||
for _, dest := range acl.Destinations {
|
||||
ips, err := dest.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("resolving destination ips")
|
||||
log.Trace().Caller().Err(err).Msgf("resolving destination ips")
|
||||
}
|
||||
|
||||
if ips == nil {
|
||||
log.Debug().Msgf("destination resolved to nil ips: %v", dest)
|
||||
log.Debug().Caller().Msgf("destination resolved to nil ips: %v", dest)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -87,13 +82,167 @@ func (pol *Policy) compileFilterRules(
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
// compileFilterRulesForNode compiles filter rules for a specific node.
|
||||
func (pol *Policy) compileFilterRulesForNode(
|
||||
users types.Users,
|
||||
node types.NodeView,
|
||||
nodes views.Slice[types.NodeView],
|
||||
) ([]tailcfg.FilterRule, error) {
|
||||
if pol == nil {
|
||||
return tailcfg.FilterAllowAll, nil
|
||||
}
|
||||
|
||||
var rules []tailcfg.FilterRule
|
||||
|
||||
for _, acl := range pol.ACLs {
|
||||
if acl.Action != ActionAccept {
|
||||
return nil, ErrInvalidAction
|
||||
}
|
||||
|
||||
rule, err := pol.compileACLWithAutogroupSelf(acl, users, node, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("compiling ACL")
|
||||
continue
|
||||
}
|
||||
|
||||
if rule != nil {
|
||||
rules = append(rules, *rule)
|
||||
}
|
||||
}
|
||||
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
// compileACLWithAutogroupSelf compiles a single ACL rule, handling
|
||||
// autogroup:self per-node while supporting all other alias types normally.
|
||||
func (pol *Policy) compileACLWithAutogroupSelf(
|
||||
acl ACL,
|
||||
users types.Users,
|
||||
node types.NodeView,
|
||||
nodes views.Slice[types.NodeView],
|
||||
) (*tailcfg.FilterRule, error) {
|
||||
// Check if any destination uses autogroup:self
|
||||
hasAutogroupSelfInDst := false
|
||||
|
||||
for _, dest := range acl.Destinations {
|
||||
if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
hasAutogroupSelfInDst = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
var srcIPs netipx.IPSetBuilder
|
||||
|
||||
// Resolve sources to only include devices from the same user as the target node.
|
||||
for _, src := range acl.Sources {
|
||||
// autogroup:self is not allowed in sources
|
||||
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
return nil, fmt.Errorf("autogroup:self cannot be used in sources")
|
||||
}
|
||||
|
||||
ips, err := src.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("resolving source ips")
|
||||
continue
|
||||
}
|
||||
|
||||
if ips != nil {
|
||||
if hasAutogroupSelfInDst {
|
||||
// Instead of iterating all addresses (which could be millions),
|
||||
// check each node's IPs against the source set
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
// Check if any of this node's IPs are in the source set
|
||||
for _, nodeIP := range n.IPs() {
|
||||
if ips.Contains(nodeIP) {
|
||||
n.AppendToIPSet(&srcIPs)
|
||||
break // Found this node, move to next
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// No autogroup:self in destination, use all resolved sources
|
||||
srcIPs.AddSet(ips)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
srcSet, err := srcIPs.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if srcSet == nil || len(srcSet.Prefixes()) == 0 {
|
||||
// No sources resolved, skip this rule
|
||||
return nil, nil //nolint:nilnil
|
||||
}
|
||||
|
||||
protocols, _ := acl.Protocol.parseProtocol()
|
||||
|
||||
var destPorts []tailcfg.NetPortRange
|
||||
|
||||
for _, dest := range acl.Destinations {
|
||||
if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
for _, port := range dest.Ports {
|
||||
for _, ip := range n.IPs() {
|
||||
pr := tailcfg.NetPortRange{
|
||||
IP: ip.String(),
|
||||
Ports: port,
|
||||
}
|
||||
destPorts = append(destPorts, pr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ips, err := dest.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("resolving destination ips")
|
||||
continue
|
||||
}
|
||||
|
||||
if ips == nil {
|
||||
log.Debug().Msgf("destination resolved to nil ips: %v", dest)
|
||||
continue
|
||||
}
|
||||
|
||||
prefixes := ips.Prefixes()
|
||||
|
||||
for _, pref := range prefixes {
|
||||
for _, port := range dest.Ports {
|
||||
pr := tailcfg.NetPortRange{
|
||||
IP: pref.String(),
|
||||
Ports: port,
|
||||
}
|
||||
destPorts = append(destPorts, pr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(destPorts) == 0 {
|
||||
// No destinations resolved, skip this rule
|
||||
return nil, nil //nolint:nilnil
|
||||
}
|
||||
|
||||
return &tailcfg.FilterRule{
|
||||
SrcIPs: ipSetToPrefixStringList(srcSet),
|
||||
DstPorts: destPorts,
|
||||
IPProto: protocols,
|
||||
}, nil
|
||||
}
|
||||
|
||||
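To make the per-node handling concrete, a hedged example policy; the policy snippet and the effect described in the comments are invented for illustration and simply restate what compileACLWithAutogroupSelf above does when a destination uses autogroup:self (the destination syntax follows the usual alias:port convention).

// Hypothetical policy snippet: with autogroup:self in the destination,
// each node's compiled rule only ever contains untagged devices that
// belong to that node's own user, both as sources and as destinations.
const selfOnlyPolicy = `{
	"acls": [
		{
			"action": "accept",
			"src": ["*"],
			"dst": ["autogroup:self:*"]
		}
	]
}`

// For a node owned by user1, the resulting FilterRule lists only user1's
// untagged device addresses; devices of other users never appear, and
// tagged devices are excluded entirely.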
func sshAction(accept bool, duration time.Duration) tailcfg.SSHAction {
|
||||
return tailcfg.SSHAction{
|
||||
Reject: !accept,
|
||||
Accept: accept,
|
||||
SessionDuration: duration,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
Reject: !accept,
|
||||
Accept: accept,
|
||||
SessionDuration: duration,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
AllowRemotePortForwarding: true,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -106,18 +255,43 @@ func (pol *Policy) compileSSHPolicy(
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
log.Trace().Msgf("compiling SSH policy for node %q", node.Hostname())
|
||||
log.Trace().Caller().Msgf("compiling SSH policy for node %q", node.Hostname())
|
||||
|
||||
var rules []*tailcfg.SSHRule
|
||||
|
||||
for index, rule := range pol.SSHs {
|
||||
// Check if any destination uses autogroup:self
|
||||
hasAutogroupSelfInDst := false
|
||||
for _, dst := range rule.Destinations {
|
||||
if ag, ok := dst.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
hasAutogroupSelfInDst = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// If autogroup:self is used, skip tagged nodes
|
||||
if hasAutogroupSelfInDst && node.IsTagged() {
|
||||
continue
|
||||
}
|
||||
|
||||
var dest netipx.IPSetBuilder
|
||||
for _, src := range rule.Destinations {
|
||||
ips, err := src.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("resolving destination ips")
|
||||
// Handle autogroup:self specially
|
||||
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
// For autogroup:self, only include the target user's untagged devices
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
n.AppendToIPSet(&dest)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ips, err := src.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("resolving destination ips")
|
||||
continue
|
||||
}
|
||||
dest.AddSet(ips)
|
||||
}
|
||||
dest.AddSet(ips)
|
||||
}
|
||||
|
||||
destSet, err := dest.IPSet()
|
||||
@@ -131,9 +305,9 @@ func (pol *Policy) compileSSHPolicy(
|
||||
|
||||
var action tailcfg.SSHAction
|
||||
switch rule.Action {
|
||||
case "accept":
|
||||
case SSHActionAccept:
|
||||
action = sshAction(true, 0)
|
||||
case "check":
|
||||
case SSHActionCheck:
|
||||
action = sshAction(true, time.Duration(rule.CheckPeriod))
|
||||
default:
|
||||
return nil, fmt.Errorf("parsing SSH policy, unknown action %q, index: %d: %w", rule.Action, index, err)
|
||||
@@ -142,10 +316,37 @@ func (pol *Policy) compileSSHPolicy(
|
||||
var principals []*tailcfg.SSHPrincipal
|
||||
srcIPs, err := rule.Sources.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("SSH policy compilation failed resolving source ips for rule %+v", rule)
|
||||
log.Trace().Caller().Err(err).Msgf("SSH policy compilation failed resolving source ips for rule %+v", rule)
|
||||
continue // Skip this rule if we can't resolve sources
|
||||
}
|
||||
|
||||
// If autogroup:self is in destinations, filter sources to same user only
|
||||
if hasAutogroupSelfInDst {
|
||||
var filteredSrcIPs netipx.IPSetBuilder
|
||||
// Instead of iterating all addresses, check each node's IPs
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
// Check if any of this node's IPs are in the source set
|
||||
for _, nodeIP := range n.IPs() {
|
||||
if srcIPs.Contains(nodeIP) {
|
||||
n.AppendToIPSet(&filteredSrcIPs)
|
||||
break // Found this node, move to next
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
srcIPs, err = filteredSrcIPs.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if srcIPs == nil || len(srcIPs.Prefixes()) == 0 {
|
||||
// No valid sources after filtering, skip this rule
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
for addr := range util.IPSetAddrIter(srcIPs) {
|
||||
principals = append(principals, &tailcfg.SSHPrincipal{
|
||||
NodeIP: addr.String(),
|
||||
@@ -153,8 +354,17 @@ func (pol *Policy) compileSSHPolicy(
|
||||
}
|
||||
|
||||
userMap := make(map[string]string, len(rule.Users))
|
||||
for _, user := range rule.Users {
|
||||
userMap[user.String()] = "="
|
||||
if rule.Users.ContainsNonRoot() {
|
||||
userMap["*"] = "="
|
||||
|
||||
// by default, we do not allow root unless explicitly stated
|
||||
userMap["root"] = ""
|
||||
}
|
||||
if rule.Users.ContainsRoot() {
|
||||
userMap["root"] = "root"
|
||||
}
|
||||
for _, u := range rule.Users.NormalUsers() {
|
||||
userMap[u.String()] = u.String()
|
||||
}
|
||||
rules = append(rules, &tailcfg.SSHRule{
|
||||
Principals: principals,
|
||||
|
||||
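A compact restatement of the SSHUsers maps the rewritten userMap logic produces, matching the test expectations earlier in this diff; the variable is illustrative only.

// Illustrative only: the maps the tests above expect for each user-list shape.
var sshUserMapExamples = map[string]map[string]string{
	"autogroup:nonroot only":      {"*": "=", "root": ""},     // non-root users map to themselves, root is blocked
	"autogroup:nonroot plus root": {"*": "=", "root": "root"}, // root explicitly allowed as root
	"explicit users":              {"ubuntu": "ubuntu", "root": "root"},
}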
File diff suppressed because it is too large
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/policy/matcher"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/rs/zerolog/log"
|
||||
"go4.org/netipx"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
@@ -37,6 +38,10 @@ type PolicyManager struct {
|
||||
|
||||
// Lazy map of SSH policies
|
||||
sshPolicyMap map[types.NodeID]*tailcfg.SSHPolicy
|
||||
|
||||
// Lazy map of per-node filter rules (when autogroup:self is used)
|
||||
filterRulesMap map[types.NodeID][]tailcfg.FilterRule
|
||||
usesAutogroupSelf bool
|
||||
}
|
||||
|
||||
// NewPolicyManager creates a new PolicyManager from a policy file and a list of users and nodes.
|
||||
@@ -49,10 +54,12 @@ func NewPolicyManager(b []byte, users []types.User, nodes views.Slice[types.Node
|
||||
}
|
||||
|
||||
pm := PolicyManager{
|
||||
pol: policy,
|
||||
users: users,
|
||||
nodes: nodes,
|
||||
sshPolicyMap: make(map[types.NodeID]*tailcfg.SSHPolicy, nodes.Len()),
|
||||
pol: policy,
|
||||
users: users,
|
||||
nodes: nodes,
|
||||
sshPolicyMap: make(map[types.NodeID]*tailcfg.SSHPolicy, nodes.Len()),
|
||||
filterRulesMap: make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()),
|
||||
usesAutogroupSelf: policy.usesAutogroupSelf(),
|
||||
}
|
||||
|
||||
_, err = pm.updateLocked()
|
||||
@@ -71,14 +78,31 @@ func (pm *PolicyManager) updateLocked() (bool, error) {
|
||||
// policies for nodes that have changed. Particularly if the only difference is
|
||||
// that nodes has been added or removed.
|
||||
clear(pm.sshPolicyMap)
|
||||
clear(pm.filterRulesMap)
|
||||
|
||||
filter, err := pm.pol.compileFilterRules(pm.users, pm.nodes)
|
||||
// Check if policy uses autogroup:self
|
||||
pm.usesAutogroupSelf = pm.pol.usesAutogroupSelf()
|
||||
|
||||
var filter []tailcfg.FilterRule
|
||||
|
||||
var err error
|
||||
|
||||
// Standard compilation for all policies
|
||||
filter, err = pm.pol.compileFilterRules(pm.users, pm.nodes)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("compiling filter rules: %w", err)
|
||||
}
|
||||
|
||||
filterHash := deephash.Hash(&filter)
|
||||
filterChanged := filterHash != pm.filterHash
|
||||
if filterChanged {
|
||||
log.Debug().
|
||||
Str("filter.hash.old", pm.filterHash.String()[:8]).
|
||||
Str("filter.hash.new", filterHash.String()[:8]).
|
||||
Int("filter.rules", len(pm.filter)).
|
||||
Int("filter.rules.new", len(filter)).
|
||||
Msg("Policy filter hash changed")
|
||||
}
|
||||
pm.filter = filter
|
||||
pm.filterHash = filterHash
|
||||
if filterChanged {
|
||||
@@ -95,6 +119,14 @@ func (pm *PolicyManager) updateLocked() (bool, error) {
|
||||
|
||||
tagOwnerMapHash := deephash.Hash(&tagMap)
|
||||
tagOwnerChanged := tagOwnerMapHash != pm.tagOwnerMapHash
|
||||
if tagOwnerChanged {
|
||||
log.Debug().
|
||||
Str("tagOwner.hash.old", pm.tagOwnerMapHash.String()[:8]).
|
||||
Str("tagOwner.hash.new", tagOwnerMapHash.String()[:8]).
|
||||
Int("tagOwners.old", len(pm.tagOwnerMap)).
|
||||
Int("tagOwners.new", len(tagMap)).
|
||||
Msg("Tag owner hash changed")
|
||||
}
|
||||
pm.tagOwnerMap = tagMap
|
||||
pm.tagOwnerMapHash = tagOwnerMapHash
|
||||
|
||||
@@ -105,19 +137,42 @@ func (pm *PolicyManager) updateLocked() (bool, error) {
|
||||
|
||||
autoApproveMapHash := deephash.Hash(&autoMap)
|
||||
autoApproveChanged := autoApproveMapHash != pm.autoApproveMapHash
|
||||
if autoApproveChanged {
|
||||
log.Debug().
|
||||
Str("autoApprove.hash.old", pm.autoApproveMapHash.String()[:8]).
|
||||
Str("autoApprove.hash.new", autoApproveMapHash.String()[:8]).
|
||||
Int("autoApprovers.old", len(pm.autoApproveMap)).
|
||||
Int("autoApprovers.new", len(autoMap)).
|
||||
Msg("Auto-approvers hash changed")
|
||||
}
|
||||
pm.autoApproveMap = autoMap
|
||||
pm.autoApproveMapHash = autoApproveMapHash
|
||||
|
||||
exitSetHash := deephash.Hash(&autoMap)
|
||||
exitSetHash := deephash.Hash(&exitSet)
|
||||
exitSetChanged := exitSetHash != pm.exitSetHash
|
||||
if exitSetChanged {
|
||||
log.Debug().
|
||||
Str("exitSet.hash.old", pm.exitSetHash.String()[:8]).
|
||||
Str("exitSet.hash.new", exitSetHash.String()[:8]).
|
||||
Msg("Exit node set hash changed")
|
||||
}
|
||||
pm.exitSet = exitSet
|
||||
pm.exitSetHash = exitSetHash
|
||||
|
||||
// If neither of the calculated values changed, no need to update nodes
|
||||
if !filterChanged && !tagOwnerChanged && !autoApproveChanged && !exitSetChanged {
|
||||
log.Trace().
|
||||
Msg("Policy evaluation detected no changes - all hashes match")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Bool("filter.changed", filterChanged).
|
||||
Bool("tagOwners.changed", tagOwnerChanged).
|
||||
Bool("autoApprovers.changed", autoApproveChanged).
|
||||
Bool("exitNodes.changed", exitSetChanged).
|
||||
Msg("Policy changes require node updates")
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
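updateLocked recomputes each derived structure and compares a deephash of the new value against the stored hash, so node updates are only triggered when something actually changed. The sketch below shows just that compare step; it assumes the tailscale.com/util/deephash package is available (the same package used in this diff), and the filterRule type is a made-up placeholder.

```go
package main

import (
	"fmt"

	"tailscale.com/util/deephash"
)

type filterRule struct {
	Src []string
	Dst []string
}

func main() {
	old := []filterRule{{Src: []string{"group:dev"}, Dst: []string{"10.0.0.0/24"}}}
	oldHash := deephash.Hash(&old)

	// Recompute the rules; only propagate an update when the hash differs.
	next := []filterRule{{Src: []string{"group:dev"}, Dst: []string{"10.0.0.0/24"}}}
	nextHash := deephash.Hash(&next)

	if nextHash != oldHash {
		fmt.Println("filter changed:", oldHash.String()[:8], "->", nextHash.String()[:8])
	} else {
		fmt.Println("no change, skip node updates")
	}
}
```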
@@ -151,6 +206,16 @@ func (pm *PolicyManager) SetPolicy(polB []byte) (bool, error) {
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
|
||||
// Log policy metadata for debugging
|
||||
log.Debug().
|
||||
Int("policy.bytes", len(polB)).
|
||||
Int("acls.count", len(pol.ACLs)).
|
||||
Int("groups.count", len(pol.Groups)).
|
||||
Int("hosts.count", len(pol.Hosts)).
|
||||
Int("tagOwners.count", len(pol.TagOwners)).
|
||||
Int("autoApprovers.routes.count", len(pol.AutoApprovers.Routes)).
|
||||
Msg("Policy parsed successfully")
|
||||
|
||||
pm.pol = pol
|
||||
|
||||
return pm.updateLocked()
|
||||
@@ -168,6 +233,35 @@ func (pm *PolicyManager) Filter() ([]tailcfg.FilterRule, []matcher.Match) {
|
||||
return pm.filter, pm.matchers
|
||||
}
|
||||
|
||||
// FilterForNode returns the filter rules for a specific node.
|
||||
// If the policy uses autogroup:self, this returns node-specific rules for security.
|
||||
// Otherwise, it returns the global filter rules for efficiency.
|
||||
func (pm *PolicyManager) FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error) {
|
||||
if pm == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
|
||||
if !pm.usesAutogroupSelf {
|
||||
return pm.filter, nil
|
||||
}
|
||||
|
||||
if rules, ok := pm.filterRulesMap[node.ID()]; ok {
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
rules, err := pm.pol.compileFilterRulesForNode(pm.users, node, pm.nodes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("compiling filter rules for node: %w", err)
|
||||
}
|
||||
|
||||
pm.filterRulesMap[node.ID()] = rules
|
||||
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
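FilterForNode keeps the cheap path (one shared filter) for ordinary policies and only falls back to compiling and caching per-node rules when autogroup:self is in use. A simplified, self-contained sketch of that dispatch follows; the manager, node and rule types are invented stand-ins for PolicyManager, NodeView and tailcfg.FilterRule.

```go
package main

import "fmt"

// node and rule are stand-ins for headscale's NodeView and tailcfg.FilterRule.
type node struct {
	id   uint64
	user string
}
type rule string

type manager struct {
	usesAutogroupSelf bool
	globalFilter      []rule
	perNode           map[uint64][]rule
}

// filterForNode mirrors the dispatch above: shared rules for ordinary policies,
// per-node compiled rules (cached) when autogroup:self is in play.
func (m *manager) filterForNode(n node) []rule {
	if !m.usesAutogroupSelf {
		return m.globalFilter
	}
	if r, ok := m.perNode[n.id]; ok {
		return r
	}
	r := []rule{rule(fmt.Sprintf("allow %s-devices -> %s-devices", n.user, n.user))}
	m.perNode[n.id] = r
	return r
}

func main() {
	m := &manager{
		usesAutogroupSelf: true,
		perNode:           map[uint64][]rule{},
	}
	fmt.Println(m.filterForNode(node{id: 1, user: "alice"}))
	fmt.Println(m.filterForNode(node{id: 2, user: "bob"}))
}
```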
// SetUsers updates the users in the policy manager and updates the filter rules.
|
||||
func (pm *PolicyManager) SetUsers(users []types.User) (bool, error) {
|
||||
if pm == nil {
|
||||
@@ -178,7 +272,23 @@ func (pm *PolicyManager) SetUsers(users []types.User) (bool, error) {
|
||||
defer pm.mu.Unlock()
|
||||
pm.users = users
|
||||
|
||||
return pm.updateLocked()
|
||||
// Clear SSH policy map when users change to force SSH policy recomputation
|
||||
// This ensures that if SSH policy compilation previously failed due to missing users,
|
||||
// it will be retried with the new user list
|
||||
clear(pm.sshPolicyMap)
|
||||
|
||||
changed, err := pm.updateLocked()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// If SSH policies exist, force a policy change when users are updated
|
||||
// This ensures nodes get updated SSH policies even if other policy hashes didn't change
|
||||
if pm.pol != nil && pm.pol.SSHs != nil && len(pm.pol.SSHs) > 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return changed, nil
|
||||
}
|
||||
|
||||
// SetNodes updates the nodes in the policy manager and updates the filter rules.
|
||||
@@ -189,6 +299,20 @@ func (pm *PolicyManager) SetNodes(nodes views.Slice[types.NodeView]) (bool, erro
|
||||
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
|
||||
// Clear cache based on what actually changed
|
||||
if pm.usesAutogroupSelf {
|
||||
// For autogroup:self, we need granular invalidation since rules depend on:
|
||||
// - User ownership (node.User().ID)
|
||||
// - Tag status (node.IsTagged())
|
||||
// - IP addresses (node.IPs())
|
||||
// - Node existence (added/removed)
|
||||
pm.invalidateAutogroupSelfCache(pm.nodes, nodes)
|
||||
} else {
|
||||
// For non-autogroup:self policies, we can clear everything
|
||||
clear(pm.filterRulesMap)
|
||||
}
|
||||
|
||||
pm.nodes = nodes
|
||||
|
||||
return pm.updateLocked()
|
||||
@@ -239,8 +363,9 @@ func (pm *PolicyManager) NodeCanApproveRoute(node types.NodeView, route netip.Pr
|
||||
// The fast path is that a node requests to approve a prefix
|
||||
// where there is an exact entry, e.g. 10.0.0.0/8, then
|
||||
// check and return quickly
|
||||
if _, ok := pm.autoApproveMap[route]; ok {
|
||||
if slices.ContainsFunc(node.IPs(), pm.autoApproveMap[route].Contains) {
|
||||
if approvers, ok := pm.autoApproveMap[route]; ok {
|
||||
canApprove := slices.ContainsFunc(node.IPs(), approvers.Contains)
|
||||
if canApprove {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -253,7 +378,8 @@ func (pm *PolicyManager) NodeCanApproveRoute(node types.NodeView, route netip.Pr
|
||||
// Check if prefix is larger (so containing) and then overlaps
|
||||
// the route to see if the node can approve a subset of an autoapprover
|
||||
if prefix.Bits() <= route.Bits() && prefix.Overlaps(route) {
|
||||
if slices.ContainsFunc(node.IPs(), approveAddrs.Contains) {
|
||||
canApprove := slices.ContainsFunc(node.IPs(), approveAddrs.Contains)
|
||||
if canApprove {
|
||||
return true
|
||||
}
|
||||
}
|
||||
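NodeCanApproveRoute first checks for an exact auto-approver entry for the requested prefix, then falls back to any broader approver prefix that overlaps the route. The stdlib-only sketch below reproduces the same containment logic with a plain map of prefixes to addresses instead of headscale's netipx.IPSet, so the canApprove helper is illustrative only.

```go
package main

import (
	"fmt"
	"net/netip"
)

// canApprove reports whether nodeIP may approve route, given approver prefixes
// mapped to the addresses allowed to approve them.
func canApprove(approvers map[netip.Prefix][]netip.Addr, nodeIP netip.Addr, route netip.Prefix) bool {
	// Fast path: an exact entry for the requested route.
	if ips, ok := approvers[route]; ok {
		for _, ip := range ips {
			if ip == nodeIP {
				return true
			}
		}
	}
	// Slow path: a broader approver prefix that contains the requested route.
	for prefix, ips := range approvers {
		if prefix.Bits() <= route.Bits() && prefix.Overlaps(route) {
			for _, ip := range ips {
				if ip == nodeIP {
					return true
				}
			}
		}
	}
	return false
}

func main() {
	approvers := map[netip.Prefix][]netip.Addr{
		netip.MustParsePrefix("10.0.0.0/8"): {netip.MustParseAddr("100.64.0.1")},
	}
	fmt.Println(canApprove(approvers, netip.MustParseAddr("100.64.0.1"), netip.MustParsePrefix("10.1.0.0/16"))) // true
	fmt.Println(canApprove(approvers, netip.MustParseAddr("100.64.0.2"), netip.MustParsePrefix("10.1.0.0/16"))) // false
}
```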
@@ -331,3 +457,113 @@ func (pm *PolicyManager) DebugString() string {
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// invalidateAutogroupSelfCache intelligently clears only the cache entries that need to be
|
||||
// invalidated when using autogroup:self policies. This is much more efficient than clearing
|
||||
// the entire cache.
|
||||
func (pm *PolicyManager) invalidateAutogroupSelfCache(oldNodes, newNodes views.Slice[types.NodeView]) {
|
||||
// Build maps for efficient lookup
|
||||
oldNodeMap := make(map[types.NodeID]types.NodeView)
|
||||
for _, node := range oldNodes.All() {
|
||||
oldNodeMap[node.ID()] = node
|
||||
}
|
||||
|
||||
newNodeMap := make(map[types.NodeID]types.NodeView)
|
||||
for _, node := range newNodes.All() {
|
||||
newNodeMap[node.ID()] = node
|
||||
}
|
||||
|
||||
// Track which users are affected by changes
|
||||
affectedUsers := make(map[uint]struct{})
|
||||
|
||||
// Check for removed nodes
|
||||
for nodeID, oldNode := range oldNodeMap {
|
||||
if _, exists := newNodeMap[nodeID]; !exists {
|
||||
affectedUsers[oldNode.User().ID] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Check for added nodes
|
||||
for nodeID, newNode := range newNodeMap {
|
||||
if _, exists := oldNodeMap[nodeID]; !exists {
|
||||
affectedUsers[newNode.User().ID] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Check for modified nodes (user changes, tag changes, IP changes)
|
||||
for nodeID, newNode := range newNodeMap {
|
||||
if oldNode, exists := oldNodeMap[nodeID]; exists {
|
||||
// Check if user changed
|
||||
if oldNode.User().ID != newNode.User().ID {
|
||||
affectedUsers[oldNode.User().ID] = struct{}{}
|
||||
affectedUsers[newNode.User().ID] = struct{}{}
|
||||
}
|
||||
|
||||
// Check if tag status changed
|
||||
if oldNode.IsTagged() != newNode.IsTagged() {
|
||||
affectedUsers[newNode.User().ID] = struct{}{}
|
||||
}
|
||||
|
||||
// Check if IPs changed (simple check - could be more sophisticated)
|
||||
oldIPs := oldNode.IPs()
|
||||
newIPs := newNode.IPs()
|
||||
if len(oldIPs) != len(newIPs) {
|
||||
affectedUsers[newNode.User().ID] = struct{}{}
|
||||
} else {
|
||||
// Check if any IPs are different
|
||||
for i, oldIP := range oldIPs {
|
||||
if i >= len(newIPs) || oldIP != newIPs[i] {
|
||||
affectedUsers[newNode.User().ID] = struct{}{}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clear cache entries for affected users only
|
||||
// For autogroup:self, we need to clear all nodes belonging to affected users
|
||||
// because autogroup:self rules depend on the entire user's device set
|
||||
for nodeID := range pm.filterRulesMap {
|
||||
// Find the user for this cached node
|
||||
var nodeUserID uint
|
||||
found := false
|
||||
|
||||
// Check in new nodes first
|
||||
for _, node := range newNodes.All() {
|
||||
if node.ID() == nodeID {
|
||||
nodeUserID = node.User().ID
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// If not found in new nodes, check old nodes
|
||||
if !found {
|
||||
for _, node := range oldNodes.All() {
|
||||
if node.ID() == nodeID {
|
||||
nodeUserID = node.User().ID
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we found the user and they're affected, clear this cache entry
|
||||
if found {
|
||||
if _, affected := affectedUsers[nodeUserID]; affected {
|
||||
delete(pm.filterRulesMap, nodeID)
|
||||
}
|
||||
} else {
|
||||
// Node not found in either old or new list, clear it
|
||||
delete(pm.filterRulesMap, nodeID)
|
||||
}
|
||||
}
|
||||
|
||||
if len(affectedUsers) > 0 {
|
||||
log.Debug().
|
||||
Int("affected_users", len(affectedUsers)).
|
||||
Int("remaining_cache_entries", len(pm.filterRulesMap)).
|
||||
Msg("Selectively cleared autogroup:self cache for affected users")
|
||||
}
|
||||
}
|
||||
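The invalidation walks the old and new node snapshots, collects the users whose device set changed, and clears only their cache entries. The sketch below shows the added/removed half of that diff; the node type and the affectedUsers helper are hypothetical simplifications of the code above.

```go
package main

import "fmt"

type node struct {
	id   uint64
	user string
}

// affectedUsers returns the users whose per-node rules may have changed,
// based on nodes that appear in only one of the two snapshots.
func affectedUsers(old, cur []node) map[string]struct{} {
	oldByID := map[uint64]node{}
	for _, n := range old {
		oldByID[n.id] = n
	}
	curByID := map[uint64]node{}
	for _, n := range cur {
		curByID[n.id] = n
	}
	affected := map[string]struct{}{}
	for id, n := range oldByID {
		if _, ok := curByID[id]; !ok {
			affected[n.user] = struct{}{} // node removed
		}
	}
	for id, n := range curByID {
		if _, ok := oldByID[id]; !ok {
			affected[n.user] = struct{}{} // node added
		}
	}
	return affected
}

func main() {
	old := []node{{1, "alice"}, {2, "bob"}}
	cur := []node{{1, "alice"}, {3, "bob"}}
	fmt.Println(affectedUsers(old, cur)) // map[bob:{}]
}
```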
|
||||
@@ -40,8 +40,8 @@ func TestPolicyManager(t *testing.T) {
|
||||
name: "empty-policy",
|
||||
pol: "{}",
|
||||
nodes: types.Nodes{},
|
||||
wantFilter: nil,
|
||||
wantMatchers: []matcher.Match{},
|
||||
wantFilter: tailcfg.FilterAllowAll,
|
||||
wantMatchers: matcher.MatchesFromFilterRules(tailcfg.FilterAllowAll),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -66,3 +66,141 @@ func TestPolicyManager(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidateAutogroupSelfCache(t *testing.T) {
|
||||
users := types.Users{
|
||||
{Model: gorm.Model{ID: 1}, Name: "user1", Email: "user1@headscale.net"},
|
||||
{Model: gorm.Model{ID: 2}, Name: "user2", Email: "user2@headscale.net"},
|
||||
{Model: gorm.Model{ID: 3}, Name: "user3", Email: "user3@headscale.net"},
|
||||
}
|
||||
|
||||
policy := `{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["autogroup:member"],
|
||||
"dst": ["autogroup:self:*"]
|
||||
}
|
||||
]
|
||||
}`
|
||||
|
||||
initialNodes := types.Nodes{
|
||||
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
|
||||
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
|
||||
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
|
||||
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
|
||||
}
|
||||
|
||||
for i, n := range initialNodes {
|
||||
n.ID = types.NodeID(i + 1)
|
||||
}
|
||||
|
||||
pm, err := NewPolicyManager([]byte(policy), users, initialNodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Add to cache by calling FilterForNode for each node
|
||||
for _, n := range initialNodes {
|
||||
_, err := pm.FilterForNode(n.View())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.Equal(t, len(initialNodes), len(pm.filterRulesMap))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
newNodes types.Nodes
|
||||
expectedCleared int
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "no_changes",
|
||||
newNodes: types.Nodes{
|
||||
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
|
||||
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
|
||||
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
|
||||
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
|
||||
},
|
||||
expectedCleared: 0,
|
||||
description: "No changes should clear no cache entries",
|
||||
},
|
||||
{
|
||||
name: "node_added",
|
||||
newNodes: types.Nodes{
|
||||
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
|
||||
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
|
||||
node("user1-node3", "100.64.0.5", "fd7a:115c:a1e0::5", users[0], nil), // New node
|
||||
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
|
||||
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
|
||||
},
|
||||
expectedCleared: 2, // user1's existing nodes should be cleared
|
||||
description: "Adding a node should clear cache for that user's existing nodes",
|
||||
},
|
||||
{
|
||||
name: "node_removed",
|
||||
newNodes: types.Nodes{
|
||||
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
|
||||
// user1-node2 removed
|
||||
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
|
||||
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
|
||||
},
|
||||
expectedCleared: 2, // user1's remaining node + removed node should be cleared
|
||||
description: "Removing a node should clear cache for that user's remaining nodes",
|
||||
},
|
||||
{
|
||||
name: "user_changed",
|
||||
newNodes: types.Nodes{
|
||||
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
|
||||
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[2], nil), // Changed to user3
|
||||
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
|
||||
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
|
||||
},
|
||||
expectedCleared: 3, // user1's node + user2's node + user3's nodes should be cleared
|
||||
description: "Changing a node's user should clear cache for both old and new users",
|
||||
},
|
||||
{
|
||||
name: "ip_changed",
|
||||
newNodes: types.Nodes{
|
||||
node("user1-node1", "100.64.0.10", "fd7a:115c:a1e0::10", users[0], nil), // IP changed
|
||||
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
|
||||
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
|
||||
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
|
||||
},
|
||||
expectedCleared: 2, // user1's nodes should be cleared
|
||||
description: "Changing a node's IP should clear cache for that user's nodes",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
for i, n := range tt.newNodes {
|
||||
found := false
|
||||
for _, origNode := range initialNodes {
|
||||
if n.Hostname == origNode.Hostname {
|
||||
n.ID = origNode.ID
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
n.ID = types.NodeID(len(initialNodes) + i + 1)
|
||||
}
|
||||
}
|
||||
|
||||
pm.filterRulesMap = make(map[types.NodeID][]tailcfg.FilterRule)
|
||||
for _, n := range initialNodes {
|
||||
_, err := pm.FilterForNode(n.View())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
initialCacheSize := len(pm.filterRulesMap)
|
||||
require.Equal(t, len(initialNodes), initialCacheSize)
|
||||
|
||||
pm.invalidateAutogroupSelfCache(initialNodes.ViewSlice(), tt.newNodes.ViewSlice())
|
||||
|
||||
// Verify the expected number of cache entries were cleared
|
||||
finalCacheSize := len(pm.filterRulesMap)
|
||||
clearedEntries := initialCacheSize - finalCacheSize
|
||||
require.Equal(t, tt.expectedCleared, clearedEntries, tt.description)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
@@ -10,6 +8,8 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/go-json-experiment/json"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/prometheus/common/model"
|
||||
@@ -20,10 +20,20 @@ import (
|
||||
"tailscale.com/types/ptr"
|
||||
"tailscale.com/types/views"
|
||||
"tailscale.com/util/multierr"
|
||||
"tailscale.com/util/slicesx"
|
||||
)
|
||||
|
||||
// Global JSON options for consistent parsing across all struct unmarshaling
var policyJSONOpts = []json.Options{
    json.DefaultOptionsV2(),
    json.MatchCaseInsensitiveNames(true),
    json.RejectUnknownMembers(true),
}
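These options make struct member matching case-insensitive while rejecting member names that the policy types do not declare. A small sketch of both behaviours follows, mirroring the same go-json-experiment calls used here; the acl struct is a made-up example type.

```go
package main

import (
	"fmt"

	"github.com/go-json-experiment/json"
)

type acl struct {
	Action string `json:"action"`
}

func main() {
	opts := []json.Options{
		json.DefaultOptionsV2(),
		json.MatchCaseInsensitiveNames(true),
		json.RejectUnknownMembers(true),
	}

	var a acl
	// Case-insensitive name matching: "ACTION" still maps to Action.
	fmt.Println(json.Unmarshal([]byte(`{"ACTION":"accept"}`), &a, opts...), a.Action)

	// Unknown members are rejected instead of being silently dropped.
	err := json.Unmarshal([]byte(`{"action":"accept","rules":[]}`), &a, opts...)
	fmt.Println(err != nil)
}
```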
const Wildcard = Asterix(0)
|
||||
|
||||
var ErrAutogroupSelfRequiresPerNodeResolution = errors.New("autogroup:self requires per-node resolution and cannot be resolved in this context")
|
||||
|
||||
type Asterix int
|
||||
|
||||
func (a Asterix) Validate() error {
|
||||
@@ -477,9 +487,7 @@ const (
|
||||
AutoGroupMember AutoGroup = "autogroup:member"
|
||||
AutoGroupNonRoot AutoGroup = "autogroup:nonroot"
|
||||
AutoGroupTagged AutoGroup = "autogroup:tagged"
|
||||
|
||||
// These are not yet implemented.
|
||||
AutoGroupSelf AutoGroup = "autogroup:self"
|
||||
AutoGroupSelf AutoGroup = "autogroup:self"
|
||||
)
|
||||
|
||||
var autogroups = []AutoGroup{
|
||||
@@ -487,6 +495,7 @@ var autogroups = []AutoGroup{
|
||||
AutoGroupMember,
|
||||
AutoGroupNonRoot,
|
||||
AutoGroupTagged,
|
||||
AutoGroupSelf,
|
||||
}
|
||||
|
||||
func (ag AutoGroup) Validate() error {
|
||||
@@ -506,6 +515,10 @@ func (ag *AutoGroup) UnmarshalJSON(b []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ag AutoGroup) String() string {
|
||||
return string(ag)
|
||||
}
|
||||
|
||||
// MarshalJSON marshals the AutoGroup to JSON.
|
||||
func (ag AutoGroup) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(string(ag))
|
||||
@@ -578,6 +591,12 @@ func (ag AutoGroup) Resolve(p *Policy, users types.Users, nodes views.Slice[type
|
||||
|
||||
return build.IPSet()
|
||||
|
||||
case AutoGroupSelf:
|
||||
// autogroup:self represents all devices owned by the same user.
|
||||
// This cannot be resolved in the general context and should be handled
|
||||
// specially during policy compilation per-node for security.
|
||||
return nil, ErrAutogroupSelfRequiresPerNodeResolution
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown autogroup %q", ag)
|
||||
}
|
||||
@@ -609,10 +628,8 @@ type AliasWithPorts struct {
|
||||
}
|
||||
|
||||
func (ve *AliasWithPorts) UnmarshalJSON(b []byte) error {
|
||||
// TODO(kradalby): use encoding/json/v2 (go-json-experiment)
|
||||
dec := json.NewDecoder(bytes.NewReader(b))
|
||||
var v any
|
||||
if err := dec.Decode(&v); err != nil {
|
||||
if err := json.Unmarshal(b, &v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -730,7 +747,7 @@ type Aliases []Alias
|
||||
|
||||
func (a *Aliases) UnmarshalJSON(b []byte) error {
|
||||
var aliases []AliasEnc
|
||||
err := json.Unmarshal(b, &aliases)
|
||||
err := json.Unmarshal(b, &aliases, policyJSONOpts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -820,7 +837,7 @@ type AutoApprovers []AutoApprover
|
||||
|
||||
func (aa *AutoApprovers) UnmarshalJSON(b []byte) error {
|
||||
var autoApprovers []AutoApproverEnc
|
||||
err := json.Unmarshal(b, &autoApprovers)
|
||||
err := json.Unmarshal(b, &autoApprovers, policyJSONOpts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -915,7 +932,7 @@ type Owners []Owner
|
||||
|
||||
func (o *Owners) UnmarshalJSON(b []byte) error {
|
||||
var owners []OwnerEnc
|
||||
err := json.Unmarshal(b, &owners)
|
||||
err := json.Unmarshal(b, &owners, policyJSONOpts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -989,18 +1006,46 @@ func (g Groups) Contains(group *Group) error {
|
||||
// that all group names conform to the expected format, which is always prefixed
|
||||
// with "group:". If any group name is invalid, an error is returned.
|
||||
func (g *Groups) UnmarshalJSON(b []byte) error {
|
||||
var rawGroups map[string][]string
|
||||
if err := json.Unmarshal(b, &rawGroups); err != nil {
|
||||
// First unmarshal as a generic map to validate group names first
|
||||
var rawMap map[string]interface{}
|
||||
if err := json.Unmarshal(b, &rawMap); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate group names first before checking data types
|
||||
for key := range rawMap {
|
||||
group := Group(key)
|
||||
if err := group.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Then validate each field can be converted to []string
|
||||
rawGroups := make(map[string][]string)
|
||||
for key, value := range rawMap {
|
||||
switch v := value.(type) {
|
||||
case []interface{}:
|
||||
// Convert []interface{} to []string
|
||||
var stringSlice []string
|
||||
for _, item := range v {
|
||||
if str, ok := item.(string); ok {
|
||||
stringSlice = append(stringSlice, str)
|
||||
} else {
|
||||
return fmt.Errorf(`Group "%s" contains invalid member type, expected string but got %T`, key, item)
|
||||
}
|
||||
}
|
||||
rawGroups[key] = stringSlice
|
||||
case string:
|
||||
return fmt.Errorf(`Group "%s" value must be an array of users, got string: "%s"`, key, v)
|
||||
default:
|
||||
return fmt.Errorf(`Group "%s" value must be an array of users, got %T`, key, v)
|
||||
}
|
||||
}
|
||||
|
||||
*g = make(Groups)
|
||||
for key, value := range rawGroups {
|
||||
group := Group(key)
|
||||
if err := group.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Group name already validated above
|
||||
var usernames Usernames
|
||||
|
||||
for _, u := range value {
|
||||
@@ -1026,7 +1071,7 @@ type Hosts map[Host]Prefix
|
||||
|
||||
func (h *Hosts) UnmarshalJSON(b []byte) error {
|
||||
var rawHosts map[string]string
|
||||
if err := json.Unmarshal(b, &rawHosts); err != nil {
|
||||
if err := json.Unmarshal(b, &rawHosts, policyJSONOpts...); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1237,13 +1282,290 @@ func resolveAutoApprovers(p *Policy, users types.Users, nodes views.Slice[types.
|
||||
return ret, exitNodeSet, nil
|
||||
}
|
||||
|
||||
// Action represents the action to take for an ACL rule.
|
||||
type Action string
|
||||
|
||||
const (
|
||||
ActionAccept Action = "accept"
|
||||
)
|
||||
|
||||
// SSHAction represents the action to take for an SSH rule.
|
||||
type SSHAction string
|
||||
|
||||
const (
|
||||
SSHActionAccept SSHAction = "accept"
|
||||
SSHActionCheck SSHAction = "check"
|
||||
)
|
||||
|
||||
// String returns the string representation of the Action.
|
||||
func (a Action) String() string {
|
||||
return string(a)
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements JSON unmarshaling for Action.
|
||||
func (a *Action) UnmarshalJSON(b []byte) error {
|
||||
str := strings.Trim(string(b), `"`)
|
||||
switch str {
|
||||
case "accept":
|
||||
*a = ActionAccept
|
||||
default:
|
||||
return fmt.Errorf("invalid action %q, must be %q", str, ActionAccept)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements JSON marshaling for Action.
|
||||
func (a Action) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(string(a))
|
||||
}
|
||||
|
||||
// String returns the string representation of the SSHAction.
|
||||
func (a SSHAction) String() string {
|
||||
return string(a)
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements JSON unmarshaling for SSHAction.
|
||||
func (a *SSHAction) UnmarshalJSON(b []byte) error {
|
||||
str := strings.Trim(string(b), `"`)
|
||||
switch str {
|
||||
case "accept":
|
||||
*a = SSHActionAccept
|
||||
case "check":
|
||||
*a = SSHActionCheck
|
||||
default:
|
||||
return fmt.Errorf("invalid SSH action %q, must be one of: accept, check", str)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements JSON marshaling for SSHAction.
|
||||
func (a SSHAction) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(string(a))
|
||||
}
|
||||
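Action and SSHAction replace the previous plain strings, so an invalid value is rejected while the policy is being unmarshaled rather than at evaluation time. A minimal stdlib sketch of that validate-on-unmarshal pattern follows; the action type and actionAccept constant are illustrative, not the real types.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// action is a string-backed enum validated during unmarshaling,
// the same pattern as the Action / SSHAction types above.
type action string

const actionAccept action = "accept"

func (a *action) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	if s != string(actionAccept) {
		return fmt.Errorf("invalid action %q, must be %q", s, actionAccept)
	}
	*a = action(s)
	return nil
}

func main() {
	var ok, bad action
	fmt.Println(json.Unmarshal([]byte(`"accept"`), &ok), ok) // <nil> accept
	fmt.Println(json.Unmarshal([]byte(`"deny"`), &bad))      // invalid action "deny" ...
}
```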
|
||||
// Protocol represents a network protocol with its IANA number and descriptions.
|
||||
type Protocol string
|
||||
|
||||
const (
|
||||
ProtocolICMP Protocol = "icmp"
|
||||
ProtocolIGMP Protocol = "igmp"
|
||||
ProtocolIPv4 Protocol = "ipv4"
|
||||
ProtocolIPInIP Protocol = "ip-in-ip"
|
||||
ProtocolTCP Protocol = "tcp"
|
||||
ProtocolEGP Protocol = "egp"
|
||||
ProtocolIGP Protocol = "igp"
|
||||
ProtocolUDP Protocol = "udp"
|
||||
ProtocolGRE Protocol = "gre"
|
||||
ProtocolESP Protocol = "esp"
|
||||
ProtocolAH Protocol = "ah"
|
||||
ProtocolIPv6ICMP Protocol = "ipv6-icmp"
|
||||
ProtocolSCTP Protocol = "sctp"
|
||||
ProtocolFC Protocol = "fc"
|
||||
ProtocolWildcard Protocol = "*"
|
||||
)
|
||||
|
||||
// String returns the string representation of the Protocol.
|
||||
func (p Protocol) String() string {
|
||||
return string(p)
|
||||
}
|
||||
|
||||
// Description returns the human-readable description of the Protocol.
|
||||
func (p Protocol) Description() string {
|
||||
switch p {
|
||||
case ProtocolICMP:
|
||||
return "Internet Control Message Protocol"
|
||||
case ProtocolIGMP:
|
||||
return "Internet Group Management Protocol"
|
||||
case ProtocolIPv4:
|
||||
return "IPv4 encapsulation"
|
||||
case ProtocolTCP:
|
||||
return "Transmission Control Protocol"
|
||||
case ProtocolEGP:
|
||||
return "Exterior Gateway Protocol"
|
||||
case ProtocolIGP:
|
||||
return "Interior Gateway Protocol"
|
||||
case ProtocolUDP:
|
||||
return "User Datagram Protocol"
|
||||
case ProtocolGRE:
|
||||
return "Generic Routing Encapsulation"
|
||||
case ProtocolESP:
|
||||
return "Encapsulating Security Payload"
|
||||
case ProtocolAH:
|
||||
return "Authentication Header"
|
||||
case ProtocolIPv6ICMP:
|
||||
return "Internet Control Message Protocol for IPv6"
|
||||
case ProtocolSCTP:
|
||||
return "Stream Control Transmission Protocol"
|
||||
case ProtocolFC:
|
||||
return "Fibre Channel"
|
||||
case ProtocolWildcard:
|
||||
return "Wildcard (not supported - use specific protocol)"
|
||||
default:
|
||||
return "Unknown Protocol"
|
||||
}
|
||||
}
|
||||
|
||||
// parseProtocol converts a Protocol to its IANA protocol numbers and wildcard requirement.
|
||||
// Since validation happens during UnmarshalJSON, this method should not fail for valid Protocol values.
|
||||
func (p Protocol) parseProtocol() ([]int, bool) {
|
||||
switch p {
|
||||
case "":
|
||||
// Empty protocol applies to TCP and UDP traffic only
|
||||
return []int{protocolTCP, protocolUDP}, false
|
||||
case ProtocolWildcard:
|
||||
// Wildcard protocol - defensive handling (should not reach here due to validation)
|
||||
return nil, false
|
||||
case ProtocolIGMP:
|
||||
return []int{protocolIGMP}, true
|
||||
case ProtocolIPv4, ProtocolIPInIP:
|
||||
return []int{protocolIPv4}, true
|
||||
case ProtocolTCP:
|
||||
return []int{protocolTCP}, false
|
||||
case ProtocolEGP:
|
||||
return []int{protocolEGP}, true
|
||||
case ProtocolIGP:
|
||||
return []int{protocolIGP}, true
|
||||
case ProtocolUDP:
|
||||
return []int{protocolUDP}, false
|
||||
case ProtocolGRE:
|
||||
return []int{protocolGRE}, true
|
||||
case ProtocolESP:
|
||||
return []int{protocolESP}, true
|
||||
case ProtocolAH:
|
||||
return []int{protocolAH}, true
|
||||
case ProtocolSCTP:
|
||||
return []int{protocolSCTP}, false
|
||||
case ProtocolICMP:
|
||||
return []int{protocolICMP, protocolIPv6ICMP}, true
|
||||
default:
|
||||
// Try to parse as a numeric protocol number
|
||||
// This should not fail since validation happened during unmarshaling
|
||||
protocolNumber, _ := strconv.Atoi(string(p))
|
||||
|
||||
// Determine if wildcard is needed based on protocol number
|
||||
needsWildcard := protocolNumber != protocolTCP &&
|
||||
protocolNumber != protocolUDP &&
|
||||
protocolNumber != protocolSCTP
|
||||
|
||||
return []int{protocolNumber}, needsWildcard
|
||||
}
|
||||
}
|
||||
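parseProtocol boils down to a lookup: each protocol name yields its IANA number(s) plus a flag saying whether only wildcard ports are permitted. The table-driven sketch below captures that shape; the protoInfo type and the protocols map are assumptions for illustration, with the numbers taken from the constants defined later in this file.

```go
package main

import "fmt"

// protoInfo mirrors what parseProtocol returns: the IANA protocol number(s)
// for a protocol name, and whether that protocol is restricted to wildcard ports.
type protoInfo struct {
	numbers      []int
	wildcardOnly bool
}

// protocols is an illustrative subset (tcp=6, udp=17, icmp=1/58, gre=47, sctp=132).
var protocols = map[string]protoInfo{
	"":     {numbers: []int{6, 17}, wildcardOnly: false}, // empty applies to TCP and UDP only
	"tcp":  {numbers: []int{6}, wildcardOnly: false},
	"udp":  {numbers: []int{17}, wildcardOnly: false},
	"sctp": {numbers: []int{132}, wildcardOnly: false},
	"icmp": {numbers: []int{1, 58}, wildcardOnly: true}, // IPv4 and IPv6 ICMP
	"gre":  {numbers: []int{47}, wildcardOnly: true},
}

func main() {
	for _, name := range []string{"icmp", "udp", ""} {
		p := protocols[name]
		fmt.Printf("%q -> numbers=%v wildcardOnly=%v\n", name, p.numbers, p.wildcardOnly)
	}
}
```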
|
||||
// UnmarshalJSON implements JSON unmarshaling for Protocol.
|
||||
func (p *Protocol) UnmarshalJSON(b []byte) error {
|
||||
str := strings.Trim(string(b), `"`)
|
||||
|
||||
// Normalize to lowercase for case-insensitive matching
|
||||
*p = Protocol(strings.ToLower(str))
|
||||
|
||||
// Validate the protocol
|
||||
if err := p.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validate checks if the Protocol is valid.
|
||||
func (p Protocol) validate() error {
|
||||
switch p {
|
||||
case "", ProtocolICMP, ProtocolIGMP, ProtocolIPv4, ProtocolIPInIP,
|
||||
ProtocolTCP, ProtocolEGP, ProtocolIGP, ProtocolUDP, ProtocolGRE,
|
||||
ProtocolESP, ProtocolAH, ProtocolSCTP:
|
||||
return nil
|
||||
case ProtocolWildcard:
|
||||
// Wildcard "*" is not allowed - Tailscale rejects it
|
||||
return fmt.Errorf("proto name \"*\" not known; use protocol number 0-255 or protocol name (icmp, tcp, udp, etc.)")
|
||||
default:
|
||||
// Try to parse as a numeric protocol number
|
||||
str := string(p)
|
||||
|
||||
// Check for leading zeros (not allowed by Tailscale)
|
||||
if str == "0" || (len(str) > 1 && str[0] == '0') {
|
||||
return fmt.Errorf("leading 0 not permitted in protocol number \"%s\"", str)
|
||||
}
|
||||
|
||||
protocolNumber, err := strconv.Atoi(str)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid protocol %q: must be a known protocol name or valid protocol number 0-255", p)
|
||||
}
|
||||
|
||||
if protocolNumber < 0 || protocolNumber > 255 {
|
||||
return fmt.Errorf("protocol number %d out of range (0-255)", protocolNumber)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalJSON implements JSON marshaling for Protocol.
|
||||
func (p Protocol) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(string(p))
|
||||
}
|
||||
|
||||
// Protocol constants matching the IANA numbers
|
||||
const (
|
||||
protocolICMP = 1 // Internet Control Message
|
||||
protocolIGMP = 2 // Internet Group Management
|
||||
protocolIPv4 = 4 // IPv4 encapsulation
|
||||
protocolTCP = 6 // Transmission Control
|
||||
protocolEGP = 8 // Exterior Gateway Protocol
|
||||
protocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP)
|
||||
protocolUDP = 17 // User Datagram
|
||||
protocolGRE = 47 // Generic Routing Encapsulation
|
||||
protocolESP = 50 // Encap Security Payload
|
||||
protocolAH = 51 // Authentication Header
|
||||
protocolIPv6ICMP = 58 // ICMP for IPv6
|
||||
protocolSCTP = 132 // Stream Control Transmission Protocol
|
||||
protocolFC = 133 // Fibre Channel
|
||||
)
|
||||
|
||||
type ACL struct {
|
||||
Action string `json:"action"` // TODO(kradalby): add strict type
|
||||
Protocol string `json:"proto"` // TODO(kradalby): add strict type
|
||||
Action Action `json:"action"`
|
||||
Protocol Protocol `json:"proto"`
|
||||
Sources Aliases `json:"src"`
|
||||
Destinations []AliasWithPorts `json:"dst"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements custom unmarshalling for ACL that ignores fields starting with '#'.
|
||||
// headscale-admin uses # in some field names to add metadata, so we will ignore
|
||||
// those to ensure it doesn't break.
|
||||
// https://github.com/GoodiesHQ/headscale-admin/blob/214a44a9c15c92d2b42383f131b51df10c84017c/src/lib/common/acl.svelte.ts#L38
|
||||
func (a *ACL) UnmarshalJSON(b []byte) error {
|
||||
// First unmarshal into a map to filter out comment fields
|
||||
var raw map[string]any
|
||||
if err := json.Unmarshal(b, &raw, policyJSONOpts...); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove any fields that start with '#'
|
||||
filtered := make(map[string]any)
|
||||
for key, value := range raw {
|
||||
if !strings.HasPrefix(key, "#") {
|
||||
filtered[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
// Marshal the filtered map back to JSON
|
||||
filteredBytes, err := json.Marshal(filtered)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create a type alias to avoid infinite recursion
|
||||
type aclAlias ACL
|
||||
var temp aclAlias
|
||||
|
||||
// Unmarshal into the temporary struct using the v2 JSON options
|
||||
if err := json.Unmarshal(filteredBytes, &temp, policyJSONOpts...); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Copy the result back to the original struct
|
||||
*a = ACL(temp)
|
||||
return nil
|
||||
}
|
||||
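Because RejectUnknownMembers is in effect, the '#'-prefixed metadata fields that headscale-admin writes would otherwise fail decoding; filtering them out first keeps those policies loadable. A stdlib-only sketch of the same filter-then-decode idea follows; decodeIgnoringComments and aclRule are invented names for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type aclRule struct {
	Action string   `json:"action"`
	Src    []string `json:"src"`
	Dst    []string `json:"dst"`
}

// decodeIgnoringComments drops top-level members whose name starts with '#'
// before decoding into the target struct, the same filter-then-decode idea as above.
func decodeIgnoringComments(b []byte, out any) error {
	var raw map[string]any
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}
	for k := range raw {
		if strings.HasPrefix(k, "#") {
			delete(raw, k)
		}
	}
	filtered, err := json.Marshal(raw)
	if err != nil {
		return err
	}
	return json.Unmarshal(filtered, out)
}

func main() {
	input := `{"#comment":"added by headscale-admin","action":"accept","src":["*"],"dst":["*:*"]}`
	var r aclRule
	fmt.Println(decodeIgnoringComments([]byte(input), &r), r)
}
```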
|
||||
// Policy represents a Tailscale Network Policy.
|
||||
// TODO(kradalby):
|
||||
// Add validation method checking:
|
||||
@@ -1261,7 +1583,7 @@ type Policy struct {
|
||||
Hosts Hosts `json:"hosts,omitempty"`
|
||||
TagOwners TagOwners `json:"tagOwners,omitempty"`
|
||||
ACLs []ACL `json:"acls,omitempty"`
|
||||
AutoApprovers AutoApproverPolicy `json:"autoApprovers,omitempty"`
|
||||
AutoApprovers AutoApproverPolicy `json:"autoApprovers"`
|
||||
SSHs []SSH `json:"ssh,omitempty"`
|
||||
}
|
||||
|
||||
@@ -1271,11 +1593,11 @@ type Policy struct {
|
||||
var (
|
||||
// TODO(kradalby): Add these checks for tagOwners and autoApprovers.
|
||||
autogroupForSrc = []AutoGroup{AutoGroupMember, AutoGroupTagged}
|
||||
autogroupForDst = []AutoGroup{AutoGroupInternet, AutoGroupMember, AutoGroupTagged}
|
||||
autogroupForDst = []AutoGroup{AutoGroupInternet, AutoGroupMember, AutoGroupTagged, AutoGroupSelf}
|
||||
autogroupForSSHSrc = []AutoGroup{AutoGroupMember, AutoGroupTagged}
|
||||
autogroupForSSHDst = []AutoGroup{AutoGroupMember, AutoGroupTagged}
|
||||
autogroupForSSHDst = []AutoGroup{AutoGroupMember, AutoGroupTagged, AutoGroupSelf}
|
||||
autogroupForSSHUser = []AutoGroup{AutoGroupNonRoot}
|
||||
autogroupNotSupported = []AutoGroup{AutoGroupSelf}
|
||||
autogroupNotSupported = []AutoGroup{}
|
||||
)
|
||||
|
||||
func validateAutogroupSupported(ag *AutoGroup) error {
|
||||
@@ -1299,6 +1621,10 @@ func validateAutogroupForSrc(src *AutoGroup) error {
|
||||
return errors.New(`"autogroup:internet" used in source, it can only be used in ACL destinations`)
|
||||
}
|
||||
|
||||
if src.Is(AutoGroupSelf) {
|
||||
return errors.New(`"autogroup:self" used in source, it can only be used in ACL destinations`)
|
||||
}
|
||||
|
||||
if !slices.Contains(autogroupForSrc, *src) {
|
||||
return fmt.Errorf("autogroup %q is not supported for ACL sources, can be %v", *src, autogroupForSrc)
|
||||
}
|
||||
@@ -1439,13 +1765,14 @@ func (p *Policy) validate() error {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Validate protocol-port compatibility
|
||||
if err := validateProtocolPortCompatibility(acl.Protocol, acl.Destinations); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, ssh := range p.SSHs {
|
||||
if ssh.Action != "accept" && ssh.Action != "check" {
|
||||
errs = append(errs, fmt.Errorf("SSH action %q is not valid, must be accept or check", ssh.Action))
|
||||
}
|
||||
|
||||
for _, user := range ssh.Users {
|
||||
if strings.HasPrefix(string(user), "autogroup:") {
|
||||
maybeAuto := AutoGroup(user)
|
||||
@@ -1559,10 +1886,10 @@ func (p *Policy) validate() error {
|
||||
|
||||
// SSH controls who can ssh into which machines.
|
||||
type SSH struct {
|
||||
Action string `json:"action"`
|
||||
Action SSHAction `json:"action"`
|
||||
Sources SSHSrcAliases `json:"src"`
|
||||
Destinations SSHDstAliases `json:"dst"`
|
||||
Users []SSHUser `json:"users"`
|
||||
Users SSHUsers `json:"users"`
|
||||
CheckPeriod model.Duration `json:"checkPeriod,omitempty"`
|
||||
}
|
||||
|
||||
@@ -1590,7 +1917,7 @@ func (g Groups) MarshalJSON() ([]byte, error) {
|
||||
|
||||
func (a *SSHSrcAliases) UnmarshalJSON(b []byte) error {
|
||||
var aliases []AliasEnc
|
||||
err := json.Unmarshal(b, &aliases)
|
||||
err := json.Unmarshal(b, &aliases, policyJSONOpts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1613,7 +1940,7 @@ func (a *SSHSrcAliases) UnmarshalJSON(b []byte) error {
|
||||
|
||||
func (a *SSHDstAliases) UnmarshalJSON(b []byte) error {
|
||||
var aliases []AliasEnc
|
||||
err := json.Unmarshal(b, &aliases)
|
||||
err := json.Unmarshal(b, &aliases, policyJSONOpts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1715,6 +2042,22 @@ func (a SSHSrcAliases) Resolve(p *Policy, users types.Users, nodes views.Slice[t
|
||||
// It can be a list of usernames, tags or autogroups.
|
||||
type SSHDstAliases []Alias
|
||||
|
||||
type SSHUsers []SSHUser

func (u SSHUsers) ContainsRoot() bool {
    return slices.Contains(u, "root")
}

func (u SSHUsers) ContainsNonRoot() bool {
    return slices.Contains(u, SSHUser(AutoGroupNonRoot))
}

func (u SSHUsers) NormalUsers() []SSHUser {
    return slicesx.Filter(nil, u, func(user SSHUser) bool {
        return user != "root" && user != SSHUser(AutoGroupNonRoot)
    })
}
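These helpers drive the SSH user map built at the top of this section: autogroup:nonroot maps "*" to "=" (any non-root user), root is only enabled when listed explicitly, and ordinary usernames map to themselves. A runnable sketch of that mapping follows; sshUserMap is a hypothetical helper, not headscale's API.

```go
package main

import (
	"fmt"
	"slices"
)

const autoGroupNonRoot = "autogroup:nonroot"

// sshUserMap mirrors the userMap construction at the top of this section:
// autogroup:nonroot enables any non-root user, root must be requested
// explicitly, and plain usernames map to themselves.
func sshUserMap(users []string) map[string]string {
	m := map[string]string{}
	if slices.Contains(users, autoGroupNonRoot) {
		m["*"] = "="
		m["root"] = "" // root stays disabled unless explicitly listed
	}
	if slices.Contains(users, "root") {
		m["root"] = "root"
	}
	for _, u := range users {
		if u != "root" && u != autoGroupNonRoot {
			m[u] = u
		}
	}
	return m
}

func main() {
	fmt.Println(sshUserMap([]string{autoGroupNonRoot}))
	fmt.Println(sshUserMap([]string{"root", "ubuntu"}))
}
```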
|
||||
type SSHUser string
|
||||
|
||||
func (u SSHUser) String() string {
|
||||
@@ -1741,9 +2084,13 @@ func unmarshalPolicy(b []byte) (*Policy, error) {
|
||||
}
|
||||
|
||||
ast.Standardize()
|
||||
acl := ast.Pack()
|
||||
|
||||
if err = json.Unmarshal(acl, &policy); err != nil {
|
||||
if err = json.Unmarshal(ast.Pack(), &policy, policyJSONOpts...); err != nil {
|
||||
var serr *json.SemanticError
|
||||
if errors.As(err, &serr) && serr.Err == json.ErrUnknownName {
|
||||
ptr := serr.JSONPointer
|
||||
name := ptr.LastToken()
|
||||
return nil, fmt.Errorf("unknown field %q", name)
|
||||
}
|
||||
return nil, fmt.Errorf("parsing policy from bytes: %w", err)
|
||||
}
|
||||
|
||||
@@ -1754,6 +2101,62 @@ func unmarshalPolicy(b []byte) (*Policy, error) {
|
||||
return &policy, nil
|
||||
}
|
||||
|
||||
const (
|
||||
expectedTokenItems = 2
|
||||
)
|
||||
// validateProtocolPortCompatibility checks that only TCP, UDP, and SCTP protocols
|
||||
// can have specific ports. All other protocols should only use wildcard ports.
|
||||
func validateProtocolPortCompatibility(protocol Protocol, destinations []AliasWithPorts) error {
|
||||
// Only TCP, UDP, and SCTP support specific ports
|
||||
supportsSpecificPorts := protocol == ProtocolTCP || protocol == ProtocolUDP || protocol == ProtocolSCTP || protocol == ""
|
||||
|
||||
if supportsSpecificPorts {
|
||||
return nil // No validation needed for these protocols
|
||||
}
|
||||
|
||||
// For all other protocols, check that all destinations use wildcard ports
|
||||
for _, dst := range destinations {
|
||||
for _, portRange := range dst.Ports {
|
||||
// Check if it's not a wildcard port (0-65535)
|
||||
if !(portRange.First == 0 && portRange.Last == 65535) {
|
||||
return fmt.Errorf("protocol %q does not support specific ports; only \"*\" is allowed", protocol)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
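Only TCP, UDP, SCTP and the empty default protocol may name specific ports; every other protocol must use the wildcard. The sketch below reproduces that check over a simplified portRange type; checkProtoPorts is an illustrative helper, not the function above.

```go
package main

import "fmt"

type portRange struct{ first, last uint16 }

var wildcardPorts = portRange{0, 65535}

// checkProtoPorts mirrors the rule above: only tcp, udp, sctp and the empty
// default protocol may carry specific ports; all others must use "*".
func checkProtoPorts(proto string, ports []portRange) error {
	switch proto {
	case "", "tcp", "udp", "sctp":
		return nil
	}
	for _, p := range ports {
		if p != wildcardPorts {
			return fmt.Errorf("protocol %q does not support specific ports; only \"*\" is allowed", proto)
		}
	}
	return nil
}

func main() {
	fmt.Println(checkProtoPorts("tcp", []portRange{{80, 80}}))      // <nil>
	fmt.Println(checkProtoPorts("icmp", []portRange{{80, 80}}))     // error
	fmt.Println(checkProtoPorts("gre", []portRange{wildcardPorts})) // <nil>
}
```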
|
||||
// usesAutogroupSelf checks if the policy uses autogroup:self in any ACL or SSH rules.
|
||||
func (p *Policy) usesAutogroupSelf() bool {
|
||||
if p == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check ACL rules
|
||||
for _, acl := range p.ACLs {
|
||||
for _, src := range acl.Sources {
|
||||
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, dest := range acl.Destinations {
|
||||
if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check SSH rules
|
||||
for _, ssh := range p.SSHs {
|
||||
for _, src := range ssh.Sources {
|
||||
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, dest := range ssh.Destinations {
|
||||
if ag, ok := dest.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
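usesAutogroupSelf is a simple scan over ACL and SSH sources and destinations; when it returns true, the PolicyManager switches to the per-node compilation path shown earlier. A trimmed-down sketch of that scan follows; simplePolicy and simpleACL are invented stand-ins for the real policy types.

```go
package main

import "fmt"

// A trimmed-down policy model: just enough to show the scan performed by
// usesAutogroupSelf over ACL and SSH sources and destinations.
type simpleACL struct {
	Src []string
	Dst []string
}

type simplePolicy struct {
	ACLs []simpleACL
	SSHs []simpleACL
}

func usesAutogroupSelf(p simplePolicy) bool {
	scan := func(rules []simpleACL) bool {
		for _, r := range rules {
			for _, s := range r.Src {
				if s == "autogroup:self" {
					return true
				}
			}
			for _, d := range r.Dst {
				if d == "autogroup:self" {
					return true
				}
			}
		}
		return false
	}
	return scan(p.ACLs) || scan(p.SSHs)
}

func main() {
	p := simplePolicy{
		ACLs: []simpleACL{{Src: []string{"autogroup:member"}, Dst: []string{"autogroup:self"}}},
	}
	fmt.Println(usesAutogroupSelf(p)) // true
}
```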
|
||||
@@ -348,6 +348,26 @@ func TestUnmarshalPolicy(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "2652-asterix-error-better-explain",
|
||||
input: `
|
||||
{
|
||||
"ssh": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"*"
|
||||
],
|
||||
"dst": [
|
||||
"*"
|
||||
],
|
||||
"users": ["root"]
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
wantErr: "alias v2.Asterix is not supported for SSH source",
|
||||
},
|
||||
{
|
||||
name: "invalid-username",
|
||||
input: `
|
||||
@@ -439,7 +459,7 @@ func TestUnmarshalPolicy(t *testing.T) {
|
||||
],
|
||||
}
|
||||
`,
|
||||
wantErr: `AutoGroup is invalid, got: "autogroup:invalid", must be one of [autogroup:internet autogroup:member autogroup:nonroot autogroup:tagged]`,
|
||||
wantErr: `AutoGroup is invalid, got: "autogroup:invalid", must be one of [autogroup:internet autogroup:member autogroup:nonroot autogroup:tagged autogroup:self]`,
|
||||
},
|
||||
{
|
||||
name: "undefined-hostname-errors-2490",
|
||||
@@ -956,6 +976,500 @@ func TestUnmarshalPolicy(t *testing.T) {
|
||||
`,
|
||||
wantErr: `first port must be >0, or use '*' for wildcard`,
|
||||
},
|
||||
{
|
||||
name: "disallow-unsupported-fields",
|
||||
input: `
|
||||
{
|
||||
// rules doesnt exists, we have "acls"
|
||||
"rules": [
|
||||
]
|
||||
}
|
||||
`,
|
||||
wantErr: `unknown field "rules"`,
|
||||
},
|
||||
{
|
||||
name: "disallow-unsupported-fields-nested",
|
||||
input: `
|
||||
{
|
||||
"acls": [
|
||||
{ "action": "accept", "BAD": ["FOO:BAR:FOO:BAR"], "NOT": ["BAD:BAD:BAD:BAD"] }
|
||||
]
|
||||
}
|
||||
`,
|
||||
wantErr: `unknown field`,
|
||||
},
|
||||
{
|
||||
name: "invalid-group-name",
|
||||
input: `
|
||||
{
|
||||
"groups": {
|
||||
"group:test": ["user@example.com"],
|
||||
"INVALID_GROUP_FIELD": ["user@example.com"]
|
||||
}
|
||||
}
|
||||
`,
|
||||
wantErr: `Group has to start with "group:", got: "INVALID_GROUP_FIELD"`,
|
||||
},
|
||||
{
|
||||
name: "invalid-group-datatype",
|
||||
input: `
|
||||
{
|
||||
"groups": {
|
||||
"group:test": ["user@example.com"],
|
||||
"group:invalid": "should fail"
|
||||
}
|
||||
}
|
||||
`,
|
||||
wantErr: `Group "group:invalid" value must be an array of users, got string: "should fail"`,
|
||||
},
|
||||
{
|
||||
name: "invalid-group-name-and-datatype-fails-on-name-first",
|
||||
input: `
|
||||
{
|
||||
"groups": {
|
||||
"group:test": ["user@example.com"],
|
||||
"INVALID_GROUP_FIELD": "should fail"
|
||||
}
|
||||
}
|
||||
`,
|
||||
wantErr: `Group has to start with "group:", got: "INVALID_GROUP_FIELD"`,
|
||||
},
|
||||
{
|
||||
name: "disallow-unsupported-fields-hosts-level",
|
||||
input: `
|
||||
{
|
||||
"hosts": {
|
||||
"host1": "10.0.0.1",
|
||||
"INVALID_HOST_FIELD": "should fail"
|
||||
}
|
||||
}
|
||||
`,
|
||||
wantErr: `Hostname "INVALID_HOST_FIELD" contains an invalid IP address: "should fail"`,
|
||||
},
|
||||
{
|
||||
name: "disallow-unsupported-fields-tagowners-level",
|
||||
input: `
|
||||
{
|
||||
"tagOwners": {
|
||||
"tag:test": ["user@example.com"],
|
||||
"INVALID_TAG_FIELD": "should fail"
|
||||
}
|
||||
}
|
||||
`,
|
||||
wantErr: `tag has to start with "tag:", got: "INVALID_TAG_FIELD"`,
|
||||
},
|
||||
{
|
||||
name: "disallow-unsupported-fields-acls-level",
|
||||
input: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "tcp",
|
||||
"src": ["*"],
|
||||
"dst": ["*:*"],
|
||||
"INVALID_ACL_FIELD": "should fail"
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
wantErr: `unknown field "INVALID_ACL_FIELD"`,
|
||||
},
|
||||
{
|
||||
name: "disallow-unsupported-fields-ssh-level",
|
||||
input: `
|
||||
{
|
||||
"ssh": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["user@example.com"],
|
||||
"dst": ["user@example.com"],
|
||||
"users": ["root"],
|
||||
"INVALID_SSH_FIELD": "should fail"
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
wantErr: `unknown field "INVALID_SSH_FIELD"`,
|
||||
},
|
||||
{
|
||||
name: "disallow-unsupported-fields-policy-level",
|
||||
input: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "tcp",
|
||||
"src": ["*"],
|
||||
"dst": ["*:*"]
|
||||
}
|
||||
],
|
||||
"INVALID_POLICY_FIELD": "should fail at policy level"
|
||||
}
|
||||
`,
|
||||
wantErr: `unknown field "INVALID_POLICY_FIELD"`,
|
||||
},
|
||||
{
|
||||
name: "disallow-unsupported-fields-autoapprovers-level",
|
||||
input: `
|
||||
{
|
||||
"autoApprovers": {
|
||||
"routes": {
|
||||
"10.0.0.0/8": ["user@example.com"]
|
||||
},
|
||||
"exitNode": ["user@example.com"],
|
||||
"INVALID_AUTO_APPROVER_FIELD": "should fail"
|
||||
}
|
||||
}
|
||||
`,
|
||||
wantErr: `unknown field "INVALID_AUTO_APPROVER_FIELD"`,
|
||||
},
|
||||
// headscale-admin uses # in some field names to add metadata, so we will ignore
|
||||
// those to ensure it doesn't break.
|
||||
// https://github.com/GoodiesHQ/headscale-admin/blob/214a44a9c15c92d2b42383f131b51df10c84017c/src/lib/common/acl.svelte.ts#L38
|
||||
{
|
||||
name: "hash-fields-are-allowed-but-ignored",
|
||||
input: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"#ha-test": "SOME VALUE",
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"10.0.0.1"
|
||||
],
|
||||
"dst": [
|
||||
"autogroup:internet:*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
want: &Policy{
|
||||
ACLs: []ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: Aliases{
|
||||
pp("10.0.0.1/32"),
|
||||
},
|
||||
Destinations: []AliasWithPorts{
|
||||
{
|
||||
Alias: ptr.To(AutoGroup("autogroup:internet")),
|
||||
Ports: []tailcfg.PortRange{tailcfg.PortRangeAny},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ssh-asterix-invalid-acl-input",
|
||||
input: `
|
||||
{
|
||||
"ssh": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"user@example.com"
|
||||
],
|
||||
"dst": [
|
||||
"user@example.com"
|
||||
],
|
||||
"users": ["root"],
|
||||
"proto": "tcp"
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
wantErr: `unknown field "proto"`,
|
||||
},
|
||||
{
|
||||
name: "protocol-wildcard-not-allowed",
|
||||
input: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "*",
|
||||
"src": ["*"],
|
||||
"dst": ["*:*"]
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
wantErr: `proto name "*" not known; use protocol number 0-255 or protocol name (icmp, tcp, udp, etc.)`,
|
||||
},
|
||||
{
|
||||
name: "protocol-case-insensitive-uppercase",
|
||||
input: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "ICMP",
|
||||
"src": ["*"],
|
||||
"dst": ["*:*"]
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
want: &Policy{
|
||||
ACLs: []ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Protocol: "icmp",
|
||||
Sources: Aliases{
|
||||
Wildcard,
|
||||
},
|
||||
Destinations: []AliasWithPorts{
|
||||
{
|
||||
Alias: Wildcard,
|
||||
Ports: []tailcfg.PortRange{tailcfg.PortRangeAny},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "protocol-case-insensitive-mixed",
|
||||
input: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "IcmP",
|
||||
"src": ["*"],
|
||||
"dst": ["*:*"]
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
want: &Policy{
|
||||
ACLs: []ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Protocol: "icmp",
|
||||
Sources: Aliases{
|
||||
Wildcard,
|
||||
},
|
||||
Destinations: []AliasWithPorts{
|
||||
{
|
||||
Alias: Wildcard,
|
||||
Ports: []tailcfg.PortRange{tailcfg.PortRangeAny},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "protocol-leading-zero-not-permitted",
|
||||
input: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "0",
|
||||
"src": ["*"],
|
||||
"dst": ["*:*"]
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
wantErr: `leading 0 not permitted in protocol number "0"`,
|
||||
},
|
||||
{
|
||||
name: "protocol-empty-applies-to-tcp-udp-only",
|
||||
input: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["*"],
|
||||
"dst": ["*:80"]
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
want: &Policy{
|
||||
ACLs: []ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Protocol: "",
|
||||
Sources: Aliases{
|
||||
Wildcard,
|
||||
},
|
||||
Destinations: []AliasWithPorts{
|
||||
{
|
||||
Alias: Wildcard,
|
||||
Ports: []tailcfg.PortRange{{First: 80, Last: 80}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "protocol-icmp-with-specific-port-not-allowed",
|
||||
input: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "icmp",
|
||||
"src": ["*"],
|
||||
"dst": ["*:80"]
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
wantErr: `protocol "icmp" does not support specific ports; only "*" is allowed`,
|
||||
},
|
||||
{
|
||||
name: "protocol-icmp-with-wildcard-port-allowed",
|
||||
input: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "icmp",
|
||||
"src": ["*"],
|
||||
"dst": ["*:*"]
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
want: &Policy{
|
||||
ACLs: []ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Protocol: "icmp",
|
||||
Sources: Aliases{
|
||||
Wildcard,
|
||||
},
|
||||
Destinations: []AliasWithPorts{
|
||||
{
|
||||
Alias: Wildcard,
|
||||
Ports: []tailcfg.PortRange{tailcfg.PortRangeAny},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "protocol-gre-with-specific-port-not-allowed",
|
||||
input: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "gre",
|
||||
"src": ["*"],
|
||||
"dst": ["*:443"]
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
wantErr: `protocol "gre" does not support specific ports; only "*" is allowed`,
|
||||
},
|
||||
{
|
||||
name: "protocol-tcp-with-specific-port-allowed",
|
||||
input: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "tcp",
|
||||
"src": ["*"],
|
||||
"dst": ["*:80"]
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
want: &Policy{
|
||||
ACLs: []ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Protocol: "tcp",
|
||||
Sources: Aliases{
|
||||
Wildcard,
|
||||
},
|
||||
Destinations: []AliasWithPorts{
|
||||
{
|
||||
Alias: Wildcard,
|
||||
Ports: []tailcfg.PortRange{{First: 80, Last: 80}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "protocol-udp-with-specific-port-allowed",
|
||||
input: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "udp",
|
||||
"src": ["*"],
|
||||
"dst": ["*:53"]
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
want: &Policy{
|
||||
ACLs: []ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Protocol: "udp",
|
||||
Sources: Aliases{
|
||||
Wildcard,
|
||||
},
|
||||
Destinations: []AliasWithPorts{
|
||||
{
|
||||
Alias: Wildcard,
|
||||
Ports: []tailcfg.PortRange{{First: 53, Last: 53}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "protocol-sctp-with-specific-port-allowed",
|
||||
input: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "sctp",
|
||||
"src": ["*"],
|
||||
"dst": ["*:9000"]
|
||||
}
|
||||
]
|
||||
}
|
||||
`,
|
||||
want: &Policy{
|
||||
ACLs: []ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Protocol: "sctp",
|
||||
Sources: Aliases{
|
||||
Wildcard,
|
||||
},
|
||||
Destinations: []AliasWithPorts{
|
||||
{
|
||||
Alias: Wildcard,
|
||||
Ports: []tailcfg.PortRange{{First: 9000, Last: 9000}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
cmps := append(util.Comparers,
|
||||
@@ -1367,6 +1881,38 @@ func TestResolvePolicy(t *testing.T) {
|
||||
mp("100.100.101.7/32"), // Multiple forced tags
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "autogroup-self",
|
||||
toResolve: ptr.To(AutoGroupSelf),
|
||||
nodes: types.Nodes{
|
||||
{
|
||||
User: users["testuser"],
|
||||
IPv4: ap("100.100.101.1"),
|
||||
},
|
||||
{
|
||||
User: users["testuser2"],
|
||||
IPv4: ap("100.100.101.2"),
|
||||
},
|
||||
{
|
||||
User: users["testuser"],
|
||||
ForcedTags: []string{"tag:test"},
|
||||
IPv4: ap("100.100.101.3"),
|
||||
},
|
||||
{
|
||||
User: users["testuser2"],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RequestTags: []string{"tag:test"},
|
||||
},
|
||||
IPv4: ap("100.100.101.4"),
|
||||
},
|
||||
},
|
||||
pol: &Policy{
|
||||
TagOwners: TagOwners{
|
||||
Tag("tag:test"): Owners{ptr.To(Username("testuser@"))},
|
||||
},
|
||||
},
|
||||
wantErr: "autogroup:self requires per-node resolution",
|
||||
},
|
||||
{
|
||||
name: "autogroup-invalid",
|
||||
toResolve: ptr.To(AutoGroup("autogroup:invalid")),
|
||||
@@ -1579,6 +2125,134 @@ func TestResolveAutoApprovers(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSSHUsers_NormalUsers(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
users SSHUsers
|
||||
expected []SSHUser
|
||||
}{
|
||||
{
|
||||
name: "empty users",
|
||||
users: SSHUsers{},
|
||||
expected: []SSHUser{},
|
||||
},
|
||||
{
|
||||
name: "only root",
|
||||
users: SSHUsers{"root"},
|
||||
expected: []SSHUser{},
|
||||
},
|
||||
{
|
||||
name: "only autogroup:nonroot",
|
||||
users: SSHUsers{SSHUser(AutoGroupNonRoot)},
|
||||
expected: []SSHUser{},
|
||||
},
|
||||
{
|
||||
name: "only normal user",
|
||||
users: SSHUsers{"ssh-it-user"},
|
||||
expected: []SSHUser{"ssh-it-user"},
|
||||
},
|
||||
{
|
||||
name: "multiple normal users",
|
||||
users: SSHUsers{"ubuntu", "admin", "user1"},
|
||||
expected: []SSHUser{"ubuntu", "admin", "user1"},
|
||||
},
|
||||
{
|
||||
name: "mixed users with root",
|
||||
users: SSHUsers{"ubuntu", "root", "admin"},
|
||||
expected: []SSHUser{"ubuntu", "admin"},
|
||||
},
|
||||
{
|
||||
name: "mixed users with autogroup:nonroot",
|
||||
users: SSHUsers{"ubuntu", SSHUser(AutoGroupNonRoot), "admin"},
|
||||
expected: []SSHUser{"ubuntu", "admin"},
|
||||
},
|
||||
{
|
||||
name: "mixed users with both root and autogroup:nonroot",
|
||||
users: SSHUsers{"ubuntu", "root", SSHUser(AutoGroupNonRoot), "admin"},
|
||||
expected: []SSHUser{"ubuntu", "admin"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := tt.users.NormalUsers()
|
||||
assert.ElementsMatch(t, tt.expected, result, "NormalUsers() should return expected normal users")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSSHUsers_ContainsRoot(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
users SSHUsers
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "empty users",
|
||||
users: SSHUsers{},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "contains root",
|
||||
users: SSHUsers{"root"},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "does not contain root",
|
||||
users: SSHUsers{"ubuntu", "admin"},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "contains root among others",
|
||||
users: SSHUsers{"ubuntu", "root", "admin"},
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := tt.users.ContainsRoot()
|
||||
assert.Equal(t, tt.expected, result, "ContainsRoot() should return expected result")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSSHUsers_ContainsNonRoot(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
users SSHUsers
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "empty users",
|
||||
users: SSHUsers{},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "contains autogroup:nonroot",
|
||||
users: SSHUsers{SSHUser(AutoGroupNonRoot)},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "does not contain autogroup:nonroot",
|
||||
users: SSHUsers{"ubuntu", "admin", "root"},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "contains autogroup:nonroot among others",
|
||||
users: SSHUsers{"ubuntu", SSHUser(AutoGroupNonRoot), "admin"},
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := tt.users.ContainsNonRoot()
|
||||
assert.Equal(t, tt.expected, result, "ContainsNonRoot() should return expected result")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func mustIPSet(prefixes ...string) *netipx.IPSet {
|
||||
var builder netipx.IPSetBuilder
|
||||
for _, p := range prefixes {
|
||||
@@ -1927,3 +2601,291 @@ func TestNodeCanHaveTag(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestACL_UnmarshalJSON_WithCommentFields(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected ACL
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "basic ACL with comment fields",
|
||||
input: `{
|
||||
"#comment": "This is a comment",
|
||||
"action": "accept",
|
||||
"proto": "tcp",
|
||||
"src": ["user1@example.com"],
|
||||
"dst": ["tag:server:80"]
|
||||
}`,
|
||||
expected: ACL{
|
||||
Action: "accept",
|
||||
Protocol: "tcp",
|
||||
Sources: []Alias{mustParseAlias("user1@example.com")},
|
||||
Destinations: []AliasWithPorts{
|
||||
{
|
||||
Alias: mustParseAlias("tag:server"),
|
||||
Ports: []tailcfg.PortRange{{First: 80, Last: 80}},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "multiple comment fields",
|
||||
input: `{
|
||||
"#description": "Allow access to web servers",
|
||||
"#note": "Created by admin",
|
||||
"#created_date": "2024-01-15",
|
||||
"action": "accept",
|
||||
"proto": "tcp",
|
||||
"src": ["group:developers"],
|
||||
"dst": ["10.0.0.0/24:443"]
|
||||
}`,
|
||||
expected: ACL{
|
||||
Action: "accept",
|
||||
Protocol: "tcp",
|
||||
Sources: []Alias{mustParseAlias("group:developers")},
|
||||
Destinations: []AliasWithPorts{
|
||||
{
|
||||
Alias: mustParseAlias("10.0.0.0/24"),
|
||||
Ports: []tailcfg.PortRange{{First: 443, Last: 443}},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "comment field with complex object value",
|
||||
input: `{
|
||||
"#metadata": {
|
||||
"description": "Complex comment object",
|
||||
"tags": ["web", "production"],
|
||||
"created_by": "admin"
|
||||
},
|
||||
"action": "accept",
|
||||
"proto": "udp",
|
||||
"src": ["*"],
|
||||
"dst": ["autogroup:internet:53"]
|
||||
}`,
|
||||
expected: ACL{
|
||||
Action: ActionAccept,
|
||||
Protocol: "udp",
|
||||
Sources: []Alias{Wildcard},
|
||||
Destinations: []AliasWithPorts{
|
||||
{
|
||||
Alias: mustParseAlias("autogroup:internet"),
|
||||
Ports: []tailcfg.PortRange{{First: 53, Last: 53}},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid action should fail",
|
||||
input: `{
|
||||
"action": "deny",
|
||||
"proto": "tcp",
|
||||
"src": ["*"],
|
||||
"dst": ["*:*"]
|
||||
}`,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no comment fields",
|
||||
input: `{
|
||||
"action": "accept",
|
||||
"proto": "icmp",
|
||||
"src": ["tag:client"],
|
||||
"dst": ["tag:server:*"]
|
||||
}`,
|
||||
expected: ACL{
|
||||
Action: ActionAccept,
|
||||
Protocol: "icmp",
|
||||
Sources: []Alias{mustParseAlias("tag:client")},
|
||||
Destinations: []AliasWithPorts{
|
||||
{
|
||||
Alias: mustParseAlias("tag:server"),
|
||||
Ports: []tailcfg.PortRange{tailcfg.PortRangeAny},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "only comment fields",
|
||||
input: `{
|
||||
"#comment": "This rule is disabled",
|
||||
"#reason": "Temporary disable for maintenance"
|
||||
}`,
|
||||
expected: ACL{
|
||||
Action: Action(""),
|
||||
Protocol: Protocol(""),
|
||||
Sources: nil,
|
||||
Destinations: nil,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid JSON",
|
||||
input: `{
|
||||
"#comment": "This is a comment",
|
||||
"action": "accept",
|
||||
"proto": "tcp"
|
||||
"src": ["invalid json"]
|
||||
}`,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid field after comment filtering",
|
||||
input: `{
|
||||
"#comment": "This is a comment",
|
||||
"action": "accept",
|
||||
"proto": "tcp",
|
||||
"src": ["user1@example.com"],
|
||||
"dst": ["invalid-destination"]
|
||||
}`,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var acl ACL
|
||||
err := json.Unmarshal([]byte(tt.input), &acl)
|
||||
|
||||
if tt.wantErr {
|
||||
assert.Error(t, err)
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.expected.Action, acl.Action)
|
||||
assert.Equal(t, tt.expected.Protocol, acl.Protocol)
|
||||
assert.Equal(t, len(tt.expected.Sources), len(acl.Sources))
|
||||
assert.Equal(t, len(tt.expected.Destinations), len(acl.Destinations))
|
||||
|
||||
// Compare sources
|
||||
for i, expectedSrc := range tt.expected.Sources {
|
||||
if i < len(acl.Sources) {
|
||||
assert.Equal(t, expectedSrc, acl.Sources[i])
|
||||
}
|
||||
}
|
||||
|
||||
// Compare destinations
|
||||
for i, expectedDst := range tt.expected.Destinations {
|
||||
if i < len(acl.Destinations) {
|
||||
assert.Equal(t, expectedDst.Alias, acl.Destinations[i].Alias)
|
||||
assert.Equal(t, expectedDst.Ports, acl.Destinations[i].Ports)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestACL_UnmarshalJSON_Roundtrip(t *testing.T) {
|
||||
// Test that marshaling and unmarshaling preserves data (excluding comments)
|
||||
original := ACL{
|
||||
Action: "accept",
|
||||
Protocol: "tcp",
|
||||
Sources: []Alias{mustParseAlias("group:admins")},
|
||||
Destinations: []AliasWithPorts{
|
||||
{
|
||||
Alias: mustParseAlias("tag:server"),
|
||||
Ports: []tailcfg.PortRange{{First: 22, Last: 22}, {First: 80, Last: 80}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Marshal to JSON
|
||||
jsonBytes, err := json.Marshal(original)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Unmarshal back
|
||||
var unmarshaled ACL
|
||||
err = json.Unmarshal(jsonBytes, &unmarshaled)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should be equal
|
||||
assert.Equal(t, original.Action, unmarshaled.Action)
|
||||
assert.Equal(t, original.Protocol, unmarshaled.Protocol)
|
||||
assert.Equal(t, len(original.Sources), len(unmarshaled.Sources))
|
||||
assert.Equal(t, len(original.Destinations), len(unmarshaled.Destinations))
|
||||
}
|
||||
|
||||
func TestACL_UnmarshalJSON_PolicyIntegration(t *testing.T) {
|
||||
// Test that ACL unmarshaling works within a Policy context
|
||||
policyJSON := `{
|
||||
"groups": {
|
||||
"group:developers": ["user1@example.com", "user2@example.com"]
|
||||
},
|
||||
"tagOwners": {
|
||||
"tag:server": ["group:developers"]
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"#description": "Allow developers to access servers",
|
||||
"#priority": "high",
|
||||
"action": "accept",
|
||||
"proto": "tcp",
|
||||
"src": ["group:developers"],
|
||||
"dst": ["tag:server:22,80,443"]
|
||||
},
|
||||
{
|
||||
"#note": "Allow all other traffic",
|
||||
"action": "accept",
|
||||
"proto": "tcp",
|
||||
"src": ["*"],
|
||||
"dst": ["*:*"]
|
||||
}
|
||||
]
|
||||
}`
|
||||
|
||||
policy, err := unmarshalPolicy([]byte(policyJSON))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, policy)
|
||||
|
||||
// Check that ACLs were parsed correctly
|
||||
require.Len(t, policy.ACLs, 2)
|
||||
|
||||
// First ACL
|
||||
acl1 := policy.ACLs[0]
|
||||
assert.Equal(t, ActionAccept, acl1.Action)
|
||||
assert.Equal(t, Protocol("tcp"), acl1.Protocol)
|
||||
require.Len(t, acl1.Sources, 1)
|
||||
require.Len(t, acl1.Destinations, 1)
|
||||
|
||||
// Second ACL
|
||||
acl2 := policy.ACLs[1]
|
||||
assert.Equal(t, ActionAccept, acl2.Action)
|
||||
assert.Equal(t, Protocol("tcp"), acl2.Protocol)
|
||||
require.Len(t, acl2.Sources, 1)
|
||||
require.Len(t, acl2.Destinations, 1)
|
||||
}
|
||||
|
||||
func TestACL_UnmarshalJSON_InvalidAction(t *testing.T) {
|
||||
// Test that invalid actions are rejected
|
||||
policyJSON := `{
|
||||
"acls": [
|
||||
{
|
||||
"action": "deny",
|
||||
"proto": "tcp",
|
||||
"src": ["*"],
|
||||
"dst": ["*:*"]
|
||||
}
|
||||
]
|
||||
}`
|
||||
|
||||
_, err := unmarshalPolicy([]byte(policyJSON))
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), `invalid action "deny"`)
|
||||
}
|
||||
|
||||
// Helper function to parse aliases for testing
|
||||
func mustParseAlias(s string) Alias {
|
||||
alias, err := parseAlias(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return alias
|
||||
}
|
||||
|
||||
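// The comment-field tests above depend on ACL.UnmarshalJSON dropping any
// object keys that begin with "#" before decoding the remaining fields.
// A minimal sketch of that filtering step, written as a standalone helper
// (assumed shape only; the actual unmarshalling in this package may differ):
//
//	func filterCommentFields(data []byte) ([]byte, error) {
//		var raw map[string]json.RawMessage
//		if err := json.Unmarshal(data, &raw); err != nil {
//			return nil, err
//		}
//		for key := range raw {
//			if strings.HasPrefix(key, "#") {
//				delete(raw, key)
//			}
//		}
//		return json.Marshal(raw)
//	}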
@@ -2,7 +2,6 @@ package v2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -97,72 +96,3 @@ func parsePort(portStr string) (uint16, error) {
|
||||
|
||||
return uint16(port), nil
|
||||
}
|
||||
|
||||
// For some reason golang.org/x/net/internal/iana is an internal package.
|
||||
const (
|
||||
protocolICMP = 1 // Internet Control Message
|
||||
protocolIGMP = 2 // Internet Group Management
|
||||
protocolIPv4 = 4 // IPv4 encapsulation
|
||||
protocolTCP = 6 // Transmission Control
|
||||
protocolEGP = 8 // Exterior Gateway Protocol
|
||||
protocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP)
|
||||
protocolUDP = 17 // User Datagram
|
||||
protocolGRE = 47 // Generic Routing Encapsulation
|
||||
protocolESP = 50 // Encap Security Payload
|
||||
protocolAH = 51 // Authentication Header
|
||||
protocolIPv6ICMP = 58 // ICMP for IPv6
|
||||
protocolSCTP = 132 // Stream Control Transmission Protocol
|
||||
ProtocolFC = 133 // Fibre Channel
|
||||
)
|
||||
|
||||
// parseProtocol reads the proto field of the ACL and generates a list of
|
||||
// protocols that will be allowed, following the IANA IP protocol number
|
||||
// https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
|
||||
//
|
||||
// If the ACL proto field is empty, it allows ICMPv4, ICMPv6, TCP, and UDP,
|
||||
// as per Tailscale behaviour (see tailcfg.FilterRule).
|
||||
//
|
||||
// Also returns a boolean indicating if the protocol
|
||||
// requires all the destinations to use a wildcard port (only TCP,
// UDP and SCTP support specifying ports).
|
||||
func parseProtocol(protocol string) ([]int, bool, error) {
|
||||
switch protocol {
|
||||
case "":
|
||||
return nil, false, nil
|
||||
case "igmp":
|
||||
return []int{protocolIGMP}, true, nil
|
||||
case "ipv4", "ip-in-ip":
|
||||
return []int{protocolIPv4}, true, nil
|
||||
case "tcp":
|
||||
return []int{protocolTCP}, false, nil
|
||||
case "egp":
|
||||
return []int{protocolEGP}, true, nil
|
||||
case "igp":
|
||||
return []int{protocolIGP}, true, nil
|
||||
case "udp":
|
||||
return []int{protocolUDP}, false, nil
|
||||
case "gre":
|
||||
return []int{protocolGRE}, true, nil
|
||||
case "esp":
|
||||
return []int{protocolESP}, true, nil
|
||||
case "ah":
|
||||
return []int{protocolAH}, true, nil
|
||||
case "sctp":
|
||||
return []int{protocolSCTP}, false, nil
|
||||
case "icmp":
|
||||
return []int{protocolICMP, protocolIPv6ICMP}, true, nil
|
||||
|
||||
default:
|
||||
protocolNumber, err := strconv.Atoi(protocol)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("parsing protocol number: %w", err)
|
||||
}
|
||||
|
||||
// TODO(kradalby): What is this?
|
||||
needsWildcard := protocolNumber != protocolTCP &&
|
||||
protocolNumber != protocolUDP &&
|
||||
protocolNumber != protocolSCTP
|
||||
|
||||
return []int{protocolNumber}, needsWildcard, nil
|
||||
}
|
||||
}
|
||||
|
||||
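// Illustrative results of parseProtocol as defined above (not part of the
// change itself):
//
//	parseProtocol("tcp")  // -> []int{6}, needsWildcard=false, nil
//	parseProtocol("icmp") // -> []int{1, 58}, needsWildcard=true, nil
//	parseProtocol("")     // -> nil, needsWildcard=false, nil (Tailscale defaults apply)
//	parseProtocol("99")   // -> []int{99}, needsWildcard=true, nil (numeric protocols accepted)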
@@ -10,7 +10,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/types/change"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/sasha-s/go-deadlock"
|
||||
@@ -101,6 +100,8 @@ func (m *mapSession) beforeServeLongPoll() {
|
||||
}
|
||||
}
|
||||
|
||||
// afterServeLongPoll is called when a long-polling session ends and the node
|
||||
// is disconnected.
|
||||
func (m *mapSession) afterServeLongPoll() {
|
||||
if m.node.IsEphemeral() {
|
||||
m.h.ephemeralGC.Schedule(m.node.ID, m.h.cfg.EphemeralNodeInactivityTimeout)
|
||||
@@ -112,6 +113,15 @@ func (m *mapSession) serve() {
|
||||
// This is the mechanism where the node gives us information about its
|
||||
// current configuration.
|
||||
//
|
||||
// Process the MapRequest to update node state (endpoints, hostinfo, etc.)
|
||||
c, err := m.h.state.UpdateNodeFromMapRequest(m.node.ID, m.req)
|
||||
if err != nil {
|
||||
httpError(m.w, err)
|
||||
return
|
||||
}
|
||||
|
||||
m.h.Change(c)
|
||||
|
||||
// If OmitPeers is true and Stream is false
|
||||
// then the server will let clients update their endpoints without
|
||||
// breaking existing long-polling (Stream == true) connections.
|
||||
@@ -122,14 +132,6 @@ func (m *mapSession) serve() {
|
||||
// the response and just wants a 200.
|
||||
// !req.stream && req.OmitPeers
|
||||
if m.isEndpointUpdate() {
|
||||
c, err := m.h.state.UpdateNodeFromMapRequest(m.node, m.req)
|
||||
if err != nil {
|
||||
httpError(m.w, err)
|
||||
return
|
||||
}
|
||||
|
||||
m.h.Change(c)
|
||||
|
||||
m.w.WriteHeader(http.StatusOK)
|
||||
mapResponseEndpointUpdates.WithLabelValues("ok").Inc()
|
||||
}
|
||||
@@ -142,6 +144,8 @@ func (m *mapSession) serve() {
|
||||
func (m *mapSession) serveLongPoll() {
|
||||
m.beforeServeLongPoll()
|
||||
|
||||
log.Trace().Caller().Uint64("node.id", m.node.ID.Uint64()).Str("node.name", m.node.Hostname).Msg("Long poll session started because client connected")
|
||||
|
||||
// Clean up the session when the client disconnects
|
||||
defer func() {
|
||||
m.cancelChMu.Lock()
|
||||
@@ -149,51 +153,87 @@ func (m *mapSession) serveLongPoll() {
|
||||
close(m.cancelCh)
|
||||
m.cancelChMu.Unlock()
|
||||
|
||||
// TODO(kradalby): This can likely be made more efficient, but most
// nodes have access to the same routes, so it might not be a big deal.
|
||||
disconnectChange, err := m.h.state.Disconnect(m.node)
|
||||
if err != nil {
|
||||
m.errf(err, "Failed to disconnect node %s", m.node.Hostname)
|
||||
_ = m.h.mapBatcher.RemoveNode(m.node.ID, m.ch)
|
||||
|
||||
// When a node disconnects, it might rapidly reconnect (e.g. mobile clients, network weather).
|
||||
// Instead of immediately marking the node as offline, we wait a few seconds to see if it reconnects.
|
||||
// If it does reconnect, the existing mapSession will be replaced and the node remains online.
|
||||
// If it doesn't reconnect within the timeout, we mark it as offline.
|
||||
//
|
||||
// This avoids flapping nodes in the UI and unnecessary churn in the network.
|
||||
// This is not my favourite solution, but it kind of works in our eventually consistent world.
|
||||
ticker := time.NewTicker(time.Second)
|
||||
defer ticker.Stop()
|
||||
disconnected := true
|
||||
// Wait up to 10 seconds for the node to reconnect.
|
||||
// 10 seconds was arbitrarily chosen as a reasonable time to reconnect.
|
||||
for range 10 {
|
||||
if m.h.mapBatcher.IsConnected(m.node.ID) {
|
||||
disconnected = false
|
||||
break
|
||||
}
|
||||
<-ticker.C
|
||||
}
|
||||
m.h.Change(disconnectChange)
|
||||
|
||||
m.h.mapBatcher.RemoveNode(m.node.ID, m.ch, m.node.IsSubnetRouter())
|
||||
if disconnected {
|
||||
disconnectChanges, err := m.h.state.Disconnect(m.node.ID)
|
||||
if err != nil {
|
||||
m.errf(err, "Failed to disconnect node %s", m.node.Hostname)
|
||||
}
|
||||
|
||||
m.afterServeLongPoll()
|
||||
m.infof("node has disconnected, mapSession: %p, chan: %p", m, m.ch)
|
||||
m.h.Change(disconnectChanges...)
|
||||
m.afterServeLongPoll()
|
||||
m.infof("node has disconnected, mapSession: %p, chan: %p", m, m.ch)
|
||||
}
|
||||
}()
|
||||
|
||||
// Set up the client stream
|
||||
m.h.pollNetMapStreamWG.Add(1)
|
||||
defer m.h.pollNetMapStreamWG.Done()
|
||||
m.h.clientStreamsOpen.Add(1)
|
||||
defer m.h.clientStreamsOpen.Done()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.WithValue(m.ctx, nodeNameContextKey, m.node.Hostname))
|
||||
defer cancel()
|
||||
|
||||
m.keepAliveTicker = time.NewTicker(m.keepAlive)
|
||||
|
||||
// Add node to batcher BEFORE sending Connect change to prevent race condition
|
||||
// where the change is sent before the node is in the batcher's node map
|
||||
if err := m.h.mapBatcher.AddNode(m.node.ID, m.ch, m.node.IsSubnetRouter(), m.capVer); err != nil {
|
||||
m.errf(err, "failed to add node to batcher")
|
||||
// Send empty response to client to fail fast for invalid/non-existent nodes
|
||||
select {
|
||||
case m.ch <- &tailcfg.MapResponse{}:
|
||||
default:
|
||||
// Channel might be closed
|
||||
}
|
||||
// Process the initial MapRequest to update node state (endpoints, hostinfo, etc.)
|
||||
// This must be done BEFORE calling Connect() to ensure routes are properly synchronized.
|
||||
// When nodes reconnect, they send their hostinfo with announced routes in the MapRequest.
|
||||
// We need this data in NodeStore before Connect() sets up the primary routes, because
|
||||
// SubnetRoutes() calculates the intersection of announced and approved routes. If we
|
||||
// call Connect() first, SubnetRoutes() returns empty (no announced routes yet), causing
|
||||
// the node to be incorrectly removed from AvailableRoutes.
|
||||
mapReqChange, err := m.h.state.UpdateNodeFromMapRequest(m.node.ID, m.req)
|
||||
if err != nil {
|
||||
m.errf(err, "failed to update node from initial MapRequest")
|
||||
return
|
||||
}
|
||||
|
||||
// Now send the Connect change - the batcher handles NodeCameOnline internally
|
||||
// but we still need to update routes and other state-level changes
|
||||
connectChange := m.h.state.Connect(m.node)
|
||||
if !connectChange.Empty() && connectChange.Change != change.NodeCameOnline {
|
||||
m.h.Change(connectChange)
|
||||
}
|
||||
// Connect the node after its state has been updated.
|
||||
// We send two separate change notifications because these are distinct operations:
|
||||
// 1. UpdateNodeFromMapRequest: processes the client's reported state (routes, endpoints, hostinfo)
|
||||
// 2. Connect: marks the node online and recalculates primary routes based on the updated state
|
||||
// While this results in two notifications, it ensures route data is synchronized before
|
||||
// primary route selection occurs, which is critical for proper HA subnet router failover.
|
||||
connectChanges := m.h.state.Connect(m.node.ID)
|
||||
|
||||
m.infof("node has connected, mapSession: %p, chan: %p", m, m.ch)
|
||||
|
||||
// TODO(kradalby): Redo the comments here
|
||||
// Add node to batcher so it can receive updates.
// Adding it before connecting it to the state ensures that
// it does not miss any updates that might be sent in the brief
// window between the node connecting and the batcher being ready.
|
||||
if err := m.h.mapBatcher.AddNode(m.node.ID, m.ch, m.capVer); err != nil {
|
||||
m.errf(err, "failed to add node to batcher")
|
||||
log.Error().Uint64("node.id", m.node.ID.Uint64()).Str("node.name", m.node.Hostname).Err(err).Msg("AddNode failed in poll session")
|
||||
return
|
||||
}
|
||||
log.Debug().Caller().Uint64("node.id", m.node.ID.Uint64()).Str("node.name", m.node.Hostname).Msg("AddNode succeeded in poll session because node added to batcher")
|
||||
|
||||
m.h.Change(mapReqChange)
|
||||
m.h.Change(connectChanges...)
|
||||
|
||||
// Loop through updates and continuously send them to the
|
||||
// client.
|
||||
for {
|
||||
@@ -205,7 +245,7 @@ func (m *mapSession) serveLongPoll() {
|
||||
return
|
||||
|
||||
case <-ctx.Done():
|
||||
m.tracef("poll context done")
|
||||
m.tracef("poll context done chan:%p", m.ch)
|
||||
mapResponseEnded.WithLabelValues("done").Inc()
|
||||
return
|
||||
|
||||
@@ -235,6 +275,7 @@ func (m *mapSession) serveLongPoll() {
|
||||
mapResponseLastSentSeconds.WithLabelValues("keepalive", m.node.ID.String()).Set(float64(time.Now().Unix()))
|
||||
}
|
||||
mapResponseSent.WithLabelValues("ok", "keepalive").Inc()
|
||||
m.resetKeepAlive()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -272,7 +313,15 @@ func (m *mapSession) writeMap(msg *tailcfg.MapResponse) error {
|
||||
}
|
||||
}
|
||||
|
||||
log.Trace().Str("node", m.node.Hostname).TimeDiff("timeSpent", time.Now(), startWrite).Str("mkey", m.node.MachineKey.String()).Msg("finished writing mapresp to node")
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("node.name", m.node.Hostname).
|
||||
Uint64("node.id", m.node.ID.Uint64()).
|
||||
Str("chan", fmt.Sprintf("%p", m.ch)).
|
||||
TimeDiff("timeSpent", time.Now(), startWrite).
|
||||
Str("machine.key", m.node.MachineKey.String()).
|
||||
Bool("keepalive", msg.KeepAlive).
|
||||
Msgf("finished writing mapresp to node chan(%p)", m.ch)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -282,14 +331,14 @@ var keepAlive = tailcfg.MapResponse{
|
||||
}
|
||||
|
||||
func logTracePeerChange(hostname string, hostinfoChange bool, peerChange *tailcfg.PeerChange) {
|
||||
trace := log.Trace().Uint64("node.id", uint64(peerChange.NodeID)).Str("hostname", hostname)
|
||||
trace := log.Trace().Caller().Uint64("node.id", uint64(peerChange.NodeID)).Str("hostname", hostname)
|
||||
|
||||
if peerChange.Key != nil {
|
||||
trace = trace.Str("node_key", peerChange.Key.ShortString())
|
||||
trace = trace.Str("node.key", peerChange.Key.ShortString())
|
||||
}
|
||||
|
||||
if peerChange.DiscoKey != nil {
|
||||
trace = trace.Str("disco_key", peerChange.DiscoKey.ShortString())
|
||||
trace = trace.Str("disco.key", peerChange.DiscoKey.ShortString())
|
||||
}
|
||||
|
||||
if peerChange.Online != nil {
|
||||
@@ -326,7 +375,7 @@ func logPollFunc(
|
||||
Bool("omitPeers", mapRequest.OmitPeers).
|
||||
Bool("stream", mapRequest.Stream).
|
||||
Uint64("node.id", node.ID.Uint64()).
|
||||
Str("node", node.Hostname).
|
||||
Str("node.name", node.Hostname).
|
||||
Msgf(msg, a...)
|
||||
},
|
||||
func(msg string, a ...any) {
|
||||
@@ -335,7 +384,7 @@ func logPollFunc(
|
||||
Bool("omitPeers", mapRequest.OmitPeers).
|
||||
Bool("stream", mapRequest.Stream).
|
||||
Uint64("node.id", node.ID.Uint64()).
|
||||
Str("node", node.Hostname).
|
||||
Str("node.name", node.Hostname).
|
||||
Msgf(msg, a...)
|
||||
},
|
||||
func(msg string, a ...any) {
|
||||
@@ -344,7 +393,7 @@ func logPollFunc(
|
||||
Bool("omitPeers", mapRequest.OmitPeers).
|
||||
Bool("stream", mapRequest.Stream).
|
||||
Uint64("node.id", node.ID.Uint64()).
|
||||
Str("node", node.Hostname).
|
||||
Str("node.name", node.Hostname).
|
||||
Msgf(msg, a...)
|
||||
},
|
||||
func(err error, msg string, a ...any) {
|
||||
@@ -353,7 +402,7 @@ func logPollFunc(
|
||||
Bool("omitPeers", mapRequest.OmitPeers).
|
||||
Bool("stream", mapRequest.Stream).
|
||||
Uint64("node.id", node.ID.Uint64()).
|
||||
Str("node", node.Hostname).
|
||||
Str("node.name", node.Hostname).
|
||||
Err(err).
|
||||
Msgf(msg, a...)
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
xmaps "golang.org/x/exp/maps"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/util/set"
|
||||
@@ -45,6 +46,8 @@ func New() *PrimaryRoutes {
|
||||
// 4. If the primary routes have changed, update the internal state and return true.
|
||||
// 5. Otherwise, return false.
|
||||
func (pr *PrimaryRoutes) updatePrimaryLocked() bool {
|
||||
log.Debug().Caller().Msg("updatePrimaryLocked starting")
|
||||
|
||||
// reset the primaries map, as we are going to recalculate it.
|
||||
allPrimaries := make(map[netip.Prefix][]types.NodeID)
|
||||
pr.isPrimary = make(map[types.NodeID]bool)
|
||||
@@ -74,21 +77,55 @@ func (pr *PrimaryRoutes) updatePrimaryLocked() bool {
|
||||
// If the current primary is still available, continue.
|
||||
// If the current primary is not available, select a new one.
|
||||
for prefix, nodes := range allPrimaries {
|
||||
log.Debug().
|
||||
Caller().
|
||||
Str("prefix", prefix.String()).
|
||||
Uints64("availableNodes", func() []uint64 {
|
||||
ids := make([]uint64, len(nodes))
|
||||
for i, id := range nodes {
|
||||
ids[i] = id.Uint64()
|
||||
}
|
||||
|
||||
return ids
|
||||
}()).
|
||||
Msg("Processing prefix for primary route selection")
|
||||
|
||||
if node, ok := pr.primaries[prefix]; ok {
|
||||
// If the current primary is still available, continue.
|
||||
if slices.Contains(nodes, node) {
|
||||
log.Debug().
|
||||
Caller().
|
||||
Str("prefix", prefix.String()).
|
||||
Uint64("currentPrimary", node.Uint64()).
|
||||
Msg("Current primary still available, keeping it")
|
||||
|
||||
continue
|
||||
} else {
|
||||
log.Debug().
|
||||
Caller().
|
||||
Str("prefix", prefix.String()).
|
||||
Uint64("oldPrimary", node.Uint64()).
|
||||
Msg("Current primary no longer available")
|
||||
}
|
||||
}
|
||||
if len(nodes) >= 1 {
|
||||
pr.primaries[prefix] = nodes[0]
|
||||
changed = true
|
||||
log.Debug().
|
||||
Caller().
|
||||
Str("prefix", prefix.String()).
|
||||
Uint64("newPrimary", nodes[0].Uint64()).
|
||||
Msg("Selected new primary for prefix")
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up any remaining primaries that are no longer valid.
|
||||
for prefix := range pr.primaries {
|
||||
if _, ok := allPrimaries[prefix]; !ok {
|
||||
log.Debug().
|
||||
Caller().
|
||||
Str("prefix", prefix.String()).
|
||||
Msg("Cleaning up primary route that no longer has available nodes")
|
||||
delete(pr.primaries, prefix)
|
||||
changed = true
|
||||
}
|
||||
@@ -99,6 +136,12 @@ func (pr *PrimaryRoutes) updatePrimaryLocked() bool {
|
||||
pr.isPrimary[nodeID] = true
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Caller().
|
||||
Bool("changed", changed).
|
||||
Str("finalState", pr.stringLocked()).
|
||||
Msg("updatePrimaryLocked completed")
|
||||
|
||||
return changed
|
||||
}
|
||||
|
||||
@@ -110,14 +153,33 @@ func (pr *PrimaryRoutes) SetRoutes(node types.NodeID, prefixes ...netip.Prefix)
|
||||
pr.mu.Lock()
|
||||
defer pr.mu.Unlock()
|
||||
|
||||
log.Debug().
|
||||
Caller().
|
||||
Uint64("node.id", node.Uint64()).
|
||||
Strs("prefixes", util.PrefixesToString(prefixes)).
|
||||
Msg("PrimaryRoutes.SetRoutes called")
|
||||
|
||||
// If no routes are being set, remove the node from the routes map.
|
||||
if len(prefixes) == 0 {
|
||||
wasPresent := false
|
||||
if _, ok := pr.routes[node]; ok {
|
||||
delete(pr.routes, node)
|
||||
return pr.updatePrimaryLocked()
|
||||
wasPresent = true
|
||||
log.Debug().
|
||||
Caller().
|
||||
Uint64("node.id", node.Uint64()).
|
||||
Msg("Removed node from primary routes (no prefixes)")
|
||||
}
|
||||
changed := pr.updatePrimaryLocked()
|
||||
log.Debug().
|
||||
Caller().
|
||||
Uint64("node.id", node.Uint64()).
|
||||
Bool("wasPresent", wasPresent).
|
||||
Bool("changed", changed).
|
||||
Str("newState", pr.stringLocked()).
|
||||
Msg("SetRoutes completed (remove)")
|
||||
|
||||
return false
|
||||
return changed
|
||||
}
|
||||
|
||||
rs := make(set.Set[netip.Prefix], len(prefixes))
|
||||
@@ -129,11 +191,28 @@ func (pr *PrimaryRoutes) SetRoutes(node types.NodeID, prefixes ...netip.Prefix)
|
||||
|
||||
if rs.Len() != 0 {
|
||||
pr.routes[node] = rs
|
||||
log.Debug().
|
||||
Caller().
|
||||
Uint64("node.id", node.Uint64()).
|
||||
Strs("routes", util.PrefixesToString(rs.Slice())).
|
||||
Msg("Updated node routes in primary route manager")
|
||||
} else {
|
||||
delete(pr.routes, node)
|
||||
log.Debug().
|
||||
Caller().
|
||||
Uint64("node.id", node.Uint64()).
|
||||
Msg("Removed node from primary routes (only exit routes)")
|
||||
}
|
||||
|
||||
return pr.updatePrimaryLocked()
|
||||
changed := pr.updatePrimaryLocked()
|
||||
log.Debug().
|
||||
Caller().
|
||||
Uint64("node.id", node.Uint64()).
|
||||
Bool("changed", changed).
|
||||
Str("newState", pr.stringLocked()).
|
||||
Msg("SetRoutes completed (update)")
|
||||
|
||||
return changed
|
||||
}
|
||||
|
||||
func (pr *PrimaryRoutes) PrimaryRoutes(id types.NodeID) []netip.Prefix {
|
||||
@@ -188,3 +267,41 @@ func (pr *PrimaryRoutes) stringLocked() string {
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// DebugRoutes represents the primary routes state in a structured format for JSON serialization.
|
||||
type DebugRoutes struct {
|
||||
// AvailableRoutes maps node IDs to their advertised routes
|
||||
// In the context of primary routes, this represents the routes that are available
|
||||
// for each node. A route will only be available if it is advertised by the node
|
||||
// AND approved.
|
||||
// Only routes by nodes currently connected to the headscale server are included.
|
||||
AvailableRoutes map[types.NodeID][]netip.Prefix `json:"available_routes"`
|
||||
|
||||
// PrimaryRoutes maps route prefixes to the primary node serving them
|
||||
PrimaryRoutes map[string]types.NodeID `json:"primary_routes"`
|
||||
}
|
||||
|
||||
// DebugJSON returns a structured representation of the primary routes state suitable for JSON serialization.
|
||||
func (pr *PrimaryRoutes) DebugJSON() DebugRoutes {
|
||||
pr.mu.Lock()
|
||||
defer pr.mu.Unlock()
|
||||
|
||||
debug := DebugRoutes{
|
||||
AvailableRoutes: make(map[types.NodeID][]netip.Prefix),
|
||||
PrimaryRoutes: make(map[string]types.NodeID),
|
||||
}
|
||||
|
||||
// Populate available routes
|
||||
for nodeID, routes := range pr.routes {
|
||||
prefixes := routes.Slice()
|
||||
tsaddr.SortPrefixes(prefixes)
|
||||
debug.AvailableRoutes[nodeID] = prefixes
|
||||
}
|
||||
|
||||
// Populate primary routes
|
||||
for prefix, nodeID := range pr.primaries {
|
||||
debug.PrimaryRoutes[prefix.String()] = nodeID
|
||||
}
|
||||
|
||||
return debug
|
||||
}
|
||||
|
||||
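// Illustrative JSON produced when DebugJSON's result is serialized, assuming
// types.NodeID marshals as a number and netip.Prefix as a string (exact
// output depends on those types):
//
//	{
//	  "available_routes": {
//	    "1": ["10.0.0.0/24", "192.168.1.0/24"],
//	    "2": ["10.0.0.0/24"]
//	  },
//	  "primary_routes": {
//	    "10.0.0.0/24": 1,
//	    "192.168.1.0/24": 1
//	  }
//	}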
375
hscontrol/state/debug.go
Normal file
@@ -0,0 +1,375 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/routes"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
// DebugOverviewInfo represents the state overview information in a structured format.
|
||||
type DebugOverviewInfo struct {
|
||||
Nodes struct {
|
||||
Total int `json:"total"`
|
||||
Online int `json:"online"`
|
||||
Expired int `json:"expired"`
|
||||
Ephemeral int `json:"ephemeral"`
|
||||
} `json:"nodes"`
|
||||
Users map[string]int `json:"users"` // username -> node count
|
||||
TotalUsers int `json:"total_users"`
|
||||
Policy struct {
|
||||
Mode string `json:"mode"`
|
||||
Path string `json:"path,omitempty"`
|
||||
} `json:"policy"`
|
||||
DERP struct {
|
||||
Configured bool `json:"configured"`
|
||||
Regions int `json:"regions"`
|
||||
} `json:"derp"`
|
||||
PrimaryRoutes int `json:"primary_routes"`
|
||||
}
|
||||
|
||||
// DebugDERPInfo represents DERP map information in a structured format.
|
||||
type DebugDERPInfo struct {
|
||||
Configured bool `json:"configured"`
|
||||
TotalRegions int `json:"total_regions"`
|
||||
Regions map[int]*DebugDERPRegion `json:"regions,omitempty"`
|
||||
}
|
||||
|
||||
// DebugDERPRegion represents a single DERP region.
|
||||
type DebugDERPRegion struct {
|
||||
RegionID int `json:"region_id"`
|
||||
RegionName string `json:"region_name"`
|
||||
Nodes []*DebugDERPNode `json:"nodes"`
|
||||
}
|
||||
|
||||
// DebugDERPNode represents a single DERP node.
|
||||
type DebugDERPNode struct {
|
||||
Name string `json:"name"`
|
||||
HostName string `json:"hostname"`
|
||||
DERPPort int `json:"derp_port"`
|
||||
STUNPort int `json:"stun_port,omitempty"`
|
||||
}
|
||||
|
||||
// DebugStringInfo wraps a debug string for JSON serialization.
|
||||
type DebugStringInfo struct {
|
||||
Content string `json:"content"`
|
||||
}
|
||||
|
||||
// DebugOverview returns a comprehensive overview of the current state for debugging.
|
||||
func (s *State) DebugOverview() string {
|
||||
allNodes := s.nodeStore.ListNodes()
|
||||
users, _ := s.ListAllUsers()
|
||||
|
||||
var sb strings.Builder
|
||||
|
||||
sb.WriteString("=== Headscale State Overview ===\n\n")
|
||||
|
||||
// Node statistics
|
||||
sb.WriteString(fmt.Sprintf("Nodes: %d total\n", allNodes.Len()))
|
||||
|
||||
userNodeCounts := make(map[string]int)
|
||||
onlineCount := 0
|
||||
expiredCount := 0
|
||||
ephemeralCount := 0
|
||||
|
||||
now := time.Now()
|
||||
for _, node := range allNodes.All() {
|
||||
if node.Valid() {
|
||||
userName := node.User().Name
|
||||
userNodeCounts[userName]++
|
||||
|
||||
if node.IsOnline().Valid() && node.IsOnline().Get() {
|
||||
onlineCount++
|
||||
}
|
||||
|
||||
if node.Expiry().Valid() && node.Expiry().Get().Before(now) {
|
||||
expiredCount++
|
||||
}
|
||||
|
||||
if node.AuthKey().Valid() && node.AuthKey().Ephemeral() {
|
||||
ephemeralCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sb.WriteString(fmt.Sprintf(" - Online: %d\n", onlineCount))
|
||||
sb.WriteString(fmt.Sprintf(" - Expired: %d\n", expiredCount))
|
||||
sb.WriteString(fmt.Sprintf(" - Ephemeral: %d\n", ephemeralCount))
|
||||
sb.WriteString("\n")
|
||||
|
||||
// User statistics
|
||||
sb.WriteString(fmt.Sprintf("Users: %d total\n", len(users)))
|
||||
for userName, nodeCount := range userNodeCounts {
|
||||
sb.WriteString(fmt.Sprintf(" - %s: %d nodes\n", userName, nodeCount))
|
||||
}
|
||||
sb.WriteString("\n")
|
||||
|
||||
// Policy information
|
||||
sb.WriteString("Policy:\n")
|
||||
sb.WriteString(fmt.Sprintf(" - Mode: %s\n", s.cfg.Policy.Mode))
|
||||
if s.cfg.Policy.Mode == types.PolicyModeFile {
|
||||
sb.WriteString(fmt.Sprintf(" - Path: %s\n", s.cfg.Policy.Path))
|
||||
}
|
||||
sb.WriteString("\n")
|
||||
|
||||
// DERP information
|
||||
derpMap := s.derpMap.Load()
|
||||
if derpMap != nil {
|
||||
sb.WriteString(fmt.Sprintf("DERP: %d regions configured\n", len(derpMap.Regions)))
|
||||
} else {
|
||||
sb.WriteString("DERP: not configured\n")
|
||||
}
|
||||
sb.WriteString("\n")
|
||||
|
||||
// Route information
|
||||
routeCount := len(strings.Split(strings.TrimSpace(s.primaryRoutes.String()), "\n"))
|
||||
if s.primaryRoutes.String() == "" {
|
||||
routeCount = 0
|
||||
}
|
||||
sb.WriteString(fmt.Sprintf("Primary Routes: %d active\n", routeCount))
|
||||
sb.WriteString("\n")
|
||||
|
||||
// Registration cache
|
||||
sb.WriteString("Registration Cache: active\n")
|
||||
sb.WriteString("\n")
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// DebugNodeStore returns debug information about the NodeStore.
|
||||
func (s *State) DebugNodeStore() string {
|
||||
return s.nodeStore.DebugString()
|
||||
}
|
||||
|
||||
// DebugDERPMap returns debug information about the DERP map configuration.
|
||||
func (s *State) DebugDERPMap() string {
|
||||
derpMap := s.derpMap.Load()
|
||||
if derpMap == nil {
|
||||
return "DERP Map: not configured\n"
|
||||
}
|
||||
|
||||
var sb strings.Builder
|
||||
|
||||
sb.WriteString("=== DERP Map Configuration ===\n\n")
|
||||
|
||||
sb.WriteString(fmt.Sprintf("Total Regions: %d\n\n", len(derpMap.Regions)))
|
||||
|
||||
for regionID, region := range derpMap.Regions {
|
||||
sb.WriteString(fmt.Sprintf("Region %d: %s\n", regionID, region.RegionName))
|
||||
sb.WriteString(fmt.Sprintf(" - Nodes: %d\n", len(region.Nodes)))
|
||||
|
||||
for _, node := range region.Nodes {
|
||||
sb.WriteString(fmt.Sprintf(" - %s (%s:%d)\n",
|
||||
node.Name, node.HostName, node.DERPPort))
|
||||
if node.STUNPort != 0 {
|
||||
sb.WriteString(fmt.Sprintf(" STUN: %d\n", node.STUNPort))
|
||||
}
|
||||
}
|
||||
sb.WriteString("\n")
|
||||
}
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// DebugSSHPolicies returns debug information about SSH policies for all nodes.
|
||||
func (s *State) DebugSSHPolicies() map[string]*tailcfg.SSHPolicy {
|
||||
nodes := s.nodeStore.ListNodes()
|
||||
|
||||
sshPolicies := make(map[string]*tailcfg.SSHPolicy)
|
||||
|
||||
for _, node := range nodes.All() {
|
||||
if !node.Valid() {
|
||||
continue
|
||||
}
|
||||
|
||||
pol, err := s.SSHPolicy(node)
|
||||
if err != nil {
|
||||
// Store the error information
|
||||
continue
|
||||
}
|
||||
|
||||
key := fmt.Sprintf("id:%d hostname:%s givenname:%s",
|
||||
node.ID(), node.Hostname(), node.GivenName())
|
||||
sshPolicies[key] = pol
|
||||
}
|
||||
|
||||
return sshPolicies
|
||||
}
|
||||
|
||||
// DebugRegistrationCache returns debug information about the registration cache.
|
||||
func (s *State) DebugRegistrationCache() map[string]interface{} {
|
||||
// The cache doesn't expose internal statistics, so we provide basic info
|
||||
result := map[string]interface{}{
|
||||
"type": "zcache",
|
||||
"expiration": registerCacheExpiration.String(),
|
||||
"cleanup": registerCacheCleanup.String(),
|
||||
"status": "active",
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// DebugConfig returns debug information about the current configuration.
|
||||
func (s *State) DebugConfig() *types.Config {
|
||||
return s.cfg
|
||||
}
|
||||
|
||||
// DebugPolicy returns the current policy data as a string.
|
||||
func (s *State) DebugPolicy() (string, error) {
|
||||
switch s.cfg.Policy.Mode {
|
||||
case types.PolicyModeDB:
|
||||
p, err := s.GetPolicy()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return p.Data, nil
|
||||
case types.PolicyModeFile:
|
||||
pol, err := policyBytes(s.db, s.cfg)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(pol), nil
|
||||
default:
|
||||
return "", fmt.Errorf("unsupported policy mode: %s", s.cfg.Policy.Mode)
|
||||
}
|
||||
}
|
||||
|
||||
// DebugFilter returns the current filter rules and matchers.
|
||||
func (s *State) DebugFilter() ([]tailcfg.FilterRule, error) {
|
||||
filter, _ := s.Filter()
|
||||
return filter, nil
|
||||
}
|
||||
|
||||
// DebugRoutes returns the current primary routes information as a structured object.
|
||||
func (s *State) DebugRoutes() routes.DebugRoutes {
|
||||
return s.primaryRoutes.DebugJSON()
|
||||
}
|
||||
|
||||
// DebugRoutesString returns the current primary routes information as a string.
|
||||
func (s *State) DebugRoutesString() string {
|
||||
return s.PrimaryRoutesString()
|
||||
}
|
||||
|
||||
// DebugPolicyManager returns the policy manager debug string.
|
||||
func (s *State) DebugPolicyManager() string {
|
||||
return s.PolicyDebugString()
|
||||
}
|
||||
|
||||
// PolicyDebugString returns a debug representation of the current policy.
|
||||
func (s *State) PolicyDebugString() string {
|
||||
return s.polMan.DebugString()
|
||||
}
|
||||
|
||||
// DebugOverviewJSON returns a structured overview of the current state for debugging.
|
||||
func (s *State) DebugOverviewJSON() DebugOverviewInfo {
|
||||
allNodes := s.nodeStore.ListNodes()
|
||||
users, _ := s.ListAllUsers()
|
||||
|
||||
info := DebugOverviewInfo{
|
||||
Users: make(map[string]int),
|
||||
TotalUsers: len(users),
|
||||
}
|
||||
|
||||
// Node statistics
|
||||
info.Nodes.Total = allNodes.Len()
|
||||
now := time.Now()
|
||||
|
||||
for _, node := range allNodes.All() {
|
||||
if node.Valid() {
|
||||
userName := node.User().Name
|
||||
info.Users[userName]++
|
||||
|
||||
if node.IsOnline().Valid() && node.IsOnline().Get() {
|
||||
info.Nodes.Online++
|
||||
}
|
||||
|
||||
if node.Expiry().Valid() && node.Expiry().Get().Before(now) {
|
||||
info.Nodes.Expired++
|
||||
}
|
||||
|
||||
if node.AuthKey().Valid() && node.AuthKey().Ephemeral() {
|
||||
info.Nodes.Ephemeral++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Policy information
|
||||
info.Policy.Mode = string(s.cfg.Policy.Mode)
|
||||
if s.cfg.Policy.Mode == types.PolicyModeFile {
|
||||
info.Policy.Path = s.cfg.Policy.Path
|
||||
}
|
||||
|
||||
derpMap := s.derpMap.Load()
|
||||
if derpMap != nil {
|
||||
info.DERP.Configured = true
|
||||
info.DERP.Regions = len(derpMap.Regions)
|
||||
} else {
|
||||
info.DERP.Configured = false
|
||||
info.DERP.Regions = 0
|
||||
}
|
||||
|
||||
// Route information
|
||||
routeCount := len(strings.Split(strings.TrimSpace(s.primaryRoutes.String()), "\n"))
|
||||
if s.primaryRoutes.String() == "" {
|
||||
routeCount = 0
|
||||
}
|
||||
info.PrimaryRoutes = routeCount
|
||||
|
||||
return info
|
||||
}
|
||||
|
||||
// DebugDERPJSON returns structured debug information about the DERP map configuration.
|
||||
func (s *State) DebugDERPJSON() DebugDERPInfo {
|
||||
derpMap := s.derpMap.Load()
|
||||
|
||||
info := DebugDERPInfo{
|
||||
Configured: derpMap != nil,
|
||||
Regions: make(map[int]*DebugDERPRegion),
|
||||
}
|
||||
|
||||
if derpMap == nil {
|
||||
return info
|
||||
}
|
||||
|
||||
info.TotalRegions = len(derpMap.Regions)
|
||||
|
||||
for regionID, region := range derpMap.Regions {
|
||||
debugRegion := &DebugDERPRegion{
|
||||
RegionID: regionID,
|
||||
RegionName: region.RegionName,
|
||||
Nodes: make([]*DebugDERPNode, 0, len(region.Nodes)),
|
||||
}
|
||||
|
||||
for _, node := range region.Nodes {
|
||||
debugNode := &DebugDERPNode{
|
||||
Name: node.Name,
|
||||
HostName: node.HostName,
|
||||
DERPPort: node.DERPPort,
|
||||
STUNPort: node.STUNPort,
|
||||
}
|
||||
debugRegion.Nodes = append(debugRegion.Nodes, debugNode)
|
||||
}
|
||||
|
||||
info.Regions[regionID] = debugRegion
|
||||
}
|
||||
|
||||
return info
|
||||
}
|
||||
|
||||
// DebugNodeStoreJSON returns the actual nodes map from the current NodeStore snapshot.
|
||||
func (s *State) DebugNodeStoreJSON() map[types.NodeID]types.Node {
|
||||
snapshot := s.nodeStore.data.Load()
|
||||
return snapshot.nodesByID
|
||||
}
|
||||
|
||||
// DebugPolicyManagerJSON returns structured debug information about the policy manager.
|
||||
func (s *State) DebugPolicyManagerJSON() DebugStringInfo {
|
||||
return DebugStringInfo{
|
||||
Content: s.polMan.DebugString(),
|
||||
}
|
||||
}
|
||||
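// A hypothetical way these debug helpers could be exposed over HTTP for
// inspection (illustrative only; the "mux" name and route path are
// assumptions, and the actual wiring lives elsewhere in hscontrol):
//
//	mux.HandleFunc("/debug/overview", func(w http.ResponseWriter, r *http.Request) {
//		w.Header().Set("Content-Type", "application/json")
//		_ = json.NewEncoder(w).Encode(state.DebugOverviewJSON())
//	})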
78
hscontrol/state/debug_test.go
Normal file
@@ -0,0 +1,78 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNodeStoreDebugString(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setupFn func() *NodeStore
|
||||
contains []string
|
||||
}{
|
||||
{
|
||||
name: "empty nodestore",
|
||||
setupFn: func() *NodeStore {
|
||||
return NewNodeStore(nil, allowAllPeersFunc)
|
||||
},
|
||||
contains: []string{
|
||||
"=== NodeStore Debug Information ===",
|
||||
"Total Nodes: 0",
|
||||
"Users with Nodes: 0",
|
||||
"NodeKey Index: 0 entries",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "nodestore with data",
|
||||
setupFn: func() *NodeStore {
|
||||
node1 := createTestNode(1, 1, "user1", "node1")
|
||||
node2 := createTestNode(2, 2, "user2", "node2")
|
||||
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
|
||||
_ = store.PutNode(node1)
|
||||
_ = store.PutNode(node2)
|
||||
|
||||
return store
|
||||
},
|
||||
contains: []string{
|
||||
"Total Nodes: 2",
|
||||
"Users with Nodes: 2",
|
||||
"Peer Relationships:",
|
||||
"NodeKey Index: 2 entries",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
store := tt.setupFn()
|
||||
if store.writeQueue != nil {
|
||||
defer store.Stop()
|
||||
}
|
||||
|
||||
debugStr := store.DebugString()
|
||||
|
||||
for _, expected := range tt.contains {
|
||||
assert.Contains(t, debugStr, expected,
|
||||
"Debug string should contain: %s\nActual debug:\n%s", expected, debugStr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDebugRegistrationCache(t *testing.T) {
|
||||
// Create a minimal NodeStore for testing debug methods
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
|
||||
debugStr := store.DebugString()
|
||||
|
||||
// Should contain basic debug information
|
||||
assert.Contains(t, debugStr, "=== NodeStore Debug Information ===")
|
||||
assert.Contains(t, debugStr, "Total Nodes: 0")
|
||||
assert.Contains(t, debugStr, "Users with Nodes: 0")
|
||||
assert.Contains(t, debugStr, "NodeKey Index: 0 entries")
|
||||
}
|
||||
460
hscontrol/state/ephemeral_test.go
Normal file
@@ -0,0 +1,460 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
// TestEphemeralNodeDeleteWithConcurrentUpdate tests the race condition where UpdateNode and DeleteNode
|
||||
// are called concurrently and may be batched together. This reproduces the issue where ephemeral nodes
|
||||
// are not properly deleted during logout because UpdateNodeFromMapRequest returns a stale node view
|
||||
// after the node has been deleted from the NodeStore.
|
||||
func TestEphemeralNodeDeleteWithConcurrentUpdate(t *testing.T) {
|
||||
// Create a simple test node
|
||||
node := createTestNode(1, 1, "test-user", "test-node")
|
||||
|
||||
// Create NodeStore
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
// Put the node in the store
|
||||
resultNode := store.PutNode(node)
|
||||
require.True(t, resultNode.Valid(), "initial PutNode should return valid node")
|
||||
|
||||
// Verify node exists
|
||||
retrievedNode, found := store.GetNode(node.ID)
|
||||
require.True(t, found)
|
||||
require.Equal(t, node.ID, retrievedNode.ID())
|
||||
|
||||
// Test scenario: UpdateNode is called, returns a node view from the batch,
|
||||
// but in the same batch a DeleteNode removes the node.
|
||||
// This simulates what happens when:
|
||||
// 1. UpdateNodeFromMapRequest calls UpdateNode and gets back updatedNode
|
||||
// 2. At the same time, handleLogout calls DeleteNode
|
||||
// 3. They get batched together: [UPDATE, DELETE]
|
||||
// 4. UPDATE modifies the node, DELETE removes it
|
||||
// 5. UpdateNode returns a node view based on the state AFTER both operations
|
||||
// 6. If DELETE came after UPDATE, the returned node should be invalid
|
||||
|
||||
done := make(chan bool, 2)
|
||||
var updatedNode types.NodeView
|
||||
var updateOk bool
|
||||
|
||||
// Goroutine 1: UpdateNode (simulates UpdateNodeFromMapRequest)
|
||||
go func() {
|
||||
updatedNode, updateOk = store.UpdateNode(node.ID, func(n *types.Node) {
|
||||
n.LastSeen = ptr.To(time.Now())
|
||||
})
|
||||
done <- true
|
||||
}()
|
||||
|
||||
// Goroutine 2: DeleteNode (simulates handleLogout for ephemeral node)
|
||||
go func() {
|
||||
// Small delay to increase chance of batching together
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
store.DeleteNode(node.ID)
|
||||
done <- true
|
||||
}()
|
||||
|
||||
// Wait for both operations
|
||||
<-done
|
||||
<-done
|
||||
|
||||
// Give batching time to complete
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// The key assertion: if UpdateNode and DeleteNode were batched together
|
||||
// with DELETE after UPDATE, then UpdateNode should return an invalid node
|
||||
// OR it should return a valid node but the node should no longer exist in the store
|
||||
|
||||
_, found = store.GetNode(node.ID)
|
||||
assert.False(t, found, "node should be deleted from NodeStore")
|
||||
|
||||
// If the update happened before delete in the batch, the returned node might be invalid
|
||||
if updateOk {
|
||||
t.Logf("UpdateNode returned ok=true, valid=%v", updatedNode.Valid())
|
||||
// This is the bug scenario - UpdateNode thinks it succeeded but node is gone
|
||||
if updatedNode.Valid() {
|
||||
t.Logf("WARNING: UpdateNode returned valid node but node was deleted - this indicates the race condition bug")
|
||||
}
|
||||
} else {
|
||||
t.Logf("UpdateNode correctly returned ok=false (node deleted in same batch)")
|
||||
}
|
||||
}
|
||||
|
||||
// TestUpdateNodeReturnsInvalidWhenDeletedInSameBatch specifically tests that when
|
||||
// UpdateNode and DeleteNode are in the same batch with DELETE after UPDATE,
|
||||
// the UpdateNode should return an invalid node view.
|
||||
func TestUpdateNodeReturnsInvalidWhenDeletedInSameBatch(t *testing.T) {
|
||||
node := createTestNode(2, 1, "test-user", "test-node-2")
|
||||
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
// Put node in store
|
||||
_ = store.PutNode(node)
|
||||
|
||||
// Simulate the exact sequence: UpdateNode gets queued, then DeleteNode gets queued,
|
||||
// they batch together, and we check what UpdateNode returns
|
||||
|
||||
resultChan := make(chan struct {
|
||||
node types.NodeView
|
||||
ok bool
|
||||
})
|
||||
|
||||
// Start UpdateNode - it will block until batch is applied
|
||||
go func() {
|
||||
node, ok := store.UpdateNode(node.ID, func(n *types.Node) {
|
||||
n.LastSeen = ptr.To(time.Now())
|
||||
})
|
||||
resultChan <- struct {
|
||||
node types.NodeView
|
||||
ok bool
|
||||
}{node, ok}
|
||||
}()
|
||||
|
||||
// Give UpdateNode a moment to queue its work
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
|
||||
// Now queue DeleteNode - should batch with the UPDATE
|
||||
store.DeleteNode(node.ID)
|
||||
|
||||
// Get the result from UpdateNode
|
||||
result := <-resultChan
|
||||
|
||||
// Wait for batch to complete
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Node should be deleted
|
||||
_, found := store.GetNode(node.ID)
|
||||
assert.False(t, found, "node should be deleted")
|
||||
|
||||
// The critical check: what did UpdateNode return?
|
||||
// After the commit c6b09289988f34398eb3157e31ba092eb8721a9f,
|
||||
// UpdateNode returns the node state from the batch.
|
||||
// If DELETE came after UPDATE in the batch, the node doesn't exist anymore,
|
||||
// so UpdateNode should return (invalid, false)
|
||||
t.Logf("UpdateNode returned: ok=%v, valid=%v", result.ok, result.node.Valid())
|
||||
|
||||
// This is the expected behavior - if node was deleted in same batch,
|
||||
// UpdateNode should return invalid node
|
||||
if result.ok && result.node.Valid() {
|
||||
t.Error("BUG: UpdateNode returned valid node even though it was deleted in same batch")
|
||||
}
|
||||
}
|
||||
|
||||
// TestPersistNodeToDBPreventsRaceCondition tests that persistNodeToDB correctly handles
|
||||
// the race condition where a node is deleted after UpdateNode returns but before
|
||||
// persistNodeToDB is called. This reproduces the ephemeral node deletion bug.
|
||||
func TestPersistNodeToDBPreventsRaceCondition(t *testing.T) {
|
||||
node := createTestNode(3, 1, "test-user", "test-node-3")
|
||||
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
// Put node in store
|
||||
_ = store.PutNode(node)
|
||||
|
||||
// Simulate UpdateNode being called
|
||||
updatedNode, ok := store.UpdateNode(node.ID, func(n *types.Node) {
|
||||
n.LastSeen = ptr.To(time.Now())
|
||||
})
|
||||
require.True(t, ok, "UpdateNode should succeed")
|
||||
require.True(t, updatedNode.Valid(), "UpdateNode should return valid node")
|
||||
|
||||
// Now delete the node (simulating ephemeral logout happening concurrently)
|
||||
store.DeleteNode(node.ID)
|
||||
|
||||
// Wait for deletion to complete
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Verify node is deleted
|
||||
_, found := store.GetNode(node.ID)
|
||||
require.False(t, found, "node should be deleted")
|
||||
|
||||
// Now try to use the updatedNode from before the deletion
|
||||
// In the old code, this would re-insert the node into the database
|
||||
// With our fix, GetNode check in persistNodeToDB should prevent this
|
||||
|
||||
// Simulate what persistNodeToDB does - check if node still exists
|
||||
_, exists := store.GetNode(updatedNode.ID())
|
||||
if !exists {
|
||||
t.Log("SUCCESS: persistNodeToDB check would prevent re-insertion of deleted node")
|
||||
} else {
|
||||
t.Error("BUG: Node still exists in NodeStore after deletion")
|
||||
}
|
||||
|
||||
// The key assertion: after deletion, attempting to persist the old updatedNode
|
||||
// should fail because the node no longer exists in NodeStore
|
||||
assert.False(t, exists, "persistNodeToDB should detect node was deleted and refuse to persist")
|
||||
}
|
||||
|
||||
// TestEphemeralNodeLogoutRaceCondition tests the specific race condition that occurs
|
||||
// when an ephemeral node logs out. This reproduces the bug where:
|
||||
// 1. UpdateNodeFromMapRequest calls UpdateNode and receives a node view
|
||||
// 2. Concurrently, handleLogout is called for the ephemeral node and calls DeleteNode
|
||||
// 3. UpdateNode and DeleteNode get batched together
|
||||
// 4. If UpdateNode's result is used to call persistNodeToDB after the deletion,
|
||||
// the node could be re-inserted into the database even though it was deleted
|
||||
func TestEphemeralNodeLogoutRaceCondition(t *testing.T) {
|
||||
ephemeralNode := createTestNode(4, 1, "test-user", "ephemeral-node")
|
||||
ephemeralNode.AuthKey = &types.PreAuthKey{
|
||||
ID: 1,
|
||||
Key: "test-key",
|
||||
Ephemeral: true,
|
||||
}
|
||||
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
// Put ephemeral node in store
|
||||
_ = store.PutNode(ephemeralNode)
|
||||
|
||||
// Simulate concurrent operations:
|
||||
// 1. UpdateNode (from UpdateNodeFromMapRequest during polling)
|
||||
// 2. DeleteNode (from handleLogout when client sends logout request)
|
||||
|
||||
var updatedNode types.NodeView
|
||||
var updateOk bool
|
||||
done := make(chan bool, 2)
|
||||
|
||||
// Goroutine 1: UpdateNode (simulates UpdateNodeFromMapRequest)
|
||||
go func() {
|
||||
updatedNode, updateOk = store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {
|
||||
n.LastSeen = ptr.To(time.Now())
|
||||
})
|
||||
done <- true
|
||||
}()
|
||||
|
||||
// Goroutine 2: DeleteNode (simulates handleLogout for ephemeral node)
|
||||
go func() {
|
||||
time.Sleep(1 * time.Millisecond) // Slight delay to batch operations
|
||||
store.DeleteNode(ephemeralNode.ID)
|
||||
done <- true
|
||||
}()
|
||||
|
||||
// Wait for both operations
|
||||
<-done
|
||||
<-done
|
||||
|
||||
// Give batching time to complete
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Node should be deleted from store
|
||||
_, found := store.GetNode(ephemeralNode.ID)
|
||||
assert.False(t, found, "ephemeral node should be deleted from NodeStore")
|
||||
|
||||
// Critical assertion: if UpdateNode returned before DeleteNode completed,
|
||||
// the updatedNode might be valid but the node is actually deleted.
|
||||
// This is the bug - UpdateNodeFromMapRequest would get a valid node,
|
||||
// then try to persist it, re-inserting the deleted ephemeral node.
|
||||
if updateOk && updatedNode.Valid() {
|
||||
t.Log("UpdateNode returned valid node, but node is deleted - this is the race condition")
|
||||
|
||||
// In the real code, this would cause persistNodeToDB to be called with updatedNode
|
||||
// The fix in persistNodeToDB checks if the node still exists:
|
||||
_, stillExists := store.GetNode(updatedNode.ID())
|
||||
assert.False(t, stillExists, "persistNodeToDB should check NodeStore and find node deleted")
|
||||
} else if !updateOk || !updatedNode.Valid() {
|
||||
t.Log("UpdateNode correctly returned invalid/not-ok result (delete happened in same batch)")
|
||||
}
|
||||
}

// TestUpdateNodeFromMapRequestEphemeralLogoutSequence tests the exact sequence
// that causes ephemeral node logout failures:
// 1. Client sends MapRequest with updated endpoint info
// 2. UpdateNodeFromMapRequest starts processing, calls UpdateNode
// 3. Client sends logout request (past expiry)
// 4. handleLogout calls DeleteNode for ephemeral node
// 5. UpdateNode and DeleteNode batch together
// 6. UpdateNode returns a valid node (from before delete in batch)
// 7. persistNodeToDB is called with the stale valid node
// 8. Node gets re-inserted into database instead of staying deleted
func TestUpdateNodeFromMapRequestEphemeralLogoutSequence(t *testing.T) {
	ephemeralNode := createTestNode(5, 1, "test-user", "ephemeral-node-5")
	ephemeralNode.AuthKey = &types.PreAuthKey{
		ID:        2,
		Key:       "test-key-2",
		Ephemeral: true,
	}

	store := NewNodeStore(nil, allowAllPeersFunc)
	store.Start()
	defer store.Stop()

	// Initial state: ephemeral node exists
	_ = store.PutNode(ephemeralNode)

	// Step 1: UpdateNodeFromMapRequest calls UpdateNode
	// (simulating client sending MapRequest with endpoint updates)
	updateStarted := make(chan bool)
	var updatedNode types.NodeView
	var updateOk bool

	go func() {
		updateStarted <- true
		updatedNode, updateOk = store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {
			n.LastSeen = ptr.To(time.Now())
			endpoint := netip.MustParseAddrPort("10.0.0.1:41641")
			n.Endpoints = []netip.AddrPort{endpoint}
		})
	}()

	<-updateStarted
	// Small delay to ensure UpdateNode is queued
	time.Sleep(5 * time.Millisecond)

	// Step 2: Logout happens - handleLogout calls DeleteNode
	// (simulating client sending logout with past expiry)
	store.DeleteNode(ephemeralNode.ID)

	// Wait for batching to complete
	time.Sleep(50 * time.Millisecond)

	// Step 3: Check results
	_, nodeExists := store.GetNode(ephemeralNode.ID)
	assert.False(t, nodeExists, "ephemeral node must be deleted after logout")

	// Step 4: Simulate what happens if we try to persist the updatedNode
	if updateOk && updatedNode.Valid() {
		// This is the problematic path - UpdateNode returned a valid node
		// but the node was deleted in the same batch
		t.Log("UpdateNode returned valid node even though node was deleted")

		// The fix: persistNodeToDB must check NodeStore before persisting
		_, checkExists := store.GetNode(updatedNode.ID())
		if checkExists {
			t.Error("BUG: Node still exists in NodeStore after deletion - should be impossible")
		} else {
			t.Log("SUCCESS: persistNodeToDB would detect node is deleted and refuse to persist")
		}
	} else {
		t.Log("UpdateNode correctly indicated node was deleted (returned invalid or not-ok)")
	}

	// Final assertion: node must not exist
	_, finalExists := store.GetNode(ephemeralNode.ID)
	assert.False(t, finalExists, "ephemeral node must remain deleted")
}

// TestUpdateNodeDeletedInSameBatchReturnsInvalid specifically tests that when
// UpdateNode and DeleteNode are batched together with DELETE after UPDATE,
// UpdateNode returns ok=false to indicate the node was deleted.
func TestUpdateNodeDeletedInSameBatchReturnsInvalid(t *testing.T) {
	node := createTestNode(6, 1, "test-user", "test-node-6")

	store := NewNodeStore(nil, allowAllPeersFunc)
	store.Start()
	defer store.Stop()

	// Put node in store
	_ = store.PutNode(node)

	// Queue UpdateNode
	updateDone := make(chan struct {
		node types.NodeView
		ok   bool
	})

	go func() {
		updatedNode, ok := store.UpdateNode(node.ID, func(n *types.Node) {
			n.LastSeen = ptr.To(time.Now())
		})
		updateDone <- struct {
			node types.NodeView
			ok   bool
		}{updatedNode, ok}
	}()

	// Small delay to ensure UpdateNode is queued
	time.Sleep(5 * time.Millisecond)

	// Queue DeleteNode - should batch with UpdateNode
	store.DeleteNode(node.ID)

	// Get UpdateNode result
	result := <-updateDone

	// Wait for batch to complete
	time.Sleep(50 * time.Millisecond)

	// Node should be deleted
	_, exists := store.GetNode(node.ID)
	assert.False(t, exists, "node should be deleted from store")

	// UpdateNode should indicate the node was deleted
	// After c6b09289988f34398eb3157e31ba092eb8721a9f, when UPDATE and DELETE
	// are in the same batch with DELETE after UPDATE, UpdateNode returns
	// the state after the batch is applied - which means the node doesn't exist
	assert.False(t, result.ok, "UpdateNode should return ok=false when node deleted in same batch")
	assert.False(t, result.node.Valid(), "UpdateNode should return invalid node when node deleted in same batch")
}

// TestPersistNodeToDBChecksNodeStoreBeforePersist verifies that persistNodeToDB
// checks if the node still exists in NodeStore before persisting to database.
// This prevents the race condition where:
// 1. UpdateNodeFromMapRequest calls UpdateNode and gets a valid node
// 2. Ephemeral node logout calls DeleteNode
// 3. UpdateNode and DeleteNode batch together
// 4. UpdateNode returns a valid node (from before delete in batch)
// 5. UpdateNodeFromMapRequest calls persistNodeToDB with the stale node
// 6. persistNodeToDB must detect the node is deleted and refuse to persist
func TestPersistNodeToDBChecksNodeStoreBeforePersist(t *testing.T) {
	ephemeralNode := createTestNode(7, 1, "test-user", "ephemeral-node-7")
	ephemeralNode.AuthKey = &types.PreAuthKey{
		ID:        3,
		Key:       "test-key-3",
		Ephemeral: true,
	}

	store := NewNodeStore(nil, allowAllPeersFunc)
	store.Start()
	defer store.Stop()

	// Put node in store
	_ = store.PutNode(ephemeralNode)

	// Simulate the race:
	// 1. UpdateNode is called (from UpdateNodeFromMapRequest)
	updatedNode, ok := store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {
		n.LastSeen = ptr.To(time.Now())
	})
	require.True(t, ok, "UpdateNode should succeed")
	require.True(t, updatedNode.Valid(), "UpdateNode should return valid node")

	// 2. Node is deleted (from handleLogout for ephemeral node)
	store.DeleteNode(ephemeralNode.ID)

	// Wait for deletion
	time.Sleep(50 * time.Millisecond)

	// 3. Verify node is deleted from store
	_, exists := store.GetNode(ephemeralNode.ID)
	require.False(t, exists, "node should be deleted from NodeStore")

	// 4. Simulate what persistNodeToDB does - check if node still exists
	// The fix in persistNodeToDB checks NodeStore before persisting:
	// if !exists { return error }
	// This prevents re-inserting the deleted node into the database

	// Verify the node from UpdateNode is valid but node is gone from store
	assert.True(t, updatedNode.Valid(), "UpdateNode returned a valid node view")
	_, stillExists := store.GetNode(updatedNode.ID())
	assert.False(t, stillExists, "but node should be deleted from NodeStore")

	// This is the critical test: persistNodeToDB must check NodeStore
	// and refuse to persist if the node doesn't exist anymore
	// The actual persistNodeToDB implementation does:
	// _, exists := s.nodeStore.GetNode(node.ID())
	// if !exists { return error }
}
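
// The guard described in the comments above can be sketched as a small helper.
// This is a minimal, hypothetical illustration of the check the test exercises;
// the function name and error text are assumptions for illustration, and the
// real check lives inside persistNodeToDB.
func persistGuardSketch(store *NodeStore, node types.NodeView) error {
	// Refuse to write back a node that has been removed from the NodeStore
	// (for example an ephemeral node that logged out), so a stale view from
	// an earlier UpdateNode cannot re-insert it into the database.
	if _, exists := store.GetNode(node.ID()); !exists {
		return fmt.Errorf("node %d no longer exists in NodeStore, refusing to persist", node.ID())
	}

	// ... the real implementation would write the node to the database here ...
	return nil
}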
50 hscontrol/state/maprequest.go Normal file
@@ -0,0 +1,50 @@
// Package state provides pure functions for processing MapRequest data.
// These functions are extracted from UpdateNodeFromMapRequest to improve
// testability and maintainability.

package state

import (
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/rs/zerolog/log"
	"tailscale.com/tailcfg"
)

// netInfoFromMapRequest determines the correct NetInfo to use.
// Returns the NetInfo that should be used for this request.
func netInfoFromMapRequest(
	nodeID types.NodeID,
	currentHostinfo *tailcfg.Hostinfo,
	reqHostinfo *tailcfg.Hostinfo,
) *tailcfg.NetInfo {
	// If request has NetInfo, use it
	if reqHostinfo != nil && reqHostinfo.NetInfo != nil {
		return reqHostinfo.NetInfo
	}

	// Otherwise, use current NetInfo if available
	if currentHostinfo != nil && currentHostinfo.NetInfo != nil {
		log.Debug().
			Caller().
			Uint64("node.id", nodeID.Uint64()).
			Int("preferredDERP", currentHostinfo.NetInfo.PreferredDERP).
			Msg("using NetInfo from previous Hostinfo in MapRequest")
		return currentHostinfo.NetInfo
	}

	// No NetInfo available anywhere - log for debugging
	var hostname string
	if reqHostinfo != nil {
		hostname = reqHostinfo.Hostname
	} else if currentHostinfo != nil {
		hostname = currentHostinfo.Hostname
	}

	log.Debug().
		Caller().
		Uint64("node.id", nodeID.Uint64()).
		Str("node.hostname", hostname).
		Msg("node sent update but has no NetInfo in request or database")

	return nil
}
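
// exampleNetInfoFallback is a minimal, illustrative sketch (not part of the
// file above) showing the resolution order of netInfoFromMapRequest: NetInfo
// from the incoming request wins, otherwise the previously stored NetInfo is
// reused, otherwise nil. The function name and DERP values are assumptions
// chosen for illustration only.
func exampleNetInfoFallback() *tailcfg.NetInfo {
	current := &tailcfg.Hostinfo{NetInfo: &tailcfg.NetInfo{PreferredDERP: 5}}
	req := &tailcfg.Hostinfo{Hostname: "example-node"} // re-registration without NetInfo

	// Falls back to the stored NetInfo, preserving PreferredDERP == 5.
	return netInfoFromMapRequest(types.NodeID(1), current, req)
}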
160 hscontrol/state/maprequest_test.go Normal file
@@ -0,0 +1,160 @@
package state

import (
	"net/netip"
	"testing"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"tailscale.com/tailcfg"
	"tailscale.com/types/key"
)

func TestNetInfoFromMapRequest(t *testing.T) {
	nodeID := types.NodeID(1)

	tests := []struct {
		name            string
		currentHostinfo *tailcfg.Hostinfo
		reqHostinfo     *tailcfg.Hostinfo
		expectNetInfo   *tailcfg.NetInfo
	}{
		{
			name:            "no current NetInfo - return nil",
			currentHostinfo: nil,
			reqHostinfo: &tailcfg.Hostinfo{
				Hostname: "test-node",
			},
			expectNetInfo: nil,
		},
		{
			name: "current has NetInfo, request has NetInfo - use request",
			currentHostinfo: &tailcfg.Hostinfo{
				NetInfo: &tailcfg.NetInfo{PreferredDERP: 1},
			},
			reqHostinfo: &tailcfg.Hostinfo{
				Hostname: "test-node",
				NetInfo:  &tailcfg.NetInfo{PreferredDERP: 2},
			},
			expectNetInfo: &tailcfg.NetInfo{PreferredDERP: 2},
		},
		{
			name: "current has NetInfo, request has no NetInfo - use current",
			currentHostinfo: &tailcfg.Hostinfo{
				NetInfo: &tailcfg.NetInfo{PreferredDERP: 3},
			},
			reqHostinfo: &tailcfg.Hostinfo{
				Hostname: "test-node",
			},
			expectNetInfo: &tailcfg.NetInfo{PreferredDERP: 3},
		},
		{
			name: "current has NetInfo, no request Hostinfo - use current",
			currentHostinfo: &tailcfg.Hostinfo{
				NetInfo: &tailcfg.NetInfo{PreferredDERP: 4},
			},
			reqHostinfo:   nil,
			expectNetInfo: &tailcfg.NetInfo{PreferredDERP: 4},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := netInfoFromMapRequest(nodeID, tt.currentHostinfo, tt.reqHostinfo)

			if tt.expectNetInfo == nil {
				assert.Nil(t, result, "expected nil NetInfo")
			} else {
				require.NotNil(t, result, "expected non-nil NetInfo")
				assert.Equal(t, tt.expectNetInfo.PreferredDERP, result.PreferredDERP, "DERP mismatch")
			}
		})
	}
}

func TestNetInfoPreservationInRegistrationFlow(t *testing.T) {
	nodeID := types.NodeID(1)

	// This test reproduces the bug in registration flows where NetInfo was lost
	// because we used the wrong hostinfo reference when calling NetInfoFromMapRequest
	t.Run("registration_flow_bug_reproduction", func(t *testing.T) {
		// Simulate existing node with NetInfo (before re-registration)
		existingNodeHostinfo := &tailcfg.Hostinfo{
			Hostname: "test-node",
			NetInfo:  &tailcfg.NetInfo{PreferredDERP: 5},
		}

		// Simulate new registration request (no NetInfo)
		newRegistrationHostinfo := &tailcfg.Hostinfo{
			Hostname: "test-node",
			OS:       "linux",
			// NetInfo is nil - this is what comes from the registration request
		}

		// Simulate what was happening in the bug: we passed the "current node being modified"
		// hostinfo (which has no NetInfo) instead of the existing node's hostinfo
		nodeBeingModifiedHostinfo := &tailcfg.Hostinfo{
			Hostname: "test-node",
			// NetInfo is nil because this node is being modified/reset
		}

		// BUG: Using the node being modified (no NetInfo) instead of existing node (has NetInfo)
		buggyResult := netInfoFromMapRequest(nodeID, nodeBeingModifiedHostinfo, newRegistrationHostinfo)
		assert.Nil(t, buggyResult, "Bug: Should return nil when using wrong hostinfo reference")

		// CORRECT: Using the existing node's hostinfo (has NetInfo)
		correctResult := netInfoFromMapRequest(nodeID, existingNodeHostinfo, newRegistrationHostinfo)
		assert.NotNil(t, correctResult, "Fix: Should preserve NetInfo when using correct hostinfo reference")
		assert.Equal(t, 5, correctResult.PreferredDERP, "Should preserve the DERP region from existing node")
	})

	t.Run("new_node_creation_for_different_user_should_preserve_netinfo", func(t *testing.T) {
		// This test covers the scenario where:
		// 1. A node exists for user1 with NetInfo
		// 2. The same machine logs in as user2 (different user)
		// 3. A NEW node is created for user2 (pre-auth key flow)
		// 4. The new node should preserve NetInfo from the old node

		// Existing node for user1 with NetInfo
		existingNodeUser1Hostinfo := &tailcfg.Hostinfo{
			Hostname: "test-node",
			NetInfo:  &tailcfg.NetInfo{PreferredDERP: 7},
		}

		// New registration request for user2 (no NetInfo yet)
		newNodeUser2Hostinfo := &tailcfg.Hostinfo{
			Hostname: "test-node",
			OS:       "linux",
			// NetInfo is nil - registration request doesn't include it
		}

		// When creating a new node for user2, we should preserve NetInfo from user1's node
		result := netInfoFromMapRequest(types.NodeID(2), existingNodeUser1Hostinfo, newNodeUser2Hostinfo)
		assert.NotNil(t, result, "New node for user2 should preserve NetInfo from user1's node")
		assert.Equal(t, 7, result.PreferredDERP, "Should preserve DERP region from existing node")
	})
}

// Simple helper function for tests
func createTestNodeSimple(id types.NodeID) *types.Node {
	user := types.User{
		Name: "test-user",
	}

	machineKey := key.NewMachine()
	nodeKey := key.NewNode()

	node := &types.Node{
		ID:         id,
		Hostname:   "test-node",
		UserID:     uint(id),
		User:       user,
		MachineKey: machineKey.Public(),
		NodeKey:    nodeKey.Public(),
		IPv4:       &netip.Addr{},
		IPv6:       &netip.Addr{},
	}

	return node
}
557 hscontrol/state/node_store.go Normal file
@@ -0,0 +1,557 @@
package state

import (
	"fmt"
	"maps"
	"strings"
	"sync/atomic"
	"time"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"tailscale.com/types/key"
	"tailscale.com/types/views"
)

const (
	batchSize    = 100
	batchTimeout = 500 * time.Millisecond
)

const (
	put    = 1
	del    = 2
	update = 3
)

const prometheusNamespace = "headscale"

var (
	nodeStoreOperations = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_operations_total",
		Help:      "Total number of NodeStore operations",
	}, []string{"operation"})
	nodeStoreOperationDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_operation_duration_seconds",
		Help:      "Duration of NodeStore operations",
		Buckets:   prometheus.DefBuckets,
	}, []string{"operation"})
	nodeStoreBatchSize = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_batch_size",
		Help:      "Size of NodeStore write batches",
		Buckets:   []float64{1, 2, 5, 10, 20, 50, 100},
	})
	nodeStoreBatchDuration = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_batch_duration_seconds",
		Help:      "Duration of NodeStore batch processing",
		Buckets:   prometheus.DefBuckets,
	})
	nodeStoreSnapshotBuildDuration = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_snapshot_build_duration_seconds",
		Help:      "Duration of NodeStore snapshot building from nodes",
		Buckets:   prometheus.DefBuckets,
	})
	nodeStoreNodesCount = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_nodes_total",
		Help:      "Total number of nodes in the NodeStore",
	})
	nodeStorePeersCalculationDuration = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_peers_calculation_duration_seconds",
		Help:      "Duration of peers calculation in NodeStore",
		Buckets:   prometheus.DefBuckets,
	})
	nodeStoreQueueDepth = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_queue_depth",
		Help:      "Current depth of NodeStore write queue",
	})
)

// NodeStore is a thread-safe store for nodes.
// It is a copy-on-write structure, replacing the "snapshot"
// when a change to the structure occurs. It is optimised for reads,
// and while batches are not fast, they are grouped together
// to do less of the expensive peer calculation if there are many
// changes rapidly.
//
// Writes will block until committed, while reads are never
// blocked. This means that the caller of a write operation
// is responsible for ensuring an update depending on a write
// is not issued before the write is complete.
type NodeStore struct {
	data atomic.Pointer[Snapshot]

	peersFunc  PeersFunc
	writeQueue chan work
}

func NewNodeStore(allNodes types.Nodes, peersFunc PeersFunc) *NodeStore {
	nodes := make(map[types.NodeID]types.Node, len(allNodes))
	for _, n := range allNodes {
		nodes[n.ID] = *n
	}
	snap := snapshotFromNodes(nodes, peersFunc)

	store := &NodeStore{
		peersFunc: peersFunc,
	}
	store.data.Store(&snap)

	// Initialize node count gauge
	nodeStoreNodesCount.Set(float64(len(nodes)))

	return store
}
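
// exampleNodeStoreLifecycle is a minimal, illustrative sketch (not part of
// this file) of the intended call pattern: construct the store with a
// PeersFunc, Start it before issuing writes, and Stop it when done. The
// function name and arguments are assumptions chosen for illustration.
func exampleNodeStoreLifecycle(node types.Node, peersFunc PeersFunc) {
	store := NewNodeStore(nil, peersFunc)
	store.Start()
	defer store.Stop()

	// Writes block until the batch containing them has been applied.
	_ = store.PutNode(node)

	// Reads never block; they observe the latest committed snapshot.
	if nv, ok := store.GetNode(node.ID); ok && nv.Valid() {
		_ = nv // use the read-only view
	}
}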

// Snapshot is the representation of the current state of the NodeStore.
// It contains all nodes and their relationships.
// It is a copy-on-write structure, meaning that when a write occurs,
// a new Snapshot is created with the updated state,
// and replaces the old one atomically.
type Snapshot struct {
	// nodesByID is the main source of truth for nodes.
	nodesByID map[types.NodeID]types.Node

	// calculated from nodesByID
	nodesByNodeKey    map[key.NodePublic]types.NodeView
	nodesByMachineKey map[key.MachinePublic]map[types.UserID]types.NodeView
	peersByNode       map[types.NodeID][]types.NodeView
	nodesByUser       map[types.UserID][]types.NodeView
	allNodes          []types.NodeView
}

// PeersFunc is a function that takes a list of nodes and returns a map
// with the relationships between nodes and their peers.
// This will typically be used to calculate which nodes can see each other
// based on the current policy.
type PeersFunc func(nodes []types.NodeView) map[types.NodeID][]types.NodeView
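
// allowAllPeers is a minimal, illustrative PeersFunc sketch (not part of this
// file): every node is a peer of every other node, ignoring policy. The tests
// in this package use a similar helper; real callers derive this map from the
// ACL policy instead. The function name is an assumption for illustration.
func allowAllPeers(nodes []types.NodeView) map[types.NodeID][]types.NodeView {
	peers := make(map[types.NodeID][]types.NodeView, len(nodes))
	for _, n := range nodes {
		for _, other := range nodes {
			if n.ID() != other.ID() {
				peers[n.ID()] = append(peers[n.ID()], other)
			}
		}
	}

	return peers
}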

// work represents a single operation to be performed on the NodeStore.
type work struct {
	op         int
	nodeID     types.NodeID
	node       types.Node
	updateFn   UpdateNodeFunc
	result     chan struct{}
	nodeResult chan types.NodeView // Channel to return the resulting node after batch application
}

// PutNode adds or updates a node in the store.
// If the node already exists, it will be replaced.
// If the node does not exist, it will be added.
// This is a blocking operation that waits for the write to complete.
// Returns the resulting node after all modifications in the batch have been applied.
func (s *NodeStore) PutNode(n types.Node) types.NodeView {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("put"))
	defer timer.ObserveDuration()

	work := work{
		op:         put,
		nodeID:     n.ID,
		node:       n,
		result:     make(chan struct{}),
		nodeResult: make(chan types.NodeView, 1),
	}

	nodeStoreQueueDepth.Inc()
	s.writeQueue <- work
	<-work.result
	nodeStoreQueueDepth.Dec()

	resultNode := <-work.nodeResult
	nodeStoreOperations.WithLabelValues("put").Inc()

	return resultNode
}

// UpdateNodeFunc is a function type that takes a pointer to a Node and modifies it.
type UpdateNodeFunc func(n *types.Node)

// UpdateNode applies a function to modify a specific node in the store.
// This is a blocking operation that waits for the write to complete.
// This is analogous to a database "transaction", or, the caller should
// rather collect all data they want to change, and then call this function.
// Fewer calls are better.
// Returns the resulting node after all modifications in the batch have been applied.
//
// TODO(kradalby): Technically we could have a version of this that modifies the node
// in the current snapshot if _we know_ that the change will not affect the peer relationships.
// This is because the main nodesByID map contains the struct, and every other map is using a
// pointer to the underlying struct. The gotcha with this is that we will need to introduce
// a lock around the nodesByID map to ensure that no other writes are happening
// while we are modifying the node. Which means we would need to implement read-write locks
// on all read operations.
func (s *NodeStore) UpdateNode(nodeID types.NodeID, updateFn func(n *types.Node)) (types.NodeView, bool) {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("update"))
	defer timer.ObserveDuration()

	work := work{
		op:         update,
		nodeID:     nodeID,
		updateFn:   updateFn,
		result:     make(chan struct{}),
		nodeResult: make(chan types.NodeView, 1),
	}

	nodeStoreQueueDepth.Inc()
	s.writeQueue <- work
	<-work.result
	nodeStoreQueueDepth.Dec()

	resultNode := <-work.nodeResult
	nodeStoreOperations.WithLabelValues("update").Inc()

	// Return the node and whether it exists (is valid)
	return resultNode, resultNode.Valid()
}
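
// exampleSingleUpdate is a minimal, illustrative sketch (not part of this
// file) of the "collect everything, then call once" guidance above: apply all
// pending field changes in one UpdateNode call so they land in a single batch
// and trigger only one snapshot rebuild. The function name is an assumption;
// it also assumes the netip, ptr and time imports used by the tests in this
// package.
func exampleSingleUpdate(store *NodeStore, id types.NodeID, endpoints []netip.AddrPort) (types.NodeView, bool) {
	return store.UpdateNode(id, func(n *types.Node) {
		// One closure, many field updates: preferable to several
		// UpdateNode calls that each pay the batching latency.
		n.Endpoints = endpoints
		n.LastSeen = ptr.To(time.Now())
	})
}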

// DeleteNode removes a node from the store by its ID.
// This is a blocking operation that waits for the write to complete.
func (s *NodeStore) DeleteNode(id types.NodeID) {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("delete"))
	defer timer.ObserveDuration()

	work := work{
		op:     del,
		nodeID: id,
		result: make(chan struct{}),
	}

	nodeStoreQueueDepth.Inc()
	s.writeQueue <- work
	<-work.result
	nodeStoreQueueDepth.Dec()

	nodeStoreOperations.WithLabelValues("delete").Inc()
}

// Start initializes the NodeStore and starts processing the write queue.
func (s *NodeStore) Start() {
	s.writeQueue = make(chan work)
	go s.processWrite()
}

// Stop stops the NodeStore.
func (s *NodeStore) Stop() {
	close(s.writeQueue)
}

// processWrite processes the write queue in batches.
func (s *NodeStore) processWrite() {
	c := time.NewTicker(batchTimeout)
	defer c.Stop()
	batch := make([]work, 0, batchSize)

	for {
		select {
		case w, ok := <-s.writeQueue:
			if !ok {
				// Channel closed, apply any remaining batch and exit
				if len(batch) != 0 {
					s.applyBatch(batch)
				}
				return
			}
			batch = append(batch, w)
			if len(batch) >= batchSize {
				s.applyBatch(batch)
				batch = batch[:0]
				c.Reset(batchTimeout)
			}
		case <-c.C:
			if len(batch) != 0 {
				s.applyBatch(batch)
				batch = batch[:0]
			}
			c.Reset(batchTimeout)
		}
	}
}

// applyBatch applies a batch of work to the node store.
// This means that it takes a copy of the current nodes,
// then applies the batch of operations to that copy,
// runs any precomputation needed (like calculating peers),
// and finally replaces the snapshot in the store with the new one.
// The replacement of the snapshot is atomic, ensuring that reads
// are never blocked by writes.
// Each write item is blocked until the batch is applied to ensure
// the caller knows the operation is complete and does not send any
// updates that are dependent on a read that is yet to be written.
func (s *NodeStore) applyBatch(batch []work) {
	timer := prometheus.NewTimer(nodeStoreBatchDuration)
	defer timer.ObserveDuration()

	nodeStoreBatchSize.Observe(float64(len(batch)))

	nodes := make(map[types.NodeID]types.Node)
	maps.Copy(nodes, s.data.Load().nodesByID)

	// Track which work items need node results
	nodeResultRequests := make(map[types.NodeID][]*work)

	for i := range batch {
		w := &batch[i]
		switch w.op {
		case put:
			nodes[w.nodeID] = w.node
			if w.nodeResult != nil {
				nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
			}
		case update:
			// Update the specific node identified by nodeID
			if n, exists := nodes[w.nodeID]; exists {
				w.updateFn(&n)
				nodes[w.nodeID] = n
			}
			if w.nodeResult != nil {
				nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
			}
		case del:
			delete(nodes, w.nodeID)
			// For delete operations, send an invalid NodeView if requested
			if w.nodeResult != nil {
				nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
			}
		}
	}

	newSnap := snapshotFromNodes(nodes, s.peersFunc)
	s.data.Store(&newSnap)

	// Update node count gauge
	nodeStoreNodesCount.Set(float64(len(nodes)))

	// Send the resulting nodes to all work items that requested them
	for nodeID, workItems := range nodeResultRequests {
		if node, exists := nodes[nodeID]; exists {
			nodeView := node.View()
			for _, w := range workItems {
				w.nodeResult <- nodeView
				close(w.nodeResult)
			}
		} else {
			// Node was deleted or doesn't exist
			for _, w := range workItems {
				w.nodeResult <- types.NodeView{} // Send invalid view
				close(w.nodeResult)
			}
		}
	}

	// Signal completion for all work items
	for _, w := range batch {
		close(w.result)
	}
}
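
// exampleConcurrentWrites is a minimal, illustrative sketch (not part of this
// file) of what the batching above buys: many concurrent blocking writes that
// arrive close together are coalesced into far fewer snapshot rebuilds. The
// function name is an assumption, and it assumes a "sync" import.
func exampleConcurrentWrites(store *NodeStore, nodes []types.Node) {
	var wg sync.WaitGroup
	for _, n := range nodes {
		wg.Add(1)
		go func(n types.Node) {
			defer wg.Done()
			// Each call blocks until its batch is applied, but calls queued
			// in the same window share one applyBatch and one new Snapshot.
			_ = store.PutNode(n)
		}(n)
	}
	wg.Wait()
}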

// snapshotFromNodes creates a new Snapshot from the provided nodes.
// It builds a number of indexes to make lookups fast for data that is used
// frequently, like nodesByNodeKey, peersByNode, and nodesByUser.
// This is not a fast operation, it is the "slow" part of our copy-on-write
// structure, but it allows us to have fast reads and efficient lookups.
func snapshotFromNodes(nodes map[types.NodeID]types.Node, peersFunc PeersFunc) Snapshot {
	timer := prometheus.NewTimer(nodeStoreSnapshotBuildDuration)
	defer timer.ObserveDuration()

	allNodes := make([]types.NodeView, 0, len(nodes))
	for _, n := range nodes {
		allNodes = append(allNodes, n.View())
	}

	newSnap := Snapshot{
		nodesByID:         nodes,
		allNodes:          allNodes,
		nodesByNodeKey:    make(map[key.NodePublic]types.NodeView),
		nodesByMachineKey: make(map[key.MachinePublic]map[types.UserID]types.NodeView),

		// peersByNode is most likely the most expensive operation,
		// it will use the list of all nodes, combined with the
		// current policy to precalculate which nodes are peers and
		// can see each other.
		peersByNode: func() map[types.NodeID][]types.NodeView {
			peersTimer := prometheus.NewTimer(nodeStorePeersCalculationDuration)
			defer peersTimer.ObserveDuration()
			return peersFunc(allNodes)
		}(),
		nodesByUser: make(map[types.UserID][]types.NodeView),
	}

	// Build nodesByUser, nodesByNodeKey, and nodesByMachineKey maps
	for _, n := range nodes {
		nodeView := n.View()
		userID := types.UserID(n.UserID)

		newSnap.nodesByUser[userID] = append(newSnap.nodesByUser[userID], nodeView)
		newSnap.nodesByNodeKey[n.NodeKey] = nodeView

		// Build machine key index
		if newSnap.nodesByMachineKey[n.MachineKey] == nil {
			newSnap.nodesByMachineKey[n.MachineKey] = make(map[types.UserID]types.NodeView)
		}
		newSnap.nodesByMachineKey[n.MachineKey][userID] = nodeView
	}

	return newSnap
}

// GetNode retrieves a node by its ID.
// The bool indicates if the node exists or is available (like "err not found").
// The returned NodeView must still be checked with .Valid(); an invalid view
// indicates a broken node rather than a missing one.
func (s *NodeStore) GetNode(id types.NodeID) (types.NodeView, bool) {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("get"))
	defer timer.ObserveDuration()

	nodeStoreOperations.WithLabelValues("get").Inc()

	n, exists := s.data.Load().nodesByID[id]
	if !exists {
		return types.NodeView{}, false
	}

	return n.View(), true
}

// GetNodeByNodeKey retrieves a node by its NodeKey.
// The bool indicates if the node exists or is available (like "err not found").
// The returned NodeView must still be checked with .Valid(); an invalid view
// indicates a broken node rather than a missing one.
func (s *NodeStore) GetNodeByNodeKey(nodeKey key.NodePublic) (types.NodeView, bool) {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("get_by_key"))
	defer timer.ObserveDuration()

	nodeStoreOperations.WithLabelValues("get_by_key").Inc()

	nodeView, exists := s.data.Load().nodesByNodeKey[nodeKey]

	return nodeView, exists
}

// GetNodeByMachineKey returns a node by its machine key and user ID. The bool indicates if the node exists.
func (s *NodeStore) GetNodeByMachineKey(machineKey key.MachinePublic, userID types.UserID) (types.NodeView, bool) {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("get_by_machine_key"))
	defer timer.ObserveDuration()

	nodeStoreOperations.WithLabelValues("get_by_machine_key").Inc()

	snapshot := s.data.Load()
	if userMap, exists := snapshot.nodesByMachineKey[machineKey]; exists {
		if node, exists := userMap[userID]; exists {
			return node, true
		}
	}

	return types.NodeView{}, false
}

// GetNodeByMachineKeyAnyUser returns the first node with the given machine key,
// regardless of which user it belongs to. This is useful for scenarios like
// transferring a node to a different user when re-authenticating with a
// different user's auth key.
// If multiple nodes exist with the same machine key (different users), the
// first one found is returned (order is not guaranteed).
func (s *NodeStore) GetNodeByMachineKeyAnyUser(machineKey key.MachinePublic) (types.NodeView, bool) {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("get_by_machine_key_any_user"))
	defer timer.ObserveDuration()

	nodeStoreOperations.WithLabelValues("get_by_machine_key_any_user").Inc()

	snapshot := s.data.Load()
	if userMap, exists := snapshot.nodesByMachineKey[machineKey]; exists {
		// Return the first node found (order not guaranteed due to map iteration)
		for _, node := range userMap {
			return node, true
		}
	}

	return types.NodeView{}, false
}

// DebugString returns debug information about the NodeStore.
func (s *NodeStore) DebugString() string {
	snapshot := s.data.Load()

	var sb strings.Builder

	sb.WriteString("=== NodeStore Debug Information ===\n\n")

	// Basic counts
	sb.WriteString(fmt.Sprintf("Total Nodes: %d\n", len(snapshot.nodesByID)))
	sb.WriteString(fmt.Sprintf("Users with Nodes: %d\n", len(snapshot.nodesByUser)))
	sb.WriteString("\n")

	// User distribution
	sb.WriteString("Nodes by User:\n")
	for userID, nodes := range snapshot.nodesByUser {
		if len(nodes) > 0 {
			userName := "unknown"
			if len(nodes) > 0 && nodes[0].Valid() {
				userName = nodes[0].User().Name
			}
			sb.WriteString(fmt.Sprintf(" - User %d (%s): %d nodes\n", userID, userName, len(nodes)))
		}
	}
	sb.WriteString("\n")

	// Peer relationships summary
	sb.WriteString("Peer Relationships:\n")
	totalPeers := 0
	for nodeID, peers := range snapshot.peersByNode {
		peerCount := len(peers)
		totalPeers += peerCount
		if node, exists := snapshot.nodesByID[nodeID]; exists {
			sb.WriteString(fmt.Sprintf(" - Node %d (%s): %d peers\n",
				nodeID, node.Hostname, peerCount))
		}
	}
	if len(snapshot.peersByNode) > 0 {
		avgPeers := float64(totalPeers) / float64(len(snapshot.peersByNode))
		sb.WriteString(fmt.Sprintf(" - Average peers per node: %.1f\n", avgPeers))
	}
	sb.WriteString("\n")

	// Node key index
	sb.WriteString(fmt.Sprintf("NodeKey Index: %d entries\n", len(snapshot.nodesByNodeKey)))
	sb.WriteString("\n")

	return sb.String()
}

// ListNodes returns a slice of all nodes in the store.
func (s *NodeStore) ListNodes() views.Slice[types.NodeView] {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("list"))
	defer timer.ObserveDuration()

	nodeStoreOperations.WithLabelValues("list").Inc()

	return views.SliceOf(s.data.Load().allNodes)
}

// ListPeers returns a slice of all peers for a given node ID.
func (s *NodeStore) ListPeers(id types.NodeID) views.Slice[types.NodeView] {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("list_peers"))
	defer timer.ObserveDuration()

	nodeStoreOperations.WithLabelValues("list_peers").Inc()

	return views.SliceOf(s.data.Load().peersByNode[id])
}

// ListNodesByUser returns a slice of all nodes for a given user ID.
func (s *NodeStore) ListNodesByUser(uid types.UserID) views.Slice[types.NodeView] {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("list_by_user"))
	defer timer.ObserveDuration()

	nodeStoreOperations.WithLabelValues("list_by_user").Inc()

	return views.SliceOf(s.data.Load().nodesByUser[uid])
}
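
// exampleReadPath is a minimal, illustrative sketch (not part of this file)
// of the lock-free read path: all lookups go through the current snapshot, so
// they can run concurrently with writes without blocking. The function name
// is an assumption chosen for illustration.
func exampleReadPath(store *NodeStore, uid types.UserID) int {
	total := 0
	nodes := store.ListNodesByUser(uid)
	for i := 0; i < nodes.Len(); i++ {
		// Each call reads from whichever snapshot is current at that moment.
		total += store.ListPeers(nodes.At(i).ID()).Len()
	}

	return total
}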
1140 hscontrol/state/node_store_test.go Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff