Mirror of https://github.com/juanfont/headscale.git
Synced 2026-01-11 20:00:28 +01:00

Compare commits: copilot/de... and v0.27.1 (23 commits)
| SHA1 |
|---|
| f658a8eacd |
| 785168a7b8 |
| 3bd4ecd9cd |
| 3455d1cb59 |
| ddd31ba774 |
| 4a8dc2d445 |
| 773a46a968 |
| 4728a2ba9e |
| abed534628 |
| 21e3f2598d |
| a28d9bed6d |
| 28faf8cd71 |
| 5a2ee0c391 |
| 5cd15c3656 |
| 2024219bd1 |
| d9c3eaf8c8 |
| bd9cf42b96 |
| d7a43a7cf1 |
| 1c0bb0338d |
| c649c89e00 |
| af2de35b6c |
| 02c7c1a0e7 |
| d23fa26395 |
.github/workflows/test-integration.yaml (vendored, 2 changes)
@@ -38,7 +38,9 @@ jobs:
- TestOIDCAuthenticationWithPKCE
- TestOIDCReloginSameNodeNewUser
- TestOIDCFollowUpUrl
- TestOIDCMultipleOpenedLoginUrls
- TestOIDCReloginSameNodeSameUser
- TestOIDCExpiryAfterRestart
- TestAuthWebFlowAuthenticationPingAll
- TestAuthWebFlowLogoutAndReloginSameUser
- TestAuthWebFlowLogoutAndReloginNewUser

CHANGELOG.md (35 changes)
@@ -4,8 +4,37 @@

### Changes

## 0.27.1 (2025-11-11)

**Minimum supported Tailscale client version: v1.64.0**

### Changes

- Expire nodes with a custom timestamp
  [#2828](https://github.com/juanfont/headscale/pull/2828)
- Fix issue where node expiry was reset when tailscaled restarts
  [#2875](https://github.com/juanfont/headscale/pull/2875)
- Fix OIDC authentication when multiple login URLs are opened
  [#2861](https://github.com/juanfont/headscale/pull/2861)
- Fix node re-registration failing with expired auth keys
  [#2859](https://github.com/juanfont/headscale/pull/2859)
- Remove old unused database tables and indices
  [#2844](https://github.com/juanfont/headscale/pull/2844)
  [#2872](https://github.com/juanfont/headscale/pull/2872)
- Ignore litestream tables during database validation
  [#2843](https://github.com/juanfont/headscale/pull/2843)
- Fix exit node visibility to respect ACL rules
  [#2855](https://github.com/juanfont/headscale/pull/2855)
- Fix SSH policy becoming empty when unknown user is referenced
  [#2874](https://github.com/juanfont/headscale/pull/2874)
- Fix policy validation when using bypass-grpc mode
  [#2854](https://github.com/juanfont/headscale/pull/2854)
- Fix autogroup:self interaction with other ACL rules
  [#2842](https://github.com/juanfont/headscale/pull/2842)
- Fix flaky DERP map shuffle test
  [#2848](https://github.com/juanfont/headscale/pull/2848)
- Use current stable base images for Debian and Alpine containers
  [#2827](https://github.com/juanfont/headscale/pull/2827)

## 0.27.0 (2025-10-27)

@@ -89,7 +118,8 @@ the code base over time and make it more correct and efficient.
  [#2692](https://github.com/juanfont/headscale/pull/2692)
- Policy: Zero or empty destination port is no longer allowed
  [#2606](https://github.com/juanfont/headscale/pull/2606)
- Stricter hostname validation [#2383](https://github.com/juanfont/headscale/pull/2383)
- Stricter hostname validation
  [#2383](https://github.com/juanfont/headscale/pull/2383)
  - Hostnames must be valid DNS labels (2-63 characters, alphanumeric and
    hyphens only, cannot start/end with hyphen)
  - **Client Registration (New Nodes)**: Invalid hostnames are automatically

@@ -144,7 +174,8 @@ the code base over time and make it more correct and efficient.
  [#2776](https://github.com/juanfont/headscale/pull/2776)
- EXPERIMENTAL: Add support for `autogroup:self`
  [#2789](https://github.com/juanfont/headscale/pull/2789)
- Add healthcheck command [#2659](https://github.com/juanfont/headscale/pull/2659)
- Add healthcheck command
  [#2659](https://github.com/juanfont/headscale/pull/2659)

## 0.26.1 (2025-06-06)

@@ -127,12 +127,6 @@ var setPolicy = &cobra.Command{
|
||||
ErrorOutput(err, fmt.Sprintf("Error reading the policy file: %s", err), output)
|
||||
}
|
||||
|
||||
_, err = policy.NewPolicyManager(policyBytes, nil, views.Slice[types.NodeView]{})
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error parsing the policy file: %s", err), output)
|
||||
return
|
||||
}
|
||||
|
||||
if bypass, _ := cmd.Flags().GetBool(bypassFlag); bypass {
|
||||
confirm := false
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
@@ -159,6 +153,17 @@ var setPolicy = &cobra.Command{
|
||||
ErrorOutput(err, fmt.Sprintf("Failed to open database: %s", err), output)
|
||||
}
|
||||
|
||||
users, err := d.ListUsers()
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed to load users for policy validation: %s", err), output)
|
||||
}
|
||||
|
||||
_, err = policy.NewPolicyManager(policyBytes, users, views.Slice[types.NodeView]{})
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error parsing the policy file: %s", err), output)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = d.SetPolicy(string(policyBytes))
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)
|
||||
|
||||
@@ -216,6 +216,39 @@ nodes.
}
```

### Restrict access to exit nodes per user or group

A user can use _any_ of the available exit nodes with `autogroup:internet`. Alternatively, the ACL snippet below assigns
each user a specific exit node while hiding all other exit nodes. The user `alice` can only use exit node `exit1` while
user `bob` can only use exit node `exit2`.

```json title="Assign each user a dedicated exit node"
{
  "hosts": {
    "exit1": "100.64.0.1/32",
    "exit2": "100.64.0.2/32"
  },
  "acls": [
    {
      "action": "accept",
      "src": ["alice@"],
      "dst": ["exit1:*"]
    },
    {
      "action": "accept",
      "src": ["bob@"],
      "dst": ["exit2:*"]
    }
  ]
}
```

!!! warning

    - The above implementation is Headscale specific and will likely be removed once [support for
      `via`](https://github.com/juanfont/headscale/issues/2409) is available.
    - Beware that a user can also connect to any port of the exit node itself.

### Automatically approve an exit node with auto approvers

The initial setup of an exit node usually requires manual approval on the control server before it can be used by a node
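
As a minimal sketch (the tag `tag:exit` and owner `alice@` below are illustrative placeholders, not part of this
change), an auto approver rule can approve the default routes advertised by any node carrying that tag:

```json title="Sketch: automatically approve exit nodes tagged tag:exit"
{
  "tagOwners": {
    "tag:exit": ["alice@"]
  },
  "autoApprovers": {
    "exitNode": ["tag:exit"]
  }
}
```

The node still has to carry the tag and advertise the routes, e.g. via
`tailscale up --advertise-tags=tag:exit --advertise-exit-node`, before the rule applies.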
@@ -71,6 +71,13 @@ func (h *Headscale) handleRegister(
	// We do not look up nodes by [key.MachinePublic] as it might belong to multiple
	// nodes, separated by users and this path is handling expiring/logout paths.
	if node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok {
		// When tailscaled restarts, it sends RegisterRequest with Auth=nil and Expiry=zero.
		// Return the current node state without modification.
		// See: https://github.com/juanfont/headscale/issues/2862
		if req.Expiry.IsZero() && node.Expiry().Valid() && !node.IsExpired() {
			return nodeToRegisterResponse(node), nil
		}

		resp, err := h.handleLogout(node, req, machineKey)
		if err != nil {
			return nil, fmt.Errorf("handling existing node: %w", err)

@@ -173,6 +180,7 @@ func (h *Headscale) handleLogout(
	}

	// If the request expiry is in the past, we consider it a logout.
	// Zero expiry is handled in handleRegister() before calling this function.
	if req.Expiry.Before(time.Now()) {
		log.Debug().
			Uint64("node.id", node.ID().Uint64()).

@@ -3004,3 +3004,296 @@ func createTestApp(t *testing.T) *Headscale {
|
||||
|
||||
return app
|
||||
}
|
||||
|
||||
// TestGitHubIssue2830_NodeRestartWithUsedPreAuthKey tests the scenario reported in
|
||||
// https://github.com/juanfont/headscale/issues/2830
|
||||
//
|
||||
// Scenario:
|
||||
// 1. Node registers successfully with a single-use pre-auth key
|
||||
// 2. Node is running fine
|
||||
// 3. Node restarts (e.g., after headscale upgrade or tailscale container restart)
|
||||
// 4. Node sends RegisterRequest with the same pre-auth key
|
||||
// 5. BUG: Headscale rejects the request with "authkey expired" or "authkey already used"
|
||||
//
|
||||
// Expected behavior:
|
||||
// When an existing node (identified by matching NodeKey + MachineKey) re-registers
|
||||
// with a pre-auth key that it previously used, the registration should succeed.
|
||||
// The node is not creating a new registration - it's re-authenticating the same device.
|
||||
func TestGitHubIssue2830_NodeRestartWithUsedPreAuthKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
app := createTestApp(t)
|
||||
|
||||
// Create user and single-use pre-auth key
|
||||
user := app.state.CreateUserForTest("test-user")
|
||||
pak, err := app.state.CreatePreAuthKey(types.UserID(user.ID), false, false, nil, nil) // reusable=false
|
||||
require.NoError(t, err)
|
||||
require.False(t, pak.Reusable, "key should be single-use for this test")
|
||||
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey := key.NewNode()
|
||||
|
||||
// STEP 1: Initial registration with pre-auth key (simulates fresh node joining)
|
||||
initialReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key,
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
t.Log("Step 1: Initial registration with pre-auth key")
|
||||
initialResp, err := app.handleRegister(context.Background(), initialReq, machineKey.Public())
|
||||
require.NoError(t, err, "initial registration should succeed")
|
||||
require.NotNil(t, initialResp)
|
||||
assert.True(t, initialResp.MachineAuthorized, "node should be authorized")
|
||||
assert.False(t, initialResp.NodeKeyExpired, "node key should not be expired")
|
||||
|
||||
// Verify node was created in database
|
||||
node, found := app.state.GetNodeByNodeKey(nodeKey.Public())
|
||||
require.True(t, found, "node should exist after initial registration")
|
||||
assert.Equal(t, "test-node", node.Hostname())
|
||||
assert.Equal(t, nodeKey.Public(), node.NodeKey())
|
||||
assert.Equal(t, machineKey.Public(), node.MachineKey())
|
||||
|
||||
// Verify pre-auth key is now marked as used
|
||||
usedPak, err := app.state.GetPreAuthKey(pak.Key)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, usedPak.Used, "pre-auth key should be marked as used after initial registration")
|
||||
|
||||
// STEP 2: Simulate node restart - node sends RegisterRequest again with same pre-auth key
|
||||
// This happens when:
|
||||
// - Tailscale container restarts
|
||||
// - Tailscaled service restarts
|
||||
// - System reboots
|
||||
// The Tailscale client persists the pre-auth key in its state and sends it on every registration
|
||||
t.Log("Step 2: Node restart - re-registration with same (now used) pre-auth key")
|
||||
restartReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key, // Same key, now marked as Used=true
|
||||
},
|
||||
NodeKey: nodeKey.Public(), // Same node key
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
// BUG: This fails with "authkey already used" or "authkey expired"
|
||||
// EXPECTED: Should succeed because it's the same node re-registering
|
||||
restartResp, err := app.handleRegister(context.Background(), restartReq, machineKey.Public())
|
||||
|
||||
// This is the assertion that currently FAILS in v0.27.0
|
||||
assert.NoError(t, err, "BUG: existing node re-registration with its own used pre-auth key should succeed")
|
||||
if err != nil {
|
||||
t.Logf("Error received (this is the bug): %v", err)
|
||||
t.Logf("Expected behavior: Node should be able to re-register with the same pre-auth key it used initially")
|
||||
return // Stop here to show the bug clearly
|
||||
}
|
||||
|
||||
require.NotNil(t, restartResp)
|
||||
assert.True(t, restartResp.MachineAuthorized, "node should remain authorized after restart")
|
||||
assert.False(t, restartResp.NodeKeyExpired, "node key should not be expired after restart")
|
||||
|
||||
// Verify it's the same node (not a duplicate)
|
||||
nodeAfterRestart, found := app.state.GetNodeByNodeKey(nodeKey.Public())
|
||||
require.True(t, found, "node should still exist after restart")
|
||||
assert.Equal(t, node.ID(), nodeAfterRestart.ID(), "should be the same node, not a new one")
|
||||
assert.Equal(t, "test-node", nodeAfterRestart.Hostname())
|
||||
}
|
||||
|
||||
// TestNodeReregistrationWithReusablePreAuthKey tests that reusable keys work correctly
|
||||
// for node re-registration.
|
||||
func TestNodeReregistrationWithReusablePreAuthKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
app := createTestApp(t)
|
||||
|
||||
user := app.state.CreateUserForTest("test-user")
|
||||
pak, err := app.state.CreatePreAuthKey(types.UserID(user.ID), true, false, nil, nil) // reusable=true
|
||||
require.NoError(t, err)
|
||||
require.True(t, pak.Reusable)
|
||||
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey := key.NewNode()
|
||||
|
||||
// Initial registration
|
||||
initialReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key,
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "reusable-test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
initialResp, err := app.handleRegister(context.Background(), initialReq, machineKey.Public())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, initialResp)
|
||||
assert.True(t, initialResp.MachineAuthorized)
|
||||
|
||||
// Node restart - re-registration with reusable key
|
||||
restartReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key, // Reusable key
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "reusable-test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
restartResp, err := app.handleRegister(context.Background(), restartReq, machineKey.Public())
|
||||
require.NoError(t, err, "reusable key should allow re-registration")
|
||||
require.NotNil(t, restartResp)
|
||||
assert.True(t, restartResp.MachineAuthorized)
|
||||
assert.False(t, restartResp.NodeKeyExpired)
|
||||
}
|
||||
|
||||
// TestNodeReregistrationWithExpiredPreAuthKey tests that truly expired keys
|
||||
// are still rejected even for existing nodes.
|
||||
func TestNodeReregistrationWithExpiredPreAuthKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
app := createTestApp(t)
|
||||
|
||||
user := app.state.CreateUserForTest("test-user")
|
||||
expiry := time.Now().Add(-1 * time.Hour) // Already expired
|
||||
pak, err := app.state.CreatePreAuthKey(types.UserID(user.ID), true, false, &expiry, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey := key.NewNode()
|
||||
|
||||
// Try to register with expired key
|
||||
req := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key,
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "expired-key-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
_, err = app.handleRegister(context.Background(), req, machineKey.Public())
|
||||
assert.Error(t, err, "expired pre-auth key should be rejected")
|
||||
assert.Contains(t, err.Error(), "authkey expired", "error should mention key expiration")
|
||||
}
|
||||
// TestGitHubIssue2830_ExistingNodeCanReregisterWithUsedPreAuthKey tests that an existing node
|
||||
// can re-register using a pre-auth key that's already marked as Used=true, as long as:
|
||||
// 1. The node is re-registering with the same MachineKey it originally used
|
||||
// 2. The node is using the same pre-auth key it was originally registered with (AuthKeyID matches)
|
||||
//
|
||||
// This is the fix for GitHub issue #2830: https://github.com/juanfont/headscale/issues/2830
|
||||
//
|
||||
// Background: When Docker/Kubernetes containers restart, they keep their persistent state
|
||||
// (including the MachineKey), but container entrypoints unconditionally run:
|
||||
// tailscale up --authkey=$TS_AUTHKEY
|
||||
//
|
||||
// This caused nodes to be rejected after restart because the pre-auth key was already
|
||||
// marked as Used=true from the initial registration. The fix allows re-registration of
|
||||
// existing nodes with their own used keys.
|
||||
func TestGitHubIssue2830_ExistingNodeCanReregisterWithUsedPreAuthKey(t *testing.T) {
|
||||
app := createTestApp(t)
|
||||
|
||||
// Create a user
|
||||
user := app.state.CreateUserForTest("testuser")
|
||||
|
||||
// Create a SINGLE-USE pre-auth key (reusable=false)
|
||||
// This is the type of key that triggers the bug in issue #2830
|
||||
preAuthKey, err := app.state.CreatePreAuthKey(types.UserID(user.ID), false, false, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.False(t, preAuthKey.Reusable, "Pre-auth key must be single-use to test issue #2830")
|
||||
require.False(t, preAuthKey.Used, "Pre-auth key should not be used yet")
|
||||
|
||||
// Generate node keys for the client
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey := key.NewNode()
|
||||
|
||||
// Step 1: Initial registration with the pre-auth key
|
||||
// This simulates the first time the container starts and runs 'tailscale up --authkey=...'
|
||||
initialReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: preAuthKey.Key,
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "issue-2830-test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
initialResp, err := app.handleRegisterWithAuthKey(initialReq, machineKey.Public())
|
||||
require.NoError(t, err, "Initial registration should succeed")
|
||||
require.True(t, initialResp.MachineAuthorized, "Node should be authorized after initial registration")
|
||||
require.NotNil(t, initialResp.User, "User should be set in response")
|
||||
require.Equal(t, "testuser", initialResp.User.DisplayName, "User should match the pre-auth key's user")
|
||||
|
||||
// Verify the pre-auth key is now marked as Used
|
||||
updatedKey, err := app.state.GetPreAuthKey(preAuthKey.Key)
|
||||
require.NoError(t, err)
|
||||
require.True(t, updatedKey.Used, "Pre-auth key should be marked as Used after initial registration")
|
||||
|
||||
// Step 2: Container restart scenario
|
||||
// The container keeps its MachineKey (persistent state), but the entrypoint script
|
||||
// unconditionally runs 'tailscale up --authkey=$TS_AUTHKEY' again
|
||||
//
|
||||
// WITHOUT THE FIX: This would fail with "authkey already used" error
|
||||
// WITH THE FIX: This succeeds because it's the same node re-registering with its own key
|
||||
|
||||
// Simulate sending the same RegisterRequest again (same MachineKey, same AuthKey)
|
||||
// This is exactly what happens when a container restarts
|
||||
reregisterReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: preAuthKey.Key, // Same key, now marked as Used=true
|
||||
},
|
||||
NodeKey: nodeKey.Public(), // Same NodeKey
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "issue-2830-test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
reregisterResp, err := app.handleRegisterWithAuthKey(reregisterReq, machineKey.Public()) // Same MachineKey
|
||||
require.NoError(t, err, "Re-registration with same MachineKey and used pre-auth key should succeed (fixes #2830)")
|
||||
require.True(t, reregisterResp.MachineAuthorized, "Node should remain authorized after re-registration")
|
||||
require.NotNil(t, reregisterResp.User, "User should be set in re-registration response")
|
||||
require.Equal(t, "testuser", reregisterResp.User.DisplayName, "User should remain the same")
|
||||
|
||||
// Verify that only ONE node was created (not a duplicate)
|
||||
nodes := app.state.ListNodesByUser(types.UserID(user.ID))
|
||||
require.Equal(t, 1, nodes.Len(), "Should have exactly one node (no duplicates created)")
|
||||
require.Equal(t, "issue-2830-test-node", nodes.At(0).Hostname(), "Node hostname should match")
|
||||
|
||||
// Step 3: Verify that a DIFFERENT machine cannot use the same used key
|
||||
// This ensures we didn't break the security model - only the original node can re-register
|
||||
differentMachineKey := key.NewMachine()
|
||||
differentNodeKey := key.NewNode()
|
||||
|
||||
attackReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: preAuthKey.Key, // Try to use the same key
|
||||
},
|
||||
NodeKey: differentNodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "attacker-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
_, err = app.handleRegisterWithAuthKey(attackReq, differentMachineKey.Public())
|
||||
require.Error(t, err, "Different machine should NOT be able to use the same used pre-auth key")
|
||||
require.Contains(t, err.Error(), "already used", "Error should indicate key is already used")
|
||||
|
||||
// Verify still only one node (the original one)
|
||||
nodesAfterAttack := app.state.ListNodesByUser(types.UserID(user.ID))
|
||||
require.Equal(t, 1, nodesAfterAttack.Len(), "Should still have exactly one node (attack prevented)")
|
||||
}
|
||||
|
||||
@@ -952,6 +952,41 @@ AND auth_key_id NOT IN (
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
// Drop all indices that are no longer in use and has existed.
|
||||
// They potentially still present from broken migrations in the past.
|
||||
// They should all be cleaned up by the db engine, but we are a bit
|
||||
// conservative to ensure all our previous mess is cleaned up.
|
||||
ID: "202511101554-drop-old-idx",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
for _, oldIdx := range []struct{ name, table string }{
|
||||
{"idx_namespaces_deleted_at", "namespaces"},
|
||||
{"idx_routes_deleted_at", "routes"},
|
||||
{"idx_shared_machines_deleted_at", "shared_machines"},
|
||||
} {
|
||||
err := tx.Migrator().DropIndex(oldIdx.table, oldIdx.name)
|
||||
if err != nil {
|
||||
log.Trace().
|
||||
Str("index", oldIdx.name).
|
||||
Str("table", oldIdx.table).
|
||||
Err(err).
|
||||
Msg("Error dropping old index, continuing...")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Rollback: func(tx *gorm.DB) error {
|
||||
return nil
|
||||
},
|
||||
},

// Migrations **above** this point will be REMOVED in version **0.29.0**
// This is to clean up a lot of old migrations that are seldom used
// and carry a lot of technical debt.
// Any new migrations should be added after the comment below and follow
// the rules it sets out.

// From this point, the following rules must be followed:
// - NEVER use gorm.AutoMigrate, write the exact migration steps needed
// - AutoMigrate depends on the struct staying exactly the same, which it won't over time.
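
// Illustrative sketch (not part of this change): a migration entry that follows the rules
// above, using exact SQL via tx.Exec instead of gorm.AutoMigrate. The ID and column name
// are hypothetical placeholders.
//
//	{
//		ID: "209912312359-add-example-column",
//		Migrate: func(tx *gorm.DB) error {
//			return tx.Exec(`ALTER TABLE nodes ADD COLUMN example_note TEXT`).Error
//		},
//		Rollback: func(tx *gorm.DB) error {
//			return tx.Exec(`ALTER TABLE nodes DROP COLUMN example_note`).Error
//		},
//	},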
@@ -325,7 +325,11 @@ func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) {
|
||||
}
|
||||
|
||||
if changed {
|
||||
err := tx.Save(node).Error
|
||||
// Use Updates() with Select() to only update IP fields, avoiding overwriting
|
||||
// other fields like Expiry. We need Select() because Updates() alone skips
|
||||
// zero values, but we DO want to update IPv4/IPv6 to nil when removing them.
|
||||
// See issue #2862.
|
||||
err := tx.Model(node).Select("ipv4", "ipv6").Updates(node).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("saving node(%d) after adding IPs: %w", node.ID, err)
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
@@ -232,6 +233,17 @@ func SetApprovedRoutes(
|
||||
return nil
|
||||
}
|
||||
|
||||
// When approving exit routes, ensure both IPv4 and IPv6 are included
|
||||
// If either 0.0.0.0/0 or ::/0 is being approved, both should be approved
|
||||
hasIPv4Exit := slices.Contains(routes, tsaddr.AllIPv4())
|
||||
hasIPv6Exit := slices.Contains(routes, tsaddr.AllIPv6())
|
||||
|
||||
if hasIPv4Exit && !hasIPv6Exit {
|
||||
routes = append(routes, tsaddr.AllIPv6())
|
||||
} else if hasIPv6Exit && !hasIPv4Exit {
|
||||
routes = append(routes, tsaddr.AllIPv4())
|
||||
}
|
||||
|
||||
b, err := json.Marshal(routes)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -440,13 +452,6 @@ func NodeSetMachineKey(
|
||||
}).Error
|
||||
}
|
||||
|
||||
// NodeSave saves a node object to the database, prefer to use a specific save method rather
|
||||
// than this. It is intended to be used when we are changing or.
|
||||
// TODO(kradalby): Remove this func, just use Save.
|
||||
func NodeSave(tx *gorm.DB, node *types.Node) error {
|
||||
return tx.Save(node).Error
|
||||
}
|
||||
|
||||
func generateGivenName(suppliedName string, randomSuffix bool) (string, error) {
|
||||
// Strip invalid DNS characters for givenName
|
||||
suppliedName = strings.ToLower(suppliedName)
|
||||
|
||||
@@ -476,7 +476,7 @@ func TestAutoApproveRoutes(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
newRoutes2, changed2 := policy.ApproveRoutesWithPolicy(pm, nodeTagged.View(), node.ApprovedRoutes, tt.routes)
|
||||
newRoutes2, changed2 := policy.ApproveRoutesWithPolicy(pm, nodeTagged.View(), nodeTagged.ApprovedRoutes, tt.routes)
|
||||
if changed2 {
|
||||
err = SetApprovedRoutes(adb.DB, nodeTagged.ID, newRoutes2)
|
||||
require.NoError(t, err)
|
||||
@@ -490,7 +490,7 @@ func TestAutoApproveRoutes(t *testing.T) {
|
||||
if len(expectedRoutes1) == 0 {
|
||||
expectedRoutes1 = nil
|
||||
}
|
||||
if diff := cmp.Diff(expectedRoutes1, node1ByID.SubnetRoutes(), util.Comparers...); diff != "" {
|
||||
if diff := cmp.Diff(expectedRoutes1, node1ByID.AllApprovedRoutes(), util.Comparers...); diff != "" {
|
||||
t.Errorf("unexpected enabled routes (-want +got):\n%s", diff)
|
||||
}
|
||||
|
||||
@@ -501,7 +501,7 @@ func TestAutoApproveRoutes(t *testing.T) {
|
||||
if len(expectedRoutes2) == 0 {
|
||||
expectedRoutes2 = nil
|
||||
}
|
||||
if diff := cmp.Diff(expectedRoutes2, node2ByID.SubnetRoutes(), util.Comparers...); diff != "" {
|
||||
if diff := cmp.Diff(expectedRoutes2, node2ByID.AllApprovedRoutes(), util.Comparers...); diff != "" {
|
||||
t.Errorf("unexpected enabled routes (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -145,11 +145,12 @@ func (hsdb *HSDatabase) ExpirePreAuthKey(k *types.PreAuthKey) error {
|
||||
|
||||
// UsePreAuthKey marks a PreAuthKey as used.
|
||||
func UsePreAuthKey(tx *gorm.DB, k *types.PreAuthKey) error {
|
||||
k.Used = true
|
||||
if err := tx.Save(k).Error; err != nil {
|
||||
err := tx.Model(k).Update("used", true).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update key used status in the database: %w", err)
|
||||
}
|
||||
|
||||
k.Used = true
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -31,10 +31,15 @@ CREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identif
|
||||
CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;
|
||||
|
||||
-- Create all the old tables we have had and ensure they are cleaned up.
|
||||
CREATE TABLE `namespaces` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `namespaces` (`id` text,`deleted_at` datetime,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `machines` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `kvs` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `shared_machines` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `shared_machines` (`id` text,`deleted_at` datetime,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `pre_auth_key_acl_tags` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `routes` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `routes` (`id` text,`deleted_at` datetime,PRIMARY KEY (`id`));
|
||||
|
||||
CREATE INDEX `idx_routes_deleted_at` ON `routes`(`deleted_at`);
|
||||
CREATE INDEX `idx_namespaces_deleted_at` ON `namespaces`(`deleted_at`);
|
||||
CREATE INDEX `idx_shared_machines_deleted_at` ON `shared_machines`(`deleted_at`);
|
||||
|
||||
COMMIT;
|
||||
|
||||
hscontrol/db/user_update_test.go (new file, 134 lines)
@@ -0,0 +1,134 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"testing"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// TestUserUpdatePreservesUnchangedFields verifies that updating a user
|
||||
// preserves fields that aren't modified. This test validates the fix
|
||||
// for using Updates() instead of Save() in UpdateUser-like operations.
|
||||
func TestUserUpdatePreservesUnchangedFields(t *testing.T) {
|
||||
database := dbForTest(t)
|
||||
|
||||
// Create a user with all fields set
|
||||
initialUser := types.User{
|
||||
Name: "testuser",
|
||||
DisplayName: "Test User Display",
|
||||
Email: "test@example.com",
|
||||
ProviderIdentifier: sql.NullString{
|
||||
String: "provider-123",
|
||||
Valid: true,
|
||||
},
|
||||
}
|
||||
|
||||
createdUser, err := database.CreateUser(initialUser)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, createdUser)
|
||||
|
||||
// Verify initial state
|
||||
assert.Equal(t, "testuser", createdUser.Name)
|
||||
assert.Equal(t, "Test User Display", createdUser.DisplayName)
|
||||
assert.Equal(t, "test@example.com", createdUser.Email)
|
||||
assert.True(t, createdUser.ProviderIdentifier.Valid)
|
||||
assert.Equal(t, "provider-123", createdUser.ProviderIdentifier.String)
|
||||
|
||||
// Simulate what UpdateUser does: load user, modify one field, save
|
||||
_, err = Write(database.DB, func(tx *gorm.DB) (*types.User, error) {
|
||||
user, err := GetUserByID(tx, types.UserID(createdUser.ID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Modify ONLY DisplayName
|
||||
user.DisplayName = "Updated Display Name"
|
||||
|
||||
// This is the line being tested - currently uses Save() which writes ALL fields, potentially overwriting unchanged ones
|
||||
err = tx.Save(user).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return user, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Read user back from database
|
||||
updatedUser, err := Read(database.DB, func(rx *gorm.DB) (*types.User, error) {
|
||||
return GetUserByID(rx, types.UserID(createdUser.ID))
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify that DisplayName was updated
|
||||
assert.Equal(t, "Updated Display Name", updatedUser.DisplayName)
|
||||
|
||||
// CRITICAL: Verify that other fields were NOT overwritten
|
||||
// With Save(), these assertions should pass because the user object
|
||||
// was loaded from DB and has all fields populated.
|
||||
// But if Updates() is used, these will also pass (and it's safer).
|
||||
assert.Equal(t, "testuser", updatedUser.Name, "Name should be preserved")
|
||||
assert.Equal(t, "test@example.com", updatedUser.Email, "Email should be preserved")
|
||||
assert.True(t, updatedUser.ProviderIdentifier.Valid, "ProviderIdentifier should be preserved")
|
||||
assert.Equal(t, "provider-123", updatedUser.ProviderIdentifier.String, "ProviderIdentifier value should be preserved")
|
||||
}
|
||||
|
||||
// TestUserUpdateWithUpdatesMethod tests that using Updates() instead of Save()
|
||||
// works correctly and only updates modified fields.
|
||||
func TestUserUpdateWithUpdatesMethod(t *testing.T) {
|
||||
database := dbForTest(t)
|
||||
|
||||
// Create a user
|
||||
initialUser := types.User{
|
||||
Name: "testuser",
|
||||
DisplayName: "Original Display",
|
||||
Email: "original@example.com",
|
||||
ProviderIdentifier: sql.NullString{
|
||||
String: "provider-abc",
|
||||
Valid: true,
|
||||
},
|
||||
}
|
||||
|
||||
createdUser, err := database.CreateUser(initialUser)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Update using Updates() method
|
||||
_, err = Write(database.DB, func(tx *gorm.DB) (*types.User, error) {
|
||||
user, err := GetUserByID(tx, types.UserID(createdUser.ID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Modify multiple fields
|
||||
user.DisplayName = "New Display"
|
||||
user.Email = "new@example.com"
|
||||
|
||||
// Use Updates() instead of Save()
|
||||
err = tx.Updates(user).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return user, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify changes
|
||||
updatedUser, err := Read(database.DB, func(rx *gorm.DB) (*types.User, error) {
|
||||
return GetUserByID(rx, types.UserID(createdUser.ID))
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify updated fields
|
||||
assert.Equal(t, "New Display", updatedUser.DisplayName)
|
||||
assert.Equal(t, "new@example.com", updatedUser.Email)
|
||||
|
||||
// Verify preserved fields
|
||||
assert.Equal(t, "testuser", updatedUser.Name)
|
||||
assert.True(t, updatedUser.ProviderIdentifier.Valid)
|
||||
assert.Equal(t, "provider-abc", updatedUser.ProviderIdentifier.String)
|
||||
}
|
||||
@@ -102,7 +102,8 @@ func RenameUser(tx *gorm.DB, uid types.UserID, newName string) error {
|
||||
|
||||
oldUser.Name = newName
|
||||
|
||||
if err := tx.Save(&oldUser).Error; err != nil {
|
||||
err = tx.Updates(&oldUser).Error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -126,7 +127,17 @@ func shuffleDERPMap(dm *tailcfg.DERPMap) {
|
||||
return
|
||||
}
|
||||
|
||||
for id, region := range dm.Regions {
|
||||
// Collect region IDs and sort them to ensure deterministic iteration order.
|
||||
// Map iteration order is non-deterministic in Go, which would cause the
|
||||
// shuffle to be non-deterministic even with a fixed seed.
|
||||
ids := make([]int, 0, len(dm.Regions))
|
||||
for id := range dm.Regions {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
slices.Sort(ids)
|
||||
|
||||
for _, id := range ids {
|
||||
region := dm.Regions[id]
|
||||
if len(region.Nodes) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -83,9 +83,9 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
|
||||
RegionCode: "sea",
|
||||
RegionName: "Seattle",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "10b", RegionID: 10, HostName: "derp10b.tailscale.com"},
|
||||
{Name: "10c", RegionID: 10, HostName: "derp10c.tailscale.com"},
|
||||
{Name: "10d", RegionID: 10, HostName: "derp10d.tailscale.com"},
|
||||
{Name: "10c", RegionID: 10, HostName: "derp10c.tailscale.com"},
|
||||
{Name: "10b", RegionID: 10, HostName: "derp10b.tailscale.com"},
|
||||
},
|
||||
},
|
||||
2: {
|
||||
@@ -93,9 +93,9 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
|
||||
RegionCode: "sfo",
|
||||
RegionName: "San Francisco",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "2f", RegionID: 2, HostName: "derp2f.tailscale.com"},
|
||||
{Name: "2e", RegionID: 2, HostName: "derp2e.tailscale.com"},
|
||||
{Name: "2d", RegionID: 2, HostName: "derp2d.tailscale.com"},
|
||||
{Name: "2e", RegionID: 2, HostName: "derp2e.tailscale.com"},
|
||||
{Name: "2f", RegionID: 2, HostName: "derp2f.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -169,6 +169,74 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "same dataset with another base domain",
|
||||
baseDomain: "another.example.com",
|
||||
derpMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
4: {
|
||||
RegionID: 4,
|
||||
RegionCode: "fra",
|
||||
RegionName: "Frankfurt",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
|
||||
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
|
||||
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
|
||||
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
4: {
|
||||
RegionID: 4,
|
||||
RegionCode: "fra",
|
||||
RegionName: "Frankfurt",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
|
||||
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
|
||||
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
|
||||
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "same dataset with yet another base domain",
|
||||
baseDomain: "yetanother.example.com",
|
||||
derpMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
4: {
|
||||
RegionID: 4,
|
||||
RegionCode: "fra",
|
||||
RegionName: "Frankfurt",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
|
||||
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
|
||||
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
|
||||
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
4: {
|
||||
RegionID: 4,
|
||||
RegionCode: "fra",
|
||||
RegionName: "Frankfurt",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
|
||||
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
|
||||
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
|
||||
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
||||
@@ -108,11 +108,12 @@ func TestTailNode(t *testing.T) {
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{
|
||||
tsaddr.AllIPv4(),
|
||||
tsaddr.AllIPv6(),
|
||||
netip.MustParsePrefix("192.168.0.0/24"),
|
||||
netip.MustParsePrefix("172.0.0.0/10"),
|
||||
},
|
||||
},
|
||||
ApprovedRoutes: []netip.Prefix{tsaddr.AllIPv4(), netip.MustParsePrefix("192.168.0.0/24")},
|
||||
ApprovedRoutes: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6(), netip.MustParsePrefix("192.168.0.0/24")},
|
||||
CreatedAt: created,
|
||||
},
|
||||
dnsConfig: &tailcfg.DNSConfig{},
|
||||
@@ -150,6 +151,7 @@ func TestTailNode(t *testing.T) {
|
||||
Hostinfo: hiview(tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{
|
||||
tsaddr.AllIPv4(),
|
||||
tsaddr.AllIPv6(),
|
||||
netip.MustParsePrefix("192.168.0.0/24"),
|
||||
netip.MustParsePrefix("172.0.0.0/10"),
|
||||
},
|
||||
|
||||
@@ -213,7 +213,8 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
return
|
||||
}
|
||||
|
||||
cookieState, err := req.Cookie("state")
|
||||
stateCookieName := getCookieName("state", state)
|
||||
cookieState, err := req.Cookie(stateCookieName)
|
||||
if err != nil {
|
||||
httpError(writer, NewHTTPError(http.StatusBadRequest, "state not found", err))
|
||||
return
|
||||
@@ -235,8 +236,13 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
httpError(writer, err)
|
||||
return
|
||||
}
|
||||
if idToken.Nonce == "" {
|
||||
httpError(writer, NewHTTPError(http.StatusBadRequest, "nonce not found in IDToken", err))
|
||||
return
|
||||
}
|
||||
|
||||
nonce, err := req.Cookie("nonce")
|
||||
nonceCookieName := getCookieName("nonce", idToken.Nonce)
|
||||
nonce, err := req.Cookie(nonceCookieName)
|
||||
if err != nil {
|
||||
httpError(writer, NewHTTPError(http.StatusBadRequest, "nonce not found", err))
|
||||
return
|
||||
@@ -584,6 +590,11 @@ func renderOIDCCallbackTemplate(
|
||||
return &content, nil
|
||||
}
|
||||
|
||||
// getCookieName generates a unique cookie name based on a cookie value.
|
||||
func getCookieName(baseName, value string) string {
|
||||
return fmt.Sprintf("%s_%s", baseName, value[:6])
|
||||
}
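
// Example (illustrative, not part of the original change): getCookieName("state", "abc123XYZ")
// returns "state_abc123", so two login attempts opened in parallel keep separate
// "state_..." and "nonce_..." cookies instead of overwriting each other (see #2861).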
|
||||
|
||||
func setCSRFCookie(w http.ResponseWriter, r *http.Request, name string) (string, error) {
|
||||
val, err := util.GenerateRandomStringURLSafe(64)
|
||||
if err != nil {
|
||||
@@ -592,7 +603,7 @@ func setCSRFCookie(w http.ResponseWriter, r *http.Request, name string) (string,
|
||||
|
||||
c := &http.Cookie{
|
||||
Path: "/oidc/callback",
|
||||
Name: name,
|
||||
Name: getCookieName(name, val),
|
||||
Value: val,
|
||||
MaxAge: int(time.Hour.Seconds()),
|
||||
Secure: r.TLS != nil,
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"go4.org/netipx"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
@@ -91,3 +92,12 @@ func (m *Match) SrcsOverlapsPrefixes(prefixes ...netip.Prefix) bool {
|
||||
func (m *Match) DestsOverlapsPrefixes(prefixes ...netip.Prefix) bool {
|
||||
return slices.ContainsFunc(prefixes, m.dests.OverlapsPrefix)
|
||||
}
|
||||
|
||||
// DestsIsTheInternet reports if the destination is equal to "the internet"
|
||||
// which is a IPSet that represents "autogroup:internet" and is special
|
||||
// cased for exit nodes.
|
||||
func (m Match) DestsIsTheInternet() bool {
|
||||
return m.dests.Equal(util.TheInternet()) ||
|
||||
m.dests.ContainsPrefix(tsaddr.AllIPv4()) ||
|
||||
m.dests.ContainsPrefix(tsaddr.AllIPv6())
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/juanfont/headscale/hscontrol/policy/matcher"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/tailcfg"
|
||||
@@ -782,12 +783,287 @@ func TestReduceNodes(t *testing.T) {
|
||||
got = append(got, v.AsStruct())
|
||||
}
|
||||
if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" {
|
||||
t.Errorf("FilterNodesByACL() unexpected result (-want +got):\n%s", diff)
|
||||
t.Errorf("ReduceNodes() unexpected result (-want +got):\n%s", diff)
|
||||
t.Log("Matchers: ")
|
||||
for _, m := range matchers {
|
||||
t.Log("\t+", m.DebugString())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReduceNodesFromPolicy(t *testing.T) {
|
||||
n := func(id types.NodeID, ip, hostname, username string, routess ...string) *types.Node {
|
||||
var routes []netip.Prefix
|
||||
for _, route := range routess {
|
||||
routes = append(routes, netip.MustParsePrefix(route))
|
||||
}
|
||||
|
||||
return &types.Node{
|
||||
ID: id,
|
||||
IPv4: ap(ip),
|
||||
Hostname: hostname,
|
||||
User: types.User{Name: username},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: routes,
|
||||
},
|
||||
ApprovedRoutes: routes,
|
||||
}
|
||||
}
|
||||
|
||||
type args struct {
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
nodes types.Nodes
|
||||
policy string
|
||||
node *types.Node
|
||||
want types.Nodes
|
||||
wantMatchers int
|
||||
}{
|
||||
{
|
||||
name: "2788-exit-node-too-visible",
|
||||
nodes: types.Nodes{
|
||||
n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
policy: `
|
||||
{
|
||||
"hosts": {
|
||||
"mobile": "100.64.0.1/32",
|
||||
"server": "100.64.0.2/32",
|
||||
"exit": "100.64.0.3/32"
|
||||
},
|
||||
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"mobile"
|
||||
],
|
||||
"dst": [
|
||||
"server:80"
|
||||
]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
node: n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
want: types.Nodes{
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
},
|
||||
wantMatchers: 1,
|
||||
},
|
||||
{
|
||||
name: "2788-exit-node-autogroup:internet",
|
||||
nodes: types.Nodes{
|
||||
n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
policy: `
|
||||
{
|
||||
"hosts": {
|
||||
"mobile": "100.64.0.1/32",
|
||||
"server": "100.64.0.2/32",
|
||||
"exit": "100.64.0.3/32"
|
||||
},
|
||||
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"mobile"
|
||||
],
|
||||
"dst": [
|
||||
"server:80"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"mobile"
|
||||
],
|
||||
"dst": [
|
||||
"autogroup:internet:*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
node: n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
want: types.Nodes{
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
wantMatchers: 2,
|
||||
},
|
||||
{
|
||||
name: "2788-exit-node-0000-route",
|
||||
nodes: types.Nodes{
|
||||
n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
policy: `
|
||||
{
|
||||
"hosts": {
|
||||
"mobile": "100.64.0.1/32",
|
||||
"server": "100.64.0.2/32",
|
||||
"exit": "100.64.0.3/32"
|
||||
},
|
||||
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"mobile"
|
||||
],
|
||||
"dst": [
|
||||
"server:80"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"mobile"
|
||||
],
|
||||
"dst": [
|
||||
"0.0.0.0/0:*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
node: n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
want: types.Nodes{
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
wantMatchers: 2,
|
||||
},
|
||||
{
|
||||
name: "2788-exit-node-::0-route",
|
||||
nodes: types.Nodes{
|
||||
n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
policy: `
|
||||
{
|
||||
"hosts": {
|
||||
"mobile": "100.64.0.1/32",
|
||||
"server": "100.64.0.2/32",
|
||||
"exit": "100.64.0.3/32"
|
||||
},
|
||||
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"mobile"
|
||||
],
|
||||
"dst": [
|
||||
"server:80"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"mobile"
|
||||
],
|
||||
"dst": [
|
||||
"::0/0:*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
node: n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
want: types.Nodes{
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
wantMatchers: 2,
|
||||
},
|
||||
{
|
||||
name: "2784-split-exit-node-access",
|
||||
nodes: types.Nodes{
|
||||
n(1, "100.64.0.1", "user", "user"),
|
||||
n(2, "100.64.0.2", "exit1", "exit", "0.0.0.0/0", "::/0"),
|
||||
n(3, "100.64.0.3", "exit2", "exit", "0.0.0.0/0", "::/0"),
|
||||
n(4, "100.64.0.4", "otheruser", "otheruser"),
|
||||
},
|
||||
policy: `
|
||||
{
|
||||
"hosts": {
|
||||
"user": "100.64.0.1/32",
|
||||
"exit1": "100.64.0.2/32",
|
||||
"exit2": "100.64.0.3/32",
|
||||
"otheruser": "100.64.0.4/32",
|
||||
},
|
||||
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"user"
|
||||
],
|
||||
"dst": [
|
||||
"exit1:*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"otheruser"
|
||||
],
|
||||
"dst": [
|
||||
"exit2:*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
node: n(1, "100.64.0.1", "user", "user"),
|
||||
want: types.Nodes{
|
||||
n(2, "100.64.0.2", "exit1", "exit", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
wantMatchers: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
for idx, pmf := range PolicyManagerFuncsForTest([]byte(tt.policy)) {
|
||||
t.Run(fmt.Sprintf("%s-index%d", tt.name, idx), func(t *testing.T) {
|
||||
var pm PolicyManager
|
||||
var err error
|
||||
pm, err = pmf(nil, tt.nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
|
||||
matchers, err := pm.MatchersForNode(tt.node.View())
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, matchers, tt.wantMatchers)
|
||||
|
||||
gotViews := ReduceNodes(
|
||||
tt.node.View(),
|
||||
tt.nodes.ViewSlice(),
|
||||
matchers,
|
||||
)
|
||||
// Convert views back to nodes for comparison in tests
|
||||
var got types.Nodes
|
||||
for _, v := range gotViews.All() {
|
||||
got = append(got, v.AsStruct())
|
||||
}
|
||||
if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" {
|
||||
t.Errorf("TestReduceNodesFromPolicy() unexpected result (-want +got):\n%s", diff)
|
||||
t.Log("Matchers: ")
|
||||
for _, m := range matchers {
|
||||
t.Log("\t+", m.DebugString())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSSHPolicyRules(t *testing.T) {
|
||||
users := []types.User{
|
||||
{Name: "user1", Model: gorm.Model{ID: 1}},
|
||||
@@ -1077,6 +1353,55 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "2863-allow-predefined-missing-users",
|
||||
targetNode: taggedClient,
|
||||
peers: types.Nodes{&nodeUser2},
|
||||
policy: `{
|
||||
"groups": {
|
||||
"group:example-infra": [
|
||||
"user2@",
|
||||
"not-created-yet@",
|
||||
],
|
||||
},
|
||||
"tagOwners": {
|
||||
"tag:client": [
|
||||
"user2@"
|
||||
],
|
||||
},
|
||||
"ssh": [
|
||||
// Allow infra to ssh to tag:example-infra server as debian
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"group:example-infra"
|
||||
],
|
||||
"dst": [
|
||||
"tag:client",
|
||||
],
|
||||
"users": [
|
||||
"debian",
|
||||
],
|
||||
},
|
||||
],
|
||||
}`,
|
||||
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
|
||||
{
|
||||
Principals: []*tailcfg.SSHPrincipal{
|
||||
{NodeIP: "100.64.0.2"},
|
||||
},
|
||||
SSHUsers: map[string]string{
|
||||
"debian": "debian",
|
||||
},
|
||||
Action: &tailcfg.SSHAction{
|
||||
Accept: true,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
AllowRemotePortForwarding: true,
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
||||
@@ -99,14 +99,16 @@ func (pol *Policy) compileFilterRulesForNode(
|
||||
return nil, ErrInvalidAction
|
||||
}
|
||||
|
||||
rule, err := pol.compileACLWithAutogroupSelf(acl, users, node, nodes)
|
||||
aclRules, err := pol.compileACLWithAutogroupSelf(acl, users, node, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("compiling ACL")
|
||||
continue
|
||||
}
|
||||
|
||||
if rule != nil {
|
||||
rules = append(rules, *rule)
|
||||
for _, rule := range aclRules {
|
||||
if rule != nil {
|
||||
rules = append(rules, *rule)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -115,27 +117,32 @@ func (pol *Policy) compileFilterRulesForNode(
|
||||
|
||||
// compileACLWithAutogroupSelf compiles a single ACL rule, handling
|
||||
// autogroup:self per-node while supporting all other alias types normally.
|
||||
// It returns a slice of filter rules because when an ACL has both autogroup:self
|
||||
// and other destinations, they need to be split into separate rules with different
|
||||
// source filtering logic.
|
||||
func (pol *Policy) compileACLWithAutogroupSelf(
|
||||
acl ACL,
|
||||
users types.Users,
|
||||
node types.NodeView,
|
||||
nodes views.Slice[types.NodeView],
|
||||
) (*tailcfg.FilterRule, error) {
|
||||
// Check if any destination uses autogroup:self
|
||||
hasAutogroupSelfInDst := false
|
||||
) ([]*tailcfg.FilterRule, error) {
|
||||
var autogroupSelfDests []AliasWithPorts
|
||||
var otherDests []AliasWithPorts
|
||||
|
||||
for _, dest := range acl.Destinations {
|
||||
if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
hasAutogroupSelfInDst = true
|
||||
break
|
||||
autogroupSelfDests = append(autogroupSelfDests, dest)
|
||||
} else {
|
||||
otherDests = append(otherDests, dest)
|
||||
}
|
||||
}
|
||||
|
||||
var srcIPs netipx.IPSetBuilder
|
||||
protocols, _ := acl.Protocol.parseProtocol()
|
||||
var rules []*tailcfg.FilterRule
|
||||
|
||||
var resolvedSrcIPs []*netipx.IPSet
|
||||
|
||||
// Resolve sources to only include devices from the same user as the target node.
|
||||
for _, src := range acl.Sources {
|
||||
// autogroup:self is not allowed in sources
|
||||
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
return nil, fmt.Errorf("autogroup:self cannot be used in sources")
|
||||
}
|
||||
@@ -147,92 +154,121 @@ func (pol *Policy) compileACLWithAutogroupSelf(
|
||||
}
|
||||
|
||||
if ips != nil {
|
||||
if hasAutogroupSelfInDst {
|
||||
// Instead of iterating all addresses (which could be millions),
|
||||
// check each node's IPs against the source set
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
// Check if any of this node's IPs are in the source set
|
||||
for _, nodeIP := range n.IPs() {
|
||||
if ips.Contains(nodeIP) {
|
||||
n.AppendToIPSet(&srcIPs)
|
||||
break // Found this node, move to next
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// No autogroup:self in destination, use all resolved sources
|
||||
srcIPs.AddSet(ips)
|
||||
}
|
||||
resolvedSrcIPs = append(resolvedSrcIPs, ips)
|
||||
}
|
||||
}
|
||||
|
||||
srcSet, err := srcIPs.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if len(resolvedSrcIPs) == 0 {
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
if srcSet == nil || len(srcSet.Prefixes()) == 0 {
|
||||
// No sources resolved, skip this rule
|
||||
return nil, nil //nolint:nilnil
|
||||
}
|
||||
// Handle autogroup:self destinations (if any)
|
||||
if len(autogroupSelfDests) > 0 {
|
||||
// Pre-filter to same-user untagged devices once - reuse for both sources and destinations
|
||||
sameUserNodes := make([]types.NodeView, 0)
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
sameUserNodes = append(sameUserNodes, n)
|
||||
}
|
||||
}
|
||||
|
||||
protocols, _ := acl.Protocol.parseProtocol()
|
||||
|
||||
var destPorts []tailcfg.NetPortRange
|
||||
|
||||
for _, dest := range acl.Destinations {
|
||||
if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
for _, port := range dest.Ports {
|
||||
for _, ip := range n.IPs() {
|
||||
pr := tailcfg.NetPortRange{
|
||||
IP: ip.String(),
|
||||
Ports: port,
|
||||
}
|
||||
destPorts = append(destPorts, pr)
|
||||
if len(sameUserNodes) > 0 {
|
||||
// Filter sources to only same-user untagged devices
|
||||
var srcIPs netipx.IPSetBuilder
|
||||
for _, ips := range resolvedSrcIPs {
|
||||
for _, n := range sameUserNodes {
|
||||
// Check if any of this node's IPs are in the source set
|
||||
for _, nodeIP := range n.IPs() {
|
||||
if ips.Contains(nodeIP) {
|
||||
n.AppendToIPSet(&srcIPs)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ips, err := dest.Resolve(pol, users, nodes)
|
||||
|
||||
srcSet, err := srcIPs.IPSet()
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("resolving destination ips")
|
||||
continue
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ips == nil {
|
||||
log.Debug().Msgf("destination resolved to nil ips: %v", dest)
|
||||
continue
|
||||
}
|
||||
|
||||
prefixes := ips.Prefixes()
|
||||
|
||||
for _, pref := range prefixes {
|
||||
for _, port := range dest.Ports {
|
||||
pr := tailcfg.NetPortRange{
|
||||
IP: pref.String(),
|
||||
Ports: port,
|
||||
if srcSet != nil && len(srcSet.Prefixes()) > 0 {
|
||||
var destPorts []tailcfg.NetPortRange
|
||||
for _, dest := range autogroupSelfDests {
|
||||
for _, n := range sameUserNodes {
|
||||
for _, port := range dest.Ports {
|
||||
for _, ip := range n.IPs() {
|
||||
destPorts = append(destPorts, tailcfg.NetPortRange{
|
||||
IP: ip.String(),
|
||||
Ports: port,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
destPorts = append(destPorts, pr)
|
||||
}
|
||||
|
||||
if len(destPorts) > 0 {
|
||||
rules = append(rules, &tailcfg.FilterRule{
|
||||
SrcIPs: ipSetToPrefixStringList(srcSet),
|
||||
DstPorts: destPorts,
|
||||
IPProto: protocols,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(destPorts) == 0 {
|
||||
// No destinations resolved, skip this rule
|
||||
return nil, nil //nolint:nilnil
|
||||
if len(otherDests) > 0 {
|
||||
var srcIPs netipx.IPSetBuilder
|
||||
|
||||
for _, ips := range resolvedSrcIPs {
|
||||
srcIPs.AddSet(ips)
|
||||
}
|
||||
|
||||
srcSet, err := srcIPs.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if srcSet != nil && len(srcSet.Prefixes()) > 0 {
|
||||
var destPorts []tailcfg.NetPortRange
|
||||
|
||||
for _, dest := range otherDests {
|
||||
ips, err := dest.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("resolving destination ips")
|
||||
continue
|
||||
}
|
||||
|
||||
if ips == nil {
|
||||
log.Debug().Msgf("destination resolved to nil ips: %v", dest)
|
||||
continue
|
||||
}
|
||||
|
||||
prefixes := ips.Prefixes()
|
||||
|
||||
for _, pref := range prefixes {
|
||||
for _, port := range dest.Ports {
|
||||
pr := tailcfg.NetPortRange{
|
||||
IP: pref.String(),
|
||||
Ports: port,
|
||||
}
|
||||
destPorts = append(destPorts, pr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(destPorts) > 0 {
|
||||
rules = append(rules, &tailcfg.FilterRule{
|
||||
SrcIPs: ipSetToPrefixStringList(srcSet),
|
||||
DstPorts: destPorts,
|
||||
IPProto: protocols,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &tailcfg.FilterRule{
|
||||
SrcIPs: ipSetToPrefixStringList(srcSet),
|
||||
DstPorts: destPorts,
|
||||
IPProto: protocols,
|
||||
}, nil
|
||||
return rules, nil
|
||||
}
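The same-user filtering used above can be summarised as a small helper. This is an illustrative sketch only: the helper name, signature and parameter types are assumptions, while the methods it calls (User().ID, IsTagged(), IPs(), AppendToIPSet, Contains) are the ones used in the rule compilation above.

func filterSrcsToSameUser(target types.NodeView, nodes views.Slice[types.NodeView], srcs *netipx.IPSet) (*netipx.IPSet, error) {
	var b netipx.IPSetBuilder
	for _, n := range nodes.All() {
		if n.User().ID != target.User().ID || n.IsTagged() {
			continue
		}
		// Keep the node only if at least one of its addresses is already
		// present in the resolved source set.
		for _, ip := range n.IPs() {
			if srcs.Contains(ip) {
				n.AppendToIPSet(&b)
				break
			}
		}
	}
	return b.IPSet()
}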
|
||||
|
||||
func sshAction(accept bool, duration time.Duration) tailcfg.SSHAction {
|
||||
@@ -260,46 +296,29 @@ func (pol *Policy) compileSSHPolicy(
|
||||
var rules []*tailcfg.SSHRule
|
||||
|
||||
for index, rule := range pol.SSHs {
|
||||
// Check if any destination uses autogroup:self
|
||||
hasAutogroupSelfInDst := false
|
||||
// Separate destinations into autogroup:self and others
|
||||
// This is needed because autogroup:self requires filtering sources to same-user only,
|
||||
// while other destinations should use all resolved sources
|
||||
var autogroupSelfDests []Alias
|
||||
var otherDests []Alias
|
||||
|
||||
for _, dst := range rule.Destinations {
|
||||
if ag, ok := dst.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
hasAutogroupSelfInDst = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// If autogroup:self is used, skip tagged nodes
|
||||
if hasAutogroupSelfInDst && node.IsTagged() {
|
||||
continue
|
||||
}
|
||||
|
||||
var dest netipx.IPSetBuilder
|
||||
for _, src := range rule.Destinations {
|
||||
// Handle autogroup:self specially
|
||||
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
// For autogroup:self, only include the target user's untagged devices
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
n.AppendToIPSet(&dest)
|
||||
}
|
||||
}
|
||||
autogroupSelfDests = append(autogroupSelfDests, dst)
|
||||
} else {
|
||||
ips, err := src.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("resolving destination ips")
|
||||
continue
|
||||
}
|
||||
dest.AddSet(ips)
|
||||
otherDests = append(otherDests, dst)
|
||||
}
|
||||
}
|
||||
|
||||
destSet, err := dest.IPSet()
|
||||
// Note: Tagged nodes can't match autogroup:self destinations, but can still match other destinations
|
||||
|
||||
// Resolve sources once - we'll use them differently for each destination type
|
||||
srcIPs, err := rule.Sources.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
log.Trace().Caller().Err(err).Msgf("SSH policy compilation failed resolving source ips for rule %+v", rule)
|
||||
}
|
||||
|
||||
if !node.InIPSet(destSet) {
|
||||
if srcIPs == nil || len(srcIPs.Prefixes()) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -313,50 +332,9 @@ func (pol *Policy) compileSSHPolicy(
|
||||
return nil, fmt.Errorf("parsing SSH policy, unknown action %q, index: %d: %w", rule.Action, index, err)
|
||||
}
|
||||
|
||||
var principals []*tailcfg.SSHPrincipal
|
||||
srcIPs, err := rule.Sources.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("SSH policy compilation failed resolving source ips for rule %+v", rule)
|
||||
continue // Skip this rule if we can't resolve sources
|
||||
}
|
||||
|
||||
// If autogroup:self is in destinations, filter sources to same user only
|
||||
if hasAutogroupSelfInDst {
|
||||
var filteredSrcIPs netipx.IPSetBuilder
|
||||
// Instead of iterating all addresses, check each node's IPs
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
// Check if any of this node's IPs are in the source set
|
||||
for _, nodeIP := range n.IPs() {
|
||||
if srcIPs.Contains(nodeIP) {
|
||||
n.AppendToIPSet(&filteredSrcIPs)
|
||||
break // Found this node, move to next
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
srcIPs, err = filteredSrcIPs.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if srcIPs == nil || len(srcIPs.Prefixes()) == 0 {
|
||||
// No valid sources after filtering, skip this rule
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
for addr := range util.IPSetAddrIter(srcIPs) {
|
||||
principals = append(principals, &tailcfg.SSHPrincipal{
|
||||
NodeIP: addr.String(),
|
||||
})
|
||||
}
|
||||
|
||||
userMap := make(map[string]string, len(rule.Users))
|
||||
if rule.Users.ContainsNonRoot() {
|
||||
userMap["*"] = "="
|
||||
|
||||
// by default, we do not allow root unless explicitly stated
|
||||
userMap["root"] = ""
|
||||
}
|
||||
@@ -366,11 +344,108 @@ func (pol *Policy) compileSSHPolicy(
|
||||
for _, u := range rule.Users.NormalUsers() {
|
||||
userMap[u.String()] = u.String()
|
||||
}
|
||||
rules = append(rules, &tailcfg.SSHRule{
|
||||
Principals: principals,
|
||||
SSHUsers: userMap,
|
||||
Action: &action,
|
||||
})
|
||||
|
||||
// Handle autogroup:self destinations (if any)
|
||||
// Note: Tagged nodes can't match autogroup:self, so skip this block for tagged nodes
|
||||
if len(autogroupSelfDests) > 0 && !node.IsTagged() {
|
||||
// Build destination set for autogroup:self (same-user untagged devices only)
|
||||
var dest netipx.IPSetBuilder
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
n.AppendToIPSet(&dest)
|
||||
}
|
||||
}
|
||||
|
||||
destSet, err := dest.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Only create rule if this node is in the destination set
|
||||
if node.InIPSet(destSet) {
|
||||
// Filter sources to only same-user untagged devices
|
||||
// Pre-filter to same-user untagged devices for efficiency
|
||||
sameUserNodes := make([]types.NodeView, 0)
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
sameUserNodes = append(sameUserNodes, n)
|
||||
}
|
||||
}
|
||||
|
||||
var filteredSrcIPs netipx.IPSetBuilder
|
||||
for _, n := range sameUserNodes {
|
||||
// Check if any of this node's IPs are in the source set
|
||||
for _, nodeIP := range n.IPs() {
|
||||
if srcIPs.Contains(nodeIP) {
|
||||
n.AppendToIPSet(&filteredSrcIPs)
|
||||
break // Found this node, move to next
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
filteredSrcSet, err := filteredSrcIPs.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if filteredSrcSet != nil && len(filteredSrcSet.Prefixes()) > 0 {
|
||||
var principals []*tailcfg.SSHPrincipal
|
||||
for addr := range util.IPSetAddrIter(filteredSrcSet) {
|
||||
principals = append(principals, &tailcfg.SSHPrincipal{
|
||||
NodeIP: addr.String(),
|
||||
})
|
||||
}
|
||||
|
||||
if len(principals) > 0 {
|
||||
rules = append(rules, &tailcfg.SSHRule{
|
||||
Principals: principals,
|
||||
SSHUsers: userMap,
|
||||
Action: &action,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle other destinations (if any)
|
||||
if len(otherDests) > 0 {
|
||||
// Build destination set for other destinations
|
||||
var dest netipx.IPSetBuilder
|
||||
for _, dst := range otherDests {
|
||||
ips, err := dst.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("resolving destination ips")
|
||||
continue
|
||||
}
|
||||
if ips != nil {
|
||||
dest.AddSet(ips)
|
||||
}
|
||||
}
|
||||
|
||||
destSet, err := dest.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Only create rule if this node is in the destination set
|
||||
if node.InIPSet(destSet) {
|
||||
// For non-autogroup:self destinations, use all resolved sources (no filtering)
|
||||
var principals []*tailcfg.SSHPrincipal
|
||||
for addr := range util.IPSetAddrIter(srcIPs) {
|
||||
principals = append(principals, &tailcfg.SSHPrincipal{
|
||||
NodeIP: addr.String(),
|
||||
})
|
||||
}
|
||||
|
||||
if len(principals) > 0 {
|
||||
rules = append(rules, &tailcfg.SSHRule{
|
||||
Principals: principals,
|
||||
SSHUsers: userMap,
|
||||
Action: &action,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &tailcfg.SSHPolicy{
|
||||
|
||||
@@ -1339,3 +1339,70 @@ func TestSSHWithAutogroupSelfExcludesTaggedDevices(t *testing.T) {
|
||||
assert.Empty(t, sshPolicy2.Rules, "tagged node should get no SSH rules with autogroup:self")
|
||||
}
|
||||
}
|
||||
|
||||
// TestSSHWithAutogroupSelfAndMixedDestinations tests that SSH rules can have both
|
||||
// autogroup:self and other destinations (like tag:router) in the same rule, and that
|
||||
// autogroup:self filtering only applies to autogroup:self destinations, not others.
|
||||
func TestSSHWithAutogroupSelfAndMixedDestinations(t *testing.T) {
|
||||
users := types.Users{
|
||||
{Model: gorm.Model{ID: 1}, Name: "user1"},
|
||||
{Model: gorm.Model{ID: 2}, Name: "user2"},
|
||||
}
|
||||
|
||||
nodes := types.Nodes{
|
||||
{User: users[0], IPv4: ap("100.64.0.1"), Hostname: "user1-device"},
|
||||
{User: users[0], IPv4: ap("100.64.0.2"), Hostname: "user1-device2"},
|
||||
{User: users[1], IPv4: ap("100.64.0.3"), Hostname: "user2-device"},
|
||||
{User: users[1], IPv4: ap("100.64.0.4"), Hostname: "user2-router", ForcedTags: []string{"tag:router"}},
|
||||
}
|
||||
|
||||
policy := &Policy{
|
||||
TagOwners: TagOwners{
|
||||
Tag("tag:router"): Owners{up("user2@")},
|
||||
},
|
||||
SSHs: []SSH{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: SSHSrcAliases{agp("autogroup:member")},
|
||||
Destinations: SSHDstAliases{agp("autogroup:self"), tp("tag:router")},
|
||||
Users: []SSHUser{"admin"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := policy.validate()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test 1: Compile for user1's device (should only match autogroup:self destination)
|
||||
node1 := nodes[0].View()
|
||||
sshPolicy1, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sshPolicy1)
|
||||
require.Len(t, sshPolicy1.Rules, 1, "user1's device should have 1 SSH rule (autogroup:self)")
|
||||
|
||||
// Verify autogroup:self rule has filtered sources (only same-user devices)
|
||||
selfRule := sshPolicy1.Rules[0]
|
||||
require.Len(t, selfRule.Principals, 2, "autogroup:self rule should only have user1's devices")
|
||||
selfPrincipals := make([]string, len(selfRule.Principals))
|
||||
for i, p := range selfRule.Principals {
|
||||
selfPrincipals[i] = p.NodeIP
|
||||
}
|
||||
require.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, selfPrincipals,
|
||||
"autogroup:self rule should only include same-user untagged devices")
|
||||
|
||||
// Test 2: Compile for router (should only match tag:router destination)
|
||||
routerNode := nodes[3].View() // user2-router
|
||||
sshPolicyRouter, err := policy.compileSSHPolicy(users, routerNode, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sshPolicyRouter)
|
||||
require.Len(t, sshPolicyRouter.Rules, 1, "router should have 1 SSH rule (tag:router)")
|
||||
|
||||
routerRule := sshPolicyRouter.Rules[0]
|
||||
routerPrincipals := make([]string, len(routerRule.Principals))
|
||||
for i, p := range routerRule.Principals {
|
||||
routerPrincipals[i] = p.NodeIP
|
||||
}
|
||||
require.Contains(t, routerPrincipals, "100.64.0.1", "router rule should include user1's device (unfiltered sources)")
|
||||
require.Contains(t, routerPrincipals, "100.64.0.2", "router rule should include user1's other device (unfiltered sources)")
|
||||
require.Contains(t, routerPrincipals, "100.64.0.3", "router rule should include user2's device (unfiltered sources)")
|
||||
}
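For reference, a rule like the ones this test asserts on has roughly the following shape; the values are illustrative and not taken from a real run.

rule := &tailcfg.SSHRule{
	Principals: []*tailcfg.SSHPrincipal{
		{NodeIP: "100.64.0.1"}, // for autogroup:self: same-user, untagged peers only
		{NodeIP: "100.64.0.2"},
	},
	SSHUsers: map[string]string{
		"*":     "=", // map any requested non-root user to itself
		"root":  "",  // root stays disabled unless listed explicitly
		"admin": "admin",
	},
	Action: &tailcfg.SSHAction{Accept: true},
}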
|
||||
|
||||
@@ -2,6 +2,7 @@ package v2
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
@@ -439,3 +440,82 @@ func TestAutogroupSelfReducedVsUnreducedRules(t *testing.T) {
|
||||
require.Empty(t, peerMap[node1.ID], "node1 should have no peers (can only reach itself)")
|
||||
require.Empty(t, peerMap[node2.ID], "node2 should have no peers")
|
||||
}
|
||||
|
||||
// When separate ACL rules exist (one with autogroup:self, one with tag:router),
|
||||
// the autogroup:self rule should not prevent the tag:router rule from working.
|
||||
// This ensures that autogroup:self doesn't interfere with other ACL rules.
|
||||
func TestAutogroupSelfWithOtherRules(t *testing.T) {
|
||||
users := types.Users{
|
||||
{Model: gorm.Model{ID: 1}, Name: "test-1", Email: "test-1@example.com"},
|
||||
{Model: gorm.Model{ID: 2}, Name: "test-2", Email: "test-2@example.com"},
|
||||
}
|
||||
|
||||
// test-1 has a regular device
|
||||
test1Node := &types.Node{
|
||||
ID: 1,
|
||||
Hostname: "test-1-device",
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[0],
|
||||
UserID: users[0].ID,
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
}
|
||||
|
||||
// test-2 has a router device with tag:node-router
|
||||
test2RouterNode := &types.Node{
|
||||
ID: 2,
|
||||
Hostname: "test-2-router",
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0::2"),
|
||||
User: users[1],
|
||||
UserID: users[1].ID,
|
||||
ForcedTags: []string{"tag:node-router"},
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
}
|
||||
|
||||
nodes := types.Nodes{test1Node, test2RouterNode}
|
||||
|
||||
// This matches the exact policy from issue #2838:
|
||||
// - First rule: autogroup:member -> autogroup:self (allows users to see their own devices)
|
||||
// - Second rule: group:home -> tag:node-router (should allow group members to see router)
|
||||
policy := `{
|
||||
"groups": {
|
||||
"group:home": ["test-1@example.com", "test-2@example.com"]
|
||||
},
|
||||
"tagOwners": {
|
||||
"tag:node-router": ["group:home"]
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["autogroup:member"],
|
||||
"dst": ["autogroup:self:*"]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:home"],
|
||||
"dst": ["tag:node-router:*"]
|
||||
}
|
||||
]
|
||||
}`
|
||||
|
||||
pm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
|
||||
peerMap := pm.BuildPeerMap(nodes.ViewSlice())
|
||||
|
||||
// test-1 (in group:home) should see:
|
||||
// 1. Their own node (from autogroup:self rule)
|
||||
// 2. The router node (from group:home -> tag:node-router rule)
|
||||
test1Peers := peerMap[test1Node.ID]
|
||||
|
||||
// Verify test-1 can see the router (group:home -> tag:node-router rule)
|
||||
require.True(t, slices.ContainsFunc(test1Peers, func(n types.NodeView) bool {
|
||||
return n.ID() == test2RouterNode.ID
|
||||
}), "test-1 should see test-2's router via group:home -> tag:node-router rule, even when autogroup:self rule exists (issue #2838)")
|
||||
|
||||
// Verify that test-1 has filter rules (including autogroup:self and tag:node-router access)
|
||||
rules, err := pm.FilterForNode(test1Node.View())
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, rules, "test-1 should have filter rules from both ACL rules")
|
||||
}
|
||||
|
||||
@@ -300,7 +300,9 @@ func (s *State) UpdateUser(userID types.UserID, updateFn func(*types.User) error
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := tx.Save(user).Error; err != nil {
|
||||
// Use Updates() to only update modified fields, preserving unchanged values.
|
||||
err = tx.Updates(user).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("updating user: %w", err)
|
||||
}
|
||||
|
||||
@@ -386,7 +388,11 @@ func (s *State) persistNodeToDB(node types.NodeView) (types.NodeView, change.Cha
|
||||
|
||||
nodePtr := node.AsStruct()
|
||||
|
||||
if err := s.db.DB.Save(nodePtr).Error; err != nil {
|
||||
// Use Omit("expiry") to prevent overwriting expiry during MapRequest updates.
|
||||
// Expiry should only be updated through explicit SetNodeExpiry calls or re-registration.
|
||||
// See: https://github.com/juanfont/headscale/issues/2862
|
||||
err := s.db.DB.Omit("expiry").Updates(nodePtr).Error
|
||||
if err != nil {
|
||||
return types.NodeView{}, change.EmptySet, fmt.Errorf("saving node: %w", err)
|
||||
}
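The difference between the replaced Save() call and the new Omit("expiry").Updates() call is easy to miss. A minimal GORM sketch, assuming a simplified model that is not part of this diff:

type nodeRow struct {
	ID       uint
	Hostname string
	Expiry   *time.Time
}

func persist(db *gorm.DB, n *nodeRow) error {
	// Save() would write every column and could clobber the stored expiry with
	// a stale or empty value from a MapRequest. Updates() writes only non-zero
	// fields, and Omit("expiry") guarantees the expiry column is never touched.
	return db.Omit("expiry").Updates(n).Error
}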
|
||||
|
||||
@@ -456,9 +462,9 @@ func (s *State) Connect(id types.NodeID) []change.ChangeSet {
|
||||
log.Info().Uint64("node.id", id.Uint64()).Str("node.name", node.Hostname()).Msg("Node connected")
|
||||
|
||||
// Use the node's current routes for primary route update
|
||||
// SubnetRoutes() returns only the intersection of announced AND approved routes
|
||||
// We MUST use SubnetRoutes() to maintain the security model
|
||||
routeChange := s.primaryRoutes.SetRoutes(id, node.SubnetRoutes()...)
|
||||
// AllApprovedRoutes() returns only the intersection of announced AND approved routes
|
||||
// We MUST use AllApprovedRoutes() to maintain the security model
|
||||
routeChange := s.primaryRoutes.SetRoutes(id, node.AllApprovedRoutes()...)
|
||||
|
||||
if routeChange {
|
||||
c = append(c, change.NodeAdded(id))
|
||||
@@ -656,7 +662,7 @@ func (s *State) SetApprovedRoutes(nodeID types.NodeID, routes []netip.Prefix) (t
|
||||
// Update primary routes table based on SubnetRoutes (intersection of announced and approved).
|
||||
// The primary routes table is what the mapper uses to generate network maps, so updating it
|
||||
// here ensures that route changes are distributed to peers.
|
||||
routeChange := s.primaryRoutes.SetRoutes(nodeID, nodeView.SubnetRoutes()...)
|
||||
routeChange := s.primaryRoutes.SetRoutes(nodeID, nodeView.AllApprovedRoutes()...)
|
||||
|
||||
// If routes changed or the changeset isn't already a full update, trigger a policy change
|
||||
// to ensure all nodes get updated network maps
|
||||
@@ -1187,9 +1193,10 @@ func (s *State) HandleNodeFromAuthPath(
|
||||
return types.NodeView{}, change.EmptySet, fmt.Errorf("node not found in NodeStore: %d", existingNodeSameUser.ID())
|
||||
}
|
||||
|
||||
// Use the node from UpdateNode to save to database
|
||||
_, err = hsdb.Write(s.db.DB, func(tx *gorm.DB) (*types.Node, error) {
|
||||
if err := tx.Save(updatedNodeView.AsStruct()).Error; err != nil {
|
||||
// Use Updates() to preserve fields not modified by UpdateNode.
|
||||
err := tx.Updates(updatedNodeView.AsStruct()).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to save node: %w", err)
|
||||
}
|
||||
return nil, nil
|
||||
@@ -1294,9 +1301,46 @@ func (s *State) HandleNodeFromPreAuthKey(
|
||||
return types.NodeView{}, change.EmptySet, err
|
||||
}
|
||||
|
||||
err = pak.Validate()
|
||||
if err != nil {
|
||||
return types.NodeView{}, change.EmptySet, err
|
||||
// Check if a node already exists with the same machine key before validating the key.
// For #2830: container restarts send the same pre-auth key, which may already be used or expired.
// Skip validation for existing nodes re-registering with the same NodeKey, as the
// key was only needed for initial authentication. NodeKey rotation still requires validation.
|
||||
existingNodeSameUser, existsSameUser := s.nodeStore.GetNodeByMachineKey(machineKey, types.UserID(pak.User.ID))
|
||||
|
||||
// Skip validation only if both the AuthKeyID and NodeKey match (not a rotation).
|
||||
isExistingNodeReregistering := existsSameUser && existingNodeSameUser.Valid() &&
|
||||
existingNodeSameUser.AuthKey().Valid() &&
|
||||
existingNodeSameUser.AuthKeyID().Valid() &&
|
||||
existingNodeSameUser.AuthKeyID().Get() == pak.ID
|
||||
|
||||
// Check if this is a NodeKey rotation (different NodeKey)
|
||||
isNodeKeyRotation := existsSameUser && existingNodeSameUser.Valid() &&
|
||||
existingNodeSameUser.NodeKey() != regReq.NodeKey
|
||||
|
||||
if isExistingNodeReregistering && !isNodeKeyRotation {
|
||||
// Existing node re-registering with same NodeKey: skip validation.
|
||||
// Pre-auth keys are only needed for initial authentication. Critical for
|
||||
// containers that run "tailscale up --authkey=KEY" on every restart.
|
||||
log.Debug().
|
||||
Caller().
|
||||
Uint64("node.id", existingNodeSameUser.ID().Uint64()).
|
||||
Str("node.name", existingNodeSameUser.Hostname()).
|
||||
Str("machine.key", machineKey.ShortString()).
|
||||
Str("node.key.existing", existingNodeSameUser.NodeKey().ShortString()).
|
||||
Str("node.key.request", regReq.NodeKey.ShortString()).
|
||||
Uint64("authkey.id", pak.ID).
|
||||
Bool("authkey.used", pak.Used).
|
||||
Bool("authkey.expired", pak.Expiration != nil && pak.Expiration.Before(time.Now())).
|
||||
Bool("authkey.reusable", pak.Reusable).
|
||||
Bool("nodekey.rotation", isNodeKeyRotation).
|
||||
Msg("Existing node re-registering with same NodeKey and auth key, skipping validation")
|
||||
|
||||
} else {
|
||||
// New node or NodeKey rotation: require valid auth key.
|
||||
err = pak.Validate()
|
||||
if err != nil {
|
||||
return types.NodeView{}, change.EmptySet, err
|
||||
}
|
||||
}
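Condensed, the decision above is: skip pre-auth key validation only when an existing node re-registers with both the same auth key and the same NodeKey; new nodes and NodeKey rotations still require a valid key. A hypothetical predicate (name and parameter types are assumptions, and the AuthKey().Valid() check from the code above is omitted for brevity):

func needsAuthKeyValidation(existing types.NodeView, exists bool, pakID uint64, reqNodeKey key.NodePublic) bool {
	sameAuthKey := exists && existing.Valid() &&
		existing.AuthKeyID().Valid() && existing.AuthKeyID().Get() == pakID
	keyRotation := exists && existing.Valid() &&
		existing.NodeKey() != reqNodeKey

	return !sameAuthKey || keyRotation
}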
|
||||
|
||||
// Ensure we have a valid hostname - handle nil/empty cases
|
||||
@@ -1328,9 +1372,6 @@ func (s *State) HandleNodeFromPreAuthKey(
|
||||
|
||||
var finalNode types.NodeView
|
||||
|
||||
// Check if node already exists with same machine key for this user
|
||||
existingNodeSameUser, existsSameUser := s.nodeStore.GetNodeByMachineKey(machineKey, types.UserID(pak.User.ID))
|
||||
|
||||
// If this node exists for this user, update the node in place.
|
||||
if existsSameUser && existingNodeSameUser.Valid() {
|
||||
log.Trace().
|
||||
@@ -1372,9 +1413,10 @@ func (s *State) HandleNodeFromPreAuthKey(
|
||||
return types.NodeView{}, change.EmptySet, fmt.Errorf("node not found in NodeStore: %d", existingNodeSameUser.ID())
|
||||
}
|
||||
|
||||
// Use the node from UpdateNode to save to database
|
||||
_, err = hsdb.Write(s.db.DB, func(tx *gorm.DB) (*types.Node, error) {
|
||||
if err := tx.Save(updatedNodeView.AsStruct()).Error; err != nil {
|
||||
// Use Updates() to preserve fields not modified by UpdateNode.
|
||||
err := tx.Updates(updatedNodeView.AsStruct()).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to save node: %w", err)
|
||||
}
|
||||
|
||||
@@ -1711,7 +1753,7 @@ func (s *State) UpdateNodeFromMapRequest(id types.NodeID, req tailcfg.MapRequest
|
||||
}
|
||||
|
||||
if needsRouteUpdate {
|
||||
// SetNodeRoutes sets the active/distributed routes, so we must use SubnetRoutes()
|
||||
// SetNodeRoutes sets the active/distributed routes, so we must use AllApprovedRoutes()
|
||||
// which returns only the intersection of announced AND approved routes.
|
||||
// Using AnnouncedRoutes() would bypass the security model and auto-approve everything.
|
||||
log.Debug().
|
||||
@@ -1719,9 +1761,9 @@ func (s *State) UpdateNodeFromMapRequest(id types.NodeID, req tailcfg.MapRequest
|
||||
Uint64("node.id", id.Uint64()).
|
||||
Strs("announcedRoutes", util.PrefixesToString(updatedNode.AnnouncedRoutes())).
|
||||
Strs("approvedRoutes", util.PrefixesToString(updatedNode.ApprovedRoutes().AsSlice())).
|
||||
Strs("subnetRoutes", util.PrefixesToString(updatedNode.SubnetRoutes())).
|
||||
Strs("allApprovedRoutes", util.PrefixesToString(updatedNode.AllApprovedRoutes())).
|
||||
Msg("updating node routes for distribution")
|
||||
nodeRouteChange = s.SetNodeRoutes(id, updatedNode.SubnetRoutes()...)
|
||||
nodeRouteChange = s.SetNodeRoutes(id, updatedNode.AllApprovedRoutes()...)
|
||||
}
|
||||
|
||||
_, policyChange, err := s.persistNodeToDB(updatedNode)
|
||||
|
||||
@@ -269,11 +269,19 @@ func (node *Node) Prefixes() []netip.Prefix {
|
||||
// node has any exit routes enabled.
|
||||
// If none are enabled, it will return nil.
|
||||
func (node *Node) ExitRoutes() []netip.Prefix {
|
||||
if slices.ContainsFunc(node.SubnetRoutes(), tsaddr.IsExitRoute) {
|
||||
return tsaddr.ExitRoutes()
|
||||
var routes []netip.Prefix
|
||||
|
||||
for _, route := range node.AnnouncedRoutes() {
|
||||
if tsaddr.IsExitRoute(route) && slices.Contains(node.ApprovedRoutes, route) {
|
||||
routes = append(routes, route)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return routes
|
||||
}
|
||||
|
||||
func (node *Node) IsExitNode() bool {
|
||||
return len(node.ExitRoutes()) > 0
|
||||
}
|
||||
|
||||
func (node *Node) IPsAsString() []string {
|
||||
@@ -311,9 +319,16 @@ func (node *Node) CanAccess(matchers []matcher.Match, node2 *Node) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check if the node has access to routes that might be part of a
|
||||
// smaller subnet that is served from node2 as a subnet router.
|
||||
if matcher.DestsOverlapsPrefixes(node2.SubnetRoutes()...) {
|
||||
return true
|
||||
}
|
||||
|
||||
// If the dst is "the internet" and node2 is an exit node, allow access.
|
||||
if matcher.DestsIsTheInternet() && node2.IsExitNode() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
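An illustration of how the two new checks interact (made-up values):

// node2.AnnouncedRoutes() = [0.0.0.0/0, ::/0]   (announces exit routes)
// node2.ApprovedRoutes    = [0.0.0.0/0, ::/0]   (approved by the admin)
// => node2.IsExitNode() == true
//
// A matcher whose destinations cover "the internet" (for example an ACL rule
// with dst autogroup:internet) reports DestsIsTheInternet() == true, so
// CanAccess returns true and node2 is visible to that source as an exit node.
// Without such a rule the exit node is no longer exposed to every peer.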
|
||||
@@ -440,16 +455,22 @@ func (node *Node) AnnouncedRoutes() []netip.Prefix {
|
||||
return node.Hostinfo.RoutableIPs
|
||||
}
|
||||
|
||||
// SubnetRoutes returns the list of routes that the node announces and are approved.
|
||||
// SubnetRoutes returns the list of routes (excluding exit routes) that the node
|
||||
// announces and are approved.
|
||||
//
|
||||
// IMPORTANT: This method is used for internal data structures and should NOT be used
|
||||
// for the gRPC Proto conversion. For Proto, SubnetRoutes must be populated manually
|
||||
// with PrimaryRoutes to ensure it includes only routes actively served by the node.
|
||||
// See the comment in Proto() method and the implementation in grpcv1.go/nodesToProto.
|
||||
// IMPORTANT: This method is used for internal data structures and should NOT be
|
||||
// used for the gRPC Proto conversion. For Proto, SubnetRoutes must be populated
|
||||
// manually with PrimaryRoutes to ensure it includes only routes actively served
|
||||
// by the node. See the comment in Proto() method and the implementation in
|
||||
// grpcv1.go/nodesToProto.
|
||||
func (node *Node) SubnetRoutes() []netip.Prefix {
|
||||
var routes []netip.Prefix
|
||||
|
||||
for _, route := range node.AnnouncedRoutes() {
|
||||
if tsaddr.IsExitRoute(route) {
|
||||
continue
|
||||
}
|
||||
|
||||
if slices.Contains(node.ApprovedRoutes, route) {
|
||||
routes = append(routes, route)
|
||||
}
|
||||
@@ -463,6 +484,11 @@ func (node *Node) IsSubnetRouter() bool {
return len(node.SubnetRoutes()) > 0
}

// AllApprovedRoutes returns the combination of SubnetRoutes and ExitRoutes.
func (node *Node) AllApprovedRoutes() []netip.Prefix {
return append(node.SubnetRoutes(), node.ExitRoutes()...)
}
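A worked example (made-up values) of how the route helpers relate:

// AnnouncedRoutes():   10.0.0.0/24, 192.168.1.0/24, 0.0.0.0/0, ::/0
// ApprovedRoutes:      10.0.0.0/24, 0.0.0.0/0, ::/0
//
// SubnetRoutes():      10.0.0.0/24                  (announced AND approved, exit routes excluded)
// ExitRoutes():        0.0.0.0/0, ::/0              (announced AND approved exit routes)
// AllApprovedRoutes(): 10.0.0.0/24, 0.0.0.0/0, ::/0 (what the primary-route table now receives)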
|
||||
|
||||
func (node *Node) String() string {
|
||||
return node.Hostname
|
||||
}
|
||||
@@ -653,6 +679,7 @@ func (node Node) DebugString() string {
|
||||
fmt.Fprintf(&sb, "\tApprovedRoutes: %v\n", node.ApprovedRoutes)
|
||||
fmt.Fprintf(&sb, "\tAnnouncedRoutes: %v\n", node.AnnouncedRoutes())
|
||||
fmt.Fprintf(&sb, "\tSubnetRoutes: %v\n", node.SubnetRoutes())
|
||||
fmt.Fprintf(&sb, "\tExitRoutes: %v\n", node.ExitRoutes())
|
||||
sb.WriteString("\n")
|
||||
|
||||
return sb.String()
|
||||
@@ -678,27 +705,11 @@ func (v NodeView) InIPSet(set *netipx.IPSet) bool {
|
||||
}
|
||||
|
||||
func (v NodeView) CanAccess(matchers []matcher.Match, node2 NodeView) bool {
|
||||
if !v.Valid() || !node2.Valid() {
|
||||
if !v.Valid() {
|
||||
return false
|
||||
}
|
||||
src := v.IPs()
|
||||
allowedIPs := node2.IPs()
|
||||
|
||||
for _, matcher := range matchers {
|
||||
if !matcher.SrcsContainsIPs(src...) {
|
||||
continue
|
||||
}
|
||||
|
||||
if matcher.DestsContainsIP(allowedIPs...) {
|
||||
return true
|
||||
}
|
||||
|
||||
if matcher.DestsOverlapsPrefixes(node2.SubnetRoutes()...) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
return v.ж.CanAccess(matchers, node2.AsStruct())
|
||||
}
|
||||
|
||||
func (v NodeView) CanAccessRoute(matchers []matcher.Match, route netip.Prefix) bool {
|
||||
@@ -730,6 +741,13 @@ func (v NodeView) IsSubnetRouter() bool {
|
||||
return v.ж.IsSubnetRouter()
|
||||
}
|
||||
|
||||
func (v NodeView) AllApprovedRoutes() []netip.Prefix {
|
||||
if !v.Valid() {
|
||||
return nil
|
||||
}
|
||||
return v.ж.AllApprovedRoutes()
|
||||
}
|
||||
|
||||
func (v NodeView) AppendToIPSet(build *netipx.IPSetBuilder) {
|
||||
if !v.Valid() {
|
||||
return
|
||||
@@ -808,6 +826,13 @@ func (v NodeView) ExitRoutes() []netip.Prefix {
|
||||
return v.ж.ExitRoutes()
|
||||
}
|
||||
|
||||
func (v NodeView) IsExitNode() bool {
|
||||
if !v.Valid() {
|
||||
return false
|
||||
}
|
||||
return v.ж.IsExitNode()
|
||||
}
|
||||
|
||||
// RequestTags returns the ACL tags that the node is requesting.
|
||||
func (v NodeView) RequestTags() []string {
|
||||
if !v.Valid() || !v.Hostinfo().Valid() {
|
||||
|
||||
@@ -1611,37 +1611,170 @@ func TestACLAutogroupTagged(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test that only devices owned by the same user can access each other, and that they cannot reach devices of other users.
// Test structure:
// - user1: 2 regular nodes (tests autogroup:self for same-user access)
// - user2: 2 regular nodes (tests autogroup:self for same-user access and cross-user isolation)
// - user-router: 1 node with tag:router-node (tests that autogroup:self doesn't interfere with other rules)
|
||||
func TestACLAutogroupSelf(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
scenario := aclScenario(t,
|
||||
&policyv2.Policy{
|
||||
ACLs: []policyv2.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []policyv2.Alias{ptr.To(policyv2.AutoGroupMember)},
|
||||
Destinations: []policyv2.AliasWithPorts{
|
||||
aliasWithPorts(ptr.To(policyv2.AutoGroupSelf), tailcfg.PortRangeAny),
|
||||
},
|
||||
// Policy with TWO separate ACL rules:
|
||||
// 1. autogroup:member -> autogroup:self (same-user access)
|
||||
// 2. group:home -> tag:router-node (router access)
|
||||
// This tests that autogroup:self doesn't prevent other rules from working
|
||||
policy := &policyv2.Policy{
|
||||
Groups: policyv2.Groups{
|
||||
policyv2.Group("group:home"): []policyv2.Username{
|
||||
policyv2.Username("user1@"),
|
||||
policyv2.Username("user2@"),
|
||||
},
|
||||
},
|
||||
TagOwners: policyv2.TagOwners{
|
||||
policyv2.Tag("tag:router-node"): policyv2.Owners{
|
||||
usernameOwner("user-router@"),
|
||||
},
|
||||
},
|
||||
ACLs: []policyv2.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []policyv2.Alias{ptr.To(policyv2.AutoGroupMember)},
|
||||
Destinations: []policyv2.AliasWithPorts{
|
||||
aliasWithPorts(ptr.To(policyv2.AutoGroupSelf), tailcfg.PortRangeAny),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []policyv2.Alias{groupp("group:home")},
|
||||
Destinations: []policyv2.AliasWithPorts{
|
||||
aliasWithPorts(tagp("tag:router-node"), tailcfg.PortRangeAny),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []policyv2.Alias{tagp("tag:router-node")},
|
||||
Destinations: []policyv2.AliasWithPorts{
|
||||
aliasWithPorts(groupp("group:home"), tailcfg.PortRangeAny),
|
||||
},
|
||||
},
|
||||
},
|
||||
2,
|
||||
)
|
||||
}
|
||||
|
||||
// Create custom scenario: user1 and user2 with regular nodes, plus user-router with tagged node
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: 2,
|
||||
Users: []string{"user1", "user2"},
|
||||
}
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
err := scenario.WaitForTailscaleSyncWithPeerCount(1, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
|
||||
err = scenario.CreateHeadscaleEnv(
|
||||
[]tsic.Option{
|
||||
tsic.WithNetfilter("off"),
|
||||
tsic.WithDockerEntrypoint([]string{
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
|
||||
}),
|
||||
tsic.WithDockerWorkdir("/"),
|
||||
},
|
||||
hsic.WithACLPolicy(policy),
|
||||
hsic.WithTestName("acl-autogroup-self"),
|
||||
hsic.WithEmbeddedDERPServerOnly(),
|
||||
hsic.WithTLS(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Add router node for user-router (single shared router node)
|
||||
networks := scenario.Networks()
|
||||
var network *dockertest.Network
|
||||
if len(networks) > 0 {
|
||||
network = networks[0]
|
||||
}
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
|
||||
routerUser, err := scenario.CreateUser("user-router")
|
||||
require.NoError(t, err)
|
||||
|
||||
authKey, err := scenario.CreatePreAuthKey(routerUser.GetId(), true, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create router node (tagged with tag:router-node)
|
||||
routerClient, err := tsic.New(
|
||||
scenario.Pool(),
|
||||
"unstable",
|
||||
tsic.WithCACert(headscale.GetCert()),
|
||||
tsic.WithHeadscaleName(headscale.GetHostname()),
|
||||
tsic.WithNetwork(network),
|
||||
tsic.WithTags([]string{"tag:router-node"}),
|
||||
tsic.WithNetfilter("off"),
|
||||
tsic.WithDockerEntrypoint([]string{
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
|
||||
}),
|
||||
tsic.WithDockerWorkdir("/"),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = routerClient.WaitForNeedsLogin(integrationutil.PeerSyncTimeout())
|
||||
require.NoError(t, err)
|
||||
|
||||
err = routerClient.Login(headscale.GetEndpoint(), authKey.GetKey())
|
||||
require.NoError(t, err)
|
||||
|
||||
err = routerClient.WaitForRunning(integrationutil.PeerSyncTimeout())
|
||||
require.NoError(t, err)
|
||||
|
||||
userRouterObj := scenario.GetOrCreateUser("user-router")
|
||||
userRouterObj.Clients[routerClient.Hostname()] = routerClient
|
||||
|
||||
user1Clients, err := scenario.GetClients("user1")
|
||||
require.NoError(t, err)
|
||||
|
||||
user2Clients, err := scenario.GetClients("user2")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test that user1's devices can access each other
|
||||
var user1Regular, user2Regular []TailscaleClient
|
||||
for _, client := range user1Clients {
|
||||
for _, peer := range user1Clients {
|
||||
status, err := client.Status()
|
||||
require.NoError(t, err)
|
||||
if status.Self != nil && (status.Self.Tags == nil || status.Self.Tags.Len() == 0) {
|
||||
user1Regular = append(user1Regular, client)
|
||||
}
|
||||
}
|
||||
for _, client := range user2Clients {
|
||||
status, err := client.Status()
|
||||
require.NoError(t, err)
|
||||
if status.Self != nil && (status.Self.Tags == nil || status.Self.Tags.Len() == 0) {
|
||||
user2Regular = append(user2Regular, client)
|
||||
}
|
||||
}
|
||||
|
||||
require.NotEmpty(t, user1Regular, "user1 should have regular (untagged) devices")
|
||||
require.NotEmpty(t, user2Regular, "user2 should have regular (untagged) devices")
|
||||
require.NotNil(t, routerClient, "router node should exist")
|
||||
|
||||
// Wait for all nodes to sync with their expected peer counts
|
||||
// With our ACL policy:
|
||||
// - Regular nodes (user1/user2): 1 same-user regular peer + 1 router-node = 2 peers
|
||||
// - Router node: 2 user1 regular + 2 user2 regular = 4 peers
|
||||
for _, client := range user1Regular {
|
||||
err := client.WaitForPeers(2, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
|
||||
require.NoError(t, err, "user1 regular device %s should see 2 peers (1 same-user peer + 1 router)", client.Hostname())
|
||||
}
|
||||
for _, client := range user2Regular {
|
||||
err := client.WaitForPeers(2, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
|
||||
require.NoError(t, err, "user2 regular device %s should see 2 peers (1 same-user peer + 1 router)", client.Hostname())
|
||||
}
|
||||
err = routerClient.WaitForPeers(4, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
|
||||
require.NoError(t, err, "router should see 4 peers (all group:home regular nodes)")
|
||||
|
||||
// Test that user1's regular devices can access each other
|
||||
for _, client := range user1Regular {
|
||||
for _, peer := range user1Regular {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
@@ -1656,13 +1789,13 @@ func TestACLAutogroupSelf(t *testing.T) {
|
||||
result, err := client.Curl(url)
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, result, 13)
|
||||
}, 10*time.Second, 200*time.Millisecond, "user1 device should reach other user1 device")
|
||||
}, 10*time.Second, 200*time.Millisecond, "user1 device should reach other user1 device via autogroup:self")
|
||||
}
|
||||
}
|
||||
|
||||
// Test that user2's devices can access each other
|
||||
for _, client := range user2Clients {
|
||||
for _, peer := range user2Clients {
|
||||
// Test that user2's regular devices can access each other
|
||||
for _, client := range user2Regular {
|
||||
for _, peer := range user2Regular {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
@@ -1677,36 +1810,64 @@ func TestACLAutogroupSelf(t *testing.T) {
|
||||
result, err := client.Curl(url)
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, result, 13)
|
||||
}, 10*time.Second, 200*time.Millisecond, "user2 device should reach other user2 device")
|
||||
}, 10*time.Second, 200*time.Millisecond, "user2 device should reach other user2 device via autogroup:self")
|
||||
}
|
||||
}
|
||||
|
||||
// Test that devices from different users cannot access each other
|
||||
for _, client := range user1Clients {
|
||||
for _, peer := range user2Clients {
|
||||
// Test that user1's regular devices can access router-node
|
||||
for _, client := range user1Regular {
|
||||
fqdn, err := routerClient.FQDN()
|
||||
require.NoError(t, err)
|
||||
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
|
||||
t.Logf("url from %s (user1) to %s (router-node) - should SUCCEED", client.Hostname(), fqdn)
|
||||
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
result, err := client.Curl(url)
|
||||
assert.NoError(c, err)
|
||||
assert.NotEmpty(c, result, "user1 should be able to access router-node via group:home -> tag:router-node rule")
|
||||
}, 10*time.Second, 200*time.Millisecond, "user1 device should reach router-node (proves autogroup:self doesn't interfere)")
|
||||
}
|
||||
|
||||
// Test that user2's regular devices can access router-node
|
||||
for _, client := range user2Regular {
|
||||
fqdn, err := routerClient.FQDN()
|
||||
require.NoError(t, err)
|
||||
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
|
||||
t.Logf("url from %s (user2) to %s (router-node) - should SUCCEED", client.Hostname(), fqdn)
|
||||
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
result, err := client.Curl(url)
|
||||
assert.NoError(c, err)
|
||||
assert.NotEmpty(c, result, "user2 should be able to access router-node via group:home -> tag:router-node rule")
|
||||
}, 10*time.Second, 200*time.Millisecond, "user2 device should reach router-node (proves autogroup:self doesn't interfere)")
|
||||
}
|
||||
|
||||
// Test that devices from different users cannot access each other's regular devices
|
||||
for _, client := range user1Regular {
|
||||
for _, peer := range user2Regular {
|
||||
fqdn, err := peer.FQDN()
|
||||
require.NoError(t, err)
|
||||
|
||||
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
|
||||
t.Logf("url from %s (user1) to %s (user2) - should FAIL", client.Hostname(), fqdn)
|
||||
t.Logf("url from %s (user1) to %s (user2 regular) - should FAIL", client.Hostname(), fqdn)
|
||||
|
||||
result, err := client.Curl(url)
|
||||
assert.Empty(t, result, "user1 should not be able to access user2's devices with autogroup:self")
|
||||
assert.Error(t, err, "connection from user1 to user2 should fail")
|
||||
assert.Empty(t, result, "user1 should not be able to access user2's regular devices (autogroup:self isolation)")
|
||||
assert.Error(t, err, "connection from user1 to user2 regular device should fail")
|
||||
}
|
||||
}
|
||||
|
||||
for _, client := range user2Clients {
|
||||
for _, peer := range user1Clients {
|
||||
for _, client := range user2Regular {
|
||||
for _, peer := range user1Regular {
|
||||
fqdn, err := peer.FQDN()
|
||||
require.NoError(t, err)
|
||||
|
||||
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
|
||||
t.Logf("url from %s (user2) to %s (user1) - should FAIL", client.Hostname(), fqdn)
|
||||
t.Logf("url from %s (user2) to %s (user1 regular) - should FAIL", client.Hostname(), fqdn)
|
||||
|
||||
result, err := client.Curl(url)
|
||||
assert.Empty(t, result, "user2 should not be able to access user1's devices with autogroup:self")
|
||||
assert.Error(t, err, "connection from user2 to user1 should fail")
|
||||
assert.Empty(t, result, "user2 should not be able to access user1's regular devices (autogroup:self isolation)")
|
||||
assert.Error(t, err, "connection from user2 to user1 regular device should fail")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -223,6 +223,7 @@ func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
require.NoError(t, err)
|
||||
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
err = scenario.CreateHeadscaleEnv([]tsic.Option{},
|
||||
@@ -454,3 +455,4 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -953,6 +953,119 @@ func TestOIDCFollowUpUrl(t *testing.T) {
|
||||
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list after OIDC login")
|
||||
}
|
||||
|
||||
// TestOIDCMultipleOpenedLoginUrls tests the scenario where a client (typically
// on Windows) opens multiple browser tabs with different login URLs and then
// completes authentication in the first opened tab.
//
// The test makes sure that the cookies issued for the first tab are still valid.
|
||||
func TestOIDCMultipleOpenedLoginUrls(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
scenario, err := NewScenario(
|
||||
ScenarioSpec{
|
||||
OIDCUsers: []mockoidc.MockUser{
|
||||
oidcMockUser("user1", true),
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
oidcMap := map[string]string{
|
||||
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
|
||||
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
|
||||
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
|
||||
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnvWithLoginURL(
|
||||
nil,
|
||||
hsic.WithTestName("oidcauthrelog"),
|
||||
hsic.WithConfigEnv(oidcMap),
|
||||
hsic.WithTLS(),
|
||||
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
|
||||
hsic.WithEmbeddedDERPServerOnly(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
|
||||
listUsers, err := headscale.ListUsers()
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, listUsers)
|
||||
|
||||
ts, err := scenario.CreateTailscaleNode(
|
||||
"unstable",
|
||||
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
u1, err := ts.LoginWithURL(headscale.GetEndpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
u2, err := ts.LoginWithURL(headscale.GetEndpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// make sure login URLs are different
|
||||
require.NotEqual(t, u1.String(), u2.String())
|
||||
|
||||
loginClient, err := newLoginHTTPClient(ts.Hostname())
|
||||
require.NoError(t, err)
|
||||
|
||||
// open the first login URL "in browser"
|
||||
_, redirect1, err := doLoginURLWithClient(ts.Hostname(), u1, loginClient, false)
|
||||
require.NoError(t, err)
|
||||
// open the second login URL "in browser"
|
||||
_, redirect2, err := doLoginURLWithClient(ts.Hostname(), u2, loginClient, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// two valid redirects with different state/nonce params
|
||||
require.NotEqual(t, redirect1.String(), redirect2.String())
|
||||
|
||||
// complete auth with the first opened "browser tab"
|
||||
_, redirect1, err = doLoginURLWithClient(ts.Hostname(), redirect1, loginClient, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
listUsers, err = headscale.ListUsers()
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, listUsers, 1)
|
||||
|
||||
wantUsers := []*v1.User{
|
||||
{
|
||||
Id: 1,
|
||||
Name: "user1",
|
||||
Email: "user1@headscale.net",
|
||||
Provider: "oidc",
|
||||
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
|
||||
},
|
||||
}
|
||||
|
||||
sort.Slice(
|
||||
listUsers, func(i, j int) bool {
|
||||
return listUsers[i].GetId() < listUsers[j].GetId()
|
||||
},
|
||||
)
|
||||
|
||||
if diff := cmp.Diff(
|
||||
wantUsers,
|
||||
listUsers,
|
||||
cmpopts.IgnoreUnexported(v1.User{}),
|
||||
cmpopts.IgnoreFields(v1.User{}, "CreatedAt"),
|
||||
); diff != "" {
|
||||
t.Fatalf("unexpected users: %s", diff)
|
||||
}
|
||||
|
||||
assert.EventuallyWithT(
|
||||
t, func(c *assert.CollectT) {
|
||||
listNodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, listNodes, 1)
|
||||
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list after OIDC login",
|
||||
)
|
||||
}
|
||||
|
||||
// TestOIDCReloginSameNodeSameUser tests the scenario where a single Tailscale client
|
||||
// authenticates using OIDC (OpenID Connect), logs out, and then logs back in as the same user.
|
||||
//
|
||||
@@ -1181,3 +1294,131 @@ func TestOIDCReloginSameNodeSameUser(t *testing.T) {
|
||||
}
|
||||
}, 60*time.Second, 2*time.Second, "validating user1 node is online after same-user OIDC relogin")
|
||||
}
|
||||
|
||||
// TestOIDCExpiryAfterRestart validates that node expiry is preserved
// when a tailscaled client restarts and reconnects to headscale.
//
// This test reproduces the bug reported in https://github.com/juanfont/headscale/issues/2862
// where OIDC expiry was reset to 0001-01-01 00:00:00 after tailscaled restart.
//
// Test flow:
// 1. Node logs in with OIDC (gets 72h expiry)
// 2. Verify expiry is set correctly in headscale
// 3. Restart tailscaled container (simulates daemon restart)
// 4. Wait for reconnection
// 5. Verify expiry is still set correctly (not zero).
|
||||
func TestOIDCExpiryAfterRestart(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
scenario, err := NewScenario(ScenarioSpec{
|
||||
OIDCUsers: []mockoidc.MockUser{
|
||||
oidcMockUser("user1", true),
|
||||
},
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
oidcMap := map[string]string{
|
||||
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
|
||||
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
|
||||
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
|
||||
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
|
||||
"HEADSCALE_OIDC_EXPIRY": "72h",
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnvWithLoginURL(
|
||||
nil,
|
||||
hsic.WithTestName("oidcexpiry"),
|
||||
hsic.WithConfigEnv(oidcMap),
|
||||
hsic.WithTLS(),
|
||||
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
|
||||
hsic.WithEmbeddedDERPServerOnly(),
|
||||
hsic.WithDERPAsIP(),
|
||||
)
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create and login tailscale client
|
||||
ts, err := scenario.CreateTailscaleNode("unstable", tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))
|
||||
require.NoError(t, err)
|
||||
|
||||
u, err := ts.LoginWithURL(headscale.GetEndpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = doLoginURL(ts.Hostname(), u)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Logf("Validating initial login and expiry at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
// Verify initial expiry is set
|
||||
var initialExpiry time.Time
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(ct, err)
|
||||
assert.Len(ct, nodes, 1)
|
||||
|
||||
node := nodes[0]
|
||||
assert.NotNil(ct, node.GetExpiry(), "Expiry should be set after OIDC login")
|
||||
|
||||
if node.GetExpiry() != nil {
|
||||
expiryTime := node.GetExpiry().AsTime()
|
||||
assert.False(ct, expiryTime.IsZero(), "Expiry should not be zero time")
|
||||
|
||||
initialExpiry = expiryTime
|
||||
t.Logf("Initial expiry set to: %v (expires in %v)", expiryTime, time.Until(expiryTime))
|
||||
}
|
||||
}, 30*time.Second, 1*time.Second, "validating initial expiry after OIDC login")
|
||||
|
||||
// Now restart the tailscaled container
|
||||
t.Logf("Restarting tailscaled container at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
err = ts.Restart()
|
||||
require.NoError(t, err, "Failed to restart tailscaled container")
|
||||
|
||||
t.Logf("Tailscaled restarted, waiting for reconnection at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
// Wait for the node to come back online
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
status, err := ts.Status()
|
||||
if !assert.NoError(ct, err) {
|
||||
return
|
||||
}
|
||||
|
||||
if !assert.NotNil(ct, status) {
|
||||
return
|
||||
}
|
||||
|
||||
assert.Equal(ct, "Running", status.BackendState)
|
||||
}, 60*time.Second, 2*time.Second, "waiting for tailscale to reconnect after restart")
|
||||
|
||||
// THE CRITICAL TEST: Verify expiry is still set correctly after restart
|
||||
t.Logf("Validating expiry preservation after restart at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(ct, err)
|
||||
assert.Len(ct, nodes, 1, "Should still have exactly 1 node after restart")
|
||||
|
||||
node := nodes[0]
|
||||
assert.NotNil(ct, node.GetExpiry(), "Expiry should NOT be nil after restart")
|
||||
|
||||
if node.GetExpiry() != nil {
|
||||
expiryTime := node.GetExpiry().AsTime()
|
||||
|
||||
// This is the bug check - expiry should NOT be zero time
|
||||
assert.False(ct, expiryTime.IsZero(),
|
||||
"BUG: Expiry was reset to zero time after tailscaled restart! This is issue #2862")
|
||||
|
||||
// Expiry should be exactly the same as before restart
|
||||
assert.Equal(ct, initialExpiry, expiryTime,
|
||||
"Expiry should be exactly the same after restart, got %v, expected %v",
|
||||
expiryTime, initialExpiry)
|
||||
|
||||
t.Logf("SUCCESS: Expiry preserved after restart: %v (expires in %v)",
|
||||
expiryTime, time.Until(expiryTime))
|
||||
}
|
||||
}, 30*time.Second, 1*time.Second, "validating expiry preservation after restart")
|
||||
}
|
||||
|
||||
@@ -860,47 +860,183 @@ func (s *Scenario) RunTailscaleUpWithURL(userStr, loginServer string) error {
|
||||
return fmt.Errorf("failed to up tailscale node: %w", errNoUserAvailable)
|
||||
}
|
||||
|
||||
// doLoginURL visits the given login URL and returns the body as a
|
||||
// string.
|
||||
func doLoginURL(hostname string, loginURL *url.URL) (string, error) {
|
||||
log.Printf("%s login url: %s\n", hostname, loginURL.String())
|
||||
type debugJar struct {
|
||||
inner *cookiejar.Jar
|
||||
mu sync.RWMutex
|
||||
store map[string]map[string]map[string]*http.Cookie // domain -> path -> name -> cookie
|
||||
}
|
||||
|
||||
var err error
|
||||
func newDebugJar() (*debugJar, error) {
|
||||
jar, err := cookiejar.New(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &debugJar{
|
||||
inner: jar,
|
||||
store: make(map[string]map[string]map[string]*http.Cookie),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (j *debugJar) SetCookies(u *url.URL, cookies []*http.Cookie) {
|
||||
j.inner.SetCookies(u, cookies)
|
||||
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
|
||||
for _, c := range cookies {
|
||||
if c == nil || c.Name == "" {
|
||||
continue
|
||||
}
|
||||
domain := c.Domain
|
||||
if domain == "" {
|
||||
domain = u.Hostname()
|
||||
}
|
||||
path := c.Path
|
||||
if path == "" {
|
||||
path = "/"
|
||||
}
|
||||
if _, ok := j.store[domain]; !ok {
|
||||
j.store[domain] = make(map[string]map[string]*http.Cookie)
|
||||
}
|
||||
if _, ok := j.store[domain][path]; !ok {
|
||||
j.store[domain][path] = make(map[string]*http.Cookie)
|
||||
}
|
||||
j.store[domain][path][c.Name] = copyCookie(c)
|
||||
}
|
||||
}
|
||||
|
||||
func (j *debugJar) Cookies(u *url.URL) []*http.Cookie {
|
||||
return j.inner.Cookies(u)
|
||||
}
|
||||
|
||||
func (j *debugJar) Dump(w io.Writer) {
|
||||
j.mu.RLock()
|
||||
defer j.mu.RUnlock()
|
||||
|
||||
for domain, paths := range j.store {
|
||||
fmt.Fprintf(w, "Domain: %s\n", domain)
|
||||
for path, byName := range paths {
|
||||
fmt.Fprintf(w, " Path: %s\n", path)
|
||||
for _, c := range byName {
|
||||
fmt.Fprintf(
|
||||
w, " %s=%s; Expires=%v; Secure=%v; HttpOnly=%v; SameSite=%v\n",
|
||||
c.Name, c.Value, c.Expires, c.Secure, c.HttpOnly, c.SameSite,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func copyCookie(c *http.Cookie) *http.Cookie {
|
||||
cc := *c
|
||||
return &cc
|
||||
}
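A usage sketch for the debug jar outside of the login helpers; the URL is a placeholder, not a real endpoint.

jar, err := newDebugJar()
if err != nil {
	log.Fatalf("creating debug jar: %s", err)
}
hc := &http.Client{Jar: jar}

// Cookies set by any response are recorded by the jar in addition to the
// normal cookiejar behaviour.
if resp, err := hc.Get("https://headscale.example.com/register/xyz"); err == nil {
	resp.Body.Close()
}

// Print every cookie the jar has seen, grouped by domain and path.
jar.Dump(os.Stdout)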
|
||||
|
||||
func newLoginHTTPClient(hostname string) (*http.Client, error) {
|
||||
hc := &http.Client{
|
||||
Transport: LoggingRoundTripper{Hostname: hostname},
|
||||
}
|
||||
hc.Jar, err = cookiejar.New(nil)
|
||||
|
||||
jar, err := newDebugJar()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("%s failed to create cookiejar : %w", hostname, err)
|
||||
return nil, fmt.Errorf("%s failed to create cookiejar: %w", hostname, err)
|
||||
}
|
||||
|
||||
hc.Jar = jar
|
||||
|
||||
return hc, nil
|
||||
}
|
||||
|
||||
// doLoginURL visits the given login URL and returns the body as a string.
|
||||
func doLoginURL(hostname string, loginURL *url.URL) (string, error) {
|
||||
log.Printf("%s login url: %s\n", hostname, loginURL.String())
|
||||
|
||||
hc, err := newLoginHTTPClient(hostname)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
body, _, err := doLoginURLWithClient(hostname, loginURL, hc, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return body, nil
|
||||
}
|
||||
|
||||
// doLoginURLWithClient performs the login request using the provided HTTP client.
// When followRedirects is false, it will return the first redirect without following it.
func doLoginURLWithClient(hostname string, loginURL *url.URL, hc *http.Client, followRedirects bool) (
	string,
	*url.URL,
	error,
) {
	if hc == nil {
		return "", nil, fmt.Errorf("%s http client is nil", hostname)
	}

	if loginURL == nil {
		return "", nil, fmt.Errorf("%s login url is nil", hostname)
	}

	log.Printf("%s logging in with url: %s", hostname, loginURL.String())
	ctx := context.Background()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
	if err != nil {
		return "", nil, fmt.Errorf("%s failed to create http request: %w", hostname, err)
	}

	originalRedirect := hc.CheckRedirect
	if !followRedirects {
		hc.CheckRedirect = func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		}
	}
	defer func() {
		hc.CheckRedirect = originalRedirect
	}()

	resp, err := hc.Do(req)
	if err != nil {
		return "", nil, fmt.Errorf("%s failed to send http request: %w", hostname, err)
	}

	defer resp.Body.Close()

	bodyBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", nil, fmt.Errorf("%s failed to read response body: %w", hostname, err)
	}
	body := string(bodyBytes)

	var redirectURL *url.URL
	if resp.StatusCode >= http.StatusMultipleChoices && resp.StatusCode < http.StatusBadRequest {
		redirectURL, err = resp.Location()
		if err != nil {
			return body, nil, fmt.Errorf("%s failed to resolve redirect location: %w", hostname, err)
		}
	}

	if followRedirects && resp.StatusCode != http.StatusOK {
		log.Printf("body: %s", body)

		return body, redirectURL, fmt.Errorf("%s unexpected status code %d", hostname, resp.StatusCode)
	}

	if resp.StatusCode >= http.StatusBadRequest {
		log.Printf("body: %s", body)

		return body, redirectURL, fmt.Errorf("%s unexpected status code %d", hostname, resp.StatusCode)
	}

	if hc.Jar != nil {
		if jar, ok := hc.Jar.(*debugJar); ok {
			jar.Dump(os.Stdout)
		} else {
			log.Printf("cookies: %+v", hc.Jar.Cookies(loginURL))
		}
	}

	return body, redirectURL, nil
}
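
With followRedirects set to false, the same client (and therefore the same cookie jar) can be pointed at several login URLs while only capturing where each one would redirect, which is useful when a test opens more than one login URL at once. Below is a rough usage sketch, not lifted from the test suite; the hostname string is only used for log prefixes.

```go
func captureFirstRedirect(loginURL *url.URL) (*url.URL, error) {
	hc, err := newLoginHTTPClient("ts-client")
	if err != nil {
		return nil, err
	}

	// followRedirects=false: the helper returns the Location of the first
	// 3xx response instead of following it to the identity provider.
	_, redirect, err := doLoginURLWithClient("ts-client", loginURL, hc, false)
	if err != nil {
		return nil, err
	}

	return redirect, nil
}
```
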
var errParseAuthPage = errors.New("failed to parse auth page")

@@ -29,6 +29,7 @@ type TailscaleClient interface {
	Login(loginServer, authKey string) error
	LoginWithURL(loginServer string) (*url.URL, error)
	Logout() error
	Restart() error
	Up() error
	Down() error
	IPs() ([]netip.Addr, error)
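
Since Restart() is now part of TailscaleClient, every implementation has to provide it, and a compile-time assertion is the usual way to catch a missing method early. The sketch below is illustrative only and is written as if the interface and the container implementation were visible in one scope; in the tree the implementation lives in its own subpackage.

```go
// Fails to build if *TailscaleInContainer ever lacks a method required by
// TailscaleClient, such as the newly added Restart() error.
var _ TailscaleClient = (*TailscaleInContainer)(nil)
```
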
@@ -555,6 +555,39 @@ func (t *TailscaleInContainer) Logout() error {
	return t.waitForBackendState("NeedsLogin", integrationutil.PeerSyncTimeout())
}

// Restart restarts the Tailscale container using the Docker API.
// This simulates a container restart (e.g., docker restart or a Kubernetes pod restart).
// The container's entrypoint will re-execute, which typically includes running
// "tailscale up" with any auth keys stored in environment variables.
func (t *TailscaleInContainer) Restart() error {
	if t.container == nil {
		return fmt.Errorf("container not initialized")
	}

	// Use the Docker API to restart the container.
	err := t.pool.Client.RestartContainer(t.container.Container.ID, 30)
	if err != nil {
		return fmt.Errorf("failed to restart container %s: %w", t.hostname, err)
	}

	// Wait for the container to be back up and tailscaled to be ready.
	// Exponential backoff polls until a command can be executed successfully.
	_, err = backoff.Retry(context.Background(), func() (struct{}, error) {
		// Try a simple command to verify the container is responsive.
		_, _, err := t.Execute([]string{"tailscale", "version"}, dockertestutil.ExecuteCommandTimeout(5*time.Second))
		if err != nil {
			return struct{}{}, fmt.Errorf("container not ready: %w", err)
		}
		return struct{}{}, nil
	}, backoff.WithBackOff(backoff.NewExponentialBackOff()), backoff.WithMaxElapsedTime(30*time.Second))

	if err != nil {
		return fmt.Errorf("timeout waiting for container %s to restart and become ready: %w", t.hostname, err)
	}

	return nil
}
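
A rough sketch of driving Restart from a test, assuming the standard testing package and a *TailscaleInContainer that is already registered; the helper name and comments are hypothetical, not the body of the actual restart tests.

```go
func restartAndRequireReady(t *testing.T, ts *TailscaleInContainer) {
	t.Helper()

	// Restart only returns once `tailscale version` succeeds inside the
	// restarted container, so no extra readiness polling is needed here.
	if err := ts.Restart(); err != nil {
		t.Fatalf("restart failed: %s", err)
	}

	// The container entrypoint re-runs `tailscale up`, so callers typically
	// re-check backend state or node expiry after this point.
}
```
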
// Helper that runs `tailscale up` with no arguments.
func (t *TailscaleInContainer) Up() error {
	command := []string{

@@ -104,7 +104,7 @@ extra:
    - icon: fontawesome/brands/discord
      link: https://discord.gg/c84AZQhmpx
  headscale:
    version: 0.27.1

# Extensions
markdown_extensions: