Mirror of https://github.com/juanfont/headscale.git (synced 2026-01-15 13:43:36 +01:00)

Compare commits: 14 commits, dependabot...v0.27.1
| Author | SHA1 | Date |
|---|---|---|
| | f658a8eacd | |
| | 785168a7b8 | |
| | 3bd4ecd9cd | |
| | 3455d1cb59 | |
| | ddd31ba774 | |
| | 4a8dc2d445 | |
| | 773a46a968 | |
| | 4728a2ba9e | |
| | abed534628 | |
| | 21e3f2598d | |
| | a28d9bed6d | |
| | 28faf8cd71 | |
| | 5a2ee0c391 | |
| | 5cd15c3656 | |
.github/workflows/test-integration.yaml (vendored, 2 changes)
@@ -38,7 +38,9 @@ jobs:
          - TestOIDCAuthenticationWithPKCE
          - TestOIDCReloginSameNodeNewUser
          - TestOIDCFollowUpUrl
          - TestOIDCMultipleOpenedLoginUrls
          - TestOIDCReloginSameNodeSameUser
          - TestOIDCExpiryAfterRestart
          - TestAuthWebFlowAuthenticationPingAll
          - TestAuthWebFlowLogoutAndReloginSameUser
          - TestAuthWebFlowLogoutAndReloginNewUser
CHANGELOG.md (35 changes)
@@ -4,8 +4,37 @@

### Changes

## 0.27.1 (2025-11-11)

**Minimum supported Tailscale client version: v1.64.0**

### Changes

- Expire nodes with a custom timestamp
  [#2828](https://github.com/juanfont/headscale/pull/2828)
- Fix issue where node expiry was reset when tailscaled restarts
  [#2875](https://github.com/juanfont/headscale/pull/2875)
- Fix OIDC authentication when multiple login URLs are opened
  [#2861](https://github.com/juanfont/headscale/pull/2861)
- Fix node re-registration failing with expired auth keys
  [#2859](https://github.com/juanfont/headscale/pull/2859)
- Remove old unused database tables and indices
  [#2844](https://github.com/juanfont/headscale/pull/2844)
  [#2872](https://github.com/juanfont/headscale/pull/2872)
- Ignore litestream tables during database validation
  [#2843](https://github.com/juanfont/headscale/pull/2843)
- Fix exit node visibility to respect ACL rules
  [#2855](https://github.com/juanfont/headscale/pull/2855)
- Fix SSH policy becoming empty when unknown user is referenced
  [#2874](https://github.com/juanfont/headscale/pull/2874)
- Fix policy validation when using bypass-grpc mode
  [#2854](https://github.com/juanfont/headscale/pull/2854)
- Fix autogroup:self interaction with other ACL rules
  [#2842](https://github.com/juanfont/headscale/pull/2842)
- Fix flaky DERP map shuffle test
  [#2848](https://github.com/juanfont/headscale/pull/2848)
- Use current stable base images for Debian and Alpine containers
  [#2827](https://github.com/juanfont/headscale/pull/2827)

## 0.27.0 (2025-10-27)

@@ -89,7 +118,8 @@ the code base over time and make it more correct and efficient.
  [#2692](https://github.com/juanfont/headscale/pull/2692)
- Policy: Zero or empty destination port is no longer allowed
  [#2606](https://github.com/juanfont/headscale/pull/2606)
- Stricter hostname validation [#2383](https://github.com/juanfont/headscale/pull/2383)
- Stricter hostname validation
  [#2383](https://github.com/juanfont/headscale/pull/2383)
- Hostnames must be valid DNS labels (2-63 characters, alphanumeric and
  hyphens only, cannot start/end with hyphen)
- **Client Registration (New Nodes)**: Invalid hostnames are automatically

@@ -144,7 +174,8 @@ the code base over time and make it more correct and efficient.
  [#2776](https://github.com/juanfont/headscale/pull/2776)
- EXPERIMENTAL: Add support for `autogroup:self`
  [#2789](https://github.com/juanfont/headscale/pull/2789)
- Add healthcheck command [#2659](https://github.com/juanfont/headscale/pull/2659)
- Add healthcheck command
  [#2659](https://github.com/juanfont/headscale/pull/2659)

## 0.26.1 (2025-06-06)
@@ -216,6 +216,39 @@ nodes.
}
```

### Restrict access to exit nodes per user or group

A user can use _any_ of the available exit nodes with `autogroup:internet`. Alternatively, the ACL snippet below assigns
each user a specific exit node while hiding all other exit nodes. The user `alice` can only use exit node `exit1` while
user `bob` can only use exit node `exit2`.

```json title="Assign each user a dedicated exit node"
{
  "hosts": {
    "exit1": "100.64.0.1/32",
    "exit2": "100.64.0.2/32"
  },
  "acls": [
    {
      "action": "accept",
      "src": ["alice@"],
      "dst": ["exit1:*"]
    },
    {
      "action": "accept",
      "src": ["bob@"],
      "dst": ["exit2:*"]
    }
  ]
}
```

!!! warning

    - The above implementation is Headscale specific and will likely be removed once [support for
      `via`](https://github.com/juanfont/headscale/issues/2409) is available.
    - Beware that a user can also connect to any port of the exit node itself.

### Automatically approve an exit node with auto approvers

The initial setup of an exit node usually requires manual approval on the control server before it can be used by a node
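
The following is an illustrative sketch only and is not part of this diff: an auto-approver policy for exit nodes could look roughly like the snippet below. The `tag:exit` tag and the `tagOwners` entry are assumptions chosen for the example; consult the auto approvers documentation for the authoritative syntax.

```json title="Sketch: automatically approve tagged exit nodes (example tag name)"
{
  "tagOwners": {
    "tag:exit": ["alice@"]
  },
  "autoApprovers": {
    "exitNode": ["tag:exit"]
  }
}
```

With a policy along these lines, a node that advertises itself as an exit node and carries `tag:exit` would not require a manual approval step on the control server.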
go.mod (2 changes)
@@ -182,7 +182,7 @@ require (
	github.com/ncruces/go-strftime v1.0.0 // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/image-spec v1.1.1 // indirect
	github.com/opencontainers/runc v1.3.3 // indirect
	github.com/opencontainers/runc v1.3.2 // indirect
	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
	github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 // indirect
	github.com/pkg/errors v0.9.1 // indirect
go.sum (10 changes)
@@ -124,6 +124,8 @@ github.com/creachadair/command v0.2.0 h1:qTA9cMMhZePAxFoNdnk6F6nn94s1qPndIg9hJbq
|
||||
github.com/creachadair/command v0.2.0/go.mod h1:j+Ar+uYnFsHpkMeV9kGj6lJ45y9u2xqtg8FYy6cm+0o=
|
||||
github.com/creachadair/flax v0.0.5 h1:zt+CRuXQASxwQ68e9GHAOnEgAU29nF0zYMHOCrL5wzE=
|
||||
github.com/creachadair/flax v0.0.5/go.mod h1:F1PML0JZLXSNDMNiRGK2yjm5f+L9QCHchyHBldFymj8=
|
||||
github.com/creachadair/mds v0.25.2 h1:xc0S0AfDq5GX9KUR5sLvi5XjA61/P6S5e0xFs1vA18Q=
|
||||
github.com/creachadair/mds v0.25.2/go.mod h1:+s4CFteFRj4eq2KcGHW8Wei3u9NyzSPzNV32EvjyK/Q=
|
||||
github.com/creachadair/mds v0.25.10 h1:9k9JB35D1xhOCFl0liBhagBBp8fWWkKZrA7UXsfoHtA=
|
||||
github.com/creachadair/mds v0.25.10/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs=
|
||||
github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc=
|
||||
@@ -276,6 +278,8 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jsimonetti/rtnetlink v1.4.1 h1:JfD4jthWBqZMEffc5RjgmlzpYttAVw1sdnmiNaPO3hE=
|
||||
github.com/jsimonetti/rtnetlink v1.4.1/go.mod h1:xJjT7t59UIZ62GLZbv6PLLo8VFrostJMPBAheR6OM8w=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
|
||||
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
@@ -350,8 +354,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
|
||||
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
||||
github.com/opencontainers/runc v1.3.3 h1:qlmBbbhu+yY0QM7jqfuat7M1H3/iXjju3VkP9lkFQr4=
|
||||
github.com/opencontainers/runc v1.3.3/go.mod h1:D7rL72gfWxVs9cJ2/AayxB0Hlvn9g0gaF1R7uunumSI=
|
||||
github.com/opencontainers/runc v1.3.2 h1:GUwgo0Fx9M/pl2utaSYlJfdBcXAB/CZXDxe322lvJ3Y=
|
||||
github.com/opencontainers/runc v1.3.2/go.mod h1:F7UQQEsxcjUNnFpT1qPLHZBKYP7yWwk6hq8suLy9cl0=
|
||||
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
|
||||
github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw=
|
||||
github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE=
|
||||
@@ -459,6 +463,8 @@ github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+y
|
||||
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc=
|
||||
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d h1:mnqtPWYyvNiPU9l9tzO2YbHXU/xV664XthZYA26lOiE=
|
||||
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d/go.mod h1:9BzmlFc3OLqLzLTF/5AY+BMs+clxMqyhSGzgXIm8mNI=
|
||||
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694 h1:95eIP97c88cqAFU/8nURjgI9xxPbD+Ci6mY/a79BI/w=
|
||||
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694/go.mod h1:veguaG8tVg1H/JG5RfpoUW41I+O8ClPElo/fTYr8mMk=
|
||||
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993 h1:FyiiAvDAxpB0DrW2GW3KOVfi3YFOtsQUEeFWbf55JJU=
|
||||
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993/go.mod h1:xJkMmR3t+thnUQhA3Q4m2VSlS5pcOq+CIjmU/xfKKx4=
|
||||
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97 h1:JJkDnrAhHvOCttk8z9xeZzcDlzzkRA7+Duxj9cwOyxk=
|
||||
|
||||
@@ -71,6 +71,13 @@ func (h *Headscale) handleRegister(
	// We do not look up nodes by [key.MachinePublic] as it might belong to multiple
	// nodes, separated by users and this path is handling expiring/logout paths.
	if node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok {
		// When tailscaled restarts, it sends RegisterRequest with Auth=nil and Expiry=zero.
		// Return the current node state without modification.
		// See: https://github.com/juanfont/headscale/issues/2862
		if req.Expiry.IsZero() && node.Expiry().Valid() && !node.IsExpired() {
			return nodeToRegisterResponse(node), nil
		}

		resp, err := h.handleLogout(node, req, machineKey)
		if err != nil {
			return nil, fmt.Errorf("handling existing node: %w", err)

@@ -173,6 +180,7 @@ func (h *Headscale) handleLogout(
	}

	// If the request expiry is in the past, we consider it a logout.
	// Zero expiry is handled in handleRegister() before calling this function.
	if req.Expiry.Before(time.Now()) {
		log.Debug().
			Uint64("node.id", node.ID().Uint64()).
@@ -3004,3 +3004,296 @@ func createTestApp(t *testing.T) *Headscale {
|
||||
|
||||
return app
|
||||
}
|
||||
|
||||
// TestGitHubIssue2830_NodeRestartWithUsedPreAuthKey tests the scenario reported in
|
||||
// https://github.com/juanfont/headscale/issues/2830
|
||||
//
|
||||
// Scenario:
|
||||
// 1. Node registers successfully with a single-use pre-auth key
|
||||
// 2. Node is running fine
|
||||
// 3. Node restarts (e.g., after headscale upgrade or tailscale container restart)
|
||||
// 4. Node sends RegisterRequest with the same pre-auth key
|
||||
// 5. BUG: Headscale rejects the request with "authkey expired" or "authkey already used"
|
||||
//
|
||||
// Expected behavior:
|
||||
// When an existing node (identified by matching NodeKey + MachineKey) re-registers
|
||||
// with a pre-auth key that it previously used, the registration should succeed.
|
||||
// The node is not creating a new registration - it's re-authenticating the same device.
|
||||
func TestGitHubIssue2830_NodeRestartWithUsedPreAuthKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
app := createTestApp(t)
|
||||
|
||||
// Create user and single-use pre-auth key
|
||||
user := app.state.CreateUserForTest("test-user")
|
||||
pak, err := app.state.CreatePreAuthKey(types.UserID(user.ID), false, false, nil, nil) // reusable=false
|
||||
require.NoError(t, err)
|
||||
require.False(t, pak.Reusable, "key should be single-use for this test")
|
||||
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey := key.NewNode()
|
||||
|
||||
// STEP 1: Initial registration with pre-auth key (simulates fresh node joining)
|
||||
initialReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key,
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
t.Log("Step 1: Initial registration with pre-auth key")
|
||||
initialResp, err := app.handleRegister(context.Background(), initialReq, machineKey.Public())
|
||||
require.NoError(t, err, "initial registration should succeed")
|
||||
require.NotNil(t, initialResp)
|
||||
assert.True(t, initialResp.MachineAuthorized, "node should be authorized")
|
||||
assert.False(t, initialResp.NodeKeyExpired, "node key should not be expired")
|
||||
|
||||
// Verify node was created in database
|
||||
node, found := app.state.GetNodeByNodeKey(nodeKey.Public())
|
||||
require.True(t, found, "node should exist after initial registration")
|
||||
assert.Equal(t, "test-node", node.Hostname())
|
||||
assert.Equal(t, nodeKey.Public(), node.NodeKey())
|
||||
assert.Equal(t, machineKey.Public(), node.MachineKey())
|
||||
|
||||
// Verify pre-auth key is now marked as used
|
||||
usedPak, err := app.state.GetPreAuthKey(pak.Key)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, usedPak.Used, "pre-auth key should be marked as used after initial registration")
|
||||
|
||||
// STEP 2: Simulate node restart - node sends RegisterRequest again with same pre-auth key
|
||||
// This happens when:
|
||||
// - Tailscale container restarts
|
||||
// - Tailscaled service restarts
|
||||
// - System reboots
|
||||
// The Tailscale client persists the pre-auth key in its state and sends it on every registration
|
||||
t.Log("Step 2: Node restart - re-registration with same (now used) pre-auth key")
|
||||
restartReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key, // Same key, now marked as Used=true
|
||||
},
|
||||
NodeKey: nodeKey.Public(), // Same node key
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
// BUG: This fails with "authkey already used" or "authkey expired"
|
||||
// EXPECTED: Should succeed because it's the same node re-registering
|
||||
restartResp, err := app.handleRegister(context.Background(), restartReq, machineKey.Public())
|
||||
|
||||
// This is the assertion that currently FAILS in v0.27.0
|
||||
assert.NoError(t, err, "BUG: existing node re-registration with its own used pre-auth key should succeed")
|
||||
if err != nil {
|
||||
t.Logf("Error received (this is the bug): %v", err)
|
||||
t.Logf("Expected behavior: Node should be able to re-register with the same pre-auth key it used initially")
|
||||
return // Stop here to show the bug clearly
|
||||
}
|
||||
|
||||
require.NotNil(t, restartResp)
|
||||
assert.True(t, restartResp.MachineAuthorized, "node should remain authorized after restart")
|
||||
assert.False(t, restartResp.NodeKeyExpired, "node key should not be expired after restart")
|
||||
|
||||
// Verify it's the same node (not a duplicate)
|
||||
nodeAfterRestart, found := app.state.GetNodeByNodeKey(nodeKey.Public())
|
||||
require.True(t, found, "node should still exist after restart")
|
||||
assert.Equal(t, node.ID(), nodeAfterRestart.ID(), "should be the same node, not a new one")
|
||||
assert.Equal(t, "test-node", nodeAfterRestart.Hostname())
|
||||
}
|
||||
|
||||
// TestNodeReregistrationWithReusablePreAuthKey tests that reusable keys work correctly
|
||||
// for node re-registration.
|
||||
func TestNodeReregistrationWithReusablePreAuthKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
app := createTestApp(t)
|
||||
|
||||
user := app.state.CreateUserForTest("test-user")
|
||||
pak, err := app.state.CreatePreAuthKey(types.UserID(user.ID), true, false, nil, nil) // reusable=true
|
||||
require.NoError(t, err)
|
||||
require.True(t, pak.Reusable)
|
||||
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey := key.NewNode()
|
||||
|
||||
// Initial registration
|
||||
initialReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key,
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "reusable-test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
initialResp, err := app.handleRegister(context.Background(), initialReq, machineKey.Public())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, initialResp)
|
||||
assert.True(t, initialResp.MachineAuthorized)
|
||||
|
||||
// Node restart - re-registration with reusable key
|
||||
restartReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key, // Reusable key
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "reusable-test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
restartResp, err := app.handleRegister(context.Background(), restartReq, machineKey.Public())
|
||||
require.NoError(t, err, "reusable key should allow re-registration")
|
||||
require.NotNil(t, restartResp)
|
||||
assert.True(t, restartResp.MachineAuthorized)
|
||||
assert.False(t, restartResp.NodeKeyExpired)
|
||||
}
|
||||
|
||||
// TestNodeReregistrationWithExpiredPreAuthKey tests that truly expired keys
|
||||
// are still rejected even for existing nodes.
|
||||
func TestNodeReregistrationWithExpiredPreAuthKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
app := createTestApp(t)
|
||||
|
||||
user := app.state.CreateUserForTest("test-user")
|
||||
expiry := time.Now().Add(-1 * time.Hour) // Already expired
|
||||
pak, err := app.state.CreatePreAuthKey(types.UserID(user.ID), true, false, &expiry, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey := key.NewNode()
|
||||
|
||||
// Try to register with expired key
|
||||
req := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key,
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "expired-key-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
_, err = app.handleRegister(context.Background(), req, machineKey.Public())
|
||||
assert.Error(t, err, "expired pre-auth key should be rejected")
|
||||
assert.Contains(t, err.Error(), "authkey expired", "error should mention key expiration")
|
||||
}
|
||||
// TestGitHubIssue2830_ExistingNodeCanReregisterWithUsedPreAuthKey tests that an existing node
|
||||
// can re-register using a pre-auth key that's already marked as Used=true, as long as:
|
||||
// 1. The node is re-registering with the same MachineKey it originally used
|
||||
// 2. The node is using the same pre-auth key it was originally registered with (AuthKeyID matches)
|
||||
//
|
||||
// This is the fix for GitHub issue #2830: https://github.com/juanfont/headscale/issues/2830
|
||||
//
|
||||
// Background: When Docker/Kubernetes containers restart, they keep their persistent state
|
||||
// (including the MachineKey), but container entrypoints unconditionally run:
|
||||
// tailscale up --authkey=$TS_AUTHKEY
|
||||
//
|
||||
// This caused nodes to be rejected after restart because the pre-auth key was already
|
||||
// marked as Used=true from the initial registration. The fix allows re-registration of
|
||||
// existing nodes with their own used keys.
|
||||
func TestGitHubIssue2830_ExistingNodeCanReregisterWithUsedPreAuthKey(t *testing.T) {
|
||||
app := createTestApp(t)
|
||||
|
||||
// Create a user
|
||||
user := app.state.CreateUserForTest("testuser")
|
||||
|
||||
// Create a SINGLE-USE pre-auth key (reusable=false)
|
||||
// This is the type of key that triggers the bug in issue #2830
|
||||
preAuthKey, err := app.state.CreatePreAuthKey(types.UserID(user.ID), false, false, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.False(t, preAuthKey.Reusable, "Pre-auth key must be single-use to test issue #2830")
|
||||
require.False(t, preAuthKey.Used, "Pre-auth key should not be used yet")
|
||||
|
||||
// Generate node keys for the client
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey := key.NewNode()
|
||||
|
||||
// Step 1: Initial registration with the pre-auth key
|
||||
// This simulates the first time the container starts and runs 'tailscale up --authkey=...'
|
||||
initialReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: preAuthKey.Key,
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "issue-2830-test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
initialResp, err := app.handleRegisterWithAuthKey(initialReq, machineKey.Public())
|
||||
require.NoError(t, err, "Initial registration should succeed")
|
||||
require.True(t, initialResp.MachineAuthorized, "Node should be authorized after initial registration")
|
||||
require.NotNil(t, initialResp.User, "User should be set in response")
|
||||
require.Equal(t, "testuser", initialResp.User.DisplayName, "User should match the pre-auth key's user")
|
||||
|
||||
// Verify the pre-auth key is now marked as Used
|
||||
updatedKey, err := app.state.GetPreAuthKey(preAuthKey.Key)
|
||||
require.NoError(t, err)
|
||||
require.True(t, updatedKey.Used, "Pre-auth key should be marked as Used after initial registration")
|
||||
|
||||
// Step 2: Container restart scenario
|
||||
// The container keeps its MachineKey (persistent state), but the entrypoint script
|
||||
// unconditionally runs 'tailscale up --authkey=$TS_AUTHKEY' again
|
||||
//
|
||||
// WITHOUT THE FIX: This would fail with "authkey already used" error
|
||||
// WITH THE FIX: This succeeds because it's the same node re-registering with its own key
|
||||
|
||||
// Simulate sending the same RegisterRequest again (same MachineKey, same AuthKey)
|
||||
// This is exactly what happens when a container restarts
|
||||
reregisterReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: preAuthKey.Key, // Same key, now marked as Used=true
|
||||
},
|
||||
NodeKey: nodeKey.Public(), // Same NodeKey
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "issue-2830-test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
reregisterResp, err := app.handleRegisterWithAuthKey(reregisterReq, machineKey.Public()) // Same MachineKey
|
||||
require.NoError(t, err, "Re-registration with same MachineKey and used pre-auth key should succeed (fixes #2830)")
|
||||
require.True(t, reregisterResp.MachineAuthorized, "Node should remain authorized after re-registration")
|
||||
require.NotNil(t, reregisterResp.User, "User should be set in re-registration response")
|
||||
require.Equal(t, "testuser", reregisterResp.User.DisplayName, "User should remain the same")
|
||||
|
||||
// Verify that only ONE node was created (not a duplicate)
|
||||
nodes := app.state.ListNodesByUser(types.UserID(user.ID))
|
||||
require.Equal(t, 1, nodes.Len(), "Should have exactly one node (no duplicates created)")
|
||||
require.Equal(t, "issue-2830-test-node", nodes.At(0).Hostname(), "Node hostname should match")
|
||||
|
||||
// Step 3: Verify that a DIFFERENT machine cannot use the same used key
|
||||
// This ensures we didn't break the security model - only the original node can re-register
|
||||
differentMachineKey := key.NewMachine()
|
||||
differentNodeKey := key.NewNode()
|
||||
|
||||
attackReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: preAuthKey.Key, // Try to use the same key
|
||||
},
|
||||
NodeKey: differentNodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "attacker-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
_, err = app.handleRegisterWithAuthKey(attackReq, differentMachineKey.Public())
|
||||
require.Error(t, err, "Different machine should NOT be able to use the same used pre-auth key")
|
||||
require.Contains(t, err.Error(), "already used", "Error should indicate key is already used")
|
||||
|
||||
// Verify still only one node (the original one)
|
||||
nodesAfterAttack := app.state.ListNodesByUser(types.UserID(user.ID))
|
||||
require.Equal(t, 1, nodesAfterAttack.Len(), "Should still have exactly one node (attack prevented)")
|
||||
}
|
||||
|
||||
@@ -952,6 +952,41 @@ AND auth_key_id NOT IN (
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
// Drop all indices that are no longer in use and has existed.
|
||||
// They potentially still present from broken migrations in the past.
|
||||
// They should all be cleaned up by the db engine, but we are a bit
|
||||
// conservative to ensure all our previous mess is cleaned up.
|
||||
ID: "202511101554-drop-old-idx",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
for _, oldIdx := range []struct{ name, table string }{
|
||||
{"idx_namespaces_deleted_at", "namespaces"},
|
||||
{"idx_routes_deleted_at", "routes"},
|
||||
{"idx_shared_machines_deleted_at", "shared_machines"},
|
||||
} {
|
||||
err := tx.Migrator().DropIndex(oldIdx.table, oldIdx.name)
|
||||
if err != nil {
|
||||
log.Trace().
|
||||
Str("index", oldIdx.name).
|
||||
Str("table", oldIdx.table).
|
||||
Err(err).
|
||||
Msg("Error dropping old index, continuing...")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Rollback: func(tx *gorm.DB) error {
|
||||
return nil
|
||||
},
|
||||
},
|
||||
|
||||
// Migrations **above** this points will be REMOVED in version **0.29.0**
|
||||
// This is to clean up a lot of old migrations that is seldom used
|
||||
// and carries a lot of technical debt.
|
||||
// Any new migrations should be added after the comment below and follow
|
||||
// the rules it sets out.
|
||||
|
||||
// From this point, the following rules must be followed:
|
||||
// - NEVER use gorm.AutoMigrate, write the exact migration steps needed
|
||||
// - AutoMigrate depends on the struct staying exactly the same, which it won't over time.
|
||||
|
||||
@@ -325,7 +325,11 @@ func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) {
			}

			if changed {
				err := tx.Save(node).Error
				// Use Updates() with Select() to only update IP fields, avoiding overwriting
				// other fields like Expiry. We need Select() because Updates() alone skips
				// zero values, but we DO want to update IPv4/IPv6 to nil when removing them.
				// See issue #2862.
				err := tx.Model(node).Select("ipv4", "ipv6").Updates(node).Error
				if err != nil {
					return fmt.Errorf("saving node(%d) after adding IPs: %w", node.ID, err)
				}
@@ -452,13 +452,6 @@ func NodeSetMachineKey(
|
||||
}).Error
|
||||
}
|
||||
|
||||
// NodeSave saves a node object to the database, prefer to use a specific save method rather
|
||||
// than this. It is intended to be used when we are changing or.
|
||||
// TODO(kradalby): Remove this func, just use Save.
|
||||
func NodeSave(tx *gorm.DB, node *types.Node) error {
|
||||
return tx.Save(node).Error
|
||||
}
|
||||
|
||||
func generateGivenName(suppliedName string, randomSuffix bool) (string, error) {
|
||||
// Strip invalid DNS characters for givenName
|
||||
suppliedName = strings.ToLower(suppliedName)
|
||||
|
||||
@@ -145,11 +145,12 @@ func (hsdb *HSDatabase) ExpirePreAuthKey(k *types.PreAuthKey) error {

// UsePreAuthKey marks a PreAuthKey as used.
func UsePreAuthKey(tx *gorm.DB, k *types.PreAuthKey) error {
	k.Used = true
	if err := tx.Save(k).Error; err != nil {
	err := tx.Model(k).Update("used", true).Error
	if err != nil {
		return fmt.Errorf("failed to update key used status in the database: %w", err)
	}

	k.Used = true
	return nil
}

@@ -31,10 +31,15 @@ CREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identif
|
||||
CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;
|
||||
|
||||
-- Create all the old tables we have had and ensure they are clean up.
|
||||
CREATE TABLE `namespaces` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `namespaces` (`id` text,`deleted_at` datetime,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `machines` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `kvs` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `shared_machines` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `shared_machines` (`id` text,`deleted_at` datetime,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `pre_auth_key_acl_tags` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `routes` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `routes` (`id` text,`deleted_at` datetime,PRIMARY KEY (`id`));
|
||||
|
||||
CREATE INDEX `idx_routes_deleted_at` ON `routes`(`deleted_at`);
|
||||
CREATE INDEX `idx_namespaces_deleted_at` ON `namespaces`(`deleted_at`);
|
||||
CREATE INDEX `idx_shared_machines_deleted_at` ON `shared_machines`(`deleted_at`);
|
||||
|
||||
COMMIT;
|
||||
|
||||
hscontrol/db/user_update_test.go (new file, 134 changes)
@@ -0,0 +1,134 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"testing"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// TestUserUpdatePreservesUnchangedFields verifies that updating a user
|
||||
// preserves fields that aren't modified. This test validates the fix
|
||||
// for using Updates() instead of Save() in UpdateUser-like operations.
|
||||
func TestUserUpdatePreservesUnchangedFields(t *testing.T) {
|
||||
database := dbForTest(t)
|
||||
|
||||
// Create a user with all fields set
|
||||
initialUser := types.User{
|
||||
Name: "testuser",
|
||||
DisplayName: "Test User Display",
|
||||
Email: "test@example.com",
|
||||
ProviderIdentifier: sql.NullString{
|
||||
String: "provider-123",
|
||||
Valid: true,
|
||||
},
|
||||
}
|
||||
|
||||
createdUser, err := database.CreateUser(initialUser)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, createdUser)
|
||||
|
||||
// Verify initial state
|
||||
assert.Equal(t, "testuser", createdUser.Name)
|
||||
assert.Equal(t, "Test User Display", createdUser.DisplayName)
|
||||
assert.Equal(t, "test@example.com", createdUser.Email)
|
||||
assert.True(t, createdUser.ProviderIdentifier.Valid)
|
||||
assert.Equal(t, "provider-123", createdUser.ProviderIdentifier.String)
|
||||
|
||||
// Simulate what UpdateUser does: load user, modify one field, save
|
||||
_, err = Write(database.DB, func(tx *gorm.DB) (*types.User, error) {
|
||||
user, err := GetUserByID(tx, types.UserID(createdUser.ID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Modify ONLY DisplayName
|
||||
user.DisplayName = "Updated Display Name"
|
||||
|
||||
// This is the line being tested - currently uses Save() which writes ALL fields, potentially overwriting unchanged ones
|
||||
err = tx.Save(user).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return user, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Read user back from database
|
||||
updatedUser, err := Read(database.DB, func(rx *gorm.DB) (*types.User, error) {
|
||||
return GetUserByID(rx, types.UserID(createdUser.ID))
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify that DisplayName was updated
|
||||
assert.Equal(t, "Updated Display Name", updatedUser.DisplayName)
|
||||
|
||||
// CRITICAL: Verify that other fields were NOT overwritten
|
||||
// With Save(), these assertions should pass because the user object
|
||||
// was loaded from DB and has all fields populated.
|
||||
// But if Updates() is used, these will also pass (and it's safer).
|
||||
assert.Equal(t, "testuser", updatedUser.Name, "Name should be preserved")
|
||||
assert.Equal(t, "test@example.com", updatedUser.Email, "Email should be preserved")
|
||||
assert.True(t, updatedUser.ProviderIdentifier.Valid, "ProviderIdentifier should be preserved")
|
||||
assert.Equal(t, "provider-123", updatedUser.ProviderIdentifier.String, "ProviderIdentifier value should be preserved")
|
||||
}
|
||||
|
||||
// TestUserUpdateWithUpdatesMethod tests that using Updates() instead of Save()
|
||||
// works correctly and only updates modified fields.
|
||||
func TestUserUpdateWithUpdatesMethod(t *testing.T) {
|
||||
database := dbForTest(t)
|
||||
|
||||
// Create a user
|
||||
initialUser := types.User{
|
||||
Name: "testuser",
|
||||
DisplayName: "Original Display",
|
||||
Email: "original@example.com",
|
||||
ProviderIdentifier: sql.NullString{
|
||||
String: "provider-abc",
|
||||
Valid: true,
|
||||
},
|
||||
}
|
||||
|
||||
createdUser, err := database.CreateUser(initialUser)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Update using Updates() method
|
||||
_, err = Write(database.DB, func(tx *gorm.DB) (*types.User, error) {
|
||||
user, err := GetUserByID(tx, types.UserID(createdUser.ID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Modify multiple fields
|
||||
user.DisplayName = "New Display"
|
||||
user.Email = "new@example.com"
|
||||
|
||||
// Use Updates() instead of Save()
|
||||
err = tx.Updates(user).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return user, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify changes
|
||||
updatedUser, err := Read(database.DB, func(rx *gorm.DB) (*types.User, error) {
|
||||
return GetUserByID(rx, types.UserID(createdUser.ID))
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify updated fields
|
||||
assert.Equal(t, "New Display", updatedUser.DisplayName)
|
||||
assert.Equal(t, "new@example.com", updatedUser.Email)
|
||||
|
||||
// Verify preserved fields
|
||||
assert.Equal(t, "testuser", updatedUser.Name)
|
||||
assert.True(t, updatedUser.ProviderIdentifier.Valid)
|
||||
assert.Equal(t, "provider-abc", updatedUser.ProviderIdentifier.String)
|
||||
}
|
||||
@@ -102,7 +102,8 @@ func RenameUser(tx *gorm.DB, uid types.UserID, newName string) error {

	oldUser.Name = newName

	if err := tx.Save(&oldUser).Error; err != nil {
	err = tx.Updates(&oldUser).Error
	if err != nil {
		return err
	}

@@ -213,7 +213,8 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
		return
	}

	cookieState, err := req.Cookie("state")
	stateCookieName := getCookieName("state", state)
	cookieState, err := req.Cookie(stateCookieName)
	if err != nil {
		httpError(writer, NewHTTPError(http.StatusBadRequest, "state not found", err))
		return

@@ -235,8 +236,13 @@
		httpError(writer, err)
		return
	}
	if idToken.Nonce == "" {
		httpError(writer, NewHTTPError(http.StatusBadRequest, "nonce not found in IDToken", err))
		return
	}

	nonce, err := req.Cookie("nonce")
	nonceCookieName := getCookieName("nonce", idToken.Nonce)
	nonce, err := req.Cookie(nonceCookieName)
	if err != nil {
		httpError(writer, NewHTTPError(http.StatusBadRequest, "nonce not found", err))
		return

@@ -584,6 +590,11 @@ func renderOIDCCallbackTemplate(
	return &content, nil
}

// getCookieName generates a unique cookie name based on a cookie value.
func getCookieName(baseName, value string) string {
	return fmt.Sprintf("%s_%s", baseName, value[:6])
}

func setCSRFCookie(w http.ResponseWriter, r *http.Request, name string) (string, error) {
	val, err := util.GenerateRandomStringURLSafe(64)
	if err != nil {

@@ -592,7 +603,7 @@ func setCSRFCookie(w http.ResponseWriter, r *http.Request, name string) (string,

	c := &http.Cookie{
		Path:   "/oidc/callback",
		Name:   name,
		Name:   getCookieName(name, val),
		Value:  val,
		MaxAge: int(time.Hour.Seconds()),
		Secure: r.TLS != nil,
@@ -1353,6 +1353,55 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "2863-allow-predefined-missing-users",
|
||||
targetNode: taggedClient,
|
||||
peers: types.Nodes{&nodeUser2},
|
||||
policy: `{
|
||||
"groups": {
|
||||
"group:example-infra": [
|
||||
"user2@",
|
||||
"not-created-yet@",
|
||||
],
|
||||
},
|
||||
"tagOwners": {
|
||||
"tag:client": [
|
||||
"user2@"
|
||||
],
|
||||
},
|
||||
"ssh": [
|
||||
// Allow infra to ssh to tag:example-infra server as debian
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"group:example-infra"
|
||||
],
|
||||
"dst": [
|
||||
"tag:client",
|
||||
],
|
||||
"users": [
|
||||
"debian",
|
||||
],
|
||||
},
|
||||
],
|
||||
}`,
|
||||
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
|
||||
{
|
||||
Principals: []*tailcfg.SSHPrincipal{
|
||||
{NodeIP: "100.64.0.2"},
|
||||
},
|
||||
SSHUsers: map[string]string{
|
||||
"debian": "debian",
|
||||
},
|
||||
Action: &tailcfg.SSHAction{
|
||||
Accept: true,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
AllowRemotePortForwarding: true,
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
||||
@@ -316,7 +316,6 @@ func (pol *Policy) compileSSHPolicy(
|
||||
srcIPs, err := rule.Sources.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("SSH policy compilation failed resolving source ips for rule %+v", rule)
|
||||
continue // Skip this rule if we can't resolve sources
|
||||
}
|
||||
|
||||
if srcIPs == nil || len(srcIPs.Prefixes()) == 0 {
|
||||
|
||||
@@ -300,7 +300,9 @@ func (s *State) UpdateUser(userID types.UserID, updateFn func(*types.User) error
		return nil, err
	}

	if err := tx.Save(user).Error; err != nil {
	// Use Updates() to only update modified fields, preserving unchanged values.
	err = tx.Updates(user).Error
	if err != nil {
		return nil, fmt.Errorf("updating user: %w", err)
	}

@@ -386,7 +388,11 @@ func (s *State) persistNodeToDB(node types.NodeView) (types.NodeView, change.Cha

	nodePtr := node.AsStruct()

	if err := s.db.DB.Save(nodePtr).Error; err != nil {
	// Use Omit("expiry") to prevent overwriting expiry during MapRequest updates.
	// Expiry should only be updated through explicit SetNodeExpiry calls or re-registration.
	// See: https://github.com/juanfont/headscale/issues/2862
	err := s.db.DB.Omit("expiry").Updates(nodePtr).Error
	if err != nil {
		return types.NodeView{}, change.EmptySet, fmt.Errorf("saving node: %w", err)
	}

@@ -1187,9 +1193,10 @@ func (s *State) HandleNodeFromAuthPath(
|
||||
return types.NodeView{}, change.EmptySet, fmt.Errorf("node not found in NodeStore: %d", existingNodeSameUser.ID())
|
||||
}
|
||||
|
||||
// Use the node from UpdateNode to save to database
|
||||
_, err = hsdb.Write(s.db.DB, func(tx *gorm.DB) (*types.Node, error) {
|
||||
if err := tx.Save(updatedNodeView.AsStruct()).Error; err != nil {
|
||||
// Use Updates() to preserve fields not modified by UpdateNode.
|
||||
err := tx.Updates(updatedNodeView.AsStruct()).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to save node: %w", err)
|
||||
}
|
||||
return nil, nil
|
||||
@@ -1294,9 +1301,46 @@ func (s *State) HandleNodeFromPreAuthKey(
|
||||
return types.NodeView{}, change.EmptySet, err
|
||||
}
|
||||
|
||||
err = pak.Validate()
|
||||
if err != nil {
|
||||
return types.NodeView{}, change.EmptySet, err
|
||||
// Check if node exists with same machine key before validating the key.
|
||||
// For #2830: container restarts send the same pre-auth key which may be used/expired.
|
||||
// Skip validation for existing nodes re-registering with the same NodeKey, as the
|
||||
// key was only needed for initial authentication. NodeKey rotation requires validation.
|
||||
existingNodeSameUser, existsSameUser := s.nodeStore.GetNodeByMachineKey(machineKey, types.UserID(pak.User.ID))
|
||||
|
||||
// Skip validation only if both the AuthKeyID and NodeKey match (not a rotation).
|
||||
isExistingNodeReregistering := existsSameUser && existingNodeSameUser.Valid() &&
|
||||
existingNodeSameUser.AuthKey().Valid() &&
|
||||
existingNodeSameUser.AuthKeyID().Valid() &&
|
||||
existingNodeSameUser.AuthKeyID().Get() == pak.ID
|
||||
|
||||
// Check if this is a NodeKey rotation (different NodeKey)
|
||||
isNodeKeyRotation := existsSameUser && existingNodeSameUser.Valid() &&
|
||||
existingNodeSameUser.NodeKey() != regReq.NodeKey
|
||||
|
||||
if isExistingNodeReregistering && !isNodeKeyRotation {
|
||||
// Existing node re-registering with same NodeKey: skip validation.
|
||||
// Pre-auth keys are only needed for initial authentication. Critical for
|
||||
// containers that run "tailscale up --authkey=KEY" on every restart.
|
||||
log.Debug().
|
||||
Caller().
|
||||
Uint64("node.id", existingNodeSameUser.ID().Uint64()).
|
||||
Str("node.name", existingNodeSameUser.Hostname()).
|
||||
Str("machine.key", machineKey.ShortString()).
|
||||
Str("node.key.existing", existingNodeSameUser.NodeKey().ShortString()).
|
||||
Str("node.key.request", regReq.NodeKey.ShortString()).
|
||||
Uint64("authkey.id", pak.ID).
|
||||
Bool("authkey.used", pak.Used).
|
||||
Bool("authkey.expired", pak.Expiration != nil && pak.Expiration.Before(time.Now())).
|
||||
Bool("authkey.reusable", pak.Reusable).
|
||||
Bool("nodekey.rotation", isNodeKeyRotation).
|
||||
Msg("Existing node re-registering with same NodeKey and auth key, skipping validation")
|
||||
|
||||
} else {
|
||||
// New node or NodeKey rotation: require valid auth key.
|
||||
err = pak.Validate()
|
||||
if err != nil {
|
||||
return types.NodeView{}, change.EmptySet, err
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure we have a valid hostname - handle nil/empty cases
|
||||
@@ -1328,9 +1372,6 @@ func (s *State) HandleNodeFromPreAuthKey(
|
||||
|
||||
var finalNode types.NodeView
|
||||
|
||||
// Check if node already exists with same machine key for this user
|
||||
existingNodeSameUser, existsSameUser := s.nodeStore.GetNodeByMachineKey(machineKey, types.UserID(pak.User.ID))
|
||||
|
||||
// If this node exists for this user, update the node in place.
|
||||
if existsSameUser && existingNodeSameUser.Valid() {
|
||||
log.Trace().
|
||||
@@ -1372,9 +1413,10 @@ func (s *State) HandleNodeFromPreAuthKey(
|
||||
return types.NodeView{}, change.EmptySet, fmt.Errorf("node not found in NodeStore: %d", existingNodeSameUser.ID())
|
||||
}
|
||||
|
||||
// Use the node from UpdateNode to save to database
|
||||
_, err = hsdb.Write(s.db.DB, func(tx *gorm.DB) (*types.Node, error) {
|
||||
if err := tx.Save(updatedNodeView.AsStruct()).Error; err != nil {
|
||||
// Use Updates() to preserve fields not modified by UpdateNode.
|
||||
err := tx.Updates(updatedNodeView.AsStruct()).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to save node: %w", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -223,6 +223,7 @@ func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
require.NoError(t, err)
|
||||
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
err = scenario.CreateHeadscaleEnv([]tsic.Option{},
|
||||
@@ -454,3 +455,4 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -953,6 +953,119 @@ func TestOIDCFollowUpUrl(t *testing.T) {
|
||||
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list after OIDC login")
|
||||
}
|
||||
|
||||
// TestOIDCMultipleOpenedLoginUrls tests the scenario:
|
||||
// - client (mostly Windows) opens multiple browser tabs with different login URLs
|
||||
// - client performs auth on the first opened browser tab
|
||||
//
|
||||
// This test makes sure that cookies are still valid for the first browser tab.
|
||||
func TestOIDCMultipleOpenedLoginUrls(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
scenario, err := NewScenario(
|
||||
ScenarioSpec{
|
||||
OIDCUsers: []mockoidc.MockUser{
|
||||
oidcMockUser("user1", true),
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
oidcMap := map[string]string{
|
||||
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
|
||||
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
|
||||
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
|
||||
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnvWithLoginURL(
|
||||
nil,
|
||||
hsic.WithTestName("oidcauthrelog"),
|
||||
hsic.WithConfigEnv(oidcMap),
|
||||
hsic.WithTLS(),
|
||||
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
|
||||
hsic.WithEmbeddedDERPServerOnly(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
|
||||
listUsers, err := headscale.ListUsers()
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, listUsers)
|
||||
|
||||
ts, err := scenario.CreateTailscaleNode(
|
||||
"unstable",
|
||||
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
u1, err := ts.LoginWithURL(headscale.GetEndpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
u2, err := ts.LoginWithURL(headscale.GetEndpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// make sure login URLs are different
|
||||
require.NotEqual(t, u1.String(), u2.String())
|
||||
|
||||
loginClient, err := newLoginHTTPClient(ts.Hostname())
|
||||
require.NoError(t, err)
|
||||
|
||||
// open the first login URL "in browser"
|
||||
_, redirect1, err := doLoginURLWithClient(ts.Hostname(), u1, loginClient, false)
|
||||
require.NoError(t, err)
|
||||
// open the second login URL "in browser"
|
||||
_, redirect2, err := doLoginURLWithClient(ts.Hostname(), u2, loginClient, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// two valid redirects with different state/nonce params
|
||||
require.NotEqual(t, redirect1.String(), redirect2.String())
|
||||
|
||||
// complete auth with the first opened "browser tab"
|
||||
_, redirect1, err = doLoginURLWithClient(ts.Hostname(), redirect1, loginClient, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
listUsers, err = headscale.ListUsers()
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, listUsers, 1)
|
||||
|
||||
wantUsers := []*v1.User{
|
||||
{
|
||||
Id: 1,
|
||||
Name: "user1",
|
||||
Email: "user1@headscale.net",
|
||||
Provider: "oidc",
|
||||
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
|
||||
},
|
||||
}
|
||||
|
||||
sort.Slice(
|
||||
listUsers, func(i, j int) bool {
|
||||
return listUsers[i].GetId() < listUsers[j].GetId()
|
||||
},
|
||||
)
|
||||
|
||||
if diff := cmp.Diff(
|
||||
wantUsers,
|
||||
listUsers,
|
||||
cmpopts.IgnoreUnexported(v1.User{}),
|
||||
cmpopts.IgnoreFields(v1.User{}, "CreatedAt"),
|
||||
); diff != "" {
|
||||
t.Fatalf("unexpected users: %s", diff)
|
||||
}
|
||||
|
||||
assert.EventuallyWithT(
|
||||
t, func(c *assert.CollectT) {
|
||||
listNodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, listNodes, 1)
|
||||
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list after OIDC login",
|
||||
)
|
||||
}
|
||||
|
||||
// TestOIDCReloginSameNodeSameUser tests the scenario where a single Tailscale client
|
||||
// authenticates using OIDC (OpenID Connect), logs out, and then logs back in as the same user.
|
||||
//
|
||||
@@ -1181,3 +1294,131 @@ func TestOIDCReloginSameNodeSameUser(t *testing.T) {
|
||||
}
|
||||
}, 60*time.Second, 2*time.Second, "validating user1 node is online after same-user OIDC relogin")
|
||||
}
|
||||
|
||||
// TestOIDCExpiryAfterRestart validates that node expiry is preserved
|
||||
// when a tailscaled client restarts and reconnects to headscale.
|
||||
//
|
||||
// This test reproduces the bug reported in https://github.com/juanfont/headscale/issues/2862
|
||||
// where OIDC expiry was reset to 0001-01-01 00:00:00 after tailscaled restart.
|
||||
//
|
||||
// Test flow:
|
||||
// 1. Node logs in with OIDC (gets 72h expiry)
|
||||
// 2. Verify expiry is set correctly in headscale
|
||||
// 3. Restart tailscaled container (simulates daemon restart)
|
||||
// 4. Wait for reconnection
|
||||
// 5. Verify expiry is still set correctly (not zero).
|
||||
func TestOIDCExpiryAfterRestart(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
scenario, err := NewScenario(ScenarioSpec{
|
||||
OIDCUsers: []mockoidc.MockUser{
|
||||
oidcMockUser("user1", true),
|
||||
},
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
oidcMap := map[string]string{
|
||||
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
|
||||
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
|
||||
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
|
||||
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
|
||||
"HEADSCALE_OIDC_EXPIRY": "72h",
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnvWithLoginURL(
|
||||
nil,
|
||||
hsic.WithTestName("oidcexpiry"),
|
||||
hsic.WithConfigEnv(oidcMap),
|
||||
hsic.WithTLS(),
|
||||
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
|
||||
hsic.WithEmbeddedDERPServerOnly(),
|
||||
hsic.WithDERPAsIP(),
|
||||
)
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create and login tailscale client
|
||||
ts, err := scenario.CreateTailscaleNode("unstable", tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))
|
||||
require.NoError(t, err)
|
||||
|
||||
u, err := ts.LoginWithURL(headscale.GetEndpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = doLoginURL(ts.Hostname(), u)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Logf("Validating initial login and expiry at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
// Verify initial expiry is set
|
||||
var initialExpiry time.Time
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(ct, err)
|
||||
assert.Len(ct, nodes, 1)
|
||||
|
||||
node := nodes[0]
|
||||
assert.NotNil(ct, node.GetExpiry(), "Expiry should be set after OIDC login")
|
||||
|
||||
if node.GetExpiry() != nil {
|
||||
expiryTime := node.GetExpiry().AsTime()
|
||||
assert.False(ct, expiryTime.IsZero(), "Expiry should not be zero time")
|
||||
|
||||
initialExpiry = expiryTime
|
||||
t.Logf("Initial expiry set to: %v (expires in %v)", expiryTime, time.Until(expiryTime))
|
||||
}
|
||||
}, 30*time.Second, 1*time.Second, "validating initial expiry after OIDC login")
|
||||
|
||||
// Now restart the tailscaled container
|
||||
t.Logf("Restarting tailscaled container at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
err = ts.Restart()
|
||||
require.NoError(t, err, "Failed to restart tailscaled container")
|
||||
|
||||
t.Logf("Tailscaled restarted, waiting for reconnection at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
// Wait for the node to come back online
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
status, err := ts.Status()
|
||||
if !assert.NoError(ct, err) {
|
||||
return
|
||||
}
|
||||
|
||||
if !assert.NotNil(ct, status) {
|
||||
return
|
||||
}
|
||||
|
||||
assert.Equal(ct, "Running", status.BackendState)
|
||||
}, 60*time.Second, 2*time.Second, "waiting for tailscale to reconnect after restart")
|
||||
|
||||
// THE CRITICAL TEST: Verify expiry is still set correctly after restart
|
||||
t.Logf("Validating expiry preservation after restart at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(ct, err)
|
||||
assert.Len(ct, nodes, 1, "Should still have exactly 1 node after restart")
|
||||
|
||||
node := nodes[0]
|
||||
assert.NotNil(ct, node.GetExpiry(), "Expiry should NOT be nil after restart")
|
||||
|
||||
if node.GetExpiry() != nil {
|
||||
expiryTime := node.GetExpiry().AsTime()
|
||||
|
||||
// This is the bug check - expiry should NOT be zero time
|
||||
assert.False(ct, expiryTime.IsZero(),
|
||||
"BUG: Expiry was reset to zero time after tailscaled restart! This is issue #2862")
|
||||
|
||||
// Expiry should be exactly the same as before restart
|
||||
assert.Equal(ct, initialExpiry, expiryTime,
|
||||
"Expiry should be exactly the same after restart, got %v, expected %v",
|
||||
expiryTime, initialExpiry)
|
||||
|
||||
t.Logf("SUCCESS: Expiry preserved after restart: %v (expires in %v)",
|
||||
expiryTime, time.Until(expiryTime))
|
||||
}
|
||||
}, 30*time.Second, 1*time.Second, "validating expiry preservation after restart")
|
||||
}
|
||||
|
||||
@@ -860,47 +860,183 @@ func (s *Scenario) RunTailscaleUpWithURL(userStr, loginServer string) error {
|
||||
return fmt.Errorf("failed to up tailscale node: %w", errNoUserAvailable)
|
||||
}
|
||||
|
||||
// doLoginURL visits the given login URL and returns the body as a
|
||||
// string.
|
||||
func doLoginURL(hostname string, loginURL *url.URL) (string, error) {
|
||||
log.Printf("%s login url: %s\n", hostname, loginURL.String())
|
||||
type debugJar struct {
|
||||
inner *cookiejar.Jar
|
||||
mu sync.RWMutex
|
||||
store map[string]map[string]map[string]*http.Cookie // domain -> path -> name -> cookie
|
||||
}
|
||||
|
||||
var err error
|
||||
func newDebugJar() (*debugJar, error) {
|
||||
jar, err := cookiejar.New(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &debugJar{
|
||||
inner: jar,
|
||||
store: make(map[string]map[string]map[string]*http.Cookie),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (j *debugJar) SetCookies(u *url.URL, cookies []*http.Cookie) {
|
||||
j.inner.SetCookies(u, cookies)
|
||||
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
|
||||
for _, c := range cookies {
|
||||
if c == nil || c.Name == "" {
|
||||
continue
|
||||
}
|
||||
domain := c.Domain
|
||||
if domain == "" {
|
||||
domain = u.Hostname()
|
||||
}
|
||||
path := c.Path
|
||||
if path == "" {
|
||||
path = "/"
|
||||
}
|
||||
if _, ok := j.store[domain]; !ok {
|
||||
j.store[domain] = make(map[string]map[string]*http.Cookie)
|
||||
}
|
||||
if _, ok := j.store[domain][path]; !ok {
|
||||
j.store[domain][path] = make(map[string]*http.Cookie)
|
||||
}
|
||||
j.store[domain][path][c.Name] = copyCookie(c)
|
||||
}
|
||||
}
|
||||
|
||||
func (j *debugJar) Cookies(u *url.URL) []*http.Cookie {
|
||||
return j.inner.Cookies(u)
|
||||
}
|
||||
|
||||
func (j *debugJar) Dump(w io.Writer) {
|
||||
j.mu.RLock()
|
||||
defer j.mu.RUnlock()
|
||||
|
||||
for domain, paths := range j.store {
|
||||
fmt.Fprintf(w, "Domain: %s\n", domain)
|
||||
for path, byName := range paths {
|
||||
fmt.Fprintf(w, " Path: %s\n", path)
|
||||
for _, c := range byName {
|
||||
fmt.Fprintf(
|
||||
w, " %s=%s; Expires=%v; Secure=%v; HttpOnly=%v; SameSite=%v\n",
|
||||
c.Name, c.Value, c.Expires, c.Secure, c.HttpOnly, c.SameSite,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func copyCookie(c *http.Cookie) *http.Cookie {
|
||||
cc := *c
|
||||
return &cc
|
||||
}
|
||||
|
||||
func newLoginHTTPClient(hostname string) (*http.Client, error) {
|
||||
hc := &http.Client{
|
||||
Transport: LoggingRoundTripper{Hostname: hostname},
|
||||
}
|
||||
hc.Jar, err = cookiejar.New(nil)
|
||||
|
||||
jar, err := newDebugJar()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("%s failed to create cookiejar : %w", hostname, err)
|
||||
return nil, fmt.Errorf("%s failed to create cookiejar: %w", hostname, err)
|
||||
}
|
||||
|
||||
hc.Jar = jar
|
||||
|
||||
return hc, nil
|
||||
}
|
||||
|
||||
// doLoginURL visits the given login URL and returns the body as a string.
|
||||
func doLoginURL(hostname string, loginURL *url.URL) (string, error) {
|
||||
log.Printf("%s login url: %s\n", hostname, loginURL.String())
|
||||
|
||||
hc, err := newLoginHTTPClient(hostname)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
body, _, err := doLoginURLWithClient(hostname, loginURL, hc, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return body, nil
|
||||
}
|
||||
|
||||
// doLoginURLWithClient performs the login request using the provided HTTP client.
|
||||
// When followRedirects is false, it will return the first redirect without following it.
|
||||
func doLoginURLWithClient(hostname string, loginURL *url.URL, hc *http.Client, followRedirects bool) (
|
||||
string,
|
||||
*url.URL,
|
||||
error,
|
||||
) {
|
||||
if hc == nil {
|
||||
return "", nil, fmt.Errorf("%s http client is nil", hostname)
|
||||
}
|
||||
|
||||
if loginURL == nil {
|
||||
return "", nil, fmt.Errorf("%s login url is nil", hostname)
|
||||
}
|
||||
|
||||
log.Printf("%s logging in with url: %s", hostname, loginURL.String())
|
||||
ctx := context.Background()
|
||||
req, _ := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("%s failed to create http request: %w", hostname, err)
|
||||
}
|
||||
|
||||
originalRedirect := hc.CheckRedirect
|
||||
if !followRedirects {
|
||||
hc.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
}
|
||||
defer func() {
|
||||
hc.CheckRedirect = originalRedirect
|
||||
}()
|
||||
|
||||
resp, err := hc.Do(req)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("%s failed to send http request: %w", hostname, err)
|
||||
return "", nil, fmt.Errorf("%s failed to send http request: %w", hostname, err)
|
||||
}
|
||||
|
||||
log.Printf("cookies: %+v", hc.Jar.Cookies(loginURL))
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
log.Printf("body: %s", body)
|
||||
|
||||
return "", fmt.Errorf("%s response code of login request was %w", hostname, err)
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
bodyBytes, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Printf("%s failed to read response body: %s", hostname, err)
|
||||
return "", nil, fmt.Errorf("%s failed to read response body: %w", hostname, err)
|
||||
}
|
||||
body := string(bodyBytes)
|
||||
|
||||
return "", fmt.Errorf("%s failed to read response body: %w", hostname, err)
|
||||
var redirectURL *url.URL
|
||||
if resp.StatusCode >= http.StatusMultipleChoices && resp.StatusCode < http.StatusBadRequest {
|
||||
redirectURL, err = resp.Location()
|
||||
if err != nil {
|
||||
return body, nil, fmt.Errorf("%s failed to resolve redirect location: %w", hostname, err)
|
||||
}
|
||||
}
|
||||
|
||||
return string(body), nil
|
||||
if followRedirects && resp.StatusCode != http.StatusOK {
|
||||
log.Printf("body: %s", body)
|
||||
|
||||
return body, redirectURL, fmt.Errorf("%s unexpected status code %d", hostname, resp.StatusCode)
|
||||
}
|
||||
|
||||
if resp.StatusCode >= http.StatusBadRequest {
|
||||
log.Printf("body: %s", body)
|
||||
|
||||
return body, redirectURL, fmt.Errorf("%s unexpected status code %d", hostname, resp.StatusCode)
|
||||
}
|
||||
|
||||
if hc.Jar != nil {
|
||||
if jar, ok := hc.Jar.(*debugJar); ok {
|
||||
jar.Dump(os.Stdout)
|
||||
} else {
|
||||
log.Printf("cookies: %+v", hc.Jar.Cookies(loginURL))
|
||||
}
|
||||
}
|
||||
|
||||
return body, redirectURL, nil
|
||||
}
|
||||
|
||||
var errParseAuthPage = errors.New("failed to parse auth page")
|
||||
|
||||
@@ -29,6 +29,7 @@ type TailscaleClient interface {
|
||||
Login(loginServer, authKey string) error
|
||||
LoginWithURL(loginServer string) (*url.URL, error)
|
||||
Logout() error
|
||||
Restart() error
|
||||
Up() error
|
||||
Down() error
|
||||
IPs() ([]netip.Addr, error)
|
||||
|
||||
@@ -555,6 +555,39 @@ func (t *TailscaleInContainer) Logout() error {
|
||||
return t.waitForBackendState("NeedsLogin", integrationutil.PeerSyncTimeout())
|
||||
}
|
||||
|
||||
// Restart restarts the Tailscale container using Docker API.
|
||||
// This simulates a container restart (e.g., docker restart or Kubernetes pod restart).
|
||||
// The container's entrypoint will re-execute, which typically includes running
|
||||
// "tailscale up" with any auth keys stored in environment variables.
|
||||
func (t *TailscaleInContainer) Restart() error {
|
||||
if t.container == nil {
|
||||
return fmt.Errorf("container not initialized")
|
||||
}
|
||||
|
||||
// Use Docker API to restart the container
|
||||
err := t.pool.Client.RestartContainer(t.container.Container.ID, 30)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to restart container %s: %w", t.hostname, err)
|
||||
}
|
||||
|
||||
// Wait for the container to be back up and tailscaled to be ready
|
||||
// We use exponential backoff to poll until we can successfully execute a command
|
||||
_, err = backoff.Retry(context.Background(), func() (struct{}, error) {
|
||||
// Try to execute a simple command to verify the container is responsive
|
||||
_, _, err := t.Execute([]string{"tailscale", "version"}, dockertestutil.ExecuteCommandTimeout(5*time.Second))
|
||||
if err != nil {
|
||||
return struct{}{}, fmt.Errorf("container not ready: %w", err)
|
||||
}
|
||||
return struct{}{}, nil
|
||||
}, backoff.WithBackOff(backoff.NewExponentialBackOff()), backoff.WithMaxElapsedTime(30*time.Second))
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("timeout waiting for container %s to restart and become ready: %w", t.hostname, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Helper that runs `tailscale up` with no arguments.
|
||||
func (t *TailscaleInContainer) Up() error {
|
||||
command := []string{
|
||||
|
||||
@@ -104,7 +104,7 @@ extra:
    - icon: fontawesome/brands/discord
      link: https://discord.gg/c84AZQhmpx
  headscale:
    version: 0.27.0
    version: 0.27.1

# Extensions
markdown_extensions: