Mirror of https://github.com/juanfont/headscale.git
Compare commits
39 Commits
v0.27.0-be ... v0.27.2-rc
| SHA1 |
|---|
| c6d399a66c |
| 4fe5cbe703 |
| 7e8cee6b10 |
| 7f1631c4f1 |
| f658a8eacd |
| 785168a7b8 |
| 3bd4ecd9cd |
| 3455d1cb59 |
| ddd31ba774 |
| 4a8dc2d445 |
| 773a46a968 |
| 4728a2ba9e |
| abed534628 |
| 21e3f2598d |
| a28d9bed6d |
| 28faf8cd71 |
| 5a2ee0c391 |
| 5cd15c3656 |
| 2024219bd1 |
| d9c3eaf8c8 |
| bd9cf42b96 |
| d7a43a7cf1 |
| 1c0bb0338d |
| c649c89e00 |
| af2de35b6c |
| 02c7c1a0e7 |
| d23fa26395 |
| f9bb88ad24 |
| 456a5d5cce |
| ddbd3e14ba |
| 0a43aab8f5 |
| 4bd614a559 |
| 19a33394f6 |
| 84fe3de251 |
| 450a7b15ec |
| 64b7142e22 |
| 52d27d58f0 |
| e68e2288f7 |
| c808587de0 |
.github/workflows/build.yml (vendored, 2 lines changed)
@@ -5,8 +5,6 @@ on:
branches:
- main
pull_request:
branches:
- main

concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}

@@ -62,6 +62,7 @@ jobs:
'**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
- name: Run Integration Test
if: always() && steps.changed-files.outputs.files == 'true'
run:
nix develop --command -- hi run --stats --ts-memory-limit=300 --hs-memory-limit=1500 "^${{ inputs.test }}$" \
--timeout=120m \

.github/workflows/test-integration.yaml (vendored, 9 lines changed)
@@ -24,21 +24,27 @@ jobs:
- TestACLAutogroupMember
- TestACLAutogroupTagged
- TestACLAutogroupSelf
- TestACLPolicyPropagationOverTime
- TestAPIAuthenticationBypass
- TestAPIAuthenticationBypassCurl
- TestGRPCAuthenticationBypass
- TestCLIWithConfigAuthenticationBypass
- TestACLPolicyPropagationOverTime
- TestAuthKeyLogoutAndReloginSameUser
- TestAuthKeyLogoutAndReloginNewUser
- TestAuthKeyLogoutAndReloginSameUserExpiredKey
- TestAuthKeyDeleteKey
- TestAuthKeyLogoutAndReloginRoutesPreserved
- TestOIDCAuthenticationPingAll
- TestOIDCExpireNodesBasedOnTokenExpiry
- TestOIDC024UserCreation
- TestOIDCAuthenticationWithPKCE
- TestOIDCReloginSameNodeNewUser
- TestOIDCFollowUpUrl
- TestOIDCMultipleOpenedLoginUrls
- TestOIDCReloginSameNodeSameUser
- TestOIDCExpiryAfterRestart
- TestOIDCACLPolicyOnJoin
- TestOIDCReloginSameUserRoutesPreserved
- TestAuthWebFlowAuthenticationPingAll
- TestAuthWebFlowLogoutAndReloginSameUser
- TestAuthWebFlowLogoutAndReloginNewUser
@@ -70,6 +76,7 @@ jobs:
- TestTaildrop
- TestUpdateHostnameFromClient
- TestExpireNode
- TestSetNodeExpiryInFuture
- TestNodeOnlineStatus
- TestPingAllByIPManyUpDown
- Test2118DeletingOnlineNodePanics

@@ -8,6 +8,33 @@ before:
release:
prerelease: auto
draft: true
header: |
## Upgrade

Please follow the steps outlined in the [upgrade guide](https://headscale.net/stable/setup/upgrade/) to update your existing Headscale installation.

**It's best to update from one stable version to the next** (e.g., 0.24.0 → 0.25.1 → 0.26.1) in case you are multiple releases behind. You should always pick the latest available patch release.

Be sure to check the changelog above for version-specific upgrade instructions and breaking changes.

### Backup Your Database

**Always backup your database before upgrading.** Here's how to backup a SQLite database:

```bash
# Stop headscale
systemctl stop headscale

# Backup sqlite database
cp /var/lib/headscale/db.sqlite /var/lib/headscale/db.sqlite.backup

# Backup sqlite WAL/SHM files (if they exist)
cp /var/lib/headscale/db.sqlite-wal /var/lib/headscale/db.sqlite-wal.backup
cp /var/lib/headscale/db.sqlite-shm /var/lib/headscale/db.sqlite-shm.backup

# Start headscale (migration will run automatically)
systemctl start headscale
```

builds:
- id: headscale
@@ -118,6 +145,8 @@ kos:
- "{{ .Tag }}"
- '{{ trimprefix .Tag "v" }}'
- "sha-{{ .ShortCommit }}"
creation_time: "{{.CommitTimestamp}}"
ko_data_creation_time: "{{.CommitTimestamp}}"

- id: ghcr-debug
repositories:

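The release header only covers taking the backup; as a hedged sketch that is not part of this change, rolling back to that backup is the same copy in reverse while headscale is stopped:

```bash
# Restore the pre-upgrade SQLite backup (paths match the backup commands above)
systemctl stop headscale
cp /var/lib/headscale/db.sqlite.backup /var/lib/headscale/db.sqlite
cp /var/lib/headscale/db.sqlite-wal.backup /var/lib/headscale/db.sqlite-wal
cp /var/lib/headscale/db.sqlite-shm.backup /var/lib/headscale/db.sqlite-shm
# (skip the -wal/-shm copies if those files were not backed up)
systemctl start headscale
```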
CHANGELOG.md (53 lines changed)
@@ -2,7 +2,52 @@

## Next

## 0.27.0 (2025-xx-xx)
### Changes

## 0.27.2 (2025-xx-xx)

### Changes

- Fix ACL policy not applied to new OIDC nodes until client restart
[#2890](https://github.com/juanfont/headscale/pull/2890)
- Fix autogroup:self preventing visibility of nodes matched by other ACL rules
[#2882](https://github.com/juanfont/headscale/pull/2882)
- Fix nodes being rejected after pre-authentication key expiration
[#2917](https://github.com/juanfont/headscale/pull/2917)

## 0.27.1 (2025-11-11)

**Minimum supported Tailscale client version: v1.64.0**

### Changes

- Expire nodes with a custom timestamp
[#2828](https://github.com/juanfont/headscale/pull/2828)
- Fix issue where node expiry was reset when tailscaled restarts
[#2875](https://github.com/juanfont/headscale/pull/2875)
- Fix OIDC authentication when multiple login URLs are opened
[#2861](https://github.com/juanfont/headscale/pull/2861)
- Fix node re-registration failing with expired auth keys
[#2859](https://github.com/juanfont/headscale/pull/2859)
- Remove old unused database tables and indices
[#2844](https://github.com/juanfont/headscale/pull/2844)
[#2872](https://github.com/juanfont/headscale/pull/2872)
- Ignore litestream tables during database validation
[#2843](https://github.com/juanfont/headscale/pull/2843)
- Fix exit node visibility to respect ACL rules
[#2855](https://github.com/juanfont/headscale/pull/2855)
- Fix SSH policy becoming empty when unknown user is referenced
[#2874](https://github.com/juanfont/headscale/pull/2874)
- Fix policy validation when using bypass-grpc mode
[#2854](https://github.com/juanfont/headscale/pull/2854)
- Fix autogroup:self interaction with other ACL rules
[#2842](https://github.com/juanfont/headscale/pull/2842)
- Fix flaky DERP map shuffle test
[#2848](https://github.com/juanfont/headscale/pull/2848)
- Use current stable base images for Debian and Alpine containers
[#2827](https://github.com/juanfont/headscale/pull/2827)

## 0.27.0 (2025-10-27)

**Minimum supported Tailscale client version: v1.64.0**

@@ -84,7 +129,8 @@ the code base over time and make it more correct and efficient.
[#2692](https://github.com/juanfont/headscale/pull/2692)
- Policy: Zero or empty destination port is no longer allowed
[#2606](https://github.com/juanfont/headscale/pull/2606)
- Stricter hostname validation [#2383](https://github.com/juanfont/headscale/pull/2383)
- Stricter hostname validation
[#2383](https://github.com/juanfont/headscale/pull/2383)
- Hostnames must be valid DNS labels (2-63 characters, alphanumeric and
hyphens only, cannot start/end with hyphen)
- **Client Registration (New Nodes)**: Invalid hostnames are automatically
@@ -139,7 +185,8 @@ the code base over time and make it more correct and efficient.
[#2776](https://github.com/juanfont/headscale/pull/2776)
- EXPERIMENTAL: Add support for `autogroup:self`
[#2789](https://github.com/juanfont/headscale/pull/2789)
- Add healthcheck command [#2659](https://github.com/juanfont/headscale/pull/2659)
- Add healthcheck command
[#2659](https://github.com/juanfont/headscale/pull/2659)

## 0.26.1 (2025-06-06)

@@ -12,7 +12,7 @@ WORKDIR /go/src/tailscale
ARG TARGETARCH
RUN GOARCH=$TARGETARCH go install -v ./cmd/derper

FROM alpine:3.18
FROM alpine:3.22
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables curl

COPY --from=build-env /go/bin/* /usr/local/bin/

@@ -2,13 +2,12 @@
# and are in no way endorsed by Headscale's maintainers as an
# official nor supported release or distribution.

FROM docker.io/golang:1.25-bookworm
FROM docker.io/golang:1.25-trixie
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale

RUN apt-get update \
&& apt-get install --no-install-recommends --yes less jq sqlite3 dnsutils \
RUN apt-get --update install --no-install-recommends --yes less jq sqlite3 dnsutils \
&& rm -rf /var/lib/apt/lists/* \
&& apt-get clean
RUN mkdir -p /var/run/headscale

@@ -36,7 +36,7 @@ RUN GOARCH=$TARGETARCH go install -tags="${BUILD_TAGS}" -ldflags="\
-X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \
-v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot

FROM alpine:3.18
FROM alpine:3.22
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables curl

COPY --from=build-env /go/bin/* /usr/local/bin/

@@ -15,6 +15,7 @@ import (
"github.com/samber/lo"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
"tailscale.com/types/key"
)

@@ -51,6 +52,7 @@ func init() {
nodeCmd.AddCommand(registerNodeCmd)

expireNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
expireNodeCmd.Flags().StringP("expiry", "e", "", "Set expire to (RFC3339 format, e.g. 2025-08-27T10:00:00Z), or leave empty to expire immediately.")
err = expireNodeCmd.MarkFlagRequired("identifier")
if err != nil {
log.Fatal(err.Error())
@@ -289,12 +291,37 @@ var expireNodeCmd = &cobra.Command{
)
}

expiry, err := cmd.Flags().GetString("expiry")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting expiry to string: %s", err),
output,
)

return
}
expiryTime := time.Now()
if expiry != "" {
expiryTime, err = time.Parse(time.RFC3339, expiry)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting expiry to string: %s", err),
output,
)

return
}
}

ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()

request := &v1.ExpireNodeRequest{
NodeId: identifier,
Expiry: timestamppb.New(expiryTime),
}

response, err := client.ExpireNode(ctx, request)

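The new `-e`/`--expiry` flag accepts an RFC3339 timestamp and falls back to expiring the node immediately when left empty. A hedged usage sketch (the node ID is a placeholder; the `headscale nodes expire` subcommand path is assumed from the surrounding CLI code):

```bash
# Expire node 12 immediately (existing behaviour, flag omitted)
headscale nodes expire --identifier 12

# Expire node 12 at a fixed point in the future (new --expiry flag, RFC3339)
headscale nodes expire --identifier 12 --expiry 2025-08-27T10:00:00Z
```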
@@ -127,12 +127,6 @@ var setPolicy = &cobra.Command{
ErrorOutput(err, fmt.Sprintf("Error reading the policy file: %s", err), output)
}

_, err = policy.NewPolicyManager(policyBytes, nil, views.Slice[types.NodeView]{})
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error parsing the policy file: %s", err), output)
return
}

if bypass, _ := cmd.Flags().GetBool(bypassFlag); bypass {
confirm := false
force, _ := cmd.Flags().GetBool("force")
@@ -159,6 +153,17 @@ var setPolicy = &cobra.Command{
ErrorOutput(err, fmt.Sprintf("Failed to open database: %s", err), output)
}

users, err := d.ListUsers()
if err != nil {
ErrorOutput(err, fmt.Sprintf("Failed to load users for policy validation: %s", err), output)
}

_, err = policy.NewPolicyManager(policyBytes, users, views.Slice[types.NodeView]{})
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error parsing the policy file: %s", err), output)
return
}

_, err = d.SetPolicy(string(policyBytes))
if err != nil {
ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)

@@ -34,6 +34,7 @@ func init() {
preauthkeysCmd.AddCommand(listPreAuthKeys)
preauthkeysCmd.AddCommand(createPreAuthKeyCmd)
preauthkeysCmd.AddCommand(expirePreAuthKeyCmd)
preauthkeysCmd.AddCommand(deletePreAuthKeyCmd)
createPreAuthKeyCmd.PersistentFlags().
Bool("reusable", false, "Make the preauthkey reusable")
createPreAuthKeyCmd.PersistentFlags().
@@ -232,3 +233,43 @@ var expirePreAuthKeyCmd = &cobra.Command{
SuccessOutput(response, "Key expired", output)
},
}

var deletePreAuthKeyCmd = &cobra.Command{
Use: "delete KEY",
Short: "Delete a preauthkey",
Aliases: []string{"del", "rm", "d"},
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return errMissingParameter
}

return nil
},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetUint64("user")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
}

ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()

request := &v1.DeletePreAuthKeyRequest{
User: user,
Key: args[0],
}

response, err := client.DeletePreAuthKey(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot delete Pre Auth Key: %s\n", err),
output,
)
}

SuccessOutput(response, "Key deleted", output)
},
}

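A hedged usage sketch for the new `delete` subcommand and its HTTP counterpart. The user ID, key value, server URL and API key are placeholders; the shared `--user` flag is assumed from the existing preauthkeys commands, and the `DELETE /api/v1/preauthkey` route with `user`/`key` query parameters follows the generated gateway code further down in this diff:

```bash
# Delete a pre-auth key via the CLI (user 1 and the key value are examples)
headscale preauthkeys delete --user 1 examplekey123

# Equivalent call against the HTTP API
curl -X DELETE \
  -H "Authorization: Bearer $HEADSCALE_API_KEY" \
  "https://headscale.example.com/api/v1/preauthkey?user=1&key=examplekey123"
```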
@@ -5,6 +5,7 @@ import (
"os"
"runtime"
"slices"
"strings"

"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog"
@@ -75,8 +76,9 @@ func initConfig() {
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
!versionInfo.Dirty {
githubTag := &latest.GithubTag{
Owner: "juanfont",
Repository: "headscale",
Owner: "juanfont",
Repository: "headscale",
TagFilterFunc: filterPreReleasesIfStable(func() string { return versionInfo.Version }),
}
res, err := latest.Check(githubTag, versionInfo.Version)
if err == nil && res.Outdated {
@@ -91,6 +93,43 @@ func initConfig() {
}
}

var prereleases = []string{"alpha", "beta", "rc", "dev"}

func isPreReleaseVersion(version string) bool {
for _, unstable := range prereleases {
if strings.Contains(version, unstable) {
return true
}
}
return false
}

// filterPreReleasesIfStable returns a function that filters out
// pre-release tags if the current version is stable.
// If the current version is a pre-release, it does not filter anything.
// versionFunc is a function that returns the current version string, it is
// a func for testability.
func filterPreReleasesIfStable(versionFunc func() string) func(string) bool {
return func(tag string) bool {
version := versionFunc()

// If we are on a pre-release version, then we do not filter anything
// as we want to recommend the user the latest pre-release.
if isPreReleaseVersion(version) {
return false
}

// If we are on a stable release, filter out pre-releases.
for _, ignore := range prereleases {
if strings.Contains(tag, ignore) {
return true
}
}

return false
}
}

var rootCmd = &cobra.Command{
Use: "headscale",
Short: "headscale - a Tailscale control server",

cmd/headscale/cli/root_test.go (new file, 293 lines)
@@ -0,0 +1,293 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFilterPreReleasesIfStable(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
currentVersion string
|
||||
tag string
|
||||
expectedFilter bool
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "stable version filters alpha tag",
|
||||
currentVersion: "0.23.0",
|
||||
tag: "v0.24.0-alpha.1",
|
||||
expectedFilter: true,
|
||||
description: "When on stable release, alpha tags should be filtered",
|
||||
},
|
||||
{
|
||||
name: "stable version filters beta tag",
|
||||
currentVersion: "0.23.0",
|
||||
tag: "v0.24.0-beta.2",
|
||||
expectedFilter: true,
|
||||
description: "When on stable release, beta tags should be filtered",
|
||||
},
|
||||
{
|
||||
name: "stable version filters rc tag",
|
||||
currentVersion: "0.23.0",
|
||||
tag: "v0.24.0-rc.1",
|
||||
expectedFilter: true,
|
||||
description: "When on stable release, rc tags should be filtered",
|
||||
},
|
||||
{
|
||||
name: "stable version allows stable tag",
|
||||
currentVersion: "0.23.0",
|
||||
tag: "v0.24.0",
|
||||
expectedFilter: false,
|
||||
description: "When on stable release, stable tags should not be filtered",
|
||||
},
|
||||
{
|
||||
name: "alpha version allows alpha tag",
|
||||
currentVersion: "0.23.0-alpha.1",
|
||||
tag: "v0.24.0-alpha.2",
|
||||
expectedFilter: false,
|
||||
description: "When on alpha release, alpha tags should not be filtered",
|
||||
},
|
||||
{
|
||||
name: "alpha version allows beta tag",
|
||||
currentVersion: "0.23.0-alpha.1",
|
||||
tag: "v0.24.0-beta.1",
|
||||
expectedFilter: false,
|
||||
description: "When on alpha release, beta tags should not be filtered",
|
||||
},
|
||||
{
|
||||
name: "alpha version allows rc tag",
|
||||
currentVersion: "0.23.0-alpha.1",
|
||||
tag: "v0.24.0-rc.1",
|
||||
expectedFilter: false,
|
||||
description: "When on alpha release, rc tags should not be filtered",
|
||||
},
|
||||
{
|
||||
name: "alpha version allows stable tag",
|
||||
currentVersion: "0.23.0-alpha.1",
|
||||
tag: "v0.24.0",
|
||||
expectedFilter: false,
|
||||
description: "When on alpha release, stable tags should not be filtered",
|
||||
},
|
||||
{
|
||||
name: "beta version allows alpha tag",
|
||||
currentVersion: "0.23.0-beta.1",
|
||||
tag: "v0.24.0-alpha.1",
|
||||
expectedFilter: false,
|
||||
description: "When on beta release, alpha tags should not be filtered",
|
||||
},
|
||||
{
|
||||
name: "beta version allows beta tag",
|
||||
currentVersion: "0.23.0-beta.2",
|
||||
tag: "v0.24.0-beta.3",
|
||||
expectedFilter: false,
|
||||
description: "When on beta release, beta tags should not be filtered",
|
||||
},
|
||||
{
|
||||
name: "beta version allows rc tag",
|
||||
currentVersion: "0.23.0-beta.1",
|
||||
tag: "v0.24.0-rc.1",
|
||||
expectedFilter: false,
|
||||
description: "When on beta release, rc tags should not be filtered",
|
||||
},
|
||||
{
|
||||
name: "beta version allows stable tag",
|
||||
currentVersion: "0.23.0-beta.1",
|
||||
tag: "v0.24.0",
|
||||
expectedFilter: false,
|
||||
description: "When on beta release, stable tags should not be filtered",
|
||||
},
|
||||
{
|
||||
name: "rc version allows alpha tag",
|
||||
currentVersion: "0.23.0-rc.1",
|
||||
tag: "v0.24.0-alpha.1",
|
||||
expectedFilter: false,
|
||||
description: "When on rc release, alpha tags should not be filtered",
|
||||
},
|
||||
{
|
||||
name: "rc version allows beta tag",
|
||||
currentVersion: "0.23.0-rc.1",
|
||||
tag: "v0.24.0-beta.1",
|
||||
expectedFilter: false,
|
||||
description: "When on rc release, beta tags should not be filtered",
|
||||
},
|
||||
{
|
||||
name: "rc version allows rc tag",
|
||||
currentVersion: "0.23.0-rc.2",
|
||||
tag: "v0.24.0-rc.3",
|
||||
expectedFilter: false,
|
||||
description: "When on rc release, rc tags should not be filtered",
|
||||
},
|
||||
{
|
||||
name: "rc version allows stable tag",
|
||||
currentVersion: "0.23.0-rc.1",
|
||||
tag: "v0.24.0",
|
||||
expectedFilter: false,
|
||||
description: "When on rc release, stable tags should not be filtered",
|
||||
},
|
||||
{
|
||||
name: "stable version with patch filters alpha",
|
||||
currentVersion: "0.23.1",
|
||||
tag: "v0.24.0-alpha.1",
|
||||
expectedFilter: true,
|
||||
description: "Stable version with patch number should filter alpha tags",
|
||||
},
|
||||
{
|
||||
name: "stable version with patch allows stable",
|
||||
currentVersion: "0.23.1",
|
||||
tag: "v0.24.0",
|
||||
expectedFilter: false,
|
||||
description: "Stable version with patch number should allow stable tags",
|
||||
},
|
||||
{
|
||||
name: "tag with alpha substring in version number",
|
||||
currentVersion: "0.23.0",
|
||||
tag: "v1.0.0-alpha.1",
|
||||
expectedFilter: true,
|
||||
description: "Tags with alpha in version string should be filtered on stable",
|
||||
},
|
||||
{
|
||||
name: "tag with beta substring in version number",
|
||||
currentVersion: "0.23.0",
|
||||
tag: "v1.0.0-beta.1",
|
||||
expectedFilter: true,
|
||||
description: "Tags with beta in version string should be filtered on stable",
|
||||
},
|
||||
{
|
||||
name: "tag with rc substring in version number",
|
||||
currentVersion: "0.23.0",
|
||||
tag: "v1.0.0-rc.1",
|
||||
expectedFilter: true,
|
||||
description: "Tags with rc in version string should be filtered on stable",
|
||||
},
|
||||
{
|
||||
name: "empty tag on stable version",
|
||||
currentVersion: "0.23.0",
|
||||
tag: "",
|
||||
expectedFilter: false,
|
||||
description: "Empty tags should not be filtered",
|
||||
},
|
||||
{
|
||||
name: "dev version allows all tags",
|
||||
currentVersion: "0.23.0-dev",
|
||||
tag: "v0.24.0-alpha.1",
|
||||
expectedFilter: false,
|
||||
description: "Dev versions should not filter any tags (pre-release allows all)",
|
||||
},
|
||||
{
|
||||
name: "stable version filters dev tag",
|
||||
currentVersion: "0.23.0",
|
||||
tag: "v0.24.0-dev",
|
||||
expectedFilter: true,
|
||||
description: "When on stable release, dev tags should be filtered",
|
||||
},
|
||||
{
|
||||
name: "dev version allows dev tag",
|
||||
currentVersion: "0.23.0-dev",
|
||||
tag: "v0.24.0-dev.1",
|
||||
expectedFilter: false,
|
||||
description: "When on dev release, dev tags should not be filtered",
|
||||
},
|
||||
{
|
||||
name: "dev version allows stable tag",
|
||||
currentVersion: "0.23.0-dev",
|
||||
tag: "v0.24.0",
|
||||
expectedFilter: false,
|
||||
description: "When on dev release, stable tags should not be filtered",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := filterPreReleasesIfStable(func() string { return tt.currentVersion })(tt.tag)
|
||||
if result != tt.expectedFilter {
|
||||
t.Errorf("%s: got %v, want %v\nDescription: %s\nCurrent version: %s, Tag: %s",
|
||||
tt.name,
|
||||
result,
|
||||
tt.expectedFilter,
|
||||
tt.description,
|
||||
tt.currentVersion,
|
||||
tt.tag,
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsPreReleaseVersion(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
version string
|
||||
expected bool
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "stable version",
|
||||
version: "0.23.0",
|
||||
expected: false,
|
||||
description: "Stable version should not be pre-release",
|
||||
},
|
||||
{
|
||||
name: "alpha version",
|
||||
version: "0.23.0-alpha.1",
|
||||
expected: true,
|
||||
description: "Alpha version should be pre-release",
|
||||
},
|
||||
{
|
||||
name: "beta version",
|
||||
version: "0.23.0-beta.1",
|
||||
expected: true,
|
||||
description: "Beta version should be pre-release",
|
||||
},
|
||||
{
|
||||
name: "rc version",
|
||||
version: "0.23.0-rc.1",
|
||||
expected: true,
|
||||
description: "RC version should be pre-release",
|
||||
},
|
||||
{
|
||||
name: "version with alpha substring",
|
||||
version: "0.23.0-alphabetical",
|
||||
expected: true,
|
||||
description: "Version containing 'alpha' should be pre-release",
|
||||
},
|
||||
{
|
||||
name: "version with beta substring",
|
||||
version: "0.23.0-betamax",
|
||||
expected: true,
|
||||
description: "Version containing 'beta' should be pre-release",
|
||||
},
|
||||
{
|
||||
name: "dev version",
|
||||
version: "0.23.0-dev",
|
||||
expected: true,
|
||||
description: "Dev version should be pre-release",
|
||||
},
|
||||
{
|
||||
name: "empty version",
|
||||
version: "",
|
||||
expected: false,
|
||||
description: "Empty version should not be pre-release",
|
||||
},
|
||||
{
|
||||
name: "version with patch number",
|
||||
version: "0.23.1",
|
||||
expected: false,
|
||||
description: "Stable version with patch should not be pre-release",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := isPreReleaseVersion(tt.version)
|
||||
if result != tt.expected {
|
||||
t.Errorf("%s: got %v, want %v\nDescription: %s\nVersion: %s",
|
||||
tt.name,
|
||||
result,
|
||||
tt.expected,
|
||||
tt.description,
|
||||
tt.version,
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -202,6 +202,18 @@ func createGoTestContainer(ctx context.Context, cli *client.Client, config *RunC
fmt.Sprintf("HEADSCALE_INTEGRATION_POSTGRES=%d", boolToInt(config.UsePostgres)),
"HEADSCALE_INTEGRATION_RUN_ID=" + runID,
}

// Pass through all HEADSCALE_INTEGRATION_* environment variables
for _, e := range os.Environ() {
if strings.HasPrefix(e, "HEADSCALE_INTEGRATION_") {
// Skip the ones we already set explicitly
if strings.HasPrefix(e, "HEADSCALE_INTEGRATION_POSTGRES=") ||
strings.HasPrefix(e, "HEADSCALE_INTEGRATION_RUN_ID=") {
continue
}
env = append(env, e)
}
}
containerConfig := &container.Config{
Image: "golang:" + config.GoVersion,
Cmd: goTestCmd,

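With this change any `HEADSCALE_INTEGRATION_*` variable from the caller's environment is forwarded into the Go test container, except the two the runner already sets itself. A hedged sketch of a local run; the variable name below is purely illustrative, and the `hi run` invocation mirrors the CI workflow above:

```bash
# Forward a custom HEADSCALE_INTEGRATION_* setting into the test container
# (HEADSCALE_INTEGRATION_EXAMPLE is hypothetical; POSTGRES and RUN_ID are always set by the runner)
HEADSCALE_INTEGRATION_EXAMPLE=1 \
  nix develop --command -- hi run --stats "^TestExpireNode$" --timeout=120m
```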
@@ -216,6 +216,39 @@ nodes.
}
```

### Restrict access to exit nodes per user or group

A user can use _any_ of the available exit nodes with `autogroup:internet`. Alternatively, the ACL snippet below assigns
each user a specific exit node while hiding all other exit nodes. The user `alice` can only use exit node `exit1` while
user `bob` can only use exit node `exit2`.

```json title="Assign each user a dedicated exit node"
{
"hosts": {
"exit1": "100.64.0.1/32",
"exit2": "100.64.0.2/32"
},
"acls": [
{
"action": "accept",
"src": ["alice@"],
"dst": ["exit1:*"]
},
{
"action": "accept",
"src": ["bob@"],
"dst": ["exit2:*"]
}
]
}
```

!!! warning

- The above implementation is Headscale specific and will likely be removed once [support for
`via`](https://github.com/juanfont/headscale/issues/2409) is available.
- Beware that a user can also connect to any port of the exit node itself.

### Automatically approve an exit node with auto approvers

The initial setup of an exit node usually requires manual approval on the control server before it can be used by a node

@@ -7,7 +7,7 @@ Both are available on the [GitHub releases page](https://github.com/juanfont/hea

It is recommended to use our DEB packages to install headscale on a Debian based system as those packages configure a
local user to run headscale, provide a default configuration and ship with a systemd service file. Supported
distributions are Ubuntu 22.04 or newer, Debian 11 or newer.
distributions are Ubuntu 22.04 or newer, Debian 12 or newer.

1. Download the [latest headscale package](https://github.com/juanfont/headscale/releases/latest) for your platform (`.deb` for Ubuntu and Debian).

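The hunk ends before the remaining install steps; as a hedged sketch that is not part of this change (the package filename is a placeholder, and the DEB ships the systemd unit mentioned above), installation typically continues with:

```bash
# Install the downloaded package and start the bundled systemd service
sudo apt install ./headscale_<VERSION>_amd64.deb
sudo systemctl enable --now headscale
```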
@@ -19,7 +19,7 @@
overlay = _: prev: let
pkgs = nixpkgs.legacyPackages.${prev.system};
buildGo = pkgs.buildGo125Module;
vendorHash = "sha256-GUIzlPRsyEq1uSTzRNds9p1uVu4pTeH5PAxrJ5Njhis=";
vendorHash = "sha256-VOi4PGZ8I+2MiwtzxpKc/4smsL5KcH/pHVkjJfAFPJ0=";
in {
headscale = buildGo {
pname = "headscale";

@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.8
|
||||
// protoc-gen-go v1.36.10
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/apikey.proto
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.8
|
||||
// protoc-gen-go v1.36.10
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/device.proto
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.8
|
||||
// protoc-gen-go v1.36.10
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/headscale.proto
|
||||
|
||||
@@ -109,7 +109,7 @@ const file_headscale_v1_headscale_proto_rawDesc = "" +
|
||||
"\x1cheadscale/v1/headscale.proto\x12\fheadscale.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17headscale/v1/user.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/node.proto\x1a\x19headscale/v1/apikey.proto\x1a\x19headscale/v1/policy.proto\"\x0f\n" +
|
||||
"\rHealthRequest\"E\n" +
|
||||
"\x0eHealthResponse\x123\n" +
|
||||
"\x15database_connectivity\x18\x01 \x01(\bR\x14databaseConnectivity2\x80\x17\n" +
|
||||
"\x15database_connectivity\x18\x01 \x01(\bR\x14databaseConnectivity2\xff\x17\n" +
|
||||
"\x10HeadscaleService\x12h\n" +
|
||||
"\n" +
|
||||
"CreateUser\x12\x1f.headscale.v1.CreateUserRequest\x1a .headscale.v1.CreateUserResponse\"\x17\x82\xd3\xe4\x93\x02\x11:\x01*\"\f/api/v1/user\x12\x80\x01\n" +
|
||||
@@ -119,7 +119,8 @@ const file_headscale_v1_headscale_proto_rawDesc = "" +
|
||||
"DeleteUser\x12\x1f.headscale.v1.DeleteUserRequest\x1a .headscale.v1.DeleteUserResponse\"\x19\x82\xd3\xe4\x93\x02\x13*\x11/api/v1/user/{id}\x12b\n" +
|
||||
"\tListUsers\x12\x1e.headscale.v1.ListUsersRequest\x1a\x1f.headscale.v1.ListUsersResponse\"\x14\x82\xd3\xe4\x93\x02\x0e\x12\f/api/v1/user\x12\x80\x01\n" +
|
||||
"\x10CreatePreAuthKey\x12%.headscale.v1.CreatePreAuthKeyRequest\x1a&.headscale.v1.CreatePreAuthKeyResponse\"\x1d\x82\xd3\xe4\x93\x02\x17:\x01*\"\x12/api/v1/preauthkey\x12\x87\x01\n" +
|
||||
"\x10ExpirePreAuthKey\x12%.headscale.v1.ExpirePreAuthKeyRequest\x1a&.headscale.v1.ExpirePreAuthKeyResponse\"$\x82\xd3\xe4\x93\x02\x1e:\x01*\"\x19/api/v1/preauthkey/expire\x12z\n" +
|
||||
"\x10ExpirePreAuthKey\x12%.headscale.v1.ExpirePreAuthKeyRequest\x1a&.headscale.v1.ExpirePreAuthKeyResponse\"$\x82\xd3\xe4\x93\x02\x1e:\x01*\"\x19/api/v1/preauthkey/expire\x12}\n" +
|
||||
"\x10DeletePreAuthKey\x12%.headscale.v1.DeletePreAuthKeyRequest\x1a&.headscale.v1.DeletePreAuthKeyResponse\"\x1a\x82\xd3\xe4\x93\x02\x14*\x12/api/v1/preauthkey\x12z\n" +
|
||||
"\x0fListPreAuthKeys\x12$.headscale.v1.ListPreAuthKeysRequest\x1a%.headscale.v1.ListPreAuthKeysResponse\"\x1a\x82\xd3\xe4\x93\x02\x14\x12\x12/api/v1/preauthkey\x12}\n" +
|
||||
"\x0fDebugCreateNode\x12$.headscale.v1.DebugCreateNodeRequest\x1a%.headscale.v1.DebugCreateNodeResponse\"\x1d\x82\xd3\xe4\x93\x02\x17:\x01*\"\x12/api/v1/debug/node\x12f\n" +
|
||||
"\aGetNode\x12\x1c.headscale.v1.GetNodeRequest\x1a\x1d.headscale.v1.GetNodeResponse\"\x1e\x82\xd3\xe4\x93\x02\x18\x12\x16/api/v1/node/{node_id}\x12n\n" +
|
||||
@@ -165,48 +166,50 @@ var file_headscale_v1_headscale_proto_goTypes = []any{
|
||||
(*ListUsersRequest)(nil), // 5: headscale.v1.ListUsersRequest
|
||||
(*CreatePreAuthKeyRequest)(nil), // 6: headscale.v1.CreatePreAuthKeyRequest
|
||||
(*ExpirePreAuthKeyRequest)(nil), // 7: headscale.v1.ExpirePreAuthKeyRequest
|
||||
(*ListPreAuthKeysRequest)(nil), // 8: headscale.v1.ListPreAuthKeysRequest
|
||||
(*DebugCreateNodeRequest)(nil), // 9: headscale.v1.DebugCreateNodeRequest
|
||||
(*GetNodeRequest)(nil), // 10: headscale.v1.GetNodeRequest
|
||||
(*SetTagsRequest)(nil), // 11: headscale.v1.SetTagsRequest
|
||||
(*SetApprovedRoutesRequest)(nil), // 12: headscale.v1.SetApprovedRoutesRequest
|
||||
(*RegisterNodeRequest)(nil), // 13: headscale.v1.RegisterNodeRequest
|
||||
(*DeleteNodeRequest)(nil), // 14: headscale.v1.DeleteNodeRequest
|
||||
(*ExpireNodeRequest)(nil), // 15: headscale.v1.ExpireNodeRequest
|
||||
(*RenameNodeRequest)(nil), // 16: headscale.v1.RenameNodeRequest
|
||||
(*ListNodesRequest)(nil), // 17: headscale.v1.ListNodesRequest
|
||||
(*MoveNodeRequest)(nil), // 18: headscale.v1.MoveNodeRequest
|
||||
(*BackfillNodeIPsRequest)(nil), // 19: headscale.v1.BackfillNodeIPsRequest
|
||||
(*CreateApiKeyRequest)(nil), // 20: headscale.v1.CreateApiKeyRequest
|
||||
(*ExpireApiKeyRequest)(nil), // 21: headscale.v1.ExpireApiKeyRequest
|
||||
(*ListApiKeysRequest)(nil), // 22: headscale.v1.ListApiKeysRequest
|
||||
(*DeleteApiKeyRequest)(nil), // 23: headscale.v1.DeleteApiKeyRequest
|
||||
(*GetPolicyRequest)(nil), // 24: headscale.v1.GetPolicyRequest
|
||||
(*SetPolicyRequest)(nil), // 25: headscale.v1.SetPolicyRequest
|
||||
(*CreateUserResponse)(nil), // 26: headscale.v1.CreateUserResponse
|
||||
(*RenameUserResponse)(nil), // 27: headscale.v1.RenameUserResponse
|
||||
(*DeleteUserResponse)(nil), // 28: headscale.v1.DeleteUserResponse
|
||||
(*ListUsersResponse)(nil), // 29: headscale.v1.ListUsersResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 30: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 31: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 32: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateNodeResponse)(nil), // 33: headscale.v1.DebugCreateNodeResponse
|
||||
(*GetNodeResponse)(nil), // 34: headscale.v1.GetNodeResponse
|
||||
(*SetTagsResponse)(nil), // 35: headscale.v1.SetTagsResponse
|
||||
(*SetApprovedRoutesResponse)(nil), // 36: headscale.v1.SetApprovedRoutesResponse
|
||||
(*RegisterNodeResponse)(nil), // 37: headscale.v1.RegisterNodeResponse
|
||||
(*DeleteNodeResponse)(nil), // 38: headscale.v1.DeleteNodeResponse
|
||||
(*ExpireNodeResponse)(nil), // 39: headscale.v1.ExpireNodeResponse
|
||||
(*RenameNodeResponse)(nil), // 40: headscale.v1.RenameNodeResponse
|
||||
(*ListNodesResponse)(nil), // 41: headscale.v1.ListNodesResponse
|
||||
(*MoveNodeResponse)(nil), // 42: headscale.v1.MoveNodeResponse
|
||||
(*BackfillNodeIPsResponse)(nil), // 43: headscale.v1.BackfillNodeIPsResponse
|
||||
(*CreateApiKeyResponse)(nil), // 44: headscale.v1.CreateApiKeyResponse
|
||||
(*ExpireApiKeyResponse)(nil), // 45: headscale.v1.ExpireApiKeyResponse
|
||||
(*ListApiKeysResponse)(nil), // 46: headscale.v1.ListApiKeysResponse
|
||||
(*DeleteApiKeyResponse)(nil), // 47: headscale.v1.DeleteApiKeyResponse
|
||||
(*GetPolicyResponse)(nil), // 48: headscale.v1.GetPolicyResponse
|
||||
(*SetPolicyResponse)(nil), // 49: headscale.v1.SetPolicyResponse
|
||||
(*DeletePreAuthKeyRequest)(nil), // 8: headscale.v1.DeletePreAuthKeyRequest
|
||||
(*ListPreAuthKeysRequest)(nil), // 9: headscale.v1.ListPreAuthKeysRequest
|
||||
(*DebugCreateNodeRequest)(nil), // 10: headscale.v1.DebugCreateNodeRequest
|
||||
(*GetNodeRequest)(nil), // 11: headscale.v1.GetNodeRequest
|
||||
(*SetTagsRequest)(nil), // 12: headscale.v1.SetTagsRequest
|
||||
(*SetApprovedRoutesRequest)(nil), // 13: headscale.v1.SetApprovedRoutesRequest
|
||||
(*RegisterNodeRequest)(nil), // 14: headscale.v1.RegisterNodeRequest
|
||||
(*DeleteNodeRequest)(nil), // 15: headscale.v1.DeleteNodeRequest
|
||||
(*ExpireNodeRequest)(nil), // 16: headscale.v1.ExpireNodeRequest
|
||||
(*RenameNodeRequest)(nil), // 17: headscale.v1.RenameNodeRequest
|
||||
(*ListNodesRequest)(nil), // 18: headscale.v1.ListNodesRequest
|
||||
(*MoveNodeRequest)(nil), // 19: headscale.v1.MoveNodeRequest
|
||||
(*BackfillNodeIPsRequest)(nil), // 20: headscale.v1.BackfillNodeIPsRequest
|
||||
(*CreateApiKeyRequest)(nil), // 21: headscale.v1.CreateApiKeyRequest
|
||||
(*ExpireApiKeyRequest)(nil), // 22: headscale.v1.ExpireApiKeyRequest
|
||||
(*ListApiKeysRequest)(nil), // 23: headscale.v1.ListApiKeysRequest
|
||||
(*DeleteApiKeyRequest)(nil), // 24: headscale.v1.DeleteApiKeyRequest
|
||||
(*GetPolicyRequest)(nil), // 25: headscale.v1.GetPolicyRequest
|
||||
(*SetPolicyRequest)(nil), // 26: headscale.v1.SetPolicyRequest
|
||||
(*CreateUserResponse)(nil), // 27: headscale.v1.CreateUserResponse
|
||||
(*RenameUserResponse)(nil), // 28: headscale.v1.RenameUserResponse
|
||||
(*DeleteUserResponse)(nil), // 29: headscale.v1.DeleteUserResponse
|
||||
(*ListUsersResponse)(nil), // 30: headscale.v1.ListUsersResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 31: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 32: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*DeletePreAuthKeyResponse)(nil), // 33: headscale.v1.DeletePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 34: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateNodeResponse)(nil), // 35: headscale.v1.DebugCreateNodeResponse
|
||||
(*GetNodeResponse)(nil), // 36: headscale.v1.GetNodeResponse
|
||||
(*SetTagsResponse)(nil), // 37: headscale.v1.SetTagsResponse
|
||||
(*SetApprovedRoutesResponse)(nil), // 38: headscale.v1.SetApprovedRoutesResponse
|
||||
(*RegisterNodeResponse)(nil), // 39: headscale.v1.RegisterNodeResponse
|
||||
(*DeleteNodeResponse)(nil), // 40: headscale.v1.DeleteNodeResponse
|
||||
(*ExpireNodeResponse)(nil), // 41: headscale.v1.ExpireNodeResponse
|
||||
(*RenameNodeResponse)(nil), // 42: headscale.v1.RenameNodeResponse
|
||||
(*ListNodesResponse)(nil), // 43: headscale.v1.ListNodesResponse
|
||||
(*MoveNodeResponse)(nil), // 44: headscale.v1.MoveNodeResponse
|
||||
(*BackfillNodeIPsResponse)(nil), // 45: headscale.v1.BackfillNodeIPsResponse
|
||||
(*CreateApiKeyResponse)(nil), // 46: headscale.v1.CreateApiKeyResponse
|
||||
(*ExpireApiKeyResponse)(nil), // 47: headscale.v1.ExpireApiKeyResponse
|
||||
(*ListApiKeysResponse)(nil), // 48: headscale.v1.ListApiKeysResponse
|
||||
(*DeleteApiKeyResponse)(nil), // 49: headscale.v1.DeleteApiKeyResponse
|
||||
(*GetPolicyResponse)(nil), // 50: headscale.v1.GetPolicyResponse
|
||||
(*SetPolicyResponse)(nil), // 51: headscale.v1.SetPolicyResponse
|
||||
}
|
||||
var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
||||
2, // 0: headscale.v1.HeadscaleService.CreateUser:input_type -> headscale.v1.CreateUserRequest
|
||||
@@ -215,52 +218,54 @@ var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
||||
5, // 3: headscale.v1.HeadscaleService.ListUsers:input_type -> headscale.v1.ListUsersRequest
|
||||
6, // 4: headscale.v1.HeadscaleService.CreatePreAuthKey:input_type -> headscale.v1.CreatePreAuthKeyRequest
|
||||
7, // 5: headscale.v1.HeadscaleService.ExpirePreAuthKey:input_type -> headscale.v1.ExpirePreAuthKeyRequest
|
||||
8, // 6: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest
|
||||
9, // 7: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest
|
||||
10, // 8: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest
|
||||
11, // 9: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
|
||||
12, // 10: headscale.v1.HeadscaleService.SetApprovedRoutes:input_type -> headscale.v1.SetApprovedRoutesRequest
|
||||
13, // 11: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
|
||||
14, // 12: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
|
||||
15, // 13: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
|
||||
16, // 14: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
|
||||
17, // 15: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
|
||||
18, // 16: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
|
||||
19, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
|
||||
20, // 18: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
|
||||
21, // 19: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
|
||||
22, // 20: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
|
||||
23, // 21: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
|
||||
24, // 22: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
|
||||
25, // 23: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
|
||||
0, // 24: headscale.v1.HeadscaleService.Health:input_type -> headscale.v1.HealthRequest
|
||||
26, // 25: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
|
||||
27, // 26: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
|
||||
28, // 27: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
|
||||
29, // 28: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
|
||||
30, // 29: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
31, // 30: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
32, // 31: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
33, // 32: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
|
||||
34, // 33: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
|
||||
35, // 34: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
|
||||
36, // 35: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
|
||||
37, // 36: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
|
||||
38, // 37: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
|
||||
39, // 38: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
|
||||
40, // 39: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
|
||||
41, // 40: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
|
||||
42, // 41: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
|
||||
43, // 42: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
|
||||
44, // 43: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
|
||||
45, // 44: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
|
||||
46, // 45: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
|
||||
47, // 46: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
|
||||
48, // 47: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
|
||||
49, // 48: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
|
||||
1, // 49: headscale.v1.HeadscaleService.Health:output_type -> headscale.v1.HealthResponse
|
||||
25, // [25:50] is the sub-list for method output_type
|
||||
0, // [0:25] is the sub-list for method input_type
|
||||
8, // 6: headscale.v1.HeadscaleService.DeletePreAuthKey:input_type -> headscale.v1.DeletePreAuthKeyRequest
|
||||
9, // 7: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest
|
||||
10, // 8: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest
|
||||
11, // 9: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest
|
||||
12, // 10: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
|
||||
13, // 11: headscale.v1.HeadscaleService.SetApprovedRoutes:input_type -> headscale.v1.SetApprovedRoutesRequest
|
||||
14, // 12: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
|
||||
15, // 13: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
|
||||
16, // 14: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
|
||||
17, // 15: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
|
||||
18, // 16: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
|
||||
19, // 17: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
|
||||
20, // 18: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
|
||||
21, // 19: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
|
||||
22, // 20: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
|
||||
23, // 21: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
|
||||
24, // 22: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
|
||||
25, // 23: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
|
||||
26, // 24: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
|
||||
0, // 25: headscale.v1.HeadscaleService.Health:input_type -> headscale.v1.HealthRequest
|
||||
27, // 26: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
|
||||
28, // 27: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
|
||||
29, // 28: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
|
||||
30, // 29: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
|
||||
31, // 30: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
32, // 31: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
33, // 32: headscale.v1.HeadscaleService.DeletePreAuthKey:output_type -> headscale.v1.DeletePreAuthKeyResponse
|
||||
34, // 33: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
35, // 34: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
|
||||
36, // 35: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
|
||||
37, // 36: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
|
||||
38, // 37: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
|
||||
39, // 38: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
|
||||
40, // 39: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
|
||||
41, // 40: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
|
||||
42, // 41: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
|
||||
43, // 42: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
|
||||
44, // 43: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
|
||||
45, // 44: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
|
||||
46, // 45: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
|
||||
47, // 46: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
|
||||
48, // 47: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
|
||||
49, // 48: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
|
||||
50, // 49: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
|
||||
51, // 50: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
|
||||
1, // 51: headscale.v1.HeadscaleService.Health:output_type -> headscale.v1.HealthResponse
|
||||
26, // [26:52] is the sub-list for method output_type
|
||||
0, // [0:26] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
|
||||
@@ -227,6 +227,38 @@ func local_request_HeadscaleService_ExpirePreAuthKey_0(ctx context.Context, mars
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
var filter_HeadscaleService_DeletePreAuthKey_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
|
||||
|
||||
func request_HeadscaleService_DeletePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq DeletePreAuthKeyRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_DeletePreAuthKey_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := client.DeletePreAuthKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_DeletePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq DeletePreAuthKeyRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_DeletePreAuthKey_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := server.DeletePreAuthKey(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
var filter_HeadscaleService_ListPreAuthKeys_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
|
||||
|
||||
func request_HeadscaleService_ListPreAuthKeys_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
@@ -471,6 +503,8 @@ func local_request_HeadscaleService_DeleteNode_0(ctx context.Context, marshaler
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
var filter_HeadscaleService_ExpireNode_0 = &utilities.DoubleArray{Encoding: map[string]int{"node_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
|
||||
|
||||
func request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq ExpireNodeRequest
|
||||
@@ -485,6 +519,12 @@ func request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler runtim
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ExpireNode_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := client.ExpireNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -503,6 +543,12 @@ func local_request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ExpireNode_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := server.ExpireNode(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -953,6 +999,26 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_ExpirePreAuthKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodDelete, pattern_HeadscaleService_DeletePreAuthKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DeletePreAuthKey", runtime.WithHTTPPathPattern("/api/v1/preauthkey"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_DeletePreAuthKey_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_DeletePreAuthKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_HeadscaleService_ListPreAuthKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -1475,6 +1541,23 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_ExpirePreAuthKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodDelete, pattern_HeadscaleService_DeletePreAuthKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DeletePreAuthKey", runtime.WithHTTPPathPattern("/api/v1/preauthkey"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_DeletePreAuthKey_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_DeletePreAuthKey_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_HeadscaleService_ListPreAuthKeys_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -1808,6 +1891,7 @@ var (
|
||||
pattern_HeadscaleService_ListUsers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "user"}, ""))
|
||||
pattern_HeadscaleService_CreatePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
|
||||
pattern_HeadscaleService_ExpirePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "preauthkey", "expire"}, ""))
|
||||
pattern_HeadscaleService_DeletePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
|
||||
pattern_HeadscaleService_ListPreAuthKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
|
||||
pattern_HeadscaleService_DebugCreateNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "debug", "node"}, ""))
|
||||
pattern_HeadscaleService_GetNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "node", "node_id"}, ""))
|
||||
@@ -1836,6 +1920,7 @@ var (
|
||||
forward_HeadscaleService_ListUsers_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_CreatePreAuthKey_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ExpirePreAuthKey_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_DeletePreAuthKey_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_ListPreAuthKeys_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_DebugCreateNode_0 = runtime.ForwardResponseMessage
|
||||
forward_HeadscaleService_GetNode_0 = runtime.ForwardResponseMessage
|
||||
|
||||
@@ -25,6 +25,7 @@ const (
|
||||
HeadscaleService_ListUsers_FullMethodName = "/headscale.v1.HeadscaleService/ListUsers"
|
||||
HeadscaleService_CreatePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/CreatePreAuthKey"
|
||||
HeadscaleService_ExpirePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpirePreAuthKey"
|
||||
HeadscaleService_DeletePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/DeletePreAuthKey"
|
||||
HeadscaleService_ListPreAuthKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListPreAuthKeys"
|
||||
HeadscaleService_DebugCreateNode_FullMethodName = "/headscale.v1.HeadscaleService/DebugCreateNode"
|
||||
HeadscaleService_GetNode_FullMethodName = "/headscale.v1.HeadscaleService/GetNode"
|
||||
@@ -58,6 +59,7 @@ type HeadscaleServiceClient interface {
|
||||
// --- PreAuthKeys start ---
|
||||
CreatePreAuthKey(ctx context.Context, in *CreatePreAuthKeyRequest, opts ...grpc.CallOption) (*CreatePreAuthKeyResponse, error)
|
||||
ExpirePreAuthKey(ctx context.Context, in *ExpirePreAuthKeyRequest, opts ...grpc.CallOption) (*ExpirePreAuthKeyResponse, error)
|
||||
DeletePreAuthKey(ctx context.Context, in *DeletePreAuthKeyRequest, opts ...grpc.CallOption) (*DeletePreAuthKeyResponse, error)
|
||||
ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error)
|
||||
// --- Node start ---
|
||||
DebugCreateNode(ctx context.Context, in *DebugCreateNodeRequest, opts ...grpc.CallOption) (*DebugCreateNodeResponse, error)
|
||||
@@ -151,6 +153,16 @@ func (c *headscaleServiceClient) ExpirePreAuthKey(ctx context.Context, in *Expir
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeletePreAuthKey(ctx context.Context, in *DeletePreAuthKeyRequest, opts ...grpc.CallOption) (*DeletePreAuthKeyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(DeletePreAuthKeyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeletePreAuthKey_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ListPreAuthKeysResponse)
|
||||
@@ -353,6 +365,7 @@ type HeadscaleServiceServer interface {
|
||||
// --- PreAuthKeys start ---
|
||||
CreatePreAuthKey(context.Context, *CreatePreAuthKeyRequest) (*CreatePreAuthKeyResponse, error)
|
||||
ExpirePreAuthKey(context.Context, *ExpirePreAuthKeyRequest) (*ExpirePreAuthKeyResponse, error)
|
||||
DeletePreAuthKey(context.Context, *DeletePreAuthKeyRequest) (*DeletePreAuthKeyResponse, error)
|
||||
ListPreAuthKeys(context.Context, *ListPreAuthKeysRequest) (*ListPreAuthKeysResponse, error)
|
||||
// --- Node start ---
|
||||
DebugCreateNode(context.Context, *DebugCreateNodeRequest) (*DebugCreateNodeResponse, error)
|
||||
@@ -404,6 +417,9 @@ func (UnimplementedHeadscaleServiceServer) CreatePreAuthKey(context.Context, *Cr
|
||||
func (UnimplementedHeadscaleServiceServer) ExpirePreAuthKey(context.Context, *ExpirePreAuthKeyRequest) (*ExpirePreAuthKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ExpirePreAuthKey not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DeletePreAuthKey(context.Context, *DeletePreAuthKeyRequest) (*DeletePreAuthKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeletePreAuthKey not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ListPreAuthKeys(context.Context, *ListPreAuthKeysRequest) (*ListPreAuthKeysResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListPreAuthKeys not implemented")
|
||||
}
|
||||
@@ -590,6 +606,24 @@ func _HeadscaleService_ExpirePreAuthKey_Handler(srv interface{}, ctx context.Con
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_DeletePreAuthKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeletePreAuthKeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).DeletePreAuthKey(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_DeletePreAuthKey_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DeletePreAuthKey(ctx, req.(*DeletePreAuthKeyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ListPreAuthKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListPreAuthKeysRequest)
|
||||
if err := dec(in); err != nil {
|
||||
@@ -963,6 +997,10 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
|
||||
MethodName: "ExpirePreAuthKey",
|
||||
Handler: _HeadscaleService_ExpirePreAuthKey_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DeletePreAuthKey",
|
||||
Handler: _HeadscaleService_DeletePreAuthKey_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ListPreAuthKeys",
|
||||
Handler: _HeadscaleService_ListPreAuthKeys_Handler,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.8
|
||||
// protoc-gen-go v1.36.10
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/node.proto
|
||||
|
||||
@@ -729,6 +729,7 @@ func (*DeleteNodeResponse) Descriptor() ([]byte, []int) {
|
||||
type ExpireNodeRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
|
||||
Expiry *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expiry,proto3" json:"expiry,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
@@ -770,6 +771,13 @@ func (x *ExpireNodeRequest) GetNodeId() uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ExpireNodeRequest) GetExpiry() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.Expiry
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ExpireNodeResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
|
||||
@@ -1349,9 +1357,10 @@ const file_headscale_v1_node_proto_rawDesc = "" +
|
||||
"\x04node\x18\x01 \x01(\v2\x12.headscale.v1.NodeR\x04node\",\n" +
|
||||
"\x11DeleteNodeRequest\x12\x17\n" +
|
||||
"\anode_id\x18\x01 \x01(\x04R\x06nodeId\"\x14\n" +
|
||||
"\x12DeleteNodeResponse\",\n" +
|
||||
"\x12DeleteNodeResponse\"`\n" +
|
||||
"\x11ExpireNodeRequest\x12\x17\n" +
|
||||
"\anode_id\x18\x01 \x01(\x04R\x06nodeId\"<\n" +
|
||||
"\anode_id\x18\x01 \x01(\x04R\x06nodeId\x122\n" +
|
||||
"\x06expiry\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x06expiry\"<\n" +
|
||||
"\x12ExpireNodeResponse\x12&\n" +
|
||||
"\x04node\x18\x01 \x01(\v2\x12.headscale.v1.NodeR\x04node\"G\n" +
|
||||
"\x11RenameNodeRequest\x12\x17\n" +
|
||||
@@ -1439,16 +1448,17 @@ var file_headscale_v1_node_proto_depIdxs = []int32{
|
||||
1, // 7: headscale.v1.GetNodeResponse.node:type_name -> headscale.v1.Node
|
||||
1, // 8: headscale.v1.SetTagsResponse.node:type_name -> headscale.v1.Node
|
||||
1, // 9: headscale.v1.SetApprovedRoutesResponse.node:type_name -> headscale.v1.Node
|
||||
1, // 10: headscale.v1.ExpireNodeResponse.node:type_name -> headscale.v1.Node
|
||||
1, // 11: headscale.v1.RenameNodeResponse.node:type_name -> headscale.v1.Node
|
||||
1, // 12: headscale.v1.ListNodesResponse.nodes:type_name -> headscale.v1.Node
|
||||
1, // 13: headscale.v1.MoveNodeResponse.node:type_name -> headscale.v1.Node
|
||||
1, // 14: headscale.v1.DebugCreateNodeResponse.node:type_name -> headscale.v1.Node
|
||||
15, // [15:15] is the sub-list for method output_type
|
||||
15, // [15:15] is the sub-list for method input_type
|
||||
15, // [15:15] is the sub-list for extension type_name
|
||||
15, // [15:15] is the sub-list for extension extendee
|
||||
0, // [0:15] is the sub-list for field type_name
|
||||
25, // 10: headscale.v1.ExpireNodeRequest.expiry:type_name -> google.protobuf.Timestamp
|
||||
1, // 11: headscale.v1.ExpireNodeResponse.node:type_name -> headscale.v1.Node
|
||||
1, // 12: headscale.v1.RenameNodeResponse.node:type_name -> headscale.v1.Node
|
||||
1, // 13: headscale.v1.ListNodesResponse.nodes:type_name -> headscale.v1.Node
|
||||
1, // 14: headscale.v1.MoveNodeResponse.node:type_name -> headscale.v1.Node
|
||||
1, // 15: headscale.v1.DebugCreateNodeResponse.node:type_name -> headscale.v1.Node
|
||||
16, // [16:16] is the sub-list for method output_type
|
||||
16, // [16:16] is the sub-list for method input_type
|
||||
16, // [16:16] is the sub-list for extension type_name
|
||||
16, // [16:16] is the sub-list for extension extendee
|
||||
0, // [0:16] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_headscale_v1_node_proto_init() }
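The hunks above add an optional expiry timestamp to ExpireNodeRequest. As a rough illustration only (not part of this changeset), the sketch below shows how the new field might be used from Go. Assumptions: the generated client package github.com/juanfont/headscale/gen/go/headscale/v1, a plaintext local gRPC listener, and node ID 1 are placeholders; a real headscale deployment additionally requires API-key authentication on the connection.

```go
package main

import (
	"context"
	"log"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Placeholder: plaintext local listener; real deployments also need an API key.
	conn, err := grpc.NewClient("127.0.0.1:50443",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := v1.NewHeadscaleServiceClient(conn)

	// With the new field, node 1 is expired a day from now instead of
	// immediately; leaving Expiry unset keeps the previous behaviour.
	resp, err := client.ExpireNode(context.Background(), &v1.ExpireNodeRequest{
		NodeId: 1,
		Expiry: timestamppb.New(time.Now().Add(24 * time.Hour)),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("node %d expiry updated", resp.GetNode().GetId())
}
```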
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.8
|
||||
// protoc-gen-go v1.36.10
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/policy.proto
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.8
|
||||
// protoc-gen-go v1.36.10
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/preauthkey.proto
|
||||
|
||||
@@ -338,6 +338,94 @@ func (*ExpirePreAuthKeyResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
type DeletePreAuthKeyRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *DeletePreAuthKeyRequest) Reset() {
|
||||
*x = DeletePreAuthKeyRequest{}
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *DeletePreAuthKeyRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeletePreAuthKeyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DeletePreAuthKeyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[5]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeletePreAuthKeyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DeletePreAuthKeyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *DeletePreAuthKeyRequest) GetUser() uint64 {
|
||||
if x != nil {
|
||||
return x.User
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *DeletePreAuthKeyRequest) GetKey() string {
|
||||
if x != nil {
|
||||
return x.Key
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type DeletePreAuthKeyResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *DeletePreAuthKeyResponse) Reset() {
|
||||
*x = DeletePreAuthKeyResponse{}
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *DeletePreAuthKeyResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeletePreAuthKeyResponse) ProtoMessage() {}
|
||||
|
||||
func (x *DeletePreAuthKeyResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[6]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeletePreAuthKeyResponse.ProtoReflect.Descriptor instead.
|
||||
func (*DeletePreAuthKeyResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
type ListPreAuthKeysRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
|
||||
@@ -347,7 +435,7 @@ type ListPreAuthKeysRequest struct {
|
||||
|
||||
func (x *ListPreAuthKeysRequest) Reset() {
|
||||
*x = ListPreAuthKeysRequest{}
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[5]
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -359,7 +447,7 @@ func (x *ListPreAuthKeysRequest) String() string {
|
||||
func (*ListPreAuthKeysRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ListPreAuthKeysRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[5]
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[7]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -372,7 +460,7 @@ func (x *ListPreAuthKeysRequest) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use ListPreAuthKeysRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ListPreAuthKeysRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{5}
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysRequest) GetUser() uint64 {
|
||||
@@ -391,7 +479,7 @@ type ListPreAuthKeysResponse struct {
|
||||
|
||||
func (x *ListPreAuthKeysResponse) Reset() {
|
||||
*x = ListPreAuthKeysResponse{}
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[6]
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -403,7 +491,7 @@ func (x *ListPreAuthKeysResponse) String() string {
|
||||
func (*ListPreAuthKeysResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ListPreAuthKeysResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[6]
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[8]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -416,7 +504,7 @@ func (x *ListPreAuthKeysResponse) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use ListPreAuthKeysResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ListPreAuthKeysResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{6}
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysResponse) GetPreAuthKeys() []*PreAuthKey {
|
||||
@@ -459,7 +547,11 @@ const file_headscale_v1_preauthkey_proto_rawDesc = "" +
|
||||
"\x17ExpirePreAuthKeyRequest\x12\x12\n" +
|
||||
"\x04user\x18\x01 \x01(\x04R\x04user\x12\x10\n" +
|
||||
"\x03key\x18\x02 \x01(\tR\x03key\"\x1a\n" +
|
||||
"\x18ExpirePreAuthKeyResponse\",\n" +
|
||||
"\x18ExpirePreAuthKeyResponse\"?\n" +
|
||||
"\x17DeletePreAuthKeyRequest\x12\x12\n" +
|
||||
"\x04user\x18\x01 \x01(\x04R\x04user\x12\x10\n" +
|
||||
"\x03key\x18\x02 \x01(\tR\x03key\"\x1a\n" +
|
||||
"\x18DeletePreAuthKeyResponse\",\n" +
|
||||
"\x16ListPreAuthKeysRequest\x12\x12\n" +
|
||||
"\x04user\x18\x01 \x01(\x04R\x04user\"W\n" +
|
||||
"\x17ListPreAuthKeysResponse\x12<\n" +
|
||||
@@ -477,30 +569,32 @@ func file_headscale_v1_preauthkey_proto_rawDescGZIP() []byte {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_headscale_v1_preauthkey_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
|
||||
var file_headscale_v1_preauthkey_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
|
||||
var file_headscale_v1_preauthkey_proto_goTypes = []any{
|
||||
(*PreAuthKey)(nil), // 0: headscale.v1.PreAuthKey
|
||||
(*CreatePreAuthKeyRequest)(nil), // 1: headscale.v1.CreatePreAuthKeyRequest
|
||||
(*CreatePreAuthKeyResponse)(nil), // 2: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyRequest)(nil), // 3: headscale.v1.ExpirePreAuthKeyRequest
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 4: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysRequest)(nil), // 5: headscale.v1.ListPreAuthKeysRequest
|
||||
(*ListPreAuthKeysResponse)(nil), // 6: headscale.v1.ListPreAuthKeysResponse
|
||||
(*User)(nil), // 7: headscale.v1.User
|
||||
(*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp
|
||||
(*DeletePreAuthKeyRequest)(nil), // 5: headscale.v1.DeletePreAuthKeyRequest
|
||||
(*DeletePreAuthKeyResponse)(nil), // 6: headscale.v1.DeletePreAuthKeyResponse
|
||||
(*ListPreAuthKeysRequest)(nil), // 7: headscale.v1.ListPreAuthKeysRequest
|
||||
(*ListPreAuthKeysResponse)(nil), // 8: headscale.v1.ListPreAuthKeysResponse
|
||||
(*User)(nil), // 9: headscale.v1.User
|
||||
(*timestamppb.Timestamp)(nil), // 10: google.protobuf.Timestamp
|
||||
}
|
||||
var file_headscale_v1_preauthkey_proto_depIdxs = []int32{
|
||||
7, // 0: headscale.v1.PreAuthKey.user:type_name -> headscale.v1.User
|
||||
8, // 1: headscale.v1.PreAuthKey.expiration:type_name -> google.protobuf.Timestamp
|
||||
8, // 2: headscale.v1.PreAuthKey.created_at:type_name -> google.protobuf.Timestamp
|
||||
8, // 3: headscale.v1.CreatePreAuthKeyRequest.expiration:type_name -> google.protobuf.Timestamp
|
||||
0, // 4: headscale.v1.CreatePreAuthKeyResponse.pre_auth_key:type_name -> headscale.v1.PreAuthKey
|
||||
0, // 5: headscale.v1.ListPreAuthKeysResponse.pre_auth_keys:type_name -> headscale.v1.PreAuthKey
|
||||
6, // [6:6] is the sub-list for method output_type
|
||||
6, // [6:6] is the sub-list for method input_type
|
||||
6, // [6:6] is the sub-list for extension type_name
|
||||
6, // [6:6] is the sub-list for extension extendee
|
||||
0, // [0:6] is the sub-list for field type_name
|
||||
9, // 0: headscale.v1.PreAuthKey.user:type_name -> headscale.v1.User
|
||||
10, // 1: headscale.v1.PreAuthKey.expiration:type_name -> google.protobuf.Timestamp
|
||||
10, // 2: headscale.v1.PreAuthKey.created_at:type_name -> google.protobuf.Timestamp
|
||||
10, // 3: headscale.v1.CreatePreAuthKeyRequest.expiration:type_name -> google.protobuf.Timestamp
|
||||
0, // 4: headscale.v1.CreatePreAuthKeyResponse.pre_auth_key:type_name -> headscale.v1.PreAuthKey
|
||||
0, // 5: headscale.v1.ListPreAuthKeysResponse.pre_auth_keys:type_name -> headscale.v1.PreAuthKey
|
||||
6, // [6:6] is the sub-list for method output_type
|
||||
6, // [6:6] is the sub-list for method input_type
|
||||
6, // [6:6] is the sub-list for extension type_name
|
||||
6, // [6:6] is the sub-list for extension extendee
|
||||
0, // [0:6] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_headscale_v1_preauthkey_proto_init() }
|
||||
@@ -515,7 +609,7 @@ func file_headscale_v1_preauthkey_proto_init() {
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_preauthkey_proto_rawDesc), len(file_headscale_v1_preauthkey_proto_rawDesc)),
|
||||
NumEnums: 0,
|
||||
NumMessages: 7,
|
||||
NumMessages: 9,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.8
|
||||
// protoc-gen-go v1.36.10
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/user.proto
|
||||
|
||||
|
||||
@@ -406,6 +406,13 @@
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
{
|
||||
"name": "expiry",
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
@@ -611,6 +618,41 @@
|
||||
"HeadscaleService"
|
||||
]
|
||||
},
|
||||
"delete": {
|
||||
"operationId": "HeadscaleService_DeletePreAuthKey",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1DeletePreAuthKeyResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "user",
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
{
|
||||
"name": "key",
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
},
|
||||
"post": {
|
||||
"summary": "--- PreAuthKeys start ---",
|
||||
"operationId": "HeadscaleService_CreatePreAuthKey",
|
||||
@@ -1022,6 +1064,9 @@
|
||||
"v1DeleteNodeResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1DeletePreAuthKeyResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1DeleteUserResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
|
||||
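The swagger change above exposes the new DeletePreAuthKey RPC as DELETE /api/v1/preauthkey with user and key query parameters. A minimal sketch (not part of this changeset) of calling it over HTTP from Go follows; the server URL, API key, user ID and key value are placeholders, and the Bearer-token header assumes headscale's usual REST API-key authentication.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
)

func main() {
	// Placeholders: server URL, API key, user ID and pre-auth key value.
	base := "https://headscale.example.com"
	apiKey := "<api-key>"

	q := url.Values{}
	q.Set("user", "1")                // numeric ID of the user that owns the key
	q.Set("key", "examplepreauthkey") // the pre-auth key to delete

	req, err := http.NewRequest(http.MethodDelete, base+"/api/v1/preauthkey?"+q.Encode(), nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// On success the endpoint returns 200 with an empty v1DeletePreAuthKeyResponse.
	fmt.Println(resp.Status)
}
```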
6
go.mod
6
go.mod
@@ -36,7 +36,7 @@ require (
|
||||
github.com/spf13/viper v1.21.0
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33
|
||||
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694
|
||||
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993
|
||||
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97
|
||||
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
|
||||
@@ -115,7 +115,7 @@ require (
|
||||
github.com/containerd/errdefs v0.3.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
|
||||
github.com/creachadair/mds v0.25.2 // indirect
|
||||
github.com/creachadair/mds v0.25.10 // indirect
|
||||
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 // indirect
|
||||
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
@@ -159,7 +159,7 @@ require (
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jsimonetti/rtnetlink v1.4.1 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/compress v1.18.1 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
|
||||
6
go.sum
6
go.sum
@@ -126,6 +126,8 @@ github.com/creachadair/flax v0.0.5 h1:zt+CRuXQASxwQ68e9GHAOnEgAU29nF0zYMHOCrL5wz
|
||||
github.com/creachadair/flax v0.0.5/go.mod h1:F1PML0JZLXSNDMNiRGK2yjm5f+L9QCHchyHBldFymj8=
|
||||
github.com/creachadair/mds v0.25.2 h1:xc0S0AfDq5GX9KUR5sLvi5XjA61/P6S5e0xFs1vA18Q=
|
||||
github.com/creachadair/mds v0.25.2/go.mod h1:+s4CFteFRj4eq2KcGHW8Wei3u9NyzSPzNV32EvjyK/Q=
|
||||
github.com/creachadair/mds v0.25.10 h1:9k9JB35D1xhOCFl0liBhagBBp8fWWkKZrA7UXsfoHtA=
|
||||
github.com/creachadair/mds v0.25.10/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs=
|
||||
github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc=
|
||||
github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
@@ -278,6 +280,8 @@ github.com/jsimonetti/rtnetlink v1.4.1 h1:JfD4jthWBqZMEffc5RjgmlzpYttAVw1sdnmiNa
|
||||
github.com/jsimonetti/rtnetlink v1.4.1/go.mod h1:xJjT7t59UIZ62GLZbv6PLLo8VFrostJMPBAheR6OM8w=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
|
||||
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
||||
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
||||
@@ -461,6 +465,8 @@ github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d h1:mnqtPWYyvNiPU9l
|
||||
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d/go.mod h1:9BzmlFc3OLqLzLTF/5AY+BMs+clxMqyhSGzgXIm8mNI=
|
||||
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694 h1:95eIP97c88cqAFU/8nURjgI9xxPbD+Ci6mY/a79BI/w=
|
||||
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694/go.mod h1:veguaG8tVg1H/JG5RfpoUW41I+O8ClPElo/fTYr8mMk=
|
||||
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993 h1:FyiiAvDAxpB0DrW2GW3KOVfi3YFOtsQUEeFWbf55JJU=
|
||||
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993/go.mod h1:xJkMmR3t+thnUQhA3Q4m2VSlS5pcOq+CIjmU/xfKKx4=
|
||||
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97 h1:JJkDnrAhHvOCttk8z9xeZzcDlzzkRA7+Duxj9cwOyxk=
|
||||
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97/go.mod h1:9jS8HxwsP2fU4ESZ7DZL+fpH/U66EVlVMzdgznH12RM=
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14=
|
||||
|
||||
@@ -446,6 +446,7 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
|
||||
|
||||
router.HandleFunc("/robots.txt", h.RobotsHandler).Methods(http.MethodGet)
|
||||
router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
|
||||
router.HandleFunc("/version", h.VersionHandler).Methods(http.MethodGet)
|
||||
router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
|
||||
router.HandleFunc("/register/{registration_id}", h.authProvider.RegisterHandler).
|
||||
Methods(http.MethodGet)
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/types/change"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/gorm"
|
||||
@@ -71,6 +70,13 @@ func (h *Headscale) handleRegister(
|
||||
// We do not look up nodes by [key.MachinePublic] as it might belong to multiple
|
||||
// nodes, separated by users, and this path handles expiring/logout paths.
|
||||
if node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok {
|
||||
// When tailscaled restarts, it sends RegisterRequest with Auth=nil and Expiry=zero.
|
||||
// Return the current node state without modification.
|
||||
// See: https://github.com/juanfont/headscale/issues/2862
|
||||
if req.Expiry.IsZero() && node.Expiry().Valid() && !node.IsExpired() {
|
||||
return nodeToRegisterResponse(node), nil
|
||||
}
|
||||
|
||||
resp, err := h.handleLogout(node, req, machineKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("handling existing node: %w", err)
|
||||
@@ -173,6 +179,7 @@ func (h *Headscale) handleLogout(
|
||||
}
|
||||
|
||||
// If the request expiry is in the past, we consider it a logout.
|
||||
// Zero expiry is handled in handleRegister() before calling this function.
|
||||
if req.Expiry.Before(time.Now()) {
|
||||
log.Debug().
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
@@ -356,16 +363,13 @@ func (h *Headscale) handleRegisterWithAuthKey(
|
||||
// eventbus.
|
||||
// TODO(kradalby): This needs to be run as part of the batcher maybe?
|
||||
// now since we don't update the node/pol here anymore
|
||||
routeChange := h.state.AutoApproveRoutes(node)
|
||||
|
||||
if _, _, err := h.state.SaveNode(node); err != nil {
|
||||
return nil, fmt.Errorf("saving auto approved routes to node: %w", err)
|
||||
routesChange, err := h.state.AutoApproveRoutes(node)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("auto approving routes: %w", err)
|
||||
}
|
||||
|
||||
if routeChange && changed.Empty() {
|
||||
changed = change.NodeAdded(node.ID())
|
||||
}
|
||||
h.Change(changed)
|
||||
// Send both changes. Empty changes are ignored by Change().
|
||||
h.Change(changed, routesChange)
|
||||
|
||||
// TODO(kradalby): I think this is covered above, but we need to validate that.
|
||||
// // If policy changed due to node registration, send a separate policy change
|
||||
|
||||
@@ -3004,3 +3004,385 @@ func createTestApp(t *testing.T) *Headscale {
|
||||
|
||||
return app
|
||||
}
|
||||
|
||||
// TestGitHubIssue2830_NodeRestartWithUsedPreAuthKey tests the scenario reported in
|
||||
// https://github.com/juanfont/headscale/issues/2830
|
||||
//
|
||||
// Scenario:
|
||||
// 1. Node registers successfully with a single-use pre-auth key
|
||||
// 2. Node is running fine
|
||||
// 3. Node restarts (e.g., after headscale upgrade or tailscale container restart)
|
||||
// 4. Node sends RegisterRequest with the same pre-auth key
|
||||
// 5. BUG: Headscale rejects the request with "authkey expired" or "authkey already used"
|
||||
//
|
||||
// Expected behavior:
|
||||
// When an existing node (identified by matching NodeKey + MachineKey) re-registers
|
||||
// with a pre-auth key that it previously used, the registration should succeed.
|
||||
// The node is not creating a new registration - it's re-authenticating the same device.
|
||||
func TestGitHubIssue2830_NodeRestartWithUsedPreAuthKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
app := createTestApp(t)
|
||||
|
||||
// Create user and single-use pre-auth key
|
||||
user := app.state.CreateUserForTest("test-user")
|
||||
pak, err := app.state.CreatePreAuthKey(types.UserID(user.ID), false, false, nil, nil) // reusable=false
|
||||
require.NoError(t, err)
|
||||
require.False(t, pak.Reusable, "key should be single-use for this test")
|
||||
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey := key.NewNode()
|
||||
|
||||
// STEP 1: Initial registration with pre-auth key (simulates fresh node joining)
|
||||
initialReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key,
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
t.Log("Step 1: Initial registration with pre-auth key")
|
||||
initialResp, err := app.handleRegister(context.Background(), initialReq, machineKey.Public())
|
||||
require.NoError(t, err, "initial registration should succeed")
|
||||
require.NotNil(t, initialResp)
|
||||
assert.True(t, initialResp.MachineAuthorized, "node should be authorized")
|
||||
assert.False(t, initialResp.NodeKeyExpired, "node key should not be expired")
|
||||
|
||||
// Verify node was created in database
|
||||
node, found := app.state.GetNodeByNodeKey(nodeKey.Public())
|
||||
require.True(t, found, "node should exist after initial registration")
|
||||
assert.Equal(t, "test-node", node.Hostname())
|
||||
assert.Equal(t, nodeKey.Public(), node.NodeKey())
|
||||
assert.Equal(t, machineKey.Public(), node.MachineKey())
|
||||
|
||||
// Verify pre-auth key is now marked as used
|
||||
usedPak, err := app.state.GetPreAuthKey(pak.Key)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, usedPak.Used, "pre-auth key should be marked as used after initial registration")
|
||||
|
||||
// STEP 2: Simulate node restart - node sends RegisterRequest again with same pre-auth key
|
||||
// This happens when:
|
||||
// - Tailscale container restarts
|
||||
// - Tailscaled service restarts
|
||||
// - System reboots
|
||||
// The Tailscale client persists the pre-auth key in its state and sends it on every registration
|
||||
t.Log("Step 2: Node restart - re-registration with same (now used) pre-auth key")
|
||||
restartReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key, // Same key, now marked as Used=true
|
||||
},
|
||||
NodeKey: nodeKey.Public(), // Same node key
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
// BUG: This fails with "authkey already used" or "authkey expired"
|
||||
// EXPECTED: Should succeed because it's the same node re-registering
|
||||
restartResp, err := app.handleRegister(context.Background(), restartReq, machineKey.Public())
|
||||
|
||||
// This is the assertion that currently FAILS in v0.27.0
|
||||
assert.NoError(t, err, "BUG: existing node re-registration with its own used pre-auth key should succeed")
|
||||
if err != nil {
|
||||
t.Logf("Error received (this is the bug): %v", err)
|
||||
t.Logf("Expected behavior: Node should be able to re-register with the same pre-auth key it used initially")
|
||||
return // Stop here to show the bug clearly
|
||||
}
|
||||
|
||||
require.NotNil(t, restartResp)
|
||||
assert.True(t, restartResp.MachineAuthorized, "node should remain authorized after restart")
|
||||
assert.False(t, restartResp.NodeKeyExpired, "node key should not be expired after restart")
|
||||
|
||||
// Verify it's the same node (not a duplicate)
|
||||
nodeAfterRestart, found := app.state.GetNodeByNodeKey(nodeKey.Public())
|
||||
require.True(t, found, "node should still exist after restart")
|
||||
assert.Equal(t, node.ID(), nodeAfterRestart.ID(), "should be the same node, not a new one")
|
||||
assert.Equal(t, "test-node", nodeAfterRestart.Hostname())
|
||||
}
|
||||
|
||||
// TestNodeReregistrationWithReusablePreAuthKey tests that reusable keys work correctly
|
||||
// for node re-registration.
|
||||
func TestNodeReregistrationWithReusablePreAuthKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
app := createTestApp(t)
|
||||
|
||||
user := app.state.CreateUserForTest("test-user")
|
||||
pak, err := app.state.CreatePreAuthKey(types.UserID(user.ID), true, false, nil, nil) // reusable=true
|
||||
require.NoError(t, err)
|
||||
require.True(t, pak.Reusable)
|
||||
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey := key.NewNode()
|
||||
|
||||
// Initial registration
|
||||
initialReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key,
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "reusable-test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
initialResp, err := app.handleRegister(context.Background(), initialReq, machineKey.Public())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, initialResp)
|
||||
assert.True(t, initialResp.MachineAuthorized)
|
||||
|
||||
// Node restart - re-registration with reusable key
|
||||
restartReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key, // Reusable key
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "reusable-test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
restartResp, err := app.handleRegister(context.Background(), restartReq, machineKey.Public())
|
||||
require.NoError(t, err, "reusable key should allow re-registration")
|
||||
require.NotNil(t, restartResp)
|
||||
assert.True(t, restartResp.MachineAuthorized)
|
||||
assert.False(t, restartResp.NodeKeyExpired)
|
||||
}
|
||||
|
||||
// TestNodeReregistrationWithExpiredPreAuthKey tests that truly expired keys
|
||||
// are still rejected even for existing nodes.
|
||||
func TestNodeReregistrationWithExpiredPreAuthKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
app := createTestApp(t)
|
||||
|
||||
user := app.state.CreateUserForTest("test-user")
|
||||
expiry := time.Now().Add(-1 * time.Hour) // Already expired
|
||||
pak, err := app.state.CreatePreAuthKey(types.UserID(user.ID), true, false, &expiry, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey := key.NewNode()
|
||||
|
||||
// Try to register with expired key
|
||||
req := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key,
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "expired-key-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
_, err = app.handleRegister(context.Background(), req, machineKey.Public())
|
||||
assert.Error(t, err, "expired pre-auth key should be rejected")
|
||||
assert.Contains(t, err.Error(), "authkey expired", "error should mention key expiration")
|
||||
}
|
||||
|
||||
// TestIssue2830_ExistingNodeReregistersWithExpiredKey tests the fix for issue #2830.
|
||||
// When a node is already registered and the pre-auth key expires, the node should
|
||||
// still be able to re-register (e.g., after a container restart) using the same
|
||||
// expired key. The key was only needed for initial authentication.
|
||||
func TestIssue2830_ExistingNodeReregistersWithExpiredKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
app := createTestApp(t)
|
||||
|
||||
user := app.state.CreateUserForTest("test-user")
|
||||
|
||||
// Create a valid key (will expire it later)
|
||||
expiry := time.Now().Add(1 * time.Hour)
|
||||
pak, err := app.state.CreatePreAuthKey(types.UserID(user.ID), false, false, &expiry, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey := key.NewNode()
|
||||
|
||||
// Register the node initially (key is still valid)
|
||||
req := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key,
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "issue2830-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
resp, err := app.handleRegister(context.Background(), req, machineKey.Public())
|
||||
require.NoError(t, err, "initial registration should succeed")
|
||||
require.NotNil(t, resp)
|
||||
require.True(t, resp.MachineAuthorized, "node should be authorized after initial registration")
|
||||
|
||||
// Verify node was created
|
||||
allNodes := app.state.ListNodes()
|
||||
require.Equal(t, 1, allNodes.Len())
|
||||
initialNodeID := allNodes.At(0).ID()
|
||||
|
||||
// Now expire the key by updating it in the database to have an expiry in the past.
|
||||
// This simulates the real-world scenario where a key expires after initial registration.
|
||||
pastExpiry := time.Now().Add(-1 * time.Hour)
|
||||
err = app.state.DB().DB.Model(&types.PreAuthKey{}).
|
||||
Where("id = ?", pak.ID).
|
||||
Update("expiration", pastExpiry).Error
|
||||
require.NoError(t, err, "should be able to update key expiration")
|
||||
|
||||
// Reload the key to verify it's now expired
|
||||
expiredPak, err := app.state.GetPreAuthKey(pak.Key)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, expiredPak.Expiration)
|
||||
require.True(t, expiredPak.Expiration.Before(time.Now()), "key should be expired")
|
||||
|
||||
// Verify the expired key would fail validation
|
||||
err = expiredPak.Validate()
|
||||
require.Error(t, err, "key should fail validation when expired")
|
||||
require.Contains(t, err.Error(), "authkey expired")
|
||||
|
||||
// Attempt to re-register with the SAME key (now expired).
|
||||
// This should SUCCEED because:
|
||||
// - The node already exists with the same MachineKey and User
|
||||
// - The fix allows existing nodes to re-register even with expired keys
|
||||
// - The key was only needed for initial authentication
|
||||
req2 := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: pak.Key, // Same key as initial registration (now expired)
|
||||
},
|
||||
NodeKey: nodeKey.Public(), // Same NodeKey as initial registration
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "issue2830-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
resp2, err := app.handleRegister(context.Background(), req2, machineKey.Public())
|
||||
assert.NoError(t, err, "re-registration should succeed even with expired key for existing node")
|
||||
assert.NotNil(t, resp2)
|
||||
assert.True(t, resp2.MachineAuthorized, "node should remain authorized after re-registration")
|
||||
|
||||
// Verify we still have only one node (re-registered, not created new)
|
||||
allNodes = app.state.ListNodes()
|
||||
require.Equal(t, 1, allNodes.Len(), "should have exactly one node (re-registered)")
|
||||
assert.Equal(t, initialNodeID, allNodes.At(0).ID(), "node ID should not change on re-registration")
|
||||
}
|
||||
|
||||
// TestGitHubIssue2830_ExistingNodeCanReregisterWithUsedPreAuthKey tests that an existing node
|
||||
// can re-register using a pre-auth key that's already marked as Used=true, as long as:
|
||||
// 1. The node is re-registering with the same MachineKey it originally used
|
||||
// 2. The node is using the same pre-auth key it was originally registered with (AuthKeyID matches)
|
||||
//
|
||||
// This is the fix for GitHub issue #2830: https://github.com/juanfont/headscale/issues/2830
|
||||
//
|
||||
// Background: When Docker/Kubernetes containers restart, they keep their persistent state
|
||||
// (including the MachineKey), but container entrypoints unconditionally run:
|
||||
//
|
||||
// tailscale up --authkey=$TS_AUTHKEY
|
||||
//
|
||||
// This caused nodes to be rejected after restart because the pre-auth key was already
|
||||
// marked as Used=true from the initial registration. The fix allows re-registration of
|
||||
// existing nodes with their own used keys.
|
||||
func TestGitHubIssue2830_ExistingNodeCanReregisterWithUsedPreAuthKey(t *testing.T) {
|
||||
app := createTestApp(t)
|
||||
|
||||
// Create a user
|
||||
user := app.state.CreateUserForTest("testuser")
|
||||
|
||||
// Create a SINGLE-USE pre-auth key (reusable=false)
|
||||
// This is the type of key that triggers the bug in issue #2830
|
||||
preAuthKey, err := app.state.CreatePreAuthKey(types.UserID(user.ID), false, false, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.False(t, preAuthKey.Reusable, "Pre-auth key must be single-use to test issue #2830")
|
||||
require.False(t, preAuthKey.Used, "Pre-auth key should not be used yet")
|
||||
|
||||
// Generate node keys for the client
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey := key.NewNode()
|
||||
|
||||
// Step 1: Initial registration with the pre-auth key
|
||||
// This simulates the first time the container starts and runs 'tailscale up --authkey=...'
|
||||
initialReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: preAuthKey.Key,
|
||||
},
|
||||
NodeKey: nodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "issue-2830-test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
initialResp, err := app.handleRegisterWithAuthKey(initialReq, machineKey.Public())
|
||||
require.NoError(t, err, "Initial registration should succeed")
|
||||
require.True(t, initialResp.MachineAuthorized, "Node should be authorized after initial registration")
|
||||
require.NotNil(t, initialResp.User, "User should be set in response")
|
||||
require.Equal(t, "testuser", initialResp.User.DisplayName, "User should match the pre-auth key's user")
|
||||
|
||||
// Verify the pre-auth key is now marked as Used
|
||||
updatedKey, err := app.state.GetPreAuthKey(preAuthKey.Key)
|
||||
require.NoError(t, err)
|
||||
require.True(t, updatedKey.Used, "Pre-auth key should be marked as Used after initial registration")
|
||||
|
||||
// Step 2: Container restart scenario
|
||||
// The container keeps its MachineKey (persistent state), but the entrypoint script
|
||||
// unconditionally runs 'tailscale up --authkey=$TS_AUTHKEY' again
|
||||
//
|
||||
// WITHOUT THE FIX: This would fail with "authkey already used" error
|
||||
// WITH THE FIX: This succeeds because it's the same node re-registering with its own key
|
||||
|
||||
// Simulate sending the same RegisterRequest again (same MachineKey, same AuthKey)
|
||||
// This is exactly what happens when a container restarts
|
||||
reregisterReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: preAuthKey.Key, // Same key, now marked as Used=true
|
||||
},
|
||||
NodeKey: nodeKey.Public(), // Same NodeKey
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "issue-2830-test-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
reregisterResp, err := app.handleRegisterWithAuthKey(reregisterReq, machineKey.Public()) // Same MachineKey
|
||||
require.NoError(t, err, "Re-registration with same MachineKey and used pre-auth key should succeed (fixes #2830)")
|
||||
require.True(t, reregisterResp.MachineAuthorized, "Node should remain authorized after re-registration")
|
||||
require.NotNil(t, reregisterResp.User, "User should be set in re-registration response")
|
||||
require.Equal(t, "testuser", reregisterResp.User.DisplayName, "User should remain the same")
|
||||
|
||||
// Verify that only ONE node was created (not a duplicate)
|
||||
nodes := app.state.ListNodesByUser(types.UserID(user.ID))
|
||||
require.Equal(t, 1, nodes.Len(), "Should have exactly one node (no duplicates created)")
|
||||
require.Equal(t, "issue-2830-test-node", nodes.At(0).Hostname(), "Node hostname should match")
|
||||
|
||||
// Step 3: Verify that a DIFFERENT machine cannot use the same used key
|
||||
// This ensures we didn't break the security model - only the original node can re-register
|
||||
differentMachineKey := key.NewMachine()
|
||||
differentNodeKey := key.NewNode()
|
||||
|
||||
attackReq := tailcfg.RegisterRequest{
|
||||
Auth: &tailcfg.RegisterResponseAuth{
|
||||
AuthKey: preAuthKey.Key, // Try to use the same key
|
||||
},
|
||||
NodeKey: differentNodeKey.Public(),
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "attacker-node",
|
||||
},
|
||||
Expiry: time.Now().Add(24 * time.Hour),
|
||||
}
|
||||
|
||||
_, err = app.handleRegisterWithAuthKey(attackReq, differentMachineKey.Public())
|
||||
require.Error(t, err, "Different machine should NOT be able to use the same used pre-auth key")
|
||||
require.Contains(t, err.Error(), "already used", "Error should indicate key is already used")
|
||||
|
||||
// Verify still only one node (the original one)
|
||||
nodesAfterAttack := app.state.ListNodesByUser(types.UserID(user.ID))
|
||||
require.Equal(t, 1, nodesAfterAttack.Len(), "Should still have exactly one node (attack prevented)")
|
||||
}
|
||||
|
||||
@@ -932,6 +932,61 @@ AND auth_key_id NOT IN (
|
||||
},
|
||||
Rollback: func(db *gorm.DB) error { return nil },
|
||||
},
|
||||
{
|
||||
// Drop all tables that existed in the past but are no longer in use.
|
||||
// They may still be present from broken migrations in the past.
|
||||
ID: "202510311551",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
for _, oldTable := range []string{"namespaces", "machines", "shared_machines", "kvs", "pre_auth_key_acl_tags", "routes"} {
|
||||
err := tx.Migrator().DropTable(oldTable)
|
||||
if err != nil {
|
||||
log.Trace().Str("table", oldTable).
|
||||
Err(err).
|
||||
Msg("Error dropping old table, continuing...")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Rollback: func(tx *gorm.DB) error {
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
// Drop all indices that existed in the past but are no longer in use.
|
||||
// They may still be present from broken migrations in the past.
|
||||
// They should all be cleaned up by the db engine, but we are a bit
|
||||
// conservative to ensure all our previous mess is cleaned up.
|
||||
ID: "202511101554-drop-old-idx",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
for _, oldIdx := range []struct{ name, table string }{
|
||||
{"idx_namespaces_deleted_at", "namespaces"},
|
||||
{"idx_routes_deleted_at", "routes"},
|
||||
{"idx_shared_machines_deleted_at", "shared_machines"},
|
||||
} {
|
||||
err := tx.Migrator().DropIndex(oldIdx.table, oldIdx.name)
|
||||
if err != nil {
|
||||
log.Trace().
|
||||
Str("index", oldIdx.name).
|
||||
Str("table", oldIdx.table).
|
||||
Err(err).
|
||||
Msg("Error dropping old index, continuing...")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Rollback: func(tx *gorm.DB) error {
|
||||
return nil
|
||||
},
|
||||
},
|
||||
|
||||
// Migrations **above** this point will be REMOVED in version **0.29.0**
|
||||
// This is to clean up a lot of old migrations that are seldom used
|
||||
// and carry a lot of technical debt.
|
||||
// Any new migrations should be added after the comment below and follow
|
||||
// the rules it sets out.
|
||||
|
||||
// From this point, the following rules must be followed:
|
||||
// - NEVER use gorm.AutoMigrate, write the exact migration steps needed
|
||||
// - AutoMigrate depends on the struct staying exactly the same, which it won't over time.
|
||||
@@ -962,7 +1017,17 @@ AND auth_key_id NOT IN (
|
||||
ctx, cancel := context.WithTimeout(context.Background(), contextTimeoutSecs*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := squibble.Validate(ctx, sqlConn, dbSchema); err != nil {
|
||||
opts := squibble.DigestOptions{
|
||||
IgnoreTables: []string{
|
||||
// Litestream tables, these are inserted by
|
||||
// litestream and not part of our schema
|
||||
// https://litestream.io/how-it-works
|
||||
"_litestream_lock",
|
||||
"_litestream_seq",
|
||||
},
|
||||
}
|
||||
|
||||
if err := squibble.Validate(ctx, sqlConn, dbSchema, &opts); err != nil {
|
||||
return nil, fmt.Errorf("validating schema: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -325,7 +325,11 @@ func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) {
|
||||
}
|
||||
|
||||
if changed {
|
||||
err := tx.Save(node).Error
|
||||
// Use Updates() with Select() to only update IP fields, avoiding overwriting
|
||||
// other fields like Expiry. We need Select() because Updates() alone skips
|
||||
// zero values, but we DO want to update IPv4/IPv6 to nil when removing them.
|
||||
// See issue #2862.
|
||||
err := tx.Model(node).Select("ipv4", "ipv6").Updates(node).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("saving node(%d) after adding IPs: %w", node.ID, err)
|
||||
}
|
||||
|
||||
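The comment above explains why BackfillNodeIPs switches from Save() to Updates() with Select(). A small self-contained sketch (assuming a simplified Node struct and an in-memory SQLite database, not headscale's real model) illustrating that reasoning:

```go
package main

import (
	"fmt"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

// Node is a stand-in for headscale's node model, reduced to the IP columns.
type Node struct {
	ID   uint
	IPv4 *string `gorm:"column:ipv4"`
	IPv6 *string `gorm:"column:ipv6"`
}

func main() {
	db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&Node{}); err != nil {
		panic(err)
	}

	v4, v6 := "100.64.0.1", "fd7a:115c:a1e0::1"
	node := Node{IPv4: &v4, IPv6: &v6}
	db.Create(&node)

	// Simulate removing the IPv6 address during backfill.
	node.IPv6 = nil

	// db.Model(&node).Updates(node) would skip the nil IPv6 and leave the old
	// value in place; selecting the columns forces them to be written, even as NULL.
	db.Model(&node).Select("ipv4", "ipv6").Updates(node)

	var got Node
	db.First(&got, node.ID)
	fmt.Println("ipv6 cleared:", got.IPv6 == nil) // true
}
```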
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
@@ -27,9 +28,7 @@ const (
|
||||
NodeGivenNameTrimSize = 2
|
||||
)
|
||||
|
||||
var (
|
||||
invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+")
|
||||
)
|
||||
var invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+")
|
||||
|
||||
var (
|
||||
ErrNodeNotFound = errors.New("node not found")
|
||||
@@ -234,6 +233,17 @@ func SetApprovedRoutes(
|
||||
return nil
|
||||
}
|
||||
|
||||
// When approving exit routes, ensure both IPv4 and IPv6 are included
|
||||
// If either 0.0.0.0/0 or ::/0 is being approved, both should be approved
|
||||
hasIPv4Exit := slices.Contains(routes, tsaddr.AllIPv4())
|
||||
hasIPv6Exit := slices.Contains(routes, tsaddr.AllIPv6())
|
||||
|
||||
if hasIPv4Exit && !hasIPv6Exit {
|
||||
routes = append(routes, tsaddr.AllIPv6())
|
||||
} else if hasIPv6Exit && !hasIPv4Exit {
|
||||
routes = append(routes, tsaddr.AllIPv4())
|
||||
}
|
||||
|
||||
b, err := json.Marshal(routes)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -442,13 +452,6 @@ func NodeSetMachineKey(
|
||||
}).Error
|
||||
}
|
||||
|
||||
// NodeSave saves a node object to the database, prefer to use a specific save method rather
|
||||
// than this. It is intended to be used when we are changing or.
|
||||
// TODO(kradalby): Remove this func, just use Save.
|
||||
func NodeSave(tx *gorm.DB, node *types.Node) error {
|
||||
return tx.Save(node).Error
|
||||
}
|
||||
|
||||
func generateGivenName(suppliedName string, randomSuffix bool) (string, error) {
|
||||
// Strip invalid DNS characters for givenName
|
||||
suppliedName = strings.ToLower(suppliedName)
|
||||
|
||||
@@ -476,7 +476,7 @@ func TestAutoApproveRoutes(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
newRoutes2, changed2 := policy.ApproveRoutesWithPolicy(pm, nodeTagged.View(), node.ApprovedRoutes, tt.routes)
|
||||
newRoutes2, changed2 := policy.ApproveRoutesWithPolicy(pm, nodeTagged.View(), nodeTagged.ApprovedRoutes, tt.routes)
|
||||
if changed2 {
|
||||
err = SetApprovedRoutes(adb.DB, nodeTagged.ID, newRoutes2)
|
||||
require.NoError(t, err)
|
||||
@@ -490,7 +490,7 @@ func TestAutoApproveRoutes(t *testing.T) {
|
||||
if len(expectedRoutes1) == 0 {
|
||||
expectedRoutes1 = nil
|
||||
}
|
||||
if diff := cmp.Diff(expectedRoutes1, node1ByID.SubnetRoutes(), util.Comparers...); diff != "" {
|
||||
if diff := cmp.Diff(expectedRoutes1, node1ByID.AllApprovedRoutes(), util.Comparers...); diff != "" {
|
||||
t.Errorf("unexpected enabled routes (-want +got):\n%s", diff)
|
||||
}
|
||||
|
||||
@@ -501,7 +501,7 @@ func TestAutoApproveRoutes(t *testing.T) {
|
||||
if len(expectedRoutes2) == 0 {
|
||||
expectedRoutes2 = nil
|
||||
}
|
||||
if diff := cmp.Diff(expectedRoutes2, node2ByID.SubnetRoutes(), util.Comparers...); diff != "" {
|
||||
if diff := cmp.Diff(expectedRoutes2, node2ByID.AllApprovedRoutes(), util.Comparers...); diff != "" {
|
||||
t.Errorf("unexpected enabled routes (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -126,9 +126,18 @@ func GetPreAuthKey(tx *gorm.DB, key string) (*types.PreAuthKey, error) {
|
||||
}
|
||||
|
||||
// DestroyPreAuthKey destroys a preauthkey. Returns error if the PreAuthKey
|
||||
// does not exist.
|
||||
// does not exist. This also clears the auth_key_id on any nodes that reference
|
||||
// this key.
|
||||
func DestroyPreAuthKey(tx *gorm.DB, pak types.PreAuthKey) error {
|
||||
return tx.Transaction(func(db *gorm.DB) error {
|
||||
// First, clear the foreign key reference on any nodes using this key
|
||||
if err := db.Model(&types.Node{}).
|
||||
Where("auth_key_id = ?", pak.ID).
|
||||
Update("auth_key_id", nil).Error; err != nil {
|
||||
return fmt.Errorf("failed to clear auth_key_id on nodes: %w", err)
|
||||
}
|
||||
|
||||
// Then delete the pre-auth key
|
||||
if result := db.Unscoped().Delete(pak); result.Error != nil {
|
||||
return result.Error
|
||||
}
|
||||
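The shape of the fix is a common one: detach the referencing rows, then delete the parent, inside a single transaction so neither step can succeed without the other. A minimal sketch with stand-in `Key`/`Node` types (not the real headscale models):

```go
// Minimal sketch: clear the nullable foreign key on referencing rows, then
// delete the key, all in one transaction. Key and Node are stand-ins.
package main

import (
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

type Key struct{ ID uint }

type Node struct {
	ID        uint
	AuthKeyID *uint // nullable reference to Key.ID
}

func destroyKey(db *gorm.DB, keyID uint) error {
	return db.Transaction(func(tx *gorm.DB) error {
		// Clear the references first so the delete cannot leave them dangling.
		if err := tx.Model(&Node{}).
			Where("auth_key_id = ?", keyID).
			Update("auth_key_id", nil).Error; err != nil {
			return err
		}

		return tx.Delete(&Key{}, keyID).Error
	})
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&Key{}, &Node{}); err != nil {
		panic(err)
	}

	k := Key{}
	db.Create(&k)
	db.Create(&Node{AuthKeyID: &k.ID})

	if err := destroyKey(db, k.ID); err != nil {
		panic(err)
	}
}
```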
@@ -143,13 +152,20 @@ func (hsdb *HSDatabase) ExpirePreAuthKey(k *types.PreAuthKey) error {
|
||||
})
|
||||
}
|
||||
|
||||
func (hsdb *HSDatabase) DeletePreAuthKey(k *types.PreAuthKey) error {
|
||||
return hsdb.Write(func(tx *gorm.DB) error {
|
||||
return DestroyPreAuthKey(tx, *k)
|
||||
})
|
||||
}
|
||||
|
||||
// UsePreAuthKey marks a PreAuthKey as used.
|
||||
func UsePreAuthKey(tx *gorm.DB, k *types.PreAuthKey) error {
|
||||
k.Used = true
|
||||
if err := tx.Save(k).Error; err != nil {
|
||||
err := tx.Model(k).Update("used", true).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update key used status in the database: %w", err)
|
||||
}
|
||||
|
||||
k.Used = true
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
hscontrol/db/testdata/sqlite/headscale_0.26.1_dump_schema-to-0.27.0-old-table-cleanup.sql (new file, 45 lines, vendored)
@@ -0,0 +1,45 @@
|
||||
PRAGMA foreign_keys=OFF;
|
||||
BEGIN TRANSACTION;
|
||||
CREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));
|
||||
INSERT INTO migrations VALUES('202312101416');
|
||||
INSERT INTO migrations VALUES('202312101430');
|
||||
INSERT INTO migrations VALUES('202402151347');
|
||||
INSERT INTO migrations VALUES('2024041121742');
|
||||
INSERT INTO migrations VALUES('202406021630');
|
||||
INSERT INTO migrations VALUES('202409271400');
|
||||
INSERT INTO migrations VALUES('202407191627');
|
||||
INSERT INTO migrations VALUES('202408181235');
|
||||
INSERT INTO migrations VALUES('202501221827');
|
||||
INSERT INTO migrations VALUES('202501311657');
|
||||
INSERT INTO migrations VALUES('202502070949');
|
||||
INSERT INTO migrations VALUES('202502131714');
|
||||
INSERT INTO migrations VALUES('202502171819');
|
||||
INSERT INTO migrations VALUES('202505091439');
|
||||
INSERT INTO migrations VALUES('202505141324');
|
||||
CREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);
|
||||
CREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);
|
||||
CREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);
|
||||
CREATE TABLE IF NOT EXISTS "nodes" (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`last_seen` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));
|
||||
CREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);
|
||||
DELETE FROM sqlite_sequence;
|
||||
INSERT INTO sqlite_sequence VALUES('nodes',0);
|
||||
CREATE INDEX `idx_users_deleted_at` ON `users`(`deleted_at`);
|
||||
CREATE UNIQUE INDEX `idx_api_keys_prefix` ON `api_keys`(`prefix`);
|
||||
CREATE INDEX `idx_policies_deleted_at` ON `policies`(`deleted_at`);
|
||||
CREATE UNIQUE INDEX idx_provider_identifier ON users (provider_identifier) WHERE provider_identifier IS NOT NULL;
|
||||
CREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identifier);
|
||||
CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;
|
||||
|
||||
-- Create all the old tables we have had and ensure they are cleaned up.
|
||||
CREATE TABLE `namespaces` (`id` text,`deleted_at` datetime,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `machines` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `kvs` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `shared_machines` (`id` text,`deleted_at` datetime,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `pre_auth_key_acl_tags` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `routes` (`id` text,`deleted_at` datetime,PRIMARY KEY (`id`));
|
||||
|
||||
CREATE INDEX `idx_routes_deleted_at` ON `routes`(`deleted_at`);
|
||||
CREATE INDEX `idx_namespaces_deleted_at` ON `namespaces`(`deleted_at`);
|
||||
CREATE INDEX `idx_shared_machines_deleted_at` ON `shared_machines`(`deleted_at`);
|
||||
|
||||
COMMIT;
|
||||
hscontrol/db/testdata/sqlite/headscale_0.26.1_schema-litestream.sql (new file, 14 lines, vendored)
@@ -0,0 +1,14 @@
|
||||
CREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);
|
||||
CREATE INDEX `idx_users_deleted_at` ON `users`(`deleted_at`);
|
||||
CREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);
|
||||
CREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);
|
||||
CREATE UNIQUE INDEX `idx_api_keys_prefix` ON `api_keys`(`prefix`);
|
||||
CREATE TABLE IF NOT EXISTS "nodes" (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`last_seen` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));
|
||||
CREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);
|
||||
CREATE INDEX `idx_policies_deleted_at` ON `policies`(`deleted_at`);
|
||||
CREATE UNIQUE INDEX idx_provider_identifier ON users (provider_identifier) WHERE provider_identifier IS NOT NULL;
|
||||
CREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identifier);
|
||||
CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;
|
||||
CREATE TABLE _litestream_seq (id INTEGER PRIMARY KEY, seq INTEGER);
|
||||
CREATE TABLE _litestream_lock (id INTEGER);
|
||||
hscontrol/db/user_update_test.go (new file, 134 lines)
@@ -0,0 +1,134 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"testing"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// TestUserUpdatePreservesUnchangedFields verifies that updating a user
|
||||
// preserves fields that aren't modified. This test validates the fix
|
||||
// for using Updates() instead of Save() in UpdateUser-like operations.
|
||||
func TestUserUpdatePreservesUnchangedFields(t *testing.T) {
|
||||
database := dbForTest(t)
|
||||
|
||||
// Create a user with all fields set
|
||||
initialUser := types.User{
|
||||
Name: "testuser",
|
||||
DisplayName: "Test User Display",
|
||||
Email: "test@example.com",
|
||||
ProviderIdentifier: sql.NullString{
|
||||
String: "provider-123",
|
||||
Valid: true,
|
||||
},
|
||||
}
|
||||
|
||||
createdUser, err := database.CreateUser(initialUser)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, createdUser)
|
||||
|
||||
// Verify initial state
|
||||
assert.Equal(t, "testuser", createdUser.Name)
|
||||
assert.Equal(t, "Test User Display", createdUser.DisplayName)
|
||||
assert.Equal(t, "test@example.com", createdUser.Email)
|
||||
assert.True(t, createdUser.ProviderIdentifier.Valid)
|
||||
assert.Equal(t, "provider-123", createdUser.ProviderIdentifier.String)
|
||||
|
||||
// Simulate what UpdateUser does: load user, modify one field, save
|
||||
_, err = Write(database.DB, func(tx *gorm.DB) (*types.User, error) {
|
||||
user, err := GetUserByID(tx, types.UserID(createdUser.ID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Modify ONLY DisplayName
|
||||
user.DisplayName = "Updated Display Name"
|
||||
|
||||
// This is the line being tested - currently uses Save() which writes ALL fields, potentially overwriting unchanged ones
|
||||
err = tx.Save(user).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return user, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Read user back from database
|
||||
updatedUser, err := Read(database.DB, func(rx *gorm.DB) (*types.User, error) {
|
||||
return GetUserByID(rx, types.UserID(createdUser.ID))
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify that DisplayName was updated
|
||||
assert.Equal(t, "Updated Display Name", updatedUser.DisplayName)
|
||||
|
||||
// CRITICAL: Verify that other fields were NOT overwritten
|
||||
// With Save(), these assertions should pass because the user object
|
||||
// was loaded from DB and has all fields populated.
|
||||
// But if Updates() is used, these will also pass (and it's safer).
|
||||
assert.Equal(t, "testuser", updatedUser.Name, "Name should be preserved")
|
||||
assert.Equal(t, "test@example.com", updatedUser.Email, "Email should be preserved")
|
||||
assert.True(t, updatedUser.ProviderIdentifier.Valid, "ProviderIdentifier should be preserved")
|
||||
assert.Equal(t, "provider-123", updatedUser.ProviderIdentifier.String, "ProviderIdentifier value should be preserved")
|
||||
}
|
||||
|
||||
// TestUserUpdateWithUpdatesMethod tests that using Updates() instead of Save()
|
||||
// works correctly and only updates modified fields.
|
||||
func TestUserUpdateWithUpdatesMethod(t *testing.T) {
|
||||
database := dbForTest(t)
|
||||
|
||||
// Create a user
|
||||
initialUser := types.User{
|
||||
Name: "testuser",
|
||||
DisplayName: "Original Display",
|
||||
Email: "original@example.com",
|
||||
ProviderIdentifier: sql.NullString{
|
||||
String: "provider-abc",
|
||||
Valid: true,
|
||||
},
|
||||
}
|
||||
|
||||
createdUser, err := database.CreateUser(initialUser)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Update using Updates() method
|
||||
_, err = Write(database.DB, func(tx *gorm.DB) (*types.User, error) {
|
||||
user, err := GetUserByID(tx, types.UserID(createdUser.ID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Modify multiple fields
|
||||
user.DisplayName = "New Display"
|
||||
user.Email = "new@example.com"
|
||||
|
||||
// Use Updates() instead of Save()
|
||||
err = tx.Updates(user).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return user, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify changes
|
||||
updatedUser, err := Read(database.DB, func(rx *gorm.DB) (*types.User, error) {
|
||||
return GetUserByID(rx, types.UserID(createdUser.ID))
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify updated fields
|
||||
assert.Equal(t, "New Display", updatedUser.DisplayName)
|
||||
assert.Equal(t, "new@example.com", updatedUser.Email)
|
||||
|
||||
// Verify preserved fields
|
||||
assert.Equal(t, "testuser", updatedUser.Name)
|
||||
assert.True(t, updatedUser.ProviderIdentifier.Valid)
|
||||
assert.Equal(t, "provider-abc", updatedUser.ProviderIdentifier.String)
|
||||
}
|
||||
@@ -102,7 +102,8 @@ func RenameUser(tx *gorm.DB, uid types.UserID, newName string) error {
|
||||
|
||||
oldUser.Name = newName
|
||||
|
||||
if err := tx.Save(&oldUser).Error; err != nil {
|
||||
err = tx.Updates(&oldUser).Error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -126,7 +127,17 @@ func shuffleDERPMap(dm *tailcfg.DERPMap) {
|
||||
return
|
||||
}
|
||||
|
||||
for id, region := range dm.Regions {
|
||||
// Collect region IDs and sort them to ensure deterministic iteration order.
|
||||
// Map iteration order is non-deterministic in Go, which would cause the
|
||||
// shuffle to be non-deterministic even with a fixed seed.
|
||||
ids := make([]int, 0, len(dm.Regions))
|
||||
for id := range dm.Regions {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
slices.Sort(ids)
|
||||
|
||||
for _, id := range ids {
|
||||
region := dm.Regions[id]
|
||||
if len(region.Nodes) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
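The comment above is the whole story: Go randomises map iteration order, so a seeded per-region shuffle only becomes reproducible once the keys are walked in sorted order. A tiny sketch of the pattern:

```go
// Go randomises map iteration order, so anything that must be reproducible
// has to iterate over sorted keys instead of ranging over the map directly.
package main

import (
	"fmt"
	"slices"
)

func main() {
	regions := map[int]string{10: "sea", 2: "sfo", 4: "fra"}

	ids := make([]int, 0, len(regions))
	for id := range regions {
		ids = append(ids, id)
	}
	slices.Sort(ids)

	for _, id := range ids {
		fmt.Println(id, regions[id]) // always 2, 4, 10, stable across runs
	}
}
```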
@@ -83,9 +83,9 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
|
||||
RegionCode: "sea",
|
||||
RegionName: "Seattle",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "10b", RegionID: 10, HostName: "derp10b.tailscale.com"},
|
||||
{Name: "10c", RegionID: 10, HostName: "derp10c.tailscale.com"},
|
||||
{Name: "10d", RegionID: 10, HostName: "derp10d.tailscale.com"},
|
||||
{Name: "10c", RegionID: 10, HostName: "derp10c.tailscale.com"},
|
||||
{Name: "10b", RegionID: 10, HostName: "derp10b.tailscale.com"},
|
||||
},
|
||||
},
|
||||
2: {
|
||||
@@ -93,9 +93,9 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
|
||||
RegionCode: "sfo",
|
||||
RegionName: "San Francisco",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "2f", RegionID: 2, HostName: "derp2f.tailscale.com"},
|
||||
{Name: "2e", RegionID: 2, HostName: "derp2e.tailscale.com"},
|
||||
{Name: "2d", RegionID: 2, HostName: "derp2d.tailscale.com"},
|
||||
{Name: "2e", RegionID: 2, HostName: "derp2e.tailscale.com"},
|
||||
{Name: "2f", RegionID: 2, HostName: "derp2f.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -169,6 +169,74 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "same dataset with another base domain",
|
||||
baseDomain: "another.example.com",
|
||||
derpMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
4: {
|
||||
RegionID: 4,
|
||||
RegionCode: "fra",
|
||||
RegionName: "Frankfurt",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
|
||||
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
|
||||
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
|
||||
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
4: {
|
||||
RegionID: 4,
|
||||
RegionCode: "fra",
|
||||
RegionName: "Frankfurt",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
|
||||
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
|
||||
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
|
||||
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "same dataset with yet another base domain",
|
||||
baseDomain: "yetanother.example.com",
|
||||
derpMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
4: {
|
||||
RegionID: 4,
|
||||
RegionCode: "fra",
|
||||
RegionName: "Frankfurt",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
|
||||
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
|
||||
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
|
||||
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
4: {
|
||||
RegionID: 4,
|
||||
RegionCode: "fra",
|
||||
RegionName: "Frankfurt",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
|
||||
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
|
||||
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
|
||||
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
||||
@@ -206,6 +206,27 @@ func (api headscaleV1APIServer) ExpirePreAuthKey(
|
||||
return &v1.ExpirePreAuthKeyResponse{}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) DeletePreAuthKey(
|
||||
ctx context.Context,
|
||||
request *v1.DeletePreAuthKeyRequest,
|
||||
) (*v1.DeletePreAuthKeyResponse, error) {
|
||||
preAuthKey, err := api.h.state.GetPreAuthKey(request.Key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if uint64(preAuthKey.User.ID) != request.GetUser() {
|
||||
return nil, fmt.Errorf("preauth key does not belong to user")
|
||||
}
|
||||
|
||||
err = api.h.state.DeletePreAuthKey(preAuthKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &v1.DeletePreAuthKeyResponse{}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) ListPreAuthKeys(
|
||||
ctx context.Context,
|
||||
request *v1.ListPreAuthKeysRequest,
|
||||
@@ -273,13 +294,13 @@ func (api headscaleV1APIServer) RegisterNode(
|
||||
// ensure we send an update.
|
||||
// This works, but might be another good candidate for doing some sort of
|
||||
// eventbus.
|
||||
_ = api.h.state.AutoApproveRoutes(node)
|
||||
_, _, err = api.h.state.SaveNode(node)
|
||||
routeChange, err := api.h.state.AutoApproveRoutes(node)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("saving auto approved routes to node: %w", err)
|
||||
return nil, fmt.Errorf("auto approving routes: %w", err)
|
||||
}
|
||||
|
||||
api.h.Change(nodeChange)
|
||||
// Send both changes. Empty changes are ignored by Change().
|
||||
api.h.Change(nodeChange, routeChange)
|
||||
|
||||
return &v1.RegisterNodeResponse{Node: node.Proto()}, nil
|
||||
}
|
||||
@@ -416,9 +437,12 @@ func (api headscaleV1APIServer) ExpireNode(
|
||||
ctx context.Context,
|
||||
request *v1.ExpireNodeRequest,
|
||||
) (*v1.ExpireNodeResponse, error) {
|
||||
now := time.Now()
|
||||
expiry := time.Now()
|
||||
if request.GetExpiry() != nil {
|
||||
expiry = request.GetExpiry().AsTime()
|
||||
}
|
||||
|
||||
node, nodeChange, err := api.h.state.SetNodeExpiry(types.NodeID(request.GetNodeId()), now)
|
||||
node, nodeChange, err := api.h.state.SetNodeExpiry(types.NodeID(request.GetNodeId()), expiry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
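With this change, `ExpireNode` honours an expiry supplied in the request (which `TestSetNodeExpiryInFuture` exercises) and only falls back to `time.Now()` when none is given. A hedged sketch of calling the RPC with a future expiry; the request fields come from the hunk above, while the connection setup (local unix socket path, no TLS) is illustrative only and should match your own config:

```go
// Hedged sketch of calling the updated ExpireNode RPC with a future expiry.
package main

import (
	"context"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	conn, err := grpc.NewClient("unix:///var/run/headscale/headscale.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := v1.NewHeadscaleServiceClient(conn)

	// Expire node 1 a day from now; leaving Expiry unset keeps the old
	// behaviour of expiring immediately.
	_, err = client.ExpireNode(context.Background(), &v1.ExpireNodeRequest{
		NodeId: 1,
		Expiry: timestamppb.New(time.Now().Add(24 * time.Hour)),
	})
	if err != nil {
		panic(err)
	}
}
```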
@@ -201,6 +201,24 @@ func (h *Headscale) RobotsHandler(
|
||||
}
|
||||
}
|
||||
|
||||
// VersionHandler returns version information about the Headscale server
|
||||
// Listens on /version.
|
||||
func (h *Headscale) VersionHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
writer.Header().Set("Content-Type", "application/json")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
|
||||
versionInfo := types.GetVersionInfo()
|
||||
if err := json.NewEncoder(writer).Encode(versionInfo); err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write version response")
|
||||
}
|
||||
}
|
||||
|
||||
var codeStyleRegisterWebAPI = styles.Props{
|
||||
styles.Display: "block",
|
||||
styles.Padding: "20px",
|
||||
|
||||
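The new handler simply JSON-encodes `types.GetVersionInfo()` on `/version`. A small client sketch that decodes into a generic map rather than guessing the exact fields; the URL is a placeholder:

```go
// Small client sketch for the new /version endpoint.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Get("https://headscale.example.com/version")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var info map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
		panic(err)
	}
	fmt.Println(info)
}
```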
@@ -108,11 +108,12 @@ func TestTailNode(t *testing.T) {
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{
|
||||
tsaddr.AllIPv4(),
|
||||
tsaddr.AllIPv6(),
|
||||
netip.MustParsePrefix("192.168.0.0/24"),
|
||||
netip.MustParsePrefix("172.0.0.0/10"),
|
||||
},
|
||||
},
|
||||
ApprovedRoutes: []netip.Prefix{tsaddr.AllIPv4(), netip.MustParsePrefix("192.168.0.0/24")},
|
||||
ApprovedRoutes: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6(), netip.MustParsePrefix("192.168.0.0/24")},
|
||||
CreatedAt: created,
|
||||
},
|
||||
dnsConfig: &tailcfg.DNSConfig{},
|
||||
@@ -150,6 +151,7 @@ func TestTailNode(t *testing.T) {
|
||||
Hostinfo: hiview(tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{
|
||||
tsaddr.AllIPv4(),
|
||||
tsaddr.AllIPv6(),
|
||||
netip.MustParsePrefix("192.168.0.0/24"),
|
||||
netip.MustParsePrefix("172.0.0.0/10"),
|
||||
},
|
||||
|
||||
@@ -42,10 +42,6 @@ var (
|
||||
errOIDCAllowedUsers = errors.New(
|
||||
"authenticated principal does not match any allowed user",
|
||||
)
|
||||
errOIDCInvalidNodeState = errors.New(
|
||||
"requested node state key expired before authorisation completed",
|
||||
)
|
||||
errOIDCNodeKeyMissing = errors.New("could not get node key from cache")
|
||||
)
|
||||
|
||||
// RegistrationInfo contains both machine key and verifier information for OIDC validation.
|
||||
@@ -108,16 +104,8 @@ func (a *AuthProviderOIDC) AuthURL(registrationID types.RegistrationID) string {
|
||||
registrationID.String())
|
||||
}
|
||||
|
||||
func (a *AuthProviderOIDC) determineNodeExpiry(idTokenExpiration time.Time) time.Time {
|
||||
if a.cfg.UseExpiryFromToken {
|
||||
return idTokenExpiration
|
||||
}
|
||||
|
||||
return time.Now().Add(a.cfg.Expiry)
|
||||
}
|
||||
|
||||
// RegisterOIDC redirects to the OIDC provider for authentication
|
||||
// Puts NodeKey in cache so the callback can retrieve it using the oidc state param
|
||||
// RegisterHandler registers the OIDC callback handler with the given router.
|
||||
// It puts NodeKey in cache so the callback can retrieve it using the oidc state param.
|
||||
// Listens on /register/:registration_id.
|
||||
func (a *AuthProviderOIDC) RegisterHandler(
|
||||
writer http.ResponseWriter,
|
||||
@@ -213,7 +201,8 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
return
|
||||
}
|
||||
|
||||
cookieState, err := req.Cookie("state")
|
||||
stateCookieName := getCookieName("state", state)
|
||||
cookieState, err := req.Cookie(stateCookieName)
|
||||
if err != nil {
|
||||
httpError(writer, NewHTTPError(http.StatusBadRequest, "state not found", err))
|
||||
return
|
||||
@@ -235,8 +224,13 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
httpError(writer, err)
|
||||
return
|
||||
}
|
||||
if idToken.Nonce == "" {
|
||||
httpError(writer, NewHTTPError(http.StatusBadRequest, "nonce not found in IDToken", err))
|
||||
return
|
||||
}
|
||||
|
||||
nonce, err := req.Cookie("nonce")
|
||||
nonceCookieName := getCookieName("nonce", idToken.Nonce)
|
||||
nonce, err := req.Cookie(nonceCookieName)
|
||||
if err != nil {
|
||||
httpError(writer, NewHTTPError(http.StatusBadRequest, "nonce not found", err))
|
||||
return
|
||||
@@ -298,7 +292,7 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
return
|
||||
}
|
||||
|
||||
user, c, err := a.createOrUpdateUserFromClaim(&claims)
|
||||
user, _, err := a.createOrUpdateUserFromClaim(&claims)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
@@ -317,9 +311,6 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
return
|
||||
}
|
||||
|
||||
// Send policy update notifications if needed
|
||||
a.h.Change(c)
|
||||
|
||||
// TODO(kradalby): Is this comment right?
|
||||
// If the node exists, then the node should be reauthenticated,
|
||||
// if the node does not exist, and the machine key exists, then
|
||||
@@ -366,6 +357,14 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
httpError(writer, NewHTTPError(http.StatusGone, "login session expired, try again", nil))
|
||||
}
|
||||
|
||||
func (a *AuthProviderOIDC) determineNodeExpiry(idTokenExpiration time.Time) time.Time {
|
||||
if a.cfg.UseExpiryFromToken {
|
||||
return idTokenExpiration
|
||||
}
|
||||
|
||||
return time.Now().Add(a.cfg.Expiry)
|
||||
}
|
||||
|
||||
func extractCodeAndStateParamFromRequest(
|
||||
req *http.Request,
|
||||
) (string, string, error) {
|
||||
@@ -498,8 +497,8 @@ func (a *AuthProviderOIDC) createOrUpdateUserFromClaim(
|
||||
}
|
||||
|
||||
// if the user is still not found, create a new empty user.
|
||||
// TODO(kradalby): This might cause us to not have an ID below which
|
||||
// is a problem.
|
||||
// TODO(kradalby): This context is not inherited from the request, which is probably not ideal.
|
||||
// However, we need a context to use the OIDC provider.
|
||||
if user == nil {
|
||||
newUser = true
|
||||
user = &types.User{}
|
||||
@@ -551,18 +550,13 @@ func (a *AuthProviderOIDC) handleRegistration(
|
||||
// ensure we send an update.
|
||||
// This works, but might be another good candidate for doing some sort of
|
||||
// eventbus.
|
||||
_ = a.h.state.AutoApproveRoutes(node)
|
||||
_, policyChange, err := a.h.state.SaveNode(node)
|
||||
routesChange, err := a.h.state.AutoApproveRoutes(node)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("saving auto approved routes to node: %w", err)
|
||||
return false, fmt.Errorf("auto approving routes: %w", err)
|
||||
}
|
||||
|
||||
// Policy updates are full and take precedence over node changes.
|
||||
if !policyChange.Empty() {
|
||||
a.h.Change(policyChange)
|
||||
} else {
|
||||
a.h.Change(nodeChange)
|
||||
}
|
||||
// Send both changes. Empty changes are ignored by Change().
|
||||
a.h.Change(nodeChange, routesChange)
|
||||
|
||||
return !nodeChange.Empty(), nil
|
||||
}
|
||||
@@ -584,6 +578,11 @@ func renderOIDCCallbackTemplate(
|
||||
return &content, nil
|
||||
}
|
||||
|
||||
// getCookieName generates a unique cookie name based on a cookie value.
|
||||
func getCookieName(baseName, value string) string {
|
||||
return fmt.Sprintf("%s_%s", baseName, value[:6])
|
||||
}
|
||||
|
||||
func setCSRFCookie(w http.ResponseWriter, r *http.Request, name string) (string, error) {
|
||||
val, err := util.GenerateRandomStringURLSafe(64)
|
||||
if err != nil {
|
||||
@@ -592,7 +591,7 @@ func setCSRFCookie(w http.ResponseWriter, r *http.Request, name string) (string,
|
||||
|
||||
c := &http.Cookie{
|
||||
Path: "/oidc/callback",
|
||||
Name: name,
|
||||
Name: getCookieName(name, val),
|
||||
Value: val,
|
||||
MaxAge: int(time.Hour.Seconds()),
|
||||
Secure: r.TLS != nil,
|
||||
|
||||
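Suffixing the state and nonce cookies with a prefix of their value gives each login attempt its own cookie, which is what lets several concurrently opened login URLs complete (see `TestOIDCMultipleOpenedLoginUrls` in the test matrix). A short sketch of the naming scheme and its lookup on the callback side; the sample value is made up:

```go
// Each login attempt gets its own "state_<prefix>"/"nonce_<prefix>" cookie,
// and the callback rebuilds the name from the value echoed back by the
// provider, so parallel attempts no longer clobber each other.
package main

import (
	"fmt"
	"net/http"
)

func getCookieName(baseName, value string) string {
	return fmt.Sprintf("%s_%s", baseName, value[:6])
}

func main() {
	state := "Zx9qLwSomeLongRandomState" // generated when the login URL was built
	fmt.Println(getCookieName("state", state)) // state_Zx9qLw

	// On the callback, the state query parameter selects the matching cookie.
	req, _ := http.NewRequest(http.MethodGet, "/oidc/callback?state="+state, nil)
	req.AddCookie(&http.Cookie{Name: getCookieName("state", state), Value: state})

	if c, err := req.Cookie(getCookieName("state", state)); err == nil {
		fmt.Println("matched cookie:", c.Name)
	}
}
```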
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"go4.org/netipx"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
@@ -91,3 +92,12 @@ func (m *Match) SrcsOverlapsPrefixes(prefixes ...netip.Prefix) bool {
|
||||
func (m *Match) DestsOverlapsPrefixes(prefixes ...netip.Prefix) bool {
|
||||
return slices.ContainsFunc(prefixes, m.dests.OverlapsPrefix)
|
||||
}
|
||||
|
||||
// DestsIsTheInternet reports if the destination is equal to "the internet"
|
||||
// which is an IPSet that represents "autogroup:internet" and is special
|
||||
// cased for exit nodes.
|
||||
func (m Match) DestsIsTheInternet() bool {
|
||||
return m.dests.Equal(util.TheInternet()) ||
|
||||
m.dests.ContainsPrefix(tsaddr.AllIPv4()) ||
|
||||
m.dests.ContainsPrefix(tsaddr.AllIPv6())
|
||||
}
|
||||
|
||||
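The extra `ContainsPrefix` checks matter because a policy written with a literal `0.0.0.0/0:*` destination produces a set that is not `Equal()` to the curated autogroup:internet set, yet clearly targets an exit node. A small sketch of that distinction using the same `netipx`/`tsaddr` APIs:

```go
// A destination set built from a literal 0.0.0.0/0 contains the all-IPv4
// prefix even though it is not Equal() to the autogroup:internet set.
package main

import (
	"fmt"
	"net/netip"

	"go4.org/netipx"
	"tailscale.com/net/tsaddr"
)

func main() {
	var b netipx.IPSetBuilder
	b.AddPrefix(netip.MustParsePrefix("0.0.0.0/0"))
	dests, _ := b.IPSet()

	fmt.Println(dests.ContainsPrefix(tsaddr.AllIPv4())) // true
	fmt.Println(dests.ContainsPrefix(tsaddr.AllIPv6())) // false
}
```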
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/juanfont/headscale/hscontrol/policy/matcher"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/tailcfg"
|
||||
@@ -782,12 +783,287 @@ func TestReduceNodes(t *testing.T) {
|
||||
got = append(got, v.AsStruct())
|
||||
}
|
||||
if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" {
|
||||
t.Errorf("FilterNodesByACL() unexpected result (-want +got):\n%s", diff)
|
||||
t.Errorf("ReduceNodes() unexpected result (-want +got):\n%s", diff)
|
||||
t.Log("Matchers: ")
|
||||
for _, m := range matchers {
|
||||
t.Log("\t+", m.DebugString())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReduceNodesFromPolicy(t *testing.T) {
|
||||
n := func(id types.NodeID, ip, hostname, username string, routess ...string) *types.Node {
|
||||
var routes []netip.Prefix
|
||||
for _, route := range routess {
|
||||
routes = append(routes, netip.MustParsePrefix(route))
|
||||
}
|
||||
|
||||
return &types.Node{
|
||||
ID: id,
|
||||
IPv4: ap(ip),
|
||||
Hostname: hostname,
|
||||
User: types.User{Name: username},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: routes,
|
||||
},
|
||||
ApprovedRoutes: routes,
|
||||
}
|
||||
}
|
||||
|
||||
type args struct {
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
nodes types.Nodes
|
||||
policy string
|
||||
node *types.Node
|
||||
want types.Nodes
|
||||
wantMatchers int
|
||||
}{
|
||||
{
|
||||
name: "2788-exit-node-too-visible",
|
||||
nodes: types.Nodes{
|
||||
n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
policy: `
|
||||
{
|
||||
"hosts": {
|
||||
"mobile": "100.64.0.1/32",
|
||||
"server": "100.64.0.2/32",
|
||||
"exit": "100.64.0.3/32"
|
||||
},
|
||||
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"mobile"
|
||||
],
|
||||
"dst": [
|
||||
"server:80"
|
||||
]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
node: n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
want: types.Nodes{
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
},
|
||||
wantMatchers: 1,
|
||||
},
|
||||
{
|
||||
name: "2788-exit-node-autogroup:internet",
|
||||
nodes: types.Nodes{
|
||||
n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
policy: `
|
||||
{
|
||||
"hosts": {
|
||||
"mobile": "100.64.0.1/32",
|
||||
"server": "100.64.0.2/32",
|
||||
"exit": "100.64.0.3/32"
|
||||
},
|
||||
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"mobile"
|
||||
],
|
||||
"dst": [
|
||||
"server:80"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"mobile"
|
||||
],
|
||||
"dst": [
|
||||
"autogroup:internet:*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
node: n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
want: types.Nodes{
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
wantMatchers: 2,
|
||||
},
|
||||
{
|
||||
name: "2788-exit-node-0000-route",
|
||||
nodes: types.Nodes{
|
||||
n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
policy: `
|
||||
{
|
||||
"hosts": {
|
||||
"mobile": "100.64.0.1/32",
|
||||
"server": "100.64.0.2/32",
|
||||
"exit": "100.64.0.3/32"
|
||||
},
|
||||
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"mobile"
|
||||
],
|
||||
"dst": [
|
||||
"server:80"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"mobile"
|
||||
],
|
||||
"dst": [
|
||||
"0.0.0.0/0:*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
node: n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
want: types.Nodes{
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
wantMatchers: 2,
|
||||
},
|
||||
{
|
||||
name: "2788-exit-node-::0-route",
|
||||
nodes: types.Nodes{
|
||||
n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
policy: `
|
||||
{
|
||||
"hosts": {
|
||||
"mobile": "100.64.0.1/32",
|
||||
"server": "100.64.0.2/32",
|
||||
"exit": "100.64.0.3/32"
|
||||
},
|
||||
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"mobile"
|
||||
],
|
||||
"dst": [
|
||||
"server:80"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"mobile"
|
||||
],
|
||||
"dst": [
|
||||
"::0/0:*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
node: n(1, "100.64.0.1", "mobile", "mobile"),
|
||||
want: types.Nodes{
|
||||
n(2, "100.64.0.2", "server", "server"),
|
||||
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
wantMatchers: 2,
|
||||
},
|
||||
{
|
||||
name: "2784-split-exit-node-access",
|
||||
nodes: types.Nodes{
|
||||
n(1, "100.64.0.1", "user", "user"),
|
||||
n(2, "100.64.0.2", "exit1", "exit", "0.0.0.0/0", "::/0"),
|
||||
n(3, "100.64.0.3", "exit2", "exit", "0.0.0.0/0", "::/0"),
|
||||
n(4, "100.64.0.4", "otheruser", "otheruser"),
|
||||
},
|
||||
policy: `
|
||||
{
|
||||
"hosts": {
|
||||
"user": "100.64.0.1/32",
|
||||
"exit1": "100.64.0.2/32",
|
||||
"exit2": "100.64.0.3/32",
|
||||
"otheruser": "100.64.0.4/32",
|
||||
},
|
||||
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"user"
|
||||
],
|
||||
"dst": [
|
||||
"exit1:*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"otheruser"
|
||||
],
|
||||
"dst": [
|
||||
"exit2:*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}`,
|
||||
node: n(1, "100.64.0.1", "user", "user"),
|
||||
want: types.Nodes{
|
||||
n(2, "100.64.0.2", "exit1", "exit", "0.0.0.0/0", "::/0"),
|
||||
},
|
||||
wantMatchers: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
for idx, pmf := range PolicyManagerFuncsForTest([]byte(tt.policy)) {
|
||||
t.Run(fmt.Sprintf("%s-index%d", tt.name, idx), func(t *testing.T) {
|
||||
var pm PolicyManager
|
||||
var err error
|
||||
pm, err = pmf(nil, tt.nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
|
||||
matchers, err := pm.MatchersForNode(tt.node.View())
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, matchers, tt.wantMatchers)
|
||||
|
||||
gotViews := ReduceNodes(
|
||||
tt.node.View(),
|
||||
tt.nodes.ViewSlice(),
|
||||
matchers,
|
||||
)
|
||||
// Convert views back to nodes for comparison in tests
|
||||
var got types.Nodes
|
||||
for _, v := range gotViews.All() {
|
||||
got = append(got, v.AsStruct())
|
||||
}
|
||||
if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" {
|
||||
t.Errorf("TestReduceNodesFromPolicy() unexpected result (-want +got):\n%s", diff)
|
||||
t.Log("Matchers: ")
|
||||
for _, m := range matchers {
|
||||
t.Log("\t+", m.DebugString())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSSHPolicyRules(t *testing.T) {
|
||||
users := []types.User{
|
||||
{Name: "user1", Model: gorm.Model{ID: 1}},
|
||||
@@ -1077,6 +1353,55 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "2863-allow-predefined-missing-users",
|
||||
targetNode: taggedClient,
|
||||
peers: types.Nodes{&nodeUser2},
|
||||
policy: `{
|
||||
"groups": {
|
||||
"group:example-infra": [
|
||||
"user2@",
|
||||
"not-created-yet@",
|
||||
],
|
||||
},
|
||||
"tagOwners": {
|
||||
"tag:client": [
|
||||
"user2@"
|
||||
],
|
||||
},
|
||||
"ssh": [
|
||||
// Allow infra to ssh to tag:example-infra server as debian
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"group:example-infra"
|
||||
],
|
||||
"dst": [
|
||||
"tag:client",
|
||||
],
|
||||
"users": [
|
||||
"debian",
|
||||
],
|
||||
},
|
||||
],
|
||||
}`,
|
||||
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
|
||||
{
|
||||
Principals: []*tailcfg.SSHPrincipal{
|
||||
{NodeIP: "100.64.0.2"},
|
||||
},
|
||||
SSHUsers: map[string]string{
|
||||
"debian": "debian",
|
||||
},
|
||||
Action: &tailcfg.SSHAction{
|
||||
Accept: true,
|
||||
AllowAgentForwarding: true,
|
||||
AllowLocalPortForwarding: true,
|
||||
AllowRemotePortForwarding: true,
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
||||
@@ -99,14 +99,16 @@ func (pol *Policy) compileFilterRulesForNode(
|
||||
return nil, ErrInvalidAction
|
||||
}
|
||||
|
||||
rule, err := pol.compileACLWithAutogroupSelf(acl, users, node, nodes)
|
||||
aclRules, err := pol.compileACLWithAutogroupSelf(acl, users, node, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("compiling ACL")
|
||||
continue
|
||||
}
|
||||
|
||||
if rule != nil {
|
||||
rules = append(rules, *rule)
|
||||
for _, rule := range aclRules {
|
||||
if rule != nil {
|
||||
rules = append(rules, *rule)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -115,27 +117,32 @@ func (pol *Policy) compileFilterRulesForNode(
|
||||
|
||||
// compileACLWithAutogroupSelf compiles a single ACL rule, handling
|
||||
// autogroup:self per-node while supporting all other alias types normally.
|
||||
// It returns a slice of filter rules because when an ACL has both autogroup:self
|
||||
// and other destinations, they need to be split into separate rules with different
|
||||
// source filtering logic.
|
||||
func (pol *Policy) compileACLWithAutogroupSelf(
|
||||
acl ACL,
|
||||
users types.Users,
|
||||
node types.NodeView,
|
||||
nodes views.Slice[types.NodeView],
|
||||
) (*tailcfg.FilterRule, error) {
|
||||
// Check if any destination uses autogroup:self
|
||||
hasAutogroupSelfInDst := false
|
||||
) ([]*tailcfg.FilterRule, error) {
|
||||
var autogroupSelfDests []AliasWithPorts
|
||||
var otherDests []AliasWithPorts
|
||||
|
||||
for _, dest := range acl.Destinations {
|
||||
if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
hasAutogroupSelfInDst = true
|
||||
break
|
||||
autogroupSelfDests = append(autogroupSelfDests, dest)
|
||||
} else {
|
||||
otherDests = append(otherDests, dest)
|
||||
}
|
||||
}
|
||||
|
||||
var srcIPs netipx.IPSetBuilder
|
||||
protocols, _ := acl.Protocol.parseProtocol()
|
||||
var rules []*tailcfg.FilterRule
|
||||
|
||||
var resolvedSrcIPs []*netipx.IPSet
|
||||
|
||||
// Resolve sources to only include devices from the same user as the target node.
|
||||
for _, src := range acl.Sources {
|
||||
// autogroup:self is not allowed in sources
|
||||
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
return nil, fmt.Errorf("autogroup:self cannot be used in sources")
|
||||
}
|
||||
@@ -147,92 +154,121 @@ func (pol *Policy) compileACLWithAutogroupSelf(
|
||||
}
|
||||
|
||||
if ips != nil {
|
||||
if hasAutogroupSelfInDst {
|
||||
// Instead of iterating all addresses (which could be millions),
|
||||
// check each node's IPs against the source set
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
// Check if any of this node's IPs are in the source set
|
||||
for _, nodeIP := range n.IPs() {
|
||||
if ips.Contains(nodeIP) {
|
||||
n.AppendToIPSet(&srcIPs)
|
||||
break // Found this node, move to next
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// No autogroup:self in destination, use all resolved sources
|
||||
srcIPs.AddSet(ips)
|
||||
}
|
||||
resolvedSrcIPs = append(resolvedSrcIPs, ips)
|
||||
}
|
||||
}
|
||||
|
||||
srcSet, err := srcIPs.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if len(resolvedSrcIPs) == 0 {
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
if srcSet == nil || len(srcSet.Prefixes()) == 0 {
|
||||
// No sources resolved, skip this rule
|
||||
return nil, nil //nolint:nilnil
|
||||
}
|
||||
// Handle autogroup:self destinations (if any)
|
||||
if len(autogroupSelfDests) > 0 {
|
||||
// Pre-filter to same-user untagged devices once - reuse for both sources and destinations
|
||||
sameUserNodes := make([]types.NodeView, 0)
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
sameUserNodes = append(sameUserNodes, n)
|
||||
}
|
||||
}
|
||||
|
||||
protocols, _ := acl.Protocol.parseProtocol()
|
||||
|
||||
var destPorts []tailcfg.NetPortRange
|
||||
|
||||
for _, dest := range acl.Destinations {
|
||||
if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
for _, port := range dest.Ports {
|
||||
for _, ip := range n.IPs() {
|
||||
pr := tailcfg.NetPortRange{
|
||||
IP: ip.String(),
|
||||
Ports: port,
|
||||
}
|
||||
destPorts = append(destPorts, pr)
|
||||
if len(sameUserNodes) > 0 {
|
||||
// Filter sources to only same-user untagged devices
|
||||
var srcIPs netipx.IPSetBuilder
|
||||
for _, ips := range resolvedSrcIPs {
|
||||
for _, n := range sameUserNodes {
|
||||
// Check if any of this node's IPs are in the source set
|
||||
for _, nodeIP := range n.IPs() {
|
||||
if ips.Contains(nodeIP) {
|
||||
n.AppendToIPSet(&srcIPs)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ips, err := dest.Resolve(pol, users, nodes)
|
||||
|
||||
srcSet, err := srcIPs.IPSet()
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("resolving destination ips")
|
||||
continue
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ips == nil {
|
||||
log.Debug().Msgf("destination resolved to nil ips: %v", dest)
|
||||
continue
|
||||
}
|
||||
|
||||
prefixes := ips.Prefixes()
|
||||
|
||||
for _, pref := range prefixes {
|
||||
for _, port := range dest.Ports {
|
||||
pr := tailcfg.NetPortRange{
|
||||
IP: pref.String(),
|
||||
Ports: port,
|
||||
if srcSet != nil && len(srcSet.Prefixes()) > 0 {
|
||||
var destPorts []tailcfg.NetPortRange
|
||||
for _, dest := range autogroupSelfDests {
|
||||
for _, n := range sameUserNodes {
|
||||
for _, port := range dest.Ports {
|
||||
for _, ip := range n.IPs() {
|
||||
destPorts = append(destPorts, tailcfg.NetPortRange{
|
||||
IP: ip.String(),
|
||||
Ports: port,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
destPorts = append(destPorts, pr)
|
||||
}
|
||||
|
||||
if len(destPorts) > 0 {
|
||||
rules = append(rules, &tailcfg.FilterRule{
|
||||
SrcIPs: ipSetToPrefixStringList(srcSet),
|
||||
DstPorts: destPorts,
|
||||
IPProto: protocols,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(destPorts) == 0 {
|
||||
// No destinations resolved, skip this rule
|
||||
return nil, nil //nolint:nilnil
|
||||
if len(otherDests) > 0 {
|
||||
var srcIPs netipx.IPSetBuilder
|
||||
|
||||
for _, ips := range resolvedSrcIPs {
|
||||
srcIPs.AddSet(ips)
|
||||
}
|
||||
|
||||
srcSet, err := srcIPs.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if srcSet != nil && len(srcSet.Prefixes()) > 0 {
|
||||
var destPorts []tailcfg.NetPortRange
|
||||
|
||||
for _, dest := range otherDests {
|
||||
ips, err := dest.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("resolving destination ips")
|
||||
continue
|
||||
}
|
||||
|
||||
if ips == nil {
|
||||
log.Debug().Msgf("destination resolved to nil ips: %v", dest)
|
||||
continue
|
||||
}
|
||||
|
||||
prefixes := ips.Prefixes()
|
||||
|
||||
for _, pref := range prefixes {
|
||||
for _, port := range dest.Ports {
|
||||
pr := tailcfg.NetPortRange{
|
||||
IP: pref.String(),
|
||||
Ports: port,
|
||||
}
|
||||
destPorts = append(destPorts, pr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(destPorts) > 0 {
|
||||
rules = append(rules, &tailcfg.FilterRule{
|
||||
SrcIPs: ipSetToPrefixStringList(srcSet),
|
||||
DstPorts: destPorts,
|
||||
IPProto: protocols,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &tailcfg.FilterRule{
|
||||
SrcIPs: ipSetToPrefixStringList(srcSet),
|
||||
DstPorts: destPorts,
|
||||
IPProto: protocols,
|
||||
}, nil
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
func sshAction(accept bool, duration time.Duration) tailcfg.SSHAction {
|
||||
@@ -260,46 +296,29 @@ func (pol *Policy) compileSSHPolicy(
|
||||
var rules []*tailcfg.SSHRule
|
||||
|
||||
for index, rule := range pol.SSHs {
|
||||
// Check if any destination uses autogroup:self
|
||||
hasAutogroupSelfInDst := false
|
||||
// Separate destinations into autogroup:self and others
|
||||
// This is needed because autogroup:self requires filtering sources to same-user only,
|
||||
// while other destinations should use all resolved sources
|
||||
var autogroupSelfDests []Alias
|
||||
var otherDests []Alias
|
||||
|
||||
for _, dst := range rule.Destinations {
|
||||
if ag, ok := dst.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
hasAutogroupSelfInDst = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// If autogroup:self is used, skip tagged nodes
|
||||
if hasAutogroupSelfInDst && node.IsTagged() {
|
||||
continue
|
||||
}
|
||||
|
||||
var dest netipx.IPSetBuilder
|
||||
for _, src := range rule.Destinations {
|
||||
// Handle autogroup:self specially
|
||||
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
// For autogroup:self, only include the target user's untagged devices
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
n.AppendToIPSet(&dest)
|
||||
}
|
||||
}
|
||||
autogroupSelfDests = append(autogroupSelfDests, dst)
|
||||
} else {
|
||||
ips, err := src.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("resolving destination ips")
|
||||
continue
|
||||
}
|
||||
dest.AddSet(ips)
|
||||
otherDests = append(otherDests, dst)
|
||||
}
|
||||
}
|
||||
|
||||
destSet, err := dest.IPSet()
|
||||
// Note: Tagged nodes can't match autogroup:self destinations, but can still match other destinations
|
||||
|
||||
// Resolve sources once - we'll use them differently for each destination type
|
||||
srcIPs, err := rule.Sources.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
log.Trace().Caller().Err(err).Msgf("SSH policy compilation failed resolving source ips for rule %+v", rule)
|
||||
}
|
||||
|
||||
if !node.InIPSet(destSet) {
|
||||
if srcIPs == nil || len(srcIPs.Prefixes()) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -313,50 +332,9 @@ func (pol *Policy) compileSSHPolicy(
|
||||
return nil, fmt.Errorf("parsing SSH policy, unknown action %q, index: %d: %w", rule.Action, index, err)
|
||||
}
|
||||
|
||||
var principals []*tailcfg.SSHPrincipal
|
||||
srcIPs, err := rule.Sources.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("SSH policy compilation failed resolving source ips for rule %+v", rule)
|
||||
continue // Skip this rule if we can't resolve sources
|
||||
}
|
||||
|
||||
// If autogroup:self is in destinations, filter sources to same user only
|
||||
if hasAutogroupSelfInDst {
|
||||
var filteredSrcIPs netipx.IPSetBuilder
|
||||
// Instead of iterating all addresses, check each node's IPs
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
// Check if any of this node's IPs are in the source set
|
||||
for _, nodeIP := range n.IPs() {
|
||||
if srcIPs.Contains(nodeIP) {
|
||||
n.AppendToIPSet(&filteredSrcIPs)
|
||||
break // Found this node, move to next
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
srcIPs, err = filteredSrcIPs.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if srcIPs == nil || len(srcIPs.Prefixes()) == 0 {
|
||||
// No valid sources after filtering, skip this rule
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
for addr := range util.IPSetAddrIter(srcIPs) {
|
||||
principals = append(principals, &tailcfg.SSHPrincipal{
|
||||
NodeIP: addr.String(),
|
||||
})
|
||||
}
|
||||
|
||||
userMap := make(map[string]string, len(rule.Users))
|
||||
if rule.Users.ContainsNonRoot() {
|
||||
userMap["*"] = "="
|
||||
|
||||
// by default, we do not allow root unless explicitly stated
|
||||
userMap["root"] = ""
|
||||
}
|
||||
@@ -366,11 +344,108 @@ func (pol *Policy) compileSSHPolicy(
|
||||
for _, u := range rule.Users.NormalUsers() {
|
||||
userMap[u.String()] = u.String()
|
||||
}
|
||||
rules = append(rules, &tailcfg.SSHRule{
|
||||
Principals: principals,
|
||||
SSHUsers: userMap,
|
||||
Action: &action,
|
||||
})
|
||||
|
||||
// Handle autogroup:self destinations (if any)
|
||||
// Note: Tagged nodes can't match autogroup:self, so skip this block for tagged nodes
|
||||
if len(autogroupSelfDests) > 0 && !node.IsTagged() {
|
||||
// Build destination set for autogroup:self (same-user untagged devices only)
|
||||
var dest netipx.IPSetBuilder
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
n.AppendToIPSet(&dest)
|
||||
}
|
||||
}
|
||||
|
||||
destSet, err := dest.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Only create rule if this node is in the destination set
|
||||
if node.InIPSet(destSet) {
|
||||
// Filter sources to only same-user untagged devices
|
||||
// Pre-filter to same-user untagged devices for efficiency
|
||||
sameUserNodes := make([]types.NodeView, 0)
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
sameUserNodes = append(sameUserNodes, n)
|
||||
}
|
||||
}
|
||||
|
||||
var filteredSrcIPs netipx.IPSetBuilder
|
||||
for _, n := range sameUserNodes {
|
||||
// Check if any of this node's IPs are in the source set
|
||||
for _, nodeIP := range n.IPs() {
|
||||
if srcIPs.Contains(nodeIP) {
|
||||
n.AppendToIPSet(&filteredSrcIPs)
|
||||
break // Found this node, move to next
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
filteredSrcSet, err := filteredSrcIPs.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if filteredSrcSet != nil && len(filteredSrcSet.Prefixes()) > 0 {
|
||||
var principals []*tailcfg.SSHPrincipal
|
||||
for addr := range util.IPSetAddrIter(filteredSrcSet) {
|
||||
principals = append(principals, &tailcfg.SSHPrincipal{
|
||||
NodeIP: addr.String(),
|
||||
})
|
||||
}
|
||||
|
||||
if len(principals) > 0 {
|
||||
rules = append(rules, &tailcfg.SSHRule{
|
||||
Principals: principals,
|
||||
SSHUsers: userMap,
|
||||
Action: &action,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle other destinations (if any)
|
||||
if len(otherDests) > 0 {
|
||||
// Build destination set for other destinations
|
||||
var dest netipx.IPSetBuilder
|
||||
for _, dst := range otherDests {
|
||||
ips, err := dst.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("resolving destination ips")
|
||||
continue
|
||||
}
|
||||
if ips != nil {
|
||||
dest.AddSet(ips)
|
||||
}
|
||||
}
|
||||
|
||||
destSet, err := dest.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Only create rule if this node is in the destination set
|
||||
if node.InIPSet(destSet) {
|
||||
// For non-autogroup:self destinations, use all resolved sources (no filtering)
|
||||
var principals []*tailcfg.SSHPrincipal
|
||||
for addr := range util.IPSetAddrIter(srcIPs) {
|
||||
principals = append(principals, &tailcfg.SSHPrincipal{
|
||||
NodeIP: addr.String(),
|
||||
})
|
||||
}
|
||||
|
||||
if len(principals) > 0 {
|
||||
rules = append(rules, &tailcfg.SSHRule{
|
||||
Principals: principals,
|
||||
SSHUsers: userMap,
|
||||
Action: &action,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &tailcfg.SSHPolicy{
|
||||
|
||||
@@ -1339,3 +1339,70 @@ func TestSSHWithAutogroupSelfExcludesTaggedDevices(t *testing.T) {
|
||||
assert.Empty(t, sshPolicy2.Rules, "tagged node should get no SSH rules with autogroup:self")
|
||||
}
|
||||
}
|
||||
|
||||
// TestSSHWithAutogroupSelfAndMixedDestinations tests that SSH rules can have both
|
||||
// autogroup:self and other destinations (like tag:router) in the same rule, and that
|
||||
// autogroup:self filtering only applies to autogroup:self destinations, not others.
|
||||
func TestSSHWithAutogroupSelfAndMixedDestinations(t *testing.T) {
|
||||
users := types.Users{
|
||||
{Model: gorm.Model{ID: 1}, Name: "user1"},
|
||||
{Model: gorm.Model{ID: 2}, Name: "user2"},
|
||||
}
|
||||
|
||||
nodes := types.Nodes{
|
||||
{User: users[0], IPv4: ap("100.64.0.1"), Hostname: "user1-device"},
|
||||
{User: users[0], IPv4: ap("100.64.0.2"), Hostname: "user1-device2"},
|
||||
{User: users[1], IPv4: ap("100.64.0.3"), Hostname: "user2-device"},
|
||||
{User: users[1], IPv4: ap("100.64.0.4"), Hostname: "user2-router", ForcedTags: []string{"tag:router"}},
|
||||
}
|
||||
|
||||
policy := &Policy{
|
||||
TagOwners: TagOwners{
|
||||
Tag("tag:router"): Owners{up("user2@")},
|
||||
},
|
||||
SSHs: []SSH{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: SSHSrcAliases{agp("autogroup:member")},
|
||||
Destinations: SSHDstAliases{agp("autogroup:self"), tp("tag:router")},
|
||||
Users: []SSHUser{"admin"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := policy.validate()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test 1: Compile for user1's device (should only match autogroup:self destination)
|
||||
node1 := nodes[0].View()
|
||||
sshPolicy1, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sshPolicy1)
|
||||
require.Len(t, sshPolicy1.Rules, 1, "user1's device should have 1 SSH rule (autogroup:self)")
|
||||
|
||||
// Verify autogroup:self rule has filtered sources (only same-user devices)
|
||||
selfRule := sshPolicy1.Rules[0]
|
||||
require.Len(t, selfRule.Principals, 2, "autogroup:self rule should only have user1's devices")
|
||||
selfPrincipals := make([]string, len(selfRule.Principals))
|
||||
for i, p := range selfRule.Principals {
|
||||
selfPrincipals[i] = p.NodeIP
|
||||
}
|
||||
require.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, selfPrincipals,
|
||||
"autogroup:self rule should only include same-user untagged devices")
|
||||
|
||||
// Test 2: Compile for router (should only match tag:router destination)
|
||||
routerNode := nodes[3].View() // user2-router
|
||||
sshPolicyRouter, err := policy.compileSSHPolicy(users, routerNode, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sshPolicyRouter)
|
||||
require.Len(t, sshPolicyRouter.Rules, 1, "router should have 1 SSH rule (tag:router)")
|
||||
|
||||
routerRule := sshPolicyRouter.Rules[0]
|
||||
routerPrincipals := make([]string, len(routerRule.Principals))
|
||||
for i, p := range routerRule.Principals {
|
||||
routerPrincipals[i] = p.NodeIP
|
||||
}
|
||||
require.Contains(t, routerPrincipals, "100.64.0.1", "router rule should include user1's device (unfiltered sources)")
|
||||
require.Contains(t, routerPrincipals, "100.64.0.2", "router rule should include user1's other device (unfiltered sources)")
|
||||
require.Contains(t, routerPrincipals, "100.64.0.3", "router rule should include user2's device (unfiltered sources)")
|
||||
}
|
||||
|
||||
@@ -47,6 +47,14 @@ type PolicyManager struct {
usesAutogroupSelf bool
}

// filterAndPolicy combines the compiled filter rules with policy content for hashing.
// This ensures filterHash changes when policy changes, even for autogroup:self where
// the compiled filter is always empty.
type filterAndPolicy struct {
Filter []tailcfg.FilterRule
Policy *Policy
}

// NewPolicyManager creates a new PolicyManager from a policy file and a list of users and nodes.
// It returns an error if the policy file is invalid.
// The policy manager will update the filter rules based on the users and nodes.
@@ -77,14 +85,6 @@ func NewPolicyManager(b []byte, users []types.User, nodes views.Slice[types.Node
// updateLocked updates the filter rules based on the current policy and nodes.
// It must be called with the lock held.
func (pm *PolicyManager) updateLocked() (bool, error) {
// Clear the SSH policy map to ensure it's recalculated with the new policy.
// TODO(kradalby): This could potentially be optimized by only clearing the
// policies for nodes that have changed. Particularly if the only difference is
// that nodes has been added or removed.
clear(pm.sshPolicyMap)
clear(pm.compiledFilterRulesMap)
clear(pm.filterRulesMap)

// Check if policy uses autogroup:self
pm.usesAutogroupSelf = pm.pol.usesAutogroupSelf()

@@ -98,7 +98,14 @@ func (pm *PolicyManager) updateLocked() (bool, error) {
return false, fmt.Errorf("compiling filter rules: %w", err)
}

filterHash := deephash.Hash(&filter)
// Hash both the compiled filter AND the policy content together.
// This ensures filterHash changes when policy changes, even for autogroup:self
// where the compiled filter is always empty. This eliminates the need for
// a separate policyHash field.
filterHash := deephash.Hash(&filterAndPolicy{
Filter: filter,
Policy: pm.pol,
})
filterChanged := filterHash != pm.filterHash
if filterChanged {
log.Debug().
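
The hashing change above can be illustrated with a small standalone sketch (a minimal example assuming deephash from tailscale.com/util/deephash; the `combined` type and the string stand-ins are illustrative, not the real headscale types):

    package main

    import (
        "fmt"

        "tailscale.com/util/deephash"
    )

    // combined mirrors the filterAndPolicy idea: hash the compiled rules and the
    // policy content together, so a policy-only change still flips the hash even
    // when the compiled filter stays empty (as with autogroup:self).
    type combined struct {
        Filter []string // stand-in for the compiled []tailcfg.FilterRule
        Policy string   // stand-in for the parsed *Policy
    }

    func main() {
        before := deephash.Hash(&combined{Filter: nil, Policy: `{"acls":[...]}`})
        after := deephash.Hash(&combined{Filter: nil, Policy: `{"groups":{},"acls":[...]}`})
        fmt.Println(before == after) // false: the policy change is detected
    }
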
@@ -164,8 +171,27 @@ func (pm *PolicyManager) updateLocked() (bool, error) {
pm.exitSet = exitSet
pm.exitSetHash = exitSetHash

// If neither of the calculated values changed, no need to update nodes
if !filterChanged && !tagOwnerChanged && !autoApproveChanged && !exitSetChanged {
// Determine if we need to send updates to nodes
// filterChanged now includes policy content changes (via combined hash),
// so it will detect changes even for autogroup:self where compiled filter is empty
needsUpdate := filterChanged || tagOwnerChanged || autoApproveChanged || exitSetChanged

// Only clear caches if we're actually going to send updates
// This prevents clearing caches when nothing changed, which would leave nodes
// with stale filters until they reconnect. This is critical for autogroup:self
// where even reloading the same policy would clear caches but not send updates.
if needsUpdate {
// Clear the SSH policy map to ensure it's recalculated with the new policy.
// TODO(kradalby): This could potentially be optimized by only clearing the
// policies for nodes that have changed. Particularly if the only difference is
// that nodes has been added or removed.
clear(pm.sshPolicyMap)
clear(pm.compiledFilterRulesMap)
clear(pm.filterRulesMap)
}

// If nothing changed, no need to update nodes
if !needsUpdate {
log.Trace().
Msg("Policy evaluation detected no changes - all hashes match")
return false, nil
@@ -491,10 +517,16 @@ func (pm *PolicyManager) SetNodes(nodes views.Slice[types.NodeView]) (bool, erro
// For global policies: the filter must be recompiled to include the new nodes.
if nodesChanged {
// Recompile filter with the new node list
_, err := pm.updateLocked()
needsUpdate, err := pm.updateLocked()
if err != nil {
return false, err
}
if !needsUpdate {
// This ensures fresh filter rules are generated for all nodes
clear(pm.sshPolicyMap)
clear(pm.compiledFilterRulesMap)
clear(pm.filterRulesMap)
}
// Always return true when nodes changed, even if filter hash didn't change
// (can happen with autogroup:self or when nodes are added but don't affect rules)
return true, nil

@@ -2,6 +2,7 @@ package v2

import (
"net/netip"
"slices"
"testing"

"github.com/google/go-cmp/cmp"
@@ -439,3 +440,168 @@ func TestAutogroupSelfReducedVsUnreducedRules(t *testing.T) {
require.Empty(t, peerMap[node1.ID], "node1 should have no peers (can only reach itself)")
require.Empty(t, peerMap[node2.ID], "node2 should have no peers")
}

// When separate ACL rules exist (one with autogroup:self, one with tag:router),
// the autogroup:self rule should not prevent the tag:router rule from working.
// This ensures that autogroup:self doesn't interfere with other ACL rules.
func TestAutogroupSelfWithOtherRules(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "test-1", Email: "test-1@example.com"},
{Model: gorm.Model{ID: 2}, Name: "test-2", Email: "test-2@example.com"},
}

// test-1 has a regular device
test1Node := &types.Node{
ID: 1,
Hostname: "test-1-device",
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[0],
UserID: users[0].ID,
Hostinfo: &tailcfg.Hostinfo{},
}

// test-2 has a router device with tag:node-router
test2RouterNode := &types.Node{
ID: 2,
Hostname: "test-2-router",
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[1],
UserID: users[1].ID,
ForcedTags: []string{"tag:node-router"},
Hostinfo: &tailcfg.Hostinfo{},
}

nodes := types.Nodes{test1Node, test2RouterNode}

// This matches the exact policy from issue #2838:
// - First rule: autogroup:member -> autogroup:self (allows users to see their own devices)
// - Second rule: group:home -> tag:node-router (should allow group members to see router)
policy := `{
"groups": {
"group:home": ["test-1@example.com", "test-2@example.com"]
},
"tagOwners": {
"tag:node-router": ["group:home"]
},
"acls": [
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
},
{
"action": "accept",
"src": ["group:home"],
"dst": ["tag:node-router:*"]
}
]
}`

pm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())
require.NoError(t, err)

peerMap := pm.BuildPeerMap(nodes.ViewSlice())

// test-1 (in group:home) should see:
// 1. Their own node (from autogroup:self rule)
// 2. The router node (from group:home -> tag:node-router rule)
test1Peers := peerMap[test1Node.ID]

// Verify test-1 can see the router (group:home -> tag:node-router rule)
require.True(t, slices.ContainsFunc(test1Peers, func(n types.NodeView) bool {
return n.ID() == test2RouterNode.ID
}), "test-1 should see test-2's router via group:home -> tag:node-router rule, even when autogroup:self rule exists (issue #2838)")

// Verify that test-1 has filter rules (including autogroup:self and tag:node-router access)
rules, err := pm.FilterForNode(test1Node.View())
require.NoError(t, err)
require.NotEmpty(t, rules, "test-1 should have filter rules from both ACL rules")
}

// TestAutogroupSelfPolicyUpdateTriggersMapResponse verifies that when a policy with
// autogroup:self is updated, SetPolicy returns true to trigger MapResponse updates,
// even if the global filter hash didn't change (which is always empty for autogroup:self).
// This fixes the issue where policy updates would clear caches but not trigger updates,
// leaving nodes with stale filter rules until reconnect.
func TestAutogroupSelfPolicyUpdateTriggersMapResponse(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "test-1", Email: "test-1@example.com"},
{Model: gorm.Model{ID: 2}, Name: "test-2", Email: "test-2@example.com"},
}

test1Node := &types.Node{
ID: 1,
Hostname: "test-1-device",
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[0],
UserID: users[0].ID,
Hostinfo: &tailcfg.Hostinfo{},
}

test2Node := &types.Node{
ID: 2,
Hostname: "test-2-device",
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[1],
UserID: users[1].ID,
Hostinfo: &tailcfg.Hostinfo{},
}

nodes := types.Nodes{test1Node, test2Node}

// Initial policy with autogroup:self
initialPolicy := `{
"acls": [
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
}
]
}`

pm, err := NewPolicyManager([]byte(initialPolicy), users, nodes.ViewSlice())
require.NoError(t, err)
require.True(t, pm.usesAutogroupSelf, "policy should use autogroup:self")

// Get initial filter rules for test-1 (should be cached)
rules1, err := pm.FilterForNode(test1Node.View())
require.NoError(t, err)
require.NotEmpty(t, rules1, "test-1 should have filter rules")

// Update policy with a different ACL that still results in empty global filter
// (only autogroup:self rules, which compile to empty global filter)
// We add a comment/description change by adding groups (which don't affect filter compilation)
updatedPolicy := `{
"groups": {
"group:test": ["test-1@example.com"]
},
"acls": [
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
}
]
}`

// SetPolicy should return true even though global filter hash didn't change
policyChanged, err := pm.SetPolicy([]byte(updatedPolicy))
require.NoError(t, err)
require.True(t, policyChanged, "SetPolicy should return true when policy content changes, even if global filter hash unchanged (autogroup:self)")

// Verify that caches were cleared and new rules are generated
// The cache should be empty, so FilterForNode will recompile
rules2, err := pm.FilterForNode(test1Node.View())
require.NoError(t, err)
require.NotEmpty(t, rules2, "test-1 should have filter rules after policy update")

// Verify that the policy hash tracking works - a second identical update should return false
policyChanged2, err := pm.SetPolicy([]byte(updatedPolicy))
require.NoError(t, err)
require.False(t, policyChanged2, "SetPolicy should return false when policy content hasn't changed")
}

@@ -300,7 +300,9 @@ func (s *State) UpdateUser(userID types.UserID, updateFn func(*types.User) error
return nil, err
}

if err := tx.Save(user).Error; err != nil {
// Use Updates() to only update modified fields, preserving unchanged values.
err = tx.Updates(user).Error
if err != nil {
return nil, fmt.Errorf("updating user: %w", err)
}

@@ -386,7 +388,11 @@ func (s *State) persistNodeToDB(node types.NodeView) (types.NodeView, change.Cha

nodePtr := node.AsStruct()

if err := s.db.DB.Save(nodePtr).Error; err != nil {
// Use Omit("expiry") to prevent overwriting expiry during MapRequest updates.
// Expiry should only be updated through explicit SetNodeExpiry calls or re-registration.
// See: https://github.com/juanfont/headscale/issues/2862
err := s.db.DB.Omit("expiry").Updates(nodePtr).Error
if err != nil {
return types.NodeView{}, change.EmptySet, fmt.Errorf("saving node: %w", err)
}

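The Save-to-Updates switch above can be sketched in isolation (a minimal GORM example with a hypothetical model; the field and column names are illustrative, not the actual headscale schema):

    package sketch

    import (
        "time"

        "gorm.io/gorm"
    )

    type Node struct {
        ID     uint64
        Name   string
        Expiry *time.Time
    }

    // persist writes the node back without touching the expiry column.
    // Save would write every column and could clobber a newer expiry in the
    // database with a stale in-memory value; Updates combined with Omit
    // leaves the protected column alone.
    func persist(db *gorm.DB, n *Node) error {
        return db.Model(n).Omit("expiry").Updates(n).Error
    }
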
@@ -456,9 +462,9 @@ func (s *State) Connect(id types.NodeID) []change.ChangeSet {
log.Info().Uint64("node.id", id.Uint64()).Str("node.name", node.Hostname()).Msg("Node connected")

// Use the node's current routes for primary route update
// SubnetRoutes() returns only the intersection of announced AND approved routes
// We MUST use SubnetRoutes() to maintain the security model
routeChange := s.primaryRoutes.SetRoutes(id, node.SubnetRoutes()...)
// AllApprovedRoutes() returns only the intersection of announced AND approved routes
// We MUST use AllApprovedRoutes() to maintain the security model
routeChange := s.primaryRoutes.SetRoutes(id, node.AllApprovedRoutes()...)

if routeChange {
c = append(c, change.NodeAdded(id))
@@ -656,7 +662,7 @@ func (s *State) SetApprovedRoutes(nodeID types.NodeID, routes []netip.Prefix) (t
// Update primary routes table based on SubnetRoutes (intersection of announced and approved).
// The primary routes table is what the mapper uses to generate network maps, so updating it
// here ensures that route changes are distributed to peers.
routeChange := s.primaryRoutes.SetRoutes(nodeID, nodeView.SubnetRoutes()...)
routeChange := s.primaryRoutes.SetRoutes(nodeID, nodeView.AllApprovedRoutes()...)

// If routes changed or the changeset isn't already a full update, trigger a policy change
// to ensure all nodes get updated network maps
@@ -821,7 +827,7 @@ func (s *State) SetPolicy(pol []byte) (bool, error) {

// AutoApproveRoutes checks if a node's routes should be auto-approved.
// AutoApproveRoutes checks if any routes should be auto-approved for a node and updates them.
func (s *State) AutoApproveRoutes(nv types.NodeView) bool {
func (s *State) AutoApproveRoutes(nv types.NodeView) (change.ChangeSet, error) {
approved, changed := policy.ApproveRoutesWithPolicy(s.polMan, nv, nv.ApprovedRoutes().AsSlice(), nv.AnnouncedRoutes())
if changed {
log.Debug().
@@ -834,7 +840,7 @@ func (s *State) AutoApproveRoutes(nv types.NodeView) bool {

// Persist the auto-approved routes to database and NodeStore via SetApprovedRoutes
// This ensures consistency between database and NodeStore
_, _, err := s.SetApprovedRoutes(nv.ID(), approved)
_, c, err := s.SetApprovedRoutes(nv.ID(), approved)
if err != nil {
log.Error().
Uint64("node.id", nv.ID().Uint64()).
@@ -842,13 +848,15 @@ func (s *State) AutoApproveRoutes(nv types.NodeView) bool {
Err(err).
Msg("Failed to persist auto-approved routes")

return false
return change.EmptySet, err
}

log.Info().Uint64("node.id", nv.ID().Uint64()).Str("node.name", nv.Hostname()).Strs("routes.approved", util.PrefixesToString(approved)).Msg("Routes approved")

return c, nil
}

return changed
return change.EmptySet, nil
}

// GetPolicy retrieves the current policy from the database.
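
With the new signature, a caller consumes a ChangeSet and an error instead of a bool; a rough sketch of such a call site (the notify helper is a hypothetical placeholder, not the real headscale wiring):

    // Propagate whatever changed as a result of auto-approval.
    c, err := s.AutoApproveRoutes(nv)
    if err != nil {
        log.Error().Err(err).Msg("auto-approving routes")
    } else if !c.Empty() {
        notifyPeers(c) // hypothetical helper that distributes the ChangeSet
    }
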
@@ -964,6 +972,11 @@ func (s *State) ExpirePreAuthKey(preAuthKey *types.PreAuthKey) error {
return s.db.ExpirePreAuthKey(preAuthKey)
}

// DeletePreAuthKey permanently deletes a pre-authentication key.
func (s *State) DeletePreAuthKey(preAuthKey *types.PreAuthKey) error {
return s.db.DeletePreAuthKey(preAuthKey)
}

// GetRegistrationCacheEntry retrieves a node registration from cache.
func (s *State) GetRegistrationCacheEntry(id types.RegistrationID) (*types.RegisterNode, bool) {
entry, found := s.registrationCache.Get(id)
@@ -1187,9 +1200,10 @@ func (s *State) HandleNodeFromAuthPath(
return types.NodeView{}, change.EmptySet, fmt.Errorf("node not found in NodeStore: %d", existingNodeSameUser.ID())
}

// Use the node from UpdateNode to save to database
_, err = hsdb.Write(s.db.DB, func(tx *gorm.DB) (*types.Node, error) {
if err := tx.Save(updatedNodeView.AsStruct()).Error; err != nil {
// Use Updates() to preserve fields not modified by UpdateNode.
err := tx.Updates(updatedNodeView.AsStruct()).Error
if err != nil {
return nil, fmt.Errorf("failed to save node: %w", err)
}
return nil, nil
@@ -1294,9 +1308,53 @@ func (s *State) HandleNodeFromPreAuthKey(
return types.NodeView{}, change.EmptySet, err
}

err = pak.Validate()
if err != nil {
return types.NodeView{}, change.EmptySet, err
// Check if node exists with same machine key before validating the key.
// For #2830: container restarts send the same pre-auth key which may be used/expired.
// Skip validation for existing nodes re-registering with the same NodeKey, as the
// key was only needed for initial authentication. NodeKey rotation requires validation.
existingNodeSameUser, existsSameUser := s.nodeStore.GetNodeByMachineKey(machineKey, types.UserID(pak.User.ID))

// For existing nodes, skip validation if:
// 1. MachineKey matches (cryptographic proof of machine identity)
// 2. User matches (from the PAK being used)
// 3. Not a NodeKey rotation (rotation requires fresh validation)
//
// Security: MachineKey is the cryptographic identity. If someone has the MachineKey,
// they control the machine. The PAK was only needed to authorize initial join.
// We don't check which specific PAK was used originally because:
// - Container restarts may use different PAKs (e.g., env var changed)
// - Original PAK may be deleted
// - MachineKey + User is sufficient to prove this is the same node
isExistingNodeReregistering := existsSameUser && existingNodeSameUser.Valid()

// Check if this is a NodeKey rotation (different NodeKey)
isNodeKeyRotation := existsSameUser && existingNodeSameUser.Valid() &&
existingNodeSameUser.NodeKey() != regReq.NodeKey

if isExistingNodeReregistering && !isNodeKeyRotation {
// Existing node re-registering with same NodeKey: skip validation.
// Pre-auth keys are only needed for initial authentication. Critical for
// containers that run "tailscale up --authkey=KEY" on every restart.
log.Debug().
Caller().
Uint64("node.id", existingNodeSameUser.ID().Uint64()).
Str("node.name", existingNodeSameUser.Hostname()).
Str("machine.key", machineKey.ShortString()).
Str("node.key.existing", existingNodeSameUser.NodeKey().ShortString()).
Str("node.key.request", regReq.NodeKey.ShortString()).
Uint64("authkey.id", pak.ID).
Bool("authkey.used", pak.Used).
Bool("authkey.expired", pak.Expiration != nil && pak.Expiration.Before(time.Now())).
Bool("authkey.reusable", pak.Reusable).
Bool("nodekey.rotation", isNodeKeyRotation).
Msg("Existing node re-registering with same NodeKey and auth key, skipping validation")

} else {
// New node or NodeKey rotation: require valid auth key.
err = pak.Validate()
if err != nil {
return types.NodeView{}, change.EmptySet, err
}
}

// Ensure we have a valid hostname - handle nil/empty cases
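
The re-registration decision above boils down to a small predicate; a simplified sketch (strings stand in for key.NodePublic, and the helper name is illustrative):

    // shouldSkipPAKValidation returns true only for an existing node that
    // re-registers with the same NodeKey: the pre-auth key already did its job
    // at initial join, so a used, expired, or even deleted key is acceptable.
    func shouldSkipPAKValidation(existsForUser bool, existingNodeKey, requestNodeKey string) bool {
        if !existsForUser {
            return false // new node: the pre-auth key must validate
        }
        if existingNodeKey != requestNodeKey {
            return false // NodeKey rotation: require fresh validation
        }
        return true
    }
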
@@ -1328,9 +1386,6 @@ func (s *State) HandleNodeFromPreAuthKey(

var finalNode types.NodeView

// Check if node already exists with same machine key for this user
existingNodeSameUser, existsSameUser := s.nodeStore.GetNodeByMachineKey(machineKey, types.UserID(pak.User.ID))

// If this node exists for this user, update the node in place.
if existsSameUser && existingNodeSameUser.Valid() {
log.Trace().
@@ -1372,9 +1427,10 @@ func (s *State) HandleNodeFromPreAuthKey(
return types.NodeView{}, change.EmptySet, fmt.Errorf("node not found in NodeStore: %d", existingNodeSameUser.ID())
}

// Use the node from UpdateNode to save to database
_, err = hsdb.Write(s.db.DB, func(tx *gorm.DB) (*types.Node, error) {
if err := tx.Save(updatedNodeView.AsStruct()).Error; err != nil {
// Use Updates() to preserve fields not modified by UpdateNode.
err := tx.Updates(updatedNodeView.AsStruct()).Error
if err != nil {
return nil, fmt.Errorf("failed to save node: %w", err)
}

@@ -1583,6 +1639,7 @@ func (s *State) UpdateNodeFromMapRequest(id types.NodeID, req tailcfg.MapRequest
var routeChange bool
var hostinfoChanged bool
var needsRouteApproval bool
var autoApprovedRoutes []netip.Prefix
// We need to ensure we update the node as it is in the NodeStore at
// the time of the request.
updatedNode, ok := s.nodeStore.UpdateNode(id, func(currentNode *types.Node) {
@@ -1607,7 +1664,6 @@ func (s *State) UpdateNodeFromMapRequest(id types.NodeID, req tailcfg.MapRequest
}

// Calculate route approval before NodeStore update to avoid calling View() inside callback
var autoApprovedRoutes []netip.Prefix
var hasNewRoutes bool
if hi := req.Hostinfo; hi != nil {
hasNewRoutes = len(hi.RoutableIPs) > 0
@@ -1673,7 +1729,6 @@ func (s *State) UpdateNodeFromMapRequest(id types.NodeID, req tailcfg.MapRequest
Strs("newApprovedRoutes", util.PrefixesToString(autoApprovedRoutes)).
Bool("routeChanged", routeChange).
Msg("applying route approval results")
currentNode.ApprovedRoutes = autoApprovedRoutes
}
}
})
@@ -1682,6 +1737,24 @@ func (s *State) UpdateNodeFromMapRequest(id types.NodeID, req tailcfg.MapRequest
return change.EmptySet, fmt.Errorf("node not found in NodeStore: %d", id)
}

if routeChange {
log.Debug().
Uint64("node.id", id.Uint64()).
Strs("autoApprovedRoutes", util.PrefixesToString(autoApprovedRoutes)).
Msg("Persisting auto-approved routes from MapRequest")

// SetApprovedRoutes will update both database and PrimaryRoutes table
_, c, err := s.SetApprovedRoutes(id, autoApprovedRoutes)
if err != nil {
return change.EmptySet, fmt.Errorf("persisting auto-approved routes: %w", err)
}

// If SetApprovedRoutes resulted in a policy change, return it
if !c.Empty() {
return c, nil
}
} // Continue with the rest of the processing using the updated node

nodeRouteChange := change.EmptySet

// Handle route changes after NodeStore update
@@ -1696,13 +1769,8 @@ func (s *State) UpdateNodeFromMapRequest(id types.NodeID, req tailcfg.MapRequest
routesChangedButNotApproved = true
}
}
if routeChange {
needsRouteUpdate = true
log.Debug().
Caller().
Uint64("node.id", id.Uint64()).
Msg("updating routes because approved routes changed")
} else if routesChangedButNotApproved {

if routesChangedButNotApproved {
needsRouteUpdate = true
log.Debug().
Caller().
@@ -1711,7 +1779,7 @@ func (s *State) UpdateNodeFromMapRequest(id types.NodeID, req tailcfg.MapRequest
}

if needsRouteUpdate {
// SetNodeRoutes sets the active/distributed routes, so we must use SubnetRoutes()
// SetNodeRoutes sets the active/distributed routes, so we must use AllApprovedRoutes()
// which returns only the intersection of announced AND approved routes.
// Using AnnouncedRoutes() would bypass the security model and auto-approve everything.
log.Debug().
@@ -1719,9 +1787,9 @@ func (s *State) UpdateNodeFromMapRequest(id types.NodeID, req tailcfg.MapRequest
Uint64("node.id", id.Uint64()).
Strs("announcedRoutes", util.PrefixesToString(updatedNode.AnnouncedRoutes())).
Strs("approvedRoutes", util.PrefixesToString(updatedNode.ApprovedRoutes().AsSlice())).
Strs("subnetRoutes", util.PrefixesToString(updatedNode.SubnetRoutes())).
Strs("allApprovedRoutes", util.PrefixesToString(updatedNode.AllApprovedRoutes())).
Msg("updating node routes for distribution")
nodeRouteChange = s.SetNodeRoutes(id, updatedNode.SubnetRoutes()...)
nodeRouteChange = s.SetNodeRoutes(id, updatedNode.AllApprovedRoutes()...)
}

_, policyChange, err := s.persistNodeToDB(updatedNode)
@@ -1739,25 +1807,26 @@ func (s *State) UpdateNodeFromMapRequest(id types.NodeID, req tailcfg.MapRequest
return change.NodeAdded(id), nil
}

func hostinfoEqual(oldNode types.NodeView, new *tailcfg.Hostinfo) bool {
if !oldNode.Valid() && new == nil {
func hostinfoEqual(oldNode types.NodeView, newHI *tailcfg.Hostinfo) bool {
if !oldNode.Valid() && newHI == nil {
return true
}
if !oldNode.Valid() || new == nil {

if !oldNode.Valid() || newHI == nil {
return false
}
old := oldNode.AsStruct().Hostinfo

return old.Equal(new)
return old.Equal(newHI)
}

func routesChanged(oldNode types.NodeView, new *tailcfg.Hostinfo) bool {
func routesChanged(oldNode types.NodeView, newHI *tailcfg.Hostinfo) bool {
var oldRoutes []netip.Prefix
if oldNode.Valid() && oldNode.AsStruct().Hostinfo != nil {
oldRoutes = oldNode.AsStruct().Hostinfo.RoutableIPs
}

newRoutes := new.RoutableIPs
newRoutes := newHI.RoutableIPs
if newRoutes == nil {
newRoutes = []netip.Prefix{}
}

@@ -269,11 +269,19 @@ func (node *Node) Prefixes() []netip.Prefix {
// node has any exit routes enabled.
// If none are enabled, it will return nil.
func (node *Node) ExitRoutes() []netip.Prefix {
if slices.ContainsFunc(node.SubnetRoutes(), tsaddr.IsExitRoute) {
return tsaddr.ExitRoutes()
var routes []netip.Prefix

for _, route := range node.AnnouncedRoutes() {
if tsaddr.IsExitRoute(route) && slices.Contains(node.ApprovedRoutes, route) {
routes = append(routes, route)
}
}

return nil
return routes
}

func (node *Node) IsExitNode() bool {
return len(node.ExitRoutes()) > 0
}

func (node *Node) IPsAsString() []string {
@@ -311,9 +319,16 @@ func (node *Node) CanAccess(matchers []matcher.Match, node2 *Node) bool {
return true
}

// Check if the node has access to routes that might be part of a
// smaller subnet that is served from node2 as a subnet router.
if matcher.DestsOverlapsPrefixes(node2.SubnetRoutes()...) {
return true
}

// If the dst is "the internet" and node2 is an exit node, allow access.
if matcher.DestsIsTheInternet() && node2.IsExitNode() {
return true
}
}

return false
@@ -440,16 +455,22 @@ func (node *Node) AnnouncedRoutes() []netip.Prefix {
return node.Hostinfo.RoutableIPs
}

// SubnetRoutes returns the list of routes that the node announces and are approved.
// SubnetRoutes returns the list of routes (excluding exit routes) that the node
// announces and are approved.
//
// IMPORTANT: This method is used for internal data structures and should NOT be used
// for the gRPC Proto conversion. For Proto, SubnetRoutes must be populated manually
// with PrimaryRoutes to ensure it includes only routes actively served by the node.
// See the comment in Proto() method and the implementation in grpcv1.go/nodesToProto.
// IMPORTANT: This method is used for internal data structures and should NOT be
// used for the gRPC Proto conversion. For Proto, SubnetRoutes must be populated
// manually with PrimaryRoutes to ensure it includes only routes actively served
// by the node. See the comment in Proto() method and the implementation in
// grpcv1.go/nodesToProto.
func (node *Node) SubnetRoutes() []netip.Prefix {
var routes []netip.Prefix

for _, route := range node.AnnouncedRoutes() {
if tsaddr.IsExitRoute(route) {
continue
}

if slices.Contains(node.ApprovedRoutes, route) {
routes = append(routes, route)
}
@@ -463,6 +484,11 @@ func (node *Node) IsSubnetRouter() bool {
return len(node.SubnetRoutes()) > 0
}

// AllApprovedRoutes returns the combination of SubnetRoutes and ExitRoutes
func (node *Node) AllApprovedRoutes() []netip.Prefix {
return append(node.SubnetRoutes(), node.ExitRoutes()...)
}

func (node *Node) String() string {
return node.Hostname
}
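
The relationship between the route helpers above can be shown with a small sketch (plain slices instead of the Node type; it assumes an exit route is a default route, i.e. a /0 prefix, which is what tsaddr.IsExitRoute checks):

    package sketch

    import (
        "net/netip"
        "slices"
    )

    // splitRoutes intersects announced and approved routes, then splits the
    // result into subnet routes (non-exit) and exit routes. AllApprovedRoutes
    // is simply the concatenation of the two.
    func splitRoutes(announced, approved []netip.Prefix) (subnet, exit []netip.Prefix) {
        for _, r := range announced {
            if !slices.Contains(approved, r) {
                continue
            }
            if r.Bits() == 0 {
                exit = append(exit, r)
            } else {
                subnet = append(subnet, r)
            }
        }
        return subnet, exit
    }
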
@@ -653,6 +679,7 @@ func (node Node) DebugString() string {
fmt.Fprintf(&sb, "\tApprovedRoutes: %v\n", node.ApprovedRoutes)
fmt.Fprintf(&sb, "\tAnnouncedRoutes: %v\n", node.AnnouncedRoutes())
fmt.Fprintf(&sb, "\tSubnetRoutes: %v\n", node.SubnetRoutes())
fmt.Fprintf(&sb, "\tExitRoutes: %v\n", node.ExitRoutes())
sb.WriteString("\n")

return sb.String()
@@ -678,27 +705,11 @@ func (v NodeView) InIPSet(set *netipx.IPSet) bool {
}

func (v NodeView) CanAccess(matchers []matcher.Match, node2 NodeView) bool {
if !v.Valid() || !node2.Valid() {
if !v.Valid() {
return false
}
src := v.IPs()
allowedIPs := node2.IPs()

for _, matcher := range matchers {
if !matcher.SrcsContainsIPs(src...) {
continue
}

if matcher.DestsContainsIP(allowedIPs...) {
return true
}

if matcher.DestsOverlapsPrefixes(node2.SubnetRoutes()...) {
return true
}
}

return false
return v.ж.CanAccess(matchers, node2.AsStruct())
}

func (v NodeView) CanAccessRoute(matchers []matcher.Match, route netip.Prefix) bool {
@@ -730,6 +741,13 @@ func (v NodeView) IsSubnetRouter() bool {
return v.ж.IsSubnetRouter()
}

func (v NodeView) AllApprovedRoutes() []netip.Prefix {
if !v.Valid() {
return nil
}
return v.ж.AllApprovedRoutes()
}

func (v NodeView) AppendToIPSet(build *netipx.IPSetBuilder) {
if !v.Valid() {
return
@@ -808,6 +826,13 @@ func (v NodeView) ExitRoutes() []netip.Prefix {
return v.ж.ExitRoutes()
}

func (v NodeView) IsExitNode() bool {
if !v.Valid() {
return false
}
return v.ж.IsExitNode()
}

// RequestTags returns the ACL tags that the node is requesting.
func (v NodeView) RequestTags() []string {
if !v.Valid() || !v.Hostinfo().Valid() {

@@ -1611,37 +1611,170 @@ func TestACLAutogroupTagged(t *testing.T) {
}

// Test that only devices owned by the same user can access each other and cannot access devices of other users
// Test structure:
// - user1: 2 regular nodes (tests autogroup:self for same-user access)
// - user2: 2 regular nodes (tests autogroup:self for same-user access and cross-user isolation)
// - user-router: 1 node with tag:router-node (tests that autogroup:self doesn't interfere with other rules)
func TestACLAutogroupSelf(t *testing.T) {
IntegrationSkip(t)

scenario := aclScenario(t,
&policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{ptr.To(policyv2.AutoGroupMember)},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(ptr.To(policyv2.AutoGroupSelf), tailcfg.PortRangeAny),
},
// Policy with TWO separate ACL rules:
// 1. autogroup:member -> autogroup:self (same-user access)
// 2. group:home -> tag:router-node (router access)
// This tests that autogroup:self doesn't prevent other rules from working
policy := &policyv2.Policy{
Groups: policyv2.Groups{
policyv2.Group("group:home"): []policyv2.Username{
policyv2.Username("user1@"),
policyv2.Username("user2@"),
},
},
TagOwners: policyv2.TagOwners{
policyv2.Tag("tag:router-node"): policyv2.Owners{
usernameOwner("user-router@"),
},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{ptr.To(policyv2.AutoGroupMember)},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(ptr.To(policyv2.AutoGroupSelf), tailcfg.PortRangeAny),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{groupp("group:home")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(tagp("tag:router-node"), tailcfg.PortRangeAny),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{tagp("tag:router-node")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(groupp("group:home"), tailcfg.PortRangeAny),
},
},
},
2,
)
}

// Create custom scenario: user1 and user2 with regular nodes, plus user-router with tagged node
spec := ScenarioSpec{
NodesPerUser: 2,
Users: []string{"user1", "user2"},
}

scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

err := scenario.WaitForTailscaleSyncWithPeerCount(1, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{
tsic.WithNetfilter("off"),
tsic.WithDockerEntrypoint([]string{
"/bin/sh",
"-c",
"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
}),
tsic.WithDockerWorkdir("/"),
},
hsic.WithACLPolicy(policy),
hsic.WithTestName("acl-autogroup-self"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
)
require.NoError(t, err)

// Add router node for user-router (single shared router node)
networks := scenario.Networks()
var network *dockertest.Network
if len(networks) > 0 {
network = networks[0]
}

headscale, err := scenario.Headscale()
require.NoError(t, err)

routerUser, err := scenario.CreateUser("user-router")
require.NoError(t, err)

authKey, err := scenario.CreatePreAuthKey(routerUser.GetId(), true, false)
require.NoError(t, err)

// Create router node (tagged with tag:router-node)
routerClient, err := tsic.New(
scenario.Pool(),
"unstable",
tsic.WithCACert(headscale.GetCert()),
tsic.WithHeadscaleName(headscale.GetHostname()),
tsic.WithNetwork(network),
tsic.WithTags([]string{"tag:router-node"}),
tsic.WithNetfilter("off"),
tsic.WithDockerEntrypoint([]string{
"/bin/sh",
"-c",
"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
}),
tsic.WithDockerWorkdir("/"),
)
require.NoError(t, err)

err = routerClient.WaitForNeedsLogin(integrationutil.PeerSyncTimeout())
require.NoError(t, err)

err = routerClient.Login(headscale.GetEndpoint(), authKey.GetKey())
require.NoError(t, err)

err = routerClient.WaitForRunning(integrationutil.PeerSyncTimeout())
require.NoError(t, err)

userRouterObj := scenario.GetOrCreateUser("user-router")
userRouterObj.Clients[routerClient.Hostname()] = routerClient

user1Clients, err := scenario.GetClients("user1")
require.NoError(t, err)

user2Clients, err := scenario.GetClients("user2")
require.NoError(t, err)

// Test that user1's devices can access each other
var user1Regular, user2Regular []TailscaleClient
for _, client := range user1Clients {
for _, peer := range user1Clients {
status, err := client.Status()
require.NoError(t, err)
if status.Self != nil && (status.Self.Tags == nil || status.Self.Tags.Len() == 0) {
user1Regular = append(user1Regular, client)
}
}
for _, client := range user2Clients {
status, err := client.Status()
require.NoError(t, err)
if status.Self != nil && (status.Self.Tags == nil || status.Self.Tags.Len() == 0) {
user2Regular = append(user2Regular, client)
}
}

require.NotEmpty(t, user1Regular, "user1 should have regular (untagged) devices")
require.NotEmpty(t, user2Regular, "user2 should have regular (untagged) devices")
require.NotNil(t, routerClient, "router node should exist")

// Wait for all nodes to sync with their expected peer counts
// With our ACL policy:
// - Regular nodes (user1/user2): 1 same-user regular peer + 1 router-node = 2 peers
// - Router node: 2 user1 regular + 2 user2 regular = 4 peers
for _, client := range user1Regular {
err := client.WaitForPeers(2, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
require.NoError(t, err, "user1 regular device %s should see 2 peers (1 same-user peer + 1 router)", client.Hostname())
}
for _, client := range user2Regular {
err := client.WaitForPeers(2, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
require.NoError(t, err, "user2 regular device %s should see 2 peers (1 same-user peer + 1 router)", client.Hostname())
}
err = routerClient.WaitForPeers(4, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
require.NoError(t, err, "router should see 4 peers (all group:home regular nodes)")

// Test that user1's regular devices can access each other
for _, client := range user1Regular {
for _, peer := range user1Regular {
if client.Hostname() == peer.Hostname() {
continue
}
@@ -1656,13 +1789,13 @@ func TestACLAutogroupSelf(t *testing.T) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 10*time.Second, 200*time.Millisecond, "user1 device should reach other user1 device")
}, 10*time.Second, 200*time.Millisecond, "user1 device should reach other user1 device via autogroup:self")
}
}

// Test that user2's devices can access each other
for _, client := range user2Clients {
for _, peer := range user2Clients {
// Test that user2's regular devices can access each other
for _, client := range user2Regular {
for _, peer := range user2Regular {
if client.Hostname() == peer.Hostname() {
continue
}
@@ -1677,36 +1810,64 @@ func TestACLAutogroupSelf(t *testing.T) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 10*time.Second, 200*time.Millisecond, "user2 device should reach other user2 device")
}, 10*time.Second, 200*time.Millisecond, "user2 device should reach other user2 device via autogroup:self")
}
}

// Test that devices from different users cannot access each other
for _, client := range user1Clients {
for _, peer := range user2Clients {
// Test that user1's regular devices can access router-node
for _, client := range user1Regular {
fqdn, err := routerClient.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s (user1) to %s (router-node) - should SUCCEED", client.Hostname(), fqdn)

assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.NotEmpty(c, result, "user1 should be able to access router-node via group:home -> tag:router-node rule")
}, 10*time.Second, 200*time.Millisecond, "user1 device should reach router-node (proves autogroup:self doesn't interfere)")
}

// Test that user2's regular devices can access router-node
for _, client := range user2Regular {
fqdn, err := routerClient.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s (user2) to %s (router-node) - should SUCCEED", client.Hostname(), fqdn)

assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.NotEmpty(c, result, "user2 should be able to access router-node via group:home -> tag:router-node rule")
}, 10*time.Second, 200*time.Millisecond, "user2 device should reach router-node (proves autogroup:self doesn't interfere)")
}

// Test that devices from different users cannot access each other's regular devices
for _, client := range user1Regular {
for _, peer := range user2Regular {
fqdn, err := peer.FQDN()
require.NoError(t, err)

url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s (user1) to %s (user2) - should FAIL", client.Hostname(), fqdn)
t.Logf("url from %s (user1) to %s (user2 regular) - should FAIL", client.Hostname(), fqdn)

result, err := client.Curl(url)
assert.Empty(t, result, "user1 should not be able to access user2's devices with autogroup:self")
assert.Error(t, err, "connection from user1 to user2 should fail")
assert.Empty(t, result, "user1 should not be able to access user2's regular devices (autogroup:self isolation)")
assert.Error(t, err, "connection from user1 to user2 regular device should fail")
}
}

for _, client := range user2Clients {
for _, peer := range user1Clients {
for _, client := range user2Regular {
for _, peer := range user1Regular {
fqdn, err := peer.FQDN()
require.NoError(t, err)

url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s (user2) to %s (user1) - should FAIL", client.Hostname(), fqdn)
t.Logf("url from %s (user2) to %s (user1 regular) - should FAIL", client.Hostname(), fqdn)

result, err := client.Curl(url)
assert.Empty(t, result, "user2 should not be able to access user1's devices with autogroup:self")
assert.Error(t, err, "connection from user2 to user1 should fail")
assert.Empty(t, result, "user2 should not be able to access user1's regular devices (autogroup:self isolation)")
assert.Error(t, err, "connection from user2 to user1 regular device should fail")
}
}
}

@@ -9,12 +9,15 @@ import (
"time"

v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
)

func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
@@ -223,6 +226,7 @@ func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {

scenario, err := NewScenario(spec)
require.NoError(t, err)

defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv([]tsic.Option{},
@@ -454,3 +458,267 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
})
}
}

// TestAuthKeyDeleteKey tests Issue #2830: node with deleted auth key should still reconnect.
// Scenario from user report: "create node, delete the auth key, restart to validate it can connect"
// Steps:
// 1. Create node with auth key
// 2. DELETE the auth key from database (completely remove it)
// 3. Restart node - should successfully reconnect using MachineKey identity
func TestAuthKeyDeleteKey(t *testing.T) {
IntegrationSkip(t)

// Create scenario with NO nodes - we'll create the node manually so we can capture the auth key
scenario, err := NewScenario(ScenarioSpec{
NodesPerUser: 0, // No nodes created automatically
Users: []string{"user1"},
})
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("delkey"), hsic.WithTLS(), hsic.WithDERPAsIP())
requireNoErrHeadscaleEnv(t, err)

headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)

// Get the user
userMap, err := headscale.MapUsers()
require.NoError(t, err)
userID := userMap["user1"].GetId()

// Create a pre-auth key - we keep the full key string before it gets redacted
authKey, err := scenario.CreatePreAuthKey(userID, false, false)
require.NoError(t, err)
authKeyString := authKey.GetKey()
authKeyID := authKey.GetId()
t.Logf("Created pre-auth key ID %d: %s", authKeyID, authKeyString)

// Create a tailscale client and log it in with the auth key
client, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
)
require.NoError(t, err)

err = client.Login(headscale.GetEndpoint(), authKeyString)
require.NoError(t, err)

// Wait for the node to be registered
var user1Nodes []*v1.Node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
var err error
user1Nodes, err = headscale.ListNodes("user1")
assert.NoError(c, err)
assert.Len(c, user1Nodes, 1)
}, 30*time.Second, 500*time.Millisecond, "waiting for node to be registered")

nodeID := user1Nodes[0].GetId()
nodeName := user1Nodes[0].GetName()
t.Logf("Node %d (%s) created successfully with auth_key_id=%d", nodeID, nodeName, authKeyID)

// Verify node is online
requireAllClientsOnline(t, headscale, []types.NodeID{types.NodeID(nodeID)}, true, "node should be online initially", 120*time.Second)

// DELETE the pre-auth key using the API
t.Logf("Deleting pre-auth key ID %d using API", authKeyID)
err = headscale.DeleteAuthKey(userID, authKeyString)
require.NoError(t, err)
t.Logf("Successfully deleted auth key")

// Simulate node restart (down + up)
t.Logf("Restarting node after deleting its auth key")
err = client.Down()
require.NoError(t, err)

time.Sleep(3 * time.Second)

err = client.Up()
require.NoError(t, err)

// Verify node comes back online
// This will FAIL without the fix because auth key validation will reject deleted key
// With the fix, MachineKey identity allows reconnection even with deleted key
requireAllClientsOnline(t, headscale, []types.NodeID{types.NodeID(nodeID)}, true, "node should reconnect after restart despite deleted key", 120*time.Second)

t.Logf("✓ Node successfully reconnected after its auth key was deleted")
}

// TestAuthKeyLogoutAndReloginRoutesPreserved tests that routes remain serving
// after a node logs out and re-authenticates with the same user.
//
// This test validates the fix for issue #2896:
// https://github.com/juanfont/headscale/issues/2896
//
// Bug: When a node with already-approved routes restarts/re-authenticates,
// the routes show as "Approved" and "Available" but NOT "Serving" (Primary).
// A headscale restart would fix it, indicating a state management issue.
//
// The test scenario:
// 1. Node registers with auth key and advertises routes
// 2. Routes are auto-approved and verified as serving
// 3. Node logs out
// 4. Node re-authenticates with same auth key
// 5. Routes should STILL be serving (this is where the bug manifests)
func TestAuthKeyLogoutAndReloginRoutesPreserved(t *testing.T) {
IntegrationSkip(t)

user := "routeuser"
advertiseRoute := "10.55.0.0/24"

spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{user},
}

scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv(
[]tsic.Option{
tsic.WithAcceptRoutes(),
// Advertise route on initial login
tsic.WithExtraLoginArgs([]string{"--advertise-routes=" + advertiseRoute}),
},
hsic.WithTestName("routelogout"),
hsic.WithTLS(),
hsic.WithACLPolicy(
&policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{policyv2.Wildcard},
Destinations: []policyv2.AliasWithPorts{{Alias: policyv2.Wildcard, Ports: []tailcfg.PortRange{tailcfg.PortRangeAny}}},
},
},
AutoApprovers: policyv2.AutoApproverPolicy{
Routes: map[netip.Prefix]policyv2.AutoApprovers{
netip.MustParsePrefix(advertiseRoute): {ptr.To(policyv2.Username(user + "@test.no"))},
},
},
},
),
)
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
require.Len(t, allClients, 1)

client := allClients[0]

err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)

headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)

// Step 1: Verify initial route is advertised, approved, and SERVING
t.Logf("Step 1: Verifying initial route is advertised, approved, and SERVING at %s", time.Now().Format(TimestampFormat))

var initialNode *v1.Node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, nodes, 1, "Should have exactly 1 node")

if len(nodes) == 1 {
initialNode = nodes[0]
// Check: 1 announced, 1 approved, 1 serving (subnet route)
assert.Lenf(c, initialNode.GetAvailableRoutes(), 1,
"Node should have 1 available route, got %v", initialNode.GetAvailableRoutes())
assert.Lenf(c, initialNode.GetApprovedRoutes(), 1,
"Node should have 1 approved route, got %v", initialNode.GetApprovedRoutes())
assert.Lenf(c, initialNode.GetSubnetRoutes(), 1,
"Node should have 1 serving (subnet) route, got %v - THIS IS THE BUG if empty", initialNode.GetSubnetRoutes())
assert.Contains(c, initialNode.GetSubnetRoutes(), advertiseRoute,
"Subnet routes should contain %s", advertiseRoute)
}
}, 30*time.Second, 500*time.Millisecond, "initial route should be serving")

require.NotNil(t, initialNode, "Initial node should be found")
initialNodeID := initialNode.GetId()
t.Logf("Initial node ID: %d, Available: %v, Approved: %v, Serving: %v",
initialNodeID, initialNode.GetAvailableRoutes(), initialNode.GetApprovedRoutes(), initialNode.GetSubnetRoutes())

// Step 2: Logout
t.Logf("Step 2: Logging out at %s", time.Now().Format(TimestampFormat))

err = client.Logout()
require.NoError(t, err)

// Wait for logout to complete
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err)
assert.Equal(ct, "NeedsLogin", status.BackendState, "Expected NeedsLogin state after logout")
}, 30*time.Second, 1*time.Second, "waiting for logout to complete")

t.Logf("Logout completed, node should still exist in database")

// Verify node still exists (routes should still be in DB)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, nodes, 1, "Node should persist in database after logout")
}, 10*time.Second, 500*time.Millisecond, "node should persist after logout")

// Step 3: Re-authenticate with the SAME user (using auth key)
t.Logf("Step 3: Re-authenticating with same user at %s", time.Now().Format(TimestampFormat))

userMap, err := headscale.MapUsers()
require.NoError(t, err)

key, err := scenario.CreatePreAuthKey(userMap[user].GetId(), true, false)
require.NoError(t, err)

// Re-login - the container already has extraLoginArgs with --advertise-routes
// from the initial setup, so routes will be advertised on re-login
err = scenario.RunTailscaleUp(user, headscale.GetEndpoint(), key.GetKey())
require.NoError(t, err)

// Wait for client to be running
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err)
assert.Equal(ct, "Running", status.BackendState, "Expected Running state after relogin")
}, 30*time.Second, 1*time.Second, "waiting for relogin to complete")

t.Logf("Re-authentication completed at %s", time.Now().Format(TimestampFormat))

// Step 4: THE CRITICAL TEST - Verify routes are STILL SERVING after re-authentication
t.Logf("Step 4: Verifying routes are STILL SERVING after re-authentication at %s", time.Now().Format(TimestampFormat))

assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, nodes, 1, "Should still have exactly 1 node after relogin")

if len(nodes) == 1 {
node := nodes[0]
t.Logf("After relogin - Available: %v, Approved: %v, Serving: %v",
node.GetAvailableRoutes(), node.GetApprovedRoutes(), node.GetSubnetRoutes())

// This is where issue #2896 manifests:
// - Available shows the route (from Hostinfo.RoutableIPs)
// - Approved shows the route (from ApprovedRoutes)
// - BUT Serving (SubnetRoutes/PrimaryRoutes) is EMPTY!
assert.Lenf(c, node.GetAvailableRoutes(), 1,
"Node should have 1 available route after relogin, got %v", node.GetAvailableRoutes())
assert.Lenf(c, node.GetApprovedRoutes(), 1,
"Node should have 1 approved route after relogin, got %v", node.GetApprovedRoutes())
assert.Lenf(c, node.GetSubnetRoutes(), 1,
"BUG #2896: Node should have 1 SERVING route after relogin, got %v", node.GetSubnetRoutes())
assert.Contains(c, node.GetSubnetRoutes(), advertiseRoute,
"BUG #2896: Subnet routes should contain %s after relogin", advertiseRoute)

// Also verify node ID was preserved (same node, not new registration)
assert.Equal(c, initialNodeID, node.GetId(),
"Node ID should be preserved after same-user relogin")
}
}, 30*time.Second, 500*time.Millisecond,
"BUG #2896: routes should remain SERVING after logout/relogin with same user")

t.Logf("Test completed - verifying issue #2896 fix")
}

@@ -12,6 +12,7 @@ import (
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
@@ -19,6 +20,8 @@ import (
|
||||
"github.com/samber/lo"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
func TestOIDCAuthenticationPingAll(t *testing.T) {
|
||||
@@ -953,6 +956,119 @@ func TestOIDCFollowUpUrl(t *testing.T) {
|
||||
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list after OIDC login")
|
||||
}
|
||||
|
||||
// TestOIDCMultipleOpenedLoginUrls tests the scenario:
// - the client (typically on Windows) opens multiple browser tabs, each with a different login URL
// - the client completes authentication in the first opened browser tab
//
// This test makes sure that the cookies issued for the first browser tab are still valid.
|
||||
func TestOIDCMultipleOpenedLoginUrls(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
scenario, err := NewScenario(
|
||||
ScenarioSpec{
|
||||
OIDCUsers: []mockoidc.MockUser{
|
||||
oidcMockUser("user1", true),
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
oidcMap := map[string]string{
|
||||
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
|
||||
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
|
||||
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
|
||||
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnvWithLoginURL(
|
||||
nil,
|
||||
hsic.WithTestName("oidcauthrelog"),
|
||||
hsic.WithConfigEnv(oidcMap),
|
||||
hsic.WithTLS(),
|
||||
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
|
||||
hsic.WithEmbeddedDERPServerOnly(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
|
||||
listUsers, err := headscale.ListUsers()
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, listUsers)
|
||||
|
||||
ts, err := scenario.CreateTailscaleNode(
|
||||
"unstable",
|
||||
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
u1, err := ts.LoginWithURL(headscale.GetEndpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
u2, err := ts.LoginWithURL(headscale.GetEndpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// make sure login URLs are different
|
||||
require.NotEqual(t, u1.String(), u2.String())
|
||||
|
||||
loginClient, err := newLoginHTTPClient(ts.Hostname())
|
||||
require.NoError(t, err)
|
||||
|
||||
// open the first login URL "in browser"
|
||||
_, redirect1, err := doLoginURLWithClient(ts.Hostname(), u1, loginClient, false)
|
||||
require.NoError(t, err)
|
||||
// open the second login URL "in browser"
|
||||
_, redirect2, err := doLoginURLWithClient(ts.Hostname(), u2, loginClient, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// two valid redirects with different state/nonce params
|
||||
require.NotEqual(t, redirect1.String(), redirect2.String())
|
||||
|
||||
// complete auth with the first opened "browser tab"
|
||||
_, redirect1, err = doLoginURLWithClient(ts.Hostname(), redirect1, loginClient, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
listUsers, err = headscale.ListUsers()
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, listUsers, 1)
|
||||
|
||||
wantUsers := []*v1.User{
|
||||
{
|
||||
Id: 1,
|
||||
Name: "user1",
|
||||
Email: "user1@headscale.net",
|
||||
Provider: "oidc",
|
||||
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
|
||||
},
|
||||
}
|
||||
|
||||
sort.Slice(
|
||||
listUsers, func(i, j int) bool {
|
||||
return listUsers[i].GetId() < listUsers[j].GetId()
|
||||
},
|
||||
)
|
||||
|
||||
if diff := cmp.Diff(
|
||||
wantUsers,
|
||||
listUsers,
|
||||
cmpopts.IgnoreUnexported(v1.User{}),
|
||||
cmpopts.IgnoreFields(v1.User{}, "CreatedAt"),
|
||||
); diff != "" {
|
||||
t.Fatalf("unexpected users: %s", diff)
|
||||
}
|
||||
|
||||
assert.EventuallyWithT(
|
||||
t, func(c *assert.CollectT) {
|
||||
listNodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, listNodes, 1)
|
||||
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list after OIDC login",
|
||||
)
|
||||
}
|
||||
|
||||
// TestOIDCReloginSameNodeSameUser tests the scenario where a single Tailscale client
|
||||
// authenticates using OIDC (OpenID Connect), logs out, and then logs back in as the same user.
|
||||
//
|
||||
@@ -1181,3 +1297,618 @@ func TestOIDCReloginSameNodeSameUser(t *testing.T) {
|
||||
}
|
||||
}, 60*time.Second, 2*time.Second, "validating user1 node is online after same-user OIDC relogin")
|
||||
}
|
||||
|
||||
// TestOIDCExpiryAfterRestart validates that node expiry is preserved
|
||||
// when a tailscaled client restarts and reconnects to headscale.
|
||||
//
|
||||
// This test reproduces the bug reported in https://github.com/juanfont/headscale/issues/2862
|
||||
// where OIDC expiry was reset to 0001-01-01 00:00:00 after tailscaled restart.
|
||||
//
|
||||
// Test flow:
|
||||
// 1. Node logs in with OIDC (gets 72h expiry)
|
||||
// 2. Verify expiry is set correctly in headscale
|
||||
// 3. Restart tailscaled container (simulates daemon restart)
|
||||
// 4. Wait for reconnection
|
||||
// 5. Verify expiry is still set correctly (not zero).
|
||||
func TestOIDCExpiryAfterRestart(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
scenario, err := NewScenario(ScenarioSpec{
|
||||
OIDCUsers: []mockoidc.MockUser{
|
||||
oidcMockUser("user1", true),
|
||||
},
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
oidcMap := map[string]string{
|
||||
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
|
||||
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
|
||||
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
|
||||
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
|
||||
"HEADSCALE_OIDC_EXPIRY": "72h",
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnvWithLoginURL(
|
||||
nil,
|
||||
hsic.WithTestName("oidcexpiry"),
|
||||
hsic.WithConfigEnv(oidcMap),
|
||||
hsic.WithTLS(),
|
||||
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
|
||||
hsic.WithEmbeddedDERPServerOnly(),
|
||||
hsic.WithDERPAsIP(),
|
||||
)
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create and login tailscale client
|
||||
ts, err := scenario.CreateTailscaleNode("unstable", tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))
|
||||
require.NoError(t, err)
|
||||
|
||||
u, err := ts.LoginWithURL(headscale.GetEndpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = doLoginURL(ts.Hostname(), u)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Logf("Validating initial login and expiry at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
// Verify initial expiry is set
|
||||
var initialExpiry time.Time
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(ct, err)
|
||||
assert.Len(ct, nodes, 1)
|
||||
|
||||
node := nodes[0]
|
||||
assert.NotNil(ct, node.GetExpiry(), "Expiry should be set after OIDC login")
|
||||
|
||||
if node.GetExpiry() != nil {
|
||||
expiryTime := node.GetExpiry().AsTime()
|
||||
assert.False(ct, expiryTime.IsZero(), "Expiry should not be zero time")
|
||||
|
||||
initialExpiry = expiryTime
|
||||
t.Logf("Initial expiry set to: %v (expires in %v)", expiryTime, time.Until(expiryTime))
|
||||
}
|
||||
}, 30*time.Second, 1*time.Second, "validating initial expiry after OIDC login")
|
||||
|
||||
// Now restart the tailscaled container
|
||||
t.Logf("Restarting tailscaled container at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
err = ts.Restart()
|
||||
require.NoError(t, err, "Failed to restart tailscaled container")
|
||||
|
||||
t.Logf("Tailscaled restarted, waiting for reconnection at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
// Wait for the node to come back online
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
status, err := ts.Status()
|
||||
if !assert.NoError(ct, err) {
|
||||
return
|
||||
}
|
||||
|
||||
if !assert.NotNil(ct, status) {
|
||||
return
|
||||
}
|
||||
|
||||
assert.Equal(ct, "Running", status.BackendState)
|
||||
}, 60*time.Second, 2*time.Second, "waiting for tailscale to reconnect after restart")
|
||||
|
||||
// THE CRITICAL TEST: Verify expiry is still set correctly after restart
|
||||
t.Logf("Validating expiry preservation after restart at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(ct, err)
|
||||
assert.Len(ct, nodes, 1, "Should still have exactly 1 node after restart")
|
||||
|
||||
node := nodes[0]
|
||||
assert.NotNil(ct, node.GetExpiry(), "Expiry should NOT be nil after restart")
|
||||
|
||||
if node.GetExpiry() != nil {
|
||||
expiryTime := node.GetExpiry().AsTime()
|
||||
|
||||
// This is the bug check - expiry should NOT be zero time
|
||||
assert.False(ct, expiryTime.IsZero(),
|
||||
"BUG: Expiry was reset to zero time after tailscaled restart! This is issue #2862")
|
||||
|
||||
// Expiry should be exactly the same as before restart
|
||||
assert.Equal(ct, initialExpiry, expiryTime,
|
||||
"Expiry should be exactly the same after restart, got %v, expected %v",
|
||||
expiryTime, initialExpiry)
|
||||
|
||||
t.Logf("SUCCESS: Expiry preserved after restart: %v (expires in %v)",
|
||||
expiryTime, time.Until(expiryTime))
|
||||
}
|
||||
}, 30*time.Second, 1*time.Second, "validating expiry preservation after restart")
|
||||
}
|
||||
|
||||
// TestOIDCACLPolicyOnJoin validates that ACL policies are correctly applied
|
||||
// to newly joined OIDC nodes without requiring a client restart.
|
||||
//
|
||||
// This test validates the fix for issue #2888:
|
||||
// https://github.com/juanfont/headscale/issues/2888
|
||||
//
|
||||
// Bug: Nodes joining via OIDC authentication did not get the appropriate ACL
|
||||
// policy applied until they restarted their client. This was a regression
|
||||
// introduced in v0.27.0.
|
||||
//
|
||||
// The test scenario:
|
||||
// 1. Creates a CLI user (gateway) with a node advertising a route
|
||||
// 2. Sets up ACL policy allowing all nodes to access advertised routes
|
||||
// 3. OIDC user authenticates and joins with a new node
|
||||
// 4. Verifies that the OIDC user's node IMMEDIATELY sees the advertised route
|
||||
//
|
||||
// Expected behavior:
|
||||
// - Without fix: OIDC node cannot see the route (PrimaryRoutes is nil/empty)
|
||||
// - With fix: OIDC node immediately sees the route in PrimaryRoutes
|
||||
//
|
||||
// Root cause: The buggy code called a.h.Change(c) immediately after user
|
||||
// creation but BEFORE node registration completed, creating a race condition
|
||||
// where policy change notifications were sent asynchronously before the node
|
||||
// was fully registered.
|
||||
func TestOIDCACLPolicyOnJoin(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
gatewayUser := "gateway"
|
||||
oidcUser := "oidcuser"
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: 1,
|
||||
Users: []string{gatewayUser},
|
||||
OIDCUsers: []mockoidc.MockUser{
|
||||
oidcMockUser(oidcUser, true),
|
||||
},
|
||||
}
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
oidcMap := map[string]string{
|
||||
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
|
||||
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
|
||||
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
|
||||
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
|
||||
}
|
||||
|
||||
// Create headscale environment with ACL policy that allows OIDC user
|
||||
// to access routes advertised by gateway user
|
||||
err = scenario.CreateHeadscaleEnvWithLoginURL(
|
||||
[]tsic.Option{
|
||||
tsic.WithAcceptRoutes(),
|
||||
},
|
||||
hsic.WithTestName("oidcaclpolicy"),
|
||||
hsic.WithConfigEnv(oidcMap),
|
||||
hsic.WithTLS(),
|
||||
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
|
||||
hsic.WithACLPolicy(
|
||||
&policyv2.Policy{
|
||||
ACLs: []policyv2.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []policyv2.Alias{prefixp("100.64.0.0/10")},
|
||||
Destinations: []policyv2.AliasWithPorts{
|
||||
aliasWithPorts(prefixp("100.64.0.0/10"), tailcfg.PortRangeAny),
|
||||
aliasWithPorts(prefixp("10.33.0.0/24"), tailcfg.PortRangeAny),
|
||||
aliasWithPorts(prefixp("10.44.0.0/24"), tailcfg.PortRangeAny),
|
||||
},
|
||||
},
|
||||
},
|
||||
AutoApprovers: policyv2.AutoApproverPolicy{
|
||||
Routes: map[netip.Prefix]policyv2.AutoApprovers{
|
||||
netip.MustParsePrefix("10.33.0.0/24"): {usernameApprover("gateway@test.no"), usernameApprover("oidcuser@headscale.net"), usernameApprover("jane.doe@example.com")},
|
||||
netip.MustParsePrefix("10.44.0.0/24"): {usernameApprover("gateway@test.no"), usernameApprover("oidcuser@headscale.net"), usernameApprover("jane.doe@example.com")},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
)
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Get the gateway client (CLI user) - only one client at first
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
requireNoErrListClients(t, err)
|
||||
require.Len(t, allClients, 1, "Should have exactly 1 client (gateway) before OIDC login")
|
||||
|
||||
gatewayClient := allClients[0]
|
||||
|
||||
// Wait for initial sync (gateway logs in)
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
requireNoErrSync(t, err)
|
||||
|
||||
// Gateway advertises route 10.33.0.0/24
|
||||
advertiseRoute := "10.33.0.0/24"
|
||||
command := []string{
|
||||
"tailscale",
|
||||
"set",
|
||||
"--advertise-routes=" + advertiseRoute,
|
||||
}
|
||||
_, _, err = gatewayClient.Execute(command)
|
||||
require.NoErrorf(t, err, "failed to advertise route: %s", err)
|
||||
|
||||
// Wait for route advertisement to propagate
|
||||
var gatewayNodeID uint64
|
||||
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(ct, err)
|
||||
assert.Len(ct, nodes, 1)
|
||||
|
||||
gatewayNode := nodes[0]
|
||||
gatewayNodeID = gatewayNode.GetId()
|
||||
assert.Len(ct, gatewayNode.GetAvailableRoutes(), 1)
|
||||
assert.Contains(ct, gatewayNode.GetAvailableRoutes(), advertiseRoute)
|
||||
}, 10*time.Second, 500*time.Millisecond, "route advertisement should propagate to headscale")
|
||||
|
||||
// Approve the advertised route
|
||||
_, err = headscale.ApproveRoutes(
|
||||
gatewayNodeID,
|
||||
[]netip.Prefix{netip.MustParsePrefix(advertiseRoute)},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for route approval to propagate
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(ct, err)
|
||||
assert.Len(ct, nodes, 1)
|
||||
|
||||
gatewayNode := nodes[0]
|
||||
assert.Len(ct, gatewayNode.GetApprovedRoutes(), 1)
|
||||
assert.Contains(ct, gatewayNode.GetApprovedRoutes(), advertiseRoute)
|
||||
}, 10*time.Second, 500*time.Millisecond, "route approval should propagate to headscale")
|
||||
|
||||
// NOW create the OIDC user by having them join
|
||||
// This is where issue #2888 manifests - the new OIDC node should immediately
|
||||
// see the gateway's advertised route
|
||||
t.Logf("OIDC user joining at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
// Create OIDC user's tailscale node
|
||||
oidcAdvertiseRoute := "10.44.0.0/24"
|
||||
oidcClient, err := scenario.CreateTailscaleNode(
|
||||
"head",
|
||||
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
|
||||
tsic.WithAcceptRoutes(),
|
||||
tsic.WithExtraLoginArgs([]string{"--advertise-routes=" + oidcAdvertiseRoute}),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// OIDC login happens automatically via LoginWithURL
|
||||
loginURL, err := oidcClient.LoginWithURL(headscale.GetEndpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = doLoginURL(oidcClient.Hostname(), loginURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Logf("OIDC user logged in successfully at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
// THE CRITICAL TEST: Verify that the OIDC user's node can IMMEDIATELY
|
||||
// see the gateway's advertised route WITHOUT needing a client restart.
|
||||
//
|
||||
// This is where the bug manifests:
|
||||
// - Without fix: PrimaryRoutes will be nil/empty
|
||||
// - With fix: PrimaryRoutes immediately contains the advertised route
|
||||
t.Logf("Verifying OIDC user can immediately see advertised routes at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
status, err := oidcClient.Status()
|
||||
assert.NoError(ct, err)
|
||||
|
||||
// Find the gateway peer in the OIDC user's peer list
|
||||
var gatewayPeer *ipnstate.PeerStatus
|
||||
|
||||
for _, peerKey := range status.Peers() {
|
||||
peer := status.Peer[peerKey]
|
||||
// Gateway is the peer that's not the OIDC user
|
||||
if peer.UserID != status.Self.UserID {
|
||||
gatewayPeer = peer
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
assert.NotNil(ct, gatewayPeer, "OIDC user should see gateway as peer")
|
||||
|
||||
if gatewayPeer != nil {
|
||||
// This is the critical assertion - PrimaryRoutes should NOT be nil
|
||||
assert.NotNil(ct, gatewayPeer.PrimaryRoutes,
|
||||
"BUG #2888: Gateway peer PrimaryRoutes is nil - ACL policy not applied to new OIDC node!")
|
||||
|
||||
if gatewayPeer.PrimaryRoutes != nil {
|
||||
routes := gatewayPeer.PrimaryRoutes.AsSlice()
|
||||
assert.Contains(ct, routes, netip.MustParsePrefix(advertiseRoute),
|
||||
"OIDC user should immediately see gateway's advertised route %s in PrimaryRoutes", advertiseRoute)
|
||||
t.Logf("SUCCESS: OIDC user can see advertised route %s in gateway's PrimaryRoutes", advertiseRoute)
|
||||
}
|
||||
|
||||
// Also verify AllowedIPs includes the route
|
||||
if gatewayPeer.AllowedIPs != nil && gatewayPeer.AllowedIPs.Len() > 0 {
|
||||
allowedIPs := gatewayPeer.AllowedIPs.AsSlice()
|
||||
t.Logf("Gateway peer AllowedIPs: %v", allowedIPs)
|
||||
}
|
||||
}
|
||||
}, 15*time.Second, 500*time.Millisecond,
|
||||
"OIDC user should immediately see gateway's advertised route without client restart (issue #2888)")
|
||||
|
||||
// Verify that the Gateway node sees the OIDC node's advertised route (AutoApproveRoutes check)
|
||||
t.Logf("Verifying Gateway user can immediately see OIDC advertised routes at %s", time.Now().Format(TimestampFormat))
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
status, err := gatewayClient.Status()
|
||||
assert.NoError(ct, err)
|
||||
|
||||
// Find the OIDC peer in the Gateway user's peer list
|
||||
var oidcPeer *ipnstate.PeerStatus
|
||||
|
||||
for _, peerKey := range status.Peers() {
|
||||
peer := status.Peer[peerKey]
|
||||
if peer.UserID != status.Self.UserID {
|
||||
oidcPeer = peer
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
assert.NotNil(ct, oidcPeer, "Gateway user should see OIDC user as peer")
|
||||
|
||||
if oidcPeer != nil {
|
||||
assert.NotNil(ct, oidcPeer.PrimaryRoutes,
|
||||
"BUG: OIDC peer PrimaryRoutes is nil - AutoApproveRoutes failed or overwritten!")
|
||||
|
||||
if oidcPeer.PrimaryRoutes != nil {
|
||||
routes := oidcPeer.PrimaryRoutes.AsSlice()
|
||||
assert.Contains(ct, routes, netip.MustParsePrefix(oidcAdvertiseRoute),
|
||||
"Gateway user should immediately see OIDC's advertised route %s in PrimaryRoutes", oidcAdvertiseRoute)
|
||||
}
|
||||
}
|
||||
}, 15*time.Second, 500*time.Millisecond,
|
||||
"Gateway user should immediately see OIDC's advertised route (AutoApproveRoutes check)")
|
||||
|
||||
// Additional validation: Verify nodes in headscale match expectations
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(ct, err)
|
||||
assert.Len(ct, nodes, 2, "Should have 2 nodes (gateway + oidcuser)")
|
||||
|
||||
// Verify OIDC user was created correctly
|
||||
users, err := headscale.ListUsers()
|
||||
assert.NoError(ct, err)
|
||||
// Note: mockoidc may create additional default users (like jane.doe)
|
||||
// so we check for at least 2 users, not exactly 2
|
||||
assert.GreaterOrEqual(ct, len(users), 2, "Should have at least 2 users (gateway CLI user + oidcuser)")
|
||||
|
||||
// Find gateway CLI user
|
||||
var gatewayUser *v1.User
|
||||
|
||||
for _, user := range users {
|
||||
if user.GetName() == "gateway" && user.GetProvider() == "" {
|
||||
gatewayUser = user
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
assert.NotNil(ct, gatewayUser, "Should have gateway CLI user")
|
||||
|
||||
if gatewayUser != nil {
|
||||
assert.Equal(ct, "gateway", gatewayUser.GetName())
|
||||
}
|
||||
|
||||
// Find OIDC user
|
||||
var oidcUserFound *v1.User
|
||||
|
||||
for _, user := range users {
|
||||
if user.GetName() == "oidcuser" && user.GetProvider() == "oidc" {
|
||||
oidcUserFound = user
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
assert.NotNil(ct, oidcUserFound, "Should have OIDC user")
|
||||
|
||||
if oidcUserFound != nil {
|
||||
assert.Equal(ct, "oidcuser", oidcUserFound.GetName())
|
||||
assert.Equal(ct, "oidcuser@headscale.net", oidcUserFound.GetEmail())
|
||||
}
|
||||
}, 10*time.Second, 500*time.Millisecond, "headscale should have correct users and nodes")
|
||||
|
||||
t.Logf("Test completed successfully - issue #2888 fix validated")
|
||||
}
|
||||
|
||||
// TestOIDCReloginSameUserRoutesPreserved tests the scenario where:
|
||||
// - A node logs in via OIDC and advertises routes
|
||||
// - Routes are auto-approved and verified as SERVING
|
||||
// - The node logs out
|
||||
// - The node logs back in as the same user
|
||||
// - Routes should STILL be SERVING (not just approved/available)
|
||||
//
|
||||
// This test validates the fix for issue #2896:
|
||||
// https://github.com/juanfont/headscale/issues/2896
|
||||
//
|
||||
// Bug: When a node with already-approved routes restarts/re-authenticates,
|
||||
// the routes show as "Approved" and "Available" but NOT "Serving" (Primary).
|
||||
// A headscale restart would fix it, indicating a state management issue.
|
||||
func TestOIDCReloginSameUserRoutesPreserved(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
advertiseRoute := "10.55.0.0/24"
|
||||
|
||||
// Create scenario with same user for both login attempts
|
||||
scenario, err := NewScenario(ScenarioSpec{
|
||||
OIDCUsers: []mockoidc.MockUser{
|
||||
oidcMockUser("user1", true), // Initial login
|
||||
oidcMockUser("user1", true), // Relogin with same user
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
oidcMap := map[string]string{
|
||||
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
|
||||
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
|
||||
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
|
||||
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnvWithLoginURL(
|
||||
[]tsic.Option{
|
||||
tsic.WithAcceptRoutes(),
|
||||
},
|
||||
hsic.WithTestName("oidcrouterelogin"),
|
||||
hsic.WithConfigEnv(oidcMap),
|
||||
hsic.WithTLS(),
|
||||
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
|
||||
hsic.WithEmbeddedDERPServerOnly(),
|
||||
hsic.WithDERPAsIP(),
|
||||
hsic.WithACLPolicy(
|
||||
&policyv2.Policy{
|
||||
ACLs: []policyv2.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []policyv2.Alias{policyv2.Wildcard},
|
||||
Destinations: []policyv2.AliasWithPorts{{Alias: policyv2.Wildcard, Ports: []tailcfg.PortRange{tailcfg.PortRangeAny}}},
|
||||
},
|
||||
},
|
||||
AutoApprovers: policyv2.AutoApproverPolicy{
|
||||
Routes: map[netip.Prefix]policyv2.AutoApprovers{
|
||||
netip.MustParsePrefix(advertiseRoute): {usernameApprover("user1@headscale.net")},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
)
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create client with route advertisement
|
||||
ts, err := scenario.CreateTailscaleNode(
|
||||
"unstable",
|
||||
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
|
||||
tsic.WithAcceptRoutes(),
|
||||
tsic.WithExtraLoginArgs([]string{"--advertise-routes=" + advertiseRoute}),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Initial login as user1
|
||||
u, err := ts.LoginWithURL(headscale.GetEndpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = doLoginURL(ts.Hostname(), u)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for client to be running
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
status, err := ts.Status()
|
||||
assert.NoError(ct, err)
|
||||
assert.Equal(ct, "Running", status.BackendState)
|
||||
}, 30*time.Second, 1*time.Second, "waiting for initial login to complete")
|
||||
|
||||
// Step 1: Verify initial route is advertised, approved, and SERVING
|
||||
t.Logf("Step 1: Verifying initial route is advertised, approved, and SERVING at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
var initialNode *v1.Node
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, nodes, 1, "Should have exactly 1 node")
|
||||
|
||||
if len(nodes) == 1 {
|
||||
initialNode = nodes[0]
|
||||
// Check: 1 announced, 1 approved, 1 serving (subnet route)
|
||||
assert.Lenf(c, initialNode.GetAvailableRoutes(), 1,
|
||||
"Node should have 1 available route, got %v", initialNode.GetAvailableRoutes())
|
||||
assert.Lenf(c, initialNode.GetApprovedRoutes(), 1,
|
||||
"Node should have 1 approved route, got %v", initialNode.GetApprovedRoutes())
|
||||
assert.Lenf(c, initialNode.GetSubnetRoutes(), 1,
|
||||
"Node should have 1 serving (subnet) route, got %v - THIS IS THE BUG if empty", initialNode.GetSubnetRoutes())
|
||||
assert.Contains(c, initialNode.GetSubnetRoutes(), advertiseRoute,
|
||||
"Subnet routes should contain %s", advertiseRoute)
|
||||
}
|
||||
}, 30*time.Second, 500*time.Millisecond, "initial route should be serving")
|
||||
|
||||
require.NotNil(t, initialNode, "Initial node should be found")
|
||||
initialNodeID := initialNode.GetId()
|
||||
t.Logf("Initial node ID: %d, Available: %v, Approved: %v, Serving: %v",
|
||||
initialNodeID, initialNode.GetAvailableRoutes(), initialNode.GetApprovedRoutes(), initialNode.GetSubnetRoutes())
|
||||
|
||||
// Step 2: Logout
|
||||
t.Logf("Step 2: Logging out at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
err = ts.Logout()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for logout to complete
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
status, err := ts.Status()
|
||||
assert.NoError(ct, err)
|
||||
assert.Equal(ct, "NeedsLogin", status.BackendState, "Expected NeedsLogin state after logout")
|
||||
}, 30*time.Second, 1*time.Second, "waiting for logout to complete")
|
||||
|
||||
t.Logf("Logout completed, node should still exist in database")
|
||||
|
||||
// Verify node still exists (routes should still be in DB)
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, nodes, 1, "Node should persist in database after logout")
|
||||
}, 10*time.Second, 500*time.Millisecond, "node should persist after logout")
|
||||
|
||||
// Step 3: Re-authenticate via OIDC as the same user
|
||||
t.Logf("Step 3: Re-authenticating with same user via OIDC at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
u, err = ts.LoginWithURL(headscale.GetEndpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = doLoginURL(ts.Hostname(), u)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for client to be running
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
status, err := ts.Status()
|
||||
assert.NoError(ct, err)
|
||||
assert.Equal(ct, "Running", status.BackendState, "Expected Running state after relogin")
|
||||
}, 30*time.Second, 1*time.Second, "waiting for relogin to complete")
|
||||
|
||||
t.Logf("Re-authentication completed at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
// Step 4: THE CRITICAL TEST - Verify routes are STILL SERVING after re-authentication
|
||||
t.Logf("Step 4: Verifying routes are STILL SERVING after re-authentication at %s", time.Now().Format(TimestampFormat))
|
||||
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, nodes, 1, "Should still have exactly 1 node after relogin")
|
||||
|
||||
if len(nodes) == 1 {
|
||||
node := nodes[0]
|
||||
t.Logf("After relogin - Available: %v, Approved: %v, Serving: %v",
|
||||
node.GetAvailableRoutes(), node.GetApprovedRoutes(), node.GetSubnetRoutes())
|
||||
|
||||
// This is where issue #2896 manifests:
|
||||
// - Available shows the route (from Hostinfo.RoutableIPs)
|
||||
// - Approved shows the route (from ApprovedRoutes)
|
||||
// - BUT Serving (SubnetRoutes/PrimaryRoutes) is EMPTY!
|
||||
assert.Lenf(c, node.GetAvailableRoutes(), 1,
|
||||
"Node should have 1 available route after relogin, got %v", node.GetAvailableRoutes())
|
||||
assert.Lenf(c, node.GetApprovedRoutes(), 1,
|
||||
"Node should have 1 approved route after relogin, got %v", node.GetApprovedRoutes())
|
||||
assert.Lenf(c, node.GetSubnetRoutes(), 1,
|
||||
"BUG #2896: Node should have 1 SERVING route after relogin, got %v", node.GetSubnetRoutes())
|
||||
assert.Contains(c, node.GetSubnetRoutes(), advertiseRoute,
|
||||
"BUG #2896: Subnet routes should contain %s after relogin", advertiseRoute)
|
||||
|
||||
// Also verify node ID was preserved (same node, not new registration)
|
||||
assert.Equal(c, initialNodeID, node.GetId(),
|
||||
"Node ID should be preserved after same-user relogin")
|
||||
}
|
||||
}, 30*time.Second, 500*time.Millisecond,
|
||||
"BUG #2896: routes should remain SERVING after OIDC logout/relogin with same user")
|
||||
|
||||
t.Logf("Test completed - verifying issue #2896 fix for OIDC")
|
||||
}
|
||||
|
||||
@@ -24,6 +24,7 @@ type ControlServer interface {
 	WaitForRunning() error
 	CreateUser(user string) (*v1.User, error)
 	CreateAuthKey(user uint64, reusable bool, ephemeral bool) (*v1.PreAuthKey, error)
+	DeleteAuthKey(user uint64, key string) error
 	ListNodes(users ...string) ([]*v1.Node, error)
 	DeleteNode(nodeID uint64) error
 	NodesByUser() (map[string][]*v1.Node, error)
|
||||
|
||||
@@ -819,6 +819,104 @@ func TestExpireNode(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestSetNodeExpiryInFuture tests setting an arbitrary expiration date in the future.
// The new expiration date should be stored in the database and propagated to all peers.
|
||||
func TestSetNodeExpiryInFuture(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: len(MustTestVersions),
|
||||
Users: []string{"user1"},
|
||||
}
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("expirenodefuture"))
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
requireNoErrListClients(t, err)
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
requireNoErrSync(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
|
||||
targetExpiry := time.Now().Add(2 * time.Hour).Round(time.Second).UTC()
|
||||
|
||||
result, err := headscale.Execute(
|
||||
[]string{
|
||||
"headscale", "nodes", "expire",
|
||||
"--identifier", "1",
|
||||
"--output", "json",
|
||||
"--expiry", targetExpiry.Format(time.RFC3339),
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
var node v1.Node
|
||||
err = json.Unmarshal([]byte(result), &node)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.True(t, node.GetExpiry().AsTime().After(time.Now()))
|
||||
require.WithinDuration(t, targetExpiry, node.GetExpiry().AsTime(), 2*time.Second)
|
||||
|
||||
var nodeKey key.NodePublic
|
||||
err = nodeKey.UnmarshalText([]byte(node.GetNodeKey()))
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, client := range allClients {
|
||||
if client.Hostname() == node.GetName() {
|
||||
continue
|
||||
}
|
||||
|
||||
assert.EventuallyWithT(
|
||||
t, func(ct *assert.CollectT) {
|
||||
status, err := client.Status()
|
||||
assert.NoError(ct, err)
|
||||
|
||||
peerStatus, ok := status.Peer[nodeKey]
|
||||
assert.True(ct, ok, "node key should be present in peer list")
|
||||
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
assert.NotNil(ct, peerStatus.KeyExpiry)
|
||||
assert.NotNil(ct, peerStatus.Expired)
|
||||
|
||||
if peerStatus.KeyExpiry != nil {
|
||||
assert.WithinDuration(
|
||||
ct,
|
||||
targetExpiry,
|
||||
*peerStatus.KeyExpiry,
|
||||
5*time.Second,
|
||||
"node %q should have key expiry near the requested future time",
|
||||
peerStatus.HostName,
|
||||
)
|
||||
|
||||
assert.Truef(
|
||||
ct,
|
||||
peerStatus.KeyExpiry.After(time.Now()),
|
||||
"node %q should have a key expiry timestamp in the future",
|
||||
peerStatus.HostName,
|
||||
)
|
||||
}
|
||||
|
||||
assert.Falsef(
|
||||
ct,
|
||||
peerStatus.Expired,
|
||||
"node %q should not be marked as expired",
|
||||
peerStatus.HostName,
|
||||
)
|
||||
}, 3*time.Minute, 5*time.Second, "Waiting for future expiry to propagate",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeOnlineStatus(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
|
||||
@@ -1031,6 +1031,34 @@ func (t *HeadscaleInContainer) CreateAuthKey(
|
||||
return &preAuthKey, nil
|
||||
}
|
||||
|
||||
// DeleteAuthKey deletes an "authorisation key" for a User.
|
||||
func (t *HeadscaleInContainer) DeleteAuthKey(
|
||||
user uint64,
|
||||
key string,
|
||||
) error {
|
||||
command := []string{
|
||||
"headscale",
|
||||
"--user",
|
||||
strconv.FormatUint(user, 10),
|
||||
"preauthkeys",
|
||||
"delete",
|
||||
key,
|
||||
"--output",
|
||||
"json",
|
||||
}
|
||||
|
||||
_, _, err := dockertestutil.ExecuteCommand(
|
||||
t.container,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to execute delete auth key command: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
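
A minimal usage sketch of the new helper (illustrative only; it reuses the MapUsers and CreatePreAuthKey helpers visible elsewhere in this diff, lives in the integration package, and is not an actual test from this release):

// Hypothetical sketch: the function name and the "user1" user are illustrative,
// not taken from this diff.
func exampleDeleteAuthKey(t *testing.T, scenario *Scenario, headscale ControlServer) {
	userMap, err := headscale.MapUsers()
	require.NoError(t, err)

	// Create a reusable, non-ephemeral pre-auth key for the user...
	key, err := scenario.CreatePreAuthKey(userMap["user1"].GetId(), true, false)
	require.NoError(t, err)

	// ...and delete it again; logging in with the key afterwards should fail.
	err = headscale.DeleteAuthKey(userMap["user1"].GetId(), key.GetKey())
	require.NoError(t, err)
}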
|
||||
|
||||
// ListNodes lists the currently registered Nodes in headscale.
|
||||
// Optionally a list of usernames can be passed to get users for
|
||||
// specific users.
|
||||
@@ -1232,26 +1260,26 @@ func (h *HeadscaleInContainer) writePolicy(pol *policyv2.Policy) error {
 }

 func (h *HeadscaleInContainer) PID() (int, error) {
-	cmd := []string{"bash", "-c", `ps aux | grep headscale | grep -v grep | awk '{print $2}'`}
-	output, err := h.Execute(cmd)
+	// Use pidof to find the headscale process, which is more reliable than grep
+	// as it only looks for the actual binary name, not processes that contain
+	// "headscale" in their command line (like the dlv debugger).
+	output, err := h.Execute([]string{"pidof", "headscale"})
 	if err != nil {
-		return 0, fmt.Errorf("failed to execute command: %w", err)
+		// pidof returns exit code 1 when no process is found
+		return 0, os.ErrNotExist
 	}

-	lines := strings.TrimSpace(output)
-	if lines == "" {
-		return 0, os.ErrNotExist // No output means no process found
+	// pidof returns space-separated PIDs on a single line
+	pidStrs := strings.Fields(strings.TrimSpace(output))
+	if len(pidStrs) == 0 {
+		return 0, os.ErrNotExist
 	}

-	pids := make([]int, 0, len(lines))
-	for _, line := range strings.Split(lines, "\n") {
-		line = strings.TrimSpace(line)
-		if line == "" {
-			continue
-		}
-		pidInt, err := strconv.Atoi(line)
+	pids := make([]int, 0, len(pidStrs))
+	for _, pidStr := range pidStrs {
+		pidInt, err := strconv.Atoi(pidStr)
 		if err != nil {
-			return 0, fmt.Errorf("parsing PID: %w", err)
+			return 0, fmt.Errorf("parsing PID %q: %w", pidStr, err)
 		}
 		// We dont care about the root pid for the container
 		if pidInt == 1 {

@@ -1266,7 +1294,9 @@ func (h *HeadscaleInContainer) PID() (int, error) {
 	case 1:
 		return pids[0], nil
 	default:
-		return 0, errors.New("multiple headscale processes running")
+		// If we still have multiple PIDs, return the first one as a fallback
+		// This can happen in edge cases during startup/shutdown
+		return pids[0], nil
 	}
 }
|
||||
|
||||
|
||||
@@ -20,10 +20,12 @@ import (
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/integrationutil"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
xmaps "golang.org/x/exp/maps"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
@@ -2215,11 +2217,31 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
// Check if we should run the full matrix of tests
|
||||
// By default, we only run a minimal subset to avoid overwhelming Docker/disk
|
||||
// Set HEADSCALE_INTEGRATION_FULL_MATRIX=1 to run all combinations
|
||||
fullMatrix := envknob.Bool("HEADSCALE_INTEGRATION_FULL_MATRIX")
|
||||
|
||||
// Minimal test set: 3 tests covering all key dimensions
|
||||
// - Both auth methods (authkey, webauth)
|
||||
// - All 3 approver types (tag, user, group)
|
||||
// - Both policy modes (database, file)
|
||||
// - Both advertiseDuringUp values (true, false)
|
||||
minimalTestSet := map[string]bool{
|
||||
"authkey-tag-advertiseduringup-false-pol-database": true, // authkey + database + tag + false
|
||||
"webauth-user-advertiseduringup-true-pol-file": true, // webauth + file + user + true
|
||||
"authkey-group-advertiseduringup-false-pol-file": true, // authkey + file + group + false
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
for _, polMode := range []types.PolicyMode{types.PolicyModeDB, types.PolicyModeFile} {
|
||||
for _, advertiseDuringUp := range []bool{false, true} {
|
||||
name := fmt.Sprintf("%s-advertiseduringup-%t-pol-%s", tt.name, advertiseDuringUp, polMode)
|
||||
t.Run(name, func(t *testing.T) {
|
||||
// Skip tests not in minimal set unless full matrix is enabled
|
||||
if !fullMatrix && !minimalTestSet[name] {
|
||||
t.Skip("Skipping to reduce test matrix size. Set HEADSCALE_INTEGRATION_FULL_MATRIX=1 to run all tests.")
|
||||
}
|
||||
scenario, err := NewScenario(tt.spec)
|
||||
require.NoErrorf(t, err, "failed to create scenario: %s", err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
@@ -2302,7 +2324,11 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
|
||||
// into a HA node, which isn't something we are testing here.
|
||||
routerUsernet1, err := scenario.CreateTailscaleNode("head", tsOpts...)
|
||||
require.NoError(t, err)
|
||||
defer routerUsernet1.Shutdown()
|
||||
|
||||
defer func() {
|
||||
_, _, err := routerUsernet1.Shutdown()
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
if tt.withURL {
|
||||
u, err := routerUsernet1.LoginWithURL(headscale.GetEndpoint())
|
||||
@@ -2311,7 +2337,14 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
|
||||
body, err := doLoginURL(routerUsernet1.Hostname(), u)
|
||||
require.NoError(t, err)
|
||||
|
||||
scenario.runHeadscaleRegister("user1", body)
|
||||
err = scenario.runHeadscaleRegister("user1", body)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for the client to sync with the server after webauth registration.
|
||||
// Unlike authkey login which blocks until complete, webauth registration
|
||||
// happens on the server side and the client needs time to receive the network map.
|
||||
err = routerUsernet1.WaitForRunning(integrationutil.PeerSyncTimeout())
|
||||
require.NoError(t, err, "webauth client failed to reach Running state")
|
||||
} else {
|
||||
userMap, err := headscale.MapUsers()
|
||||
require.NoError(t, err)
|
||||
@@ -2324,6 +2357,11 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
|
||||
}
|
||||
// extra creation end.
|
||||
|
||||
// Wait for the node to be fully running before getting its ID
|
||||
// This is especially important for webauth flow where login is asynchronous
|
||||
err = routerUsernet1.WaitForRunning(30 * time.Second)
|
||||
require.NoError(t, err)
|
||||
|
||||
routerUsernet1ID := routerUsernet1.MustID()
|
||||
|
||||
web := services[0]
|
||||
@@ -2711,16 +2749,6 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func assertTracerouteViaIP(t *testing.T, tr util.Traceroute, ip netip.Addr) {
|
||||
t.Helper()
|
||||
|
||||
require.NotNil(t, tr)
|
||||
require.True(t, tr.Success)
|
||||
require.NoError(t, tr.Err)
|
||||
require.NotEmpty(t, tr.Route)
|
||||
require.Equal(t, tr.Route[0].IP, ip)
|
||||
}
|
||||
|
||||
// assertTracerouteViaIPWithCollect is a version of assertTracerouteViaIP that works with assert.CollectT.
|
||||
func assertTracerouteViaIPWithCollect(c *assert.CollectT, tr util.Traceroute, ip netip.Addr) {
|
||||
assert.NotNil(c, tr)
|
||||
@@ -2734,30 +2762,6 @@ func assertTracerouteViaIPWithCollect(c *assert.CollectT, tr util.Traceroute, ip
|
||||
}
|
||||
}
|
||||
|
||||
// requirePeerSubnetRoutes asserts that the peer has the expected subnet routes.
|
||||
func requirePeerSubnetRoutes(t *testing.T, status *ipnstate.PeerStatus, expected []netip.Prefix) {
|
||||
t.Helper()
|
||||
if status.AllowedIPs.Len() <= 2 && len(expected) != 0 {
|
||||
t.Fatalf("peer %s (%s) has no subnet routes, expected %v", status.HostName, status.ID, expected)
|
||||
return
|
||||
}
|
||||
|
||||
if len(expected) == 0 {
|
||||
expected = []netip.Prefix{}
|
||||
}
|
||||
|
||||
got := slicesx.Filter(nil, status.AllowedIPs.AsSlice(), func(p netip.Prefix) bool {
|
||||
if tsaddr.IsExitRoute(p) {
|
||||
return true
|
||||
}
|
||||
return !slices.ContainsFunc(status.TailscaleIPs, p.Contains)
|
||||
})
|
||||
|
||||
if diff := cmpdiff.Diff(expected, got, util.PrefixComparer, cmpopts.EquateEmpty()); diff != "" {
|
||||
t.Fatalf("peer %s (%s) subnet routes, unexpected result (-want +got):\n%s", status.HostName, status.ID, diff)
|
||||
}
|
||||
}
|
||||
|
||||
func SortPeerStatus(a, b *ipnstate.PeerStatus) int {
|
||||
return cmp.Compare(a.ID, b.ID)
|
||||
}
|
||||
@@ -2802,13 +2806,6 @@ func requirePeerSubnetRoutesWithCollect(c *assert.CollectT, status *ipnstate.Pee
|
||||
}
|
||||
}
|
||||
|
||||
func requireNodeRouteCount(t *testing.T, node *v1.Node, announced, approved, subnet int) {
|
||||
t.Helper()
|
||||
require.Lenf(t, node.GetAvailableRoutes(), announced, "expected %q announced routes(%v) to have %d route, had %d", node.GetName(), node.GetAvailableRoutes(), announced, len(node.GetAvailableRoutes()))
|
||||
require.Lenf(t, node.GetApprovedRoutes(), approved, "expected %q approved routes(%v) to have %d route, had %d", node.GetName(), node.GetApprovedRoutes(), approved, len(node.GetApprovedRoutes()))
|
||||
require.Lenf(t, node.GetSubnetRoutes(), subnet, "expected %q subnet routes(%v) to have %d route, had %d", node.GetName(), node.GetSubnetRoutes(), subnet, len(node.GetSubnetRoutes()))
|
||||
}
|
||||
|
||||
func requireNodeRouteCountWithCollect(c *assert.CollectT, node *v1.Node, announced, approved, subnet int) {
|
||||
assert.Lenf(c, node.GetAvailableRoutes(), announced, "expected %q announced routes(%v) to have %d route, had %d", node.GetName(), node.GetAvailableRoutes(), announced, len(node.GetAvailableRoutes()))
|
||||
assert.Lenf(c, node.GetApprovedRoutes(), approved, "expected %q approved routes(%v) to have %d route, had %d", node.GetName(), node.GetApprovedRoutes(), approved, len(node.GetApprovedRoutes()))
|
||||
|
||||
@@ -860,47 +860,183 @@ func (s *Scenario) RunTailscaleUpWithURL(userStr, loginServer string) error {
|
||||
return fmt.Errorf("failed to up tailscale node: %w", errNoUserAvailable)
|
||||
}
|
||||
|
||||
// doLoginURL visits the given login URL and returns the body as a
|
||||
// string.
|
||||
func doLoginURL(hostname string, loginURL *url.URL) (string, error) {
|
||||
log.Printf("%s login url: %s\n", hostname, loginURL.String())
|
||||
type debugJar struct {
|
||||
inner *cookiejar.Jar
|
||||
mu sync.RWMutex
|
||||
store map[string]map[string]map[string]*http.Cookie // domain -> path -> name -> cookie
|
||||
}
|
||||
|
||||
var err error
|
||||
func newDebugJar() (*debugJar, error) {
|
||||
jar, err := cookiejar.New(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &debugJar{
|
||||
inner: jar,
|
||||
store: make(map[string]map[string]map[string]*http.Cookie),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (j *debugJar) SetCookies(u *url.URL, cookies []*http.Cookie) {
|
||||
j.inner.SetCookies(u, cookies)
|
||||
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
|
||||
for _, c := range cookies {
|
||||
if c == nil || c.Name == "" {
|
||||
continue
|
||||
}
|
||||
domain := c.Domain
|
||||
if domain == "" {
|
||||
domain = u.Hostname()
|
||||
}
|
||||
path := c.Path
|
||||
if path == "" {
|
||||
path = "/"
|
||||
}
|
||||
if _, ok := j.store[domain]; !ok {
|
||||
j.store[domain] = make(map[string]map[string]*http.Cookie)
|
||||
}
|
||||
if _, ok := j.store[domain][path]; !ok {
|
||||
j.store[domain][path] = make(map[string]*http.Cookie)
|
||||
}
|
||||
j.store[domain][path][c.Name] = copyCookie(c)
|
||||
}
|
||||
}
|
||||
|
||||
func (j *debugJar) Cookies(u *url.URL) []*http.Cookie {
|
||||
return j.inner.Cookies(u)
|
||||
}
|
||||
|
||||
func (j *debugJar) Dump(w io.Writer) {
|
||||
j.mu.RLock()
|
||||
defer j.mu.RUnlock()
|
||||
|
||||
for domain, paths := range j.store {
|
||||
fmt.Fprintf(w, "Domain: %s\n", domain)
|
||||
for path, byName := range paths {
|
||||
fmt.Fprintf(w, " Path: %s\n", path)
|
||||
for _, c := range byName {
|
||||
fmt.Fprintf(
|
||||
w, " %s=%s; Expires=%v; Secure=%v; HttpOnly=%v; SameSite=%v\n",
|
||||
c.Name, c.Value, c.Expires, c.Secure, c.HttpOnly, c.SameSite,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func copyCookie(c *http.Cookie) *http.Cookie {
|
||||
cc := *c
|
||||
return &cc
|
||||
}
|
||||
|
||||
func newLoginHTTPClient(hostname string) (*http.Client, error) {
|
||||
hc := &http.Client{
|
||||
Transport: LoggingRoundTripper{Hostname: hostname},
|
||||
}
|
||||
hc.Jar, err = cookiejar.New(nil)
|
||||
|
||||
jar, err := newDebugJar()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("%s failed to create cookiejar : %w", hostname, err)
|
||||
return nil, fmt.Errorf("%s failed to create cookiejar: %w", hostname, err)
|
||||
}
|
||||
|
||||
hc.Jar = jar
|
||||
|
||||
return hc, nil
|
||||
}
|
||||
|
||||
// doLoginURL visits the given login URL and returns the body as a string.
|
||||
func doLoginURL(hostname string, loginURL *url.URL) (string, error) {
|
||||
log.Printf("%s login url: %s\n", hostname, loginURL.String())
|
||||
|
||||
hc, err := newLoginHTTPClient(hostname)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
body, _, err := doLoginURLWithClient(hostname, loginURL, hc, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return body, nil
|
||||
}
|
||||
|
||||
// doLoginURLWithClient performs the login request using the provided HTTP client.
|
||||
// When followRedirects is false, it will return the first redirect without following it.
|
||||
func doLoginURLWithClient(hostname string, loginURL *url.URL, hc *http.Client, followRedirects bool) (
|
||||
string,
|
||||
*url.URL,
|
||||
error,
|
||||
) {
|
||||
if hc == nil {
|
||||
return "", nil, fmt.Errorf("%s http client is nil", hostname)
|
||||
}
|
||||
|
||||
if loginURL == nil {
|
||||
return "", nil, fmt.Errorf("%s login url is nil", hostname)
|
||||
}
|
||||
|
||||
log.Printf("%s logging in with url: %s", hostname, loginURL.String())
|
||||
ctx := context.Background()
|
||||
req, _ := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("%s failed to create http request: %w", hostname, err)
|
||||
}
|
||||
|
||||
originalRedirect := hc.CheckRedirect
|
||||
if !followRedirects {
|
||||
hc.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
}
|
||||
defer func() {
|
||||
hc.CheckRedirect = originalRedirect
|
||||
}()
|
||||
|
||||
resp, err := hc.Do(req)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("%s failed to send http request: %w", hostname, err)
|
||||
return "", nil, fmt.Errorf("%s failed to send http request: %w", hostname, err)
|
||||
}
|
||||
|
||||
log.Printf("cookies: %+v", hc.Jar.Cookies(loginURL))
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
log.Printf("body: %s", body)
|
||||
|
||||
return "", fmt.Errorf("%s response code of login request was %w", hostname, err)
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
bodyBytes, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Printf("%s failed to read response body: %s", hostname, err)
|
||||
return "", nil, fmt.Errorf("%s failed to read response body: %w", hostname, err)
|
||||
}
|
||||
body := string(bodyBytes)
|
||||
|
||||
return "", fmt.Errorf("%s failed to read response body: %w", hostname, err)
|
||||
var redirectURL *url.URL
|
||||
if resp.StatusCode >= http.StatusMultipleChoices && resp.StatusCode < http.StatusBadRequest {
|
||||
redirectURL, err = resp.Location()
|
||||
if err != nil {
|
||||
return body, nil, fmt.Errorf("%s failed to resolve redirect location: %w", hostname, err)
|
||||
}
|
||||
}
|
||||
|
||||
return string(body), nil
|
||||
if followRedirects && resp.StatusCode != http.StatusOK {
|
||||
log.Printf("body: %s", body)
|
||||
|
||||
return body, redirectURL, fmt.Errorf("%s unexpected status code %d", hostname, resp.StatusCode)
|
||||
}
|
||||
|
||||
if resp.StatusCode >= http.StatusBadRequest {
|
||||
log.Printf("body: %s", body)
|
||||
|
||||
return body, redirectURL, fmt.Errorf("%s unexpected status code %d", hostname, resp.StatusCode)
|
||||
}
|
||||
|
||||
if hc.Jar != nil {
|
||||
if jar, ok := hc.Jar.(*debugJar); ok {
|
||||
jar.Dump(os.Stdout)
|
||||
} else {
|
||||
log.Printf("cookies: %+v", hc.Jar.Cookies(loginURL))
|
||||
}
|
||||
}
|
||||
|
||||
return body, redirectURL, nil
|
||||
}
|
||||
|
||||
var errParseAuthPage = errors.New("failed to parse auth page")
|
||||
|
||||
@@ -29,6 +29,7 @@ type TailscaleClient interface {
|
||||
Login(loginServer, authKey string) error
|
||||
LoginWithURL(loginServer string) (*url.URL, error)
|
||||
Logout() error
|
||||
Restart() error
|
||||
Up() error
|
||||
Down() error
|
||||
IPs() ([]netip.Addr, error)
|
||||
|
||||
@@ -555,6 +555,39 @@ func (t *TailscaleInContainer) Logout() error {
|
||||
return t.waitForBackendState("NeedsLogin", integrationutil.PeerSyncTimeout())
|
||||
}
|
||||
|
||||
// Restart restarts the Tailscale container using Docker API.
|
||||
// This simulates a container restart (e.g., docker restart or Kubernetes pod restart).
|
||||
// The container's entrypoint will re-execute, which typically includes running
|
||||
// "tailscale up" with any auth keys stored in environment variables.
|
||||
func (t *TailscaleInContainer) Restart() error {
|
||||
if t.container == nil {
|
||||
return fmt.Errorf("container not initialized")
|
||||
}
|
||||
|
||||
// Use Docker API to restart the container
|
||||
err := t.pool.Client.RestartContainer(t.container.Container.ID, 30)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to restart container %s: %w", t.hostname, err)
|
||||
}
|
||||
|
||||
// Wait for the container to be back up and tailscaled to be ready
|
||||
// We use exponential backoff to poll until we can successfully execute a command
|
||||
_, err = backoff.Retry(context.Background(), func() (struct{}, error) {
|
||||
// Try to execute a simple command to verify the container is responsive
|
||||
_, _, err := t.Execute([]string{"tailscale", "version"}, dockertestutil.ExecuteCommandTimeout(5*time.Second))
|
||||
if err != nil {
|
||||
return struct{}{}, fmt.Errorf("container not ready: %w", err)
|
||||
}
|
||||
return struct{}{}, nil
|
||||
}, backoff.WithBackOff(backoff.NewExponentialBackOff()), backoff.WithMaxElapsedTime(30*time.Second))
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("timeout waiting for container %s to restart and become ready: %w", t.hostname, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Helper that runs `tailscale up` with no arguments.
|
||||
func (t *TailscaleInContainer) Up() error {
|
||||
command := []string{
|
||||
|
||||
@@ -104,7 +104,7 @@ extra:
     - icon: fontawesome/brands/discord
      link: https://discord.gg/c84AZQhmpx
  headscale:
-    version: 0.27.0
+    version: 0.27.1

# Extensions
markdown_extensions:

@@ -55,6 +55,13 @@ service HeadscaleService {
    };
  }

+  rpc DeletePreAuthKey(DeletePreAuthKeyRequest)
+      returns (DeletePreAuthKeyResponse) {
+    option (google.api.http) = {
+      delete : "/api/v1/preauthkey"
+    };
+  }
+
  rpc ListPreAuthKeys(ListPreAuthKeysRequest)
      returns (ListPreAuthKeysResponse) {
    option (google.api.http) = {

@@ -82,7 +82,10 @@ message DeleteNodeRequest { uint64 node_id = 1; }

 message DeleteNodeResponse {}

-message ExpireNodeRequest { uint64 node_id = 1; }
+message ExpireNodeRequest {
+  uint64 node_id = 1;
+  google.protobuf.Timestamp expiry = 2;
+}

 message ExpireNodeResponse { Node node = 1; }
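
The new expiry field makes the expiration time an optional input instead of always expiring the node right away. A minimal sketch (an illustration, not part of this diff) of how the generated Go gRPC client could set a future expiry, assuming an already-established, authenticated connection:

// Illustrative only: the client wiring (conn) and the two-hour offset are assumptions.
package example

import (
	"context"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func expireNodeInTwoHours(ctx context.Context, conn *grpc.ClientConn, nodeID uint64) (*v1.Node, error) {
	client := v1.NewHeadscaleServiceClient(conn)

	// Set the expiration to a point in the future rather than expiring the node now.
	resp, err := client.ExpireNode(ctx, &v1.ExpireNodeRequest{
		NodeId: nodeID,
		Expiry: timestamppb.New(time.Now().Add(2 * time.Hour)),
	})
	if err != nil {
		return nil, err
	}

	return resp.GetNode(), nil
}
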
@@ -34,6 +34,13 @@ message ExpirePreAuthKeyRequest {

 message ExpirePreAuthKeyResponse {}

+message DeletePreAuthKeyRequest {
+  uint64 user = 1;
+  string key = 2;
+}
+
+message DeletePreAuthKeyResponse {}
+
 message ListPreAuthKeysRequest { uint64 user = 1; }

 message ListPreAuthKeysResponse { repeated PreAuthKey pre_auth_keys = 1; }
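
The google.api.http rule added above also exposes this RPC over REST at DELETE /api/v1/preauthkey. A rough sketch of calling it directly over HTTP; the query-parameter mapping (grpc-gateway's default for request fields not bound to the path or body) and the Bearer API-key header are assumptions, not taken from this diff:

// Hypothetical sketch: endpoint parameters and auth scheme are assumed, not confirmed by this diff.
package example

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
)

func deletePreAuthKey(ctx context.Context, server, apiKey string, user uint64, key string) error {
	// Request fields are assumed to map to query parameters for this DELETE route.
	q := url.Values{}
	q.Set("user", fmt.Sprintf("%d", user))
	q.Set("key", key)

	req, err := http.NewRequestWithContext(ctx, http.MethodDelete,
		server+"/api/v1/preauthkey?"+q.Encode(), nil)
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}

	return nil
}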