Compare commits


1 Commit

Author SHA1 Message Date
dependabot[bot]
3ba53497c3 build(deps): bump github.com/go-viper/mapstructure/v2
Bumps [github.com/go-viper/mapstructure/v2](https://github.com/go-viper/mapstructure) from 2.2.1 to 2.4.0.
- [Release notes](https://github.com/go-viper/mapstructure/releases)
- [Changelog](https://github.com/go-viper/mapstructure/blob/main/CHANGELOG.md)
- [Commits](https://github.com/go-viper/mapstructure/compare/v2.2.1...v2.4.0)

---
updated-dependencies:
- dependency-name: github.com/go-viper/mapstructure/v2
  dependency-version: 2.4.0
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-12 14:57:41 +00:00
120 changed files with 4297 additions and 17260 deletions
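To illustrate the bumped dependency, here is a minimal, hypothetical sketch of decoding a loosely typed map into a struct with go-viper/mapstructure/v2; the `ServerConfig` struct and its fields are invented for the example and do not come from headscale.

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

// ServerConfig is a hypothetical target struct for illustration.
type ServerConfig struct {
	ListenAddr string `mapstructure:"listen_addr"`
	Debug      bool   `mapstructure:"debug"`
}

func main() {
	// Input as it might arrive from a YAML or JSON decoder.
	raw := map[string]any{
		"listen_addr": "0.0.0.0:8080",
		"debug":       true,
	}

	var cfg ServerConfig
	// Decode maps the keys onto the struct fields via the mapstructure tags.
	if err := mapstructure.Decode(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}
```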

View File

@@ -94,8 +94,6 @@ jobs:
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
- name: Run go cross compile
env:
CGO_ENABLED: 0
run:
env ${{ matrix.env }} nix develop --command -- go build -o "headscale"
./cmd/headscale

View File

@@ -62,7 +62,6 @@ jobs:
'**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
- name: Run Integration Test
if: always() && steps.changed-files.outputs.files == 'true'
run:
nix develop --command -- hi run --stats --ts-memory-limit=300 --hs-memory-limit=1500 "^${{ inputs.test }}$" \
--timeout=120m \

View File

@@ -23,12 +23,6 @@ jobs:
- TestPolicyUpdateWhileRunningWithCLIInDatabase
- TestACLAutogroupMember
- TestACLAutogroupTagged
- TestACLAutogroupSelf
- TestACLPolicyPropagationOverTime
- TestAPIAuthenticationBypass
- TestAPIAuthenticationBypassCurl
- TestGRPCAuthenticationBypass
- TestCLIWithConfigAuthenticationBypass
- TestAuthKeyLogoutAndReloginSameUser
- TestAuthKeyLogoutAndReloginNewUser
- TestAuthKeyLogoutAndReloginSameUserExpiredKey
@@ -37,13 +31,8 @@ jobs:
- TestOIDC024UserCreation
- TestOIDCAuthenticationWithPKCE
- TestOIDCReloginSameNodeNewUser
- TestOIDCFollowUpUrl
- TestOIDCMultipleOpenedLoginUrls
- TestOIDCReloginSameNodeSameUser
- TestOIDCExpiryAfterRestart
- TestAuthWebFlowAuthenticationPingAll
- TestAuthWebFlowLogoutAndReloginSameUser
- TestAuthWebFlowLogoutAndReloginNewUser
- TestAuthWebFlowLogoutAndRelogin
- TestUserCommand
- TestPreAuthKeyCommand
- TestPreAuthKeyCommandWithoutExpiry
@@ -72,7 +61,6 @@ jobs:
- TestTaildrop
- TestUpdateHostnameFromClient
- TestExpireNode
- TestSetNodeExpiryInFuture
- TestNodeOnlineStatus
- TestPingAllByIPManyUpDown
- Test2118DeletingOnlineNodePanics
@@ -91,7 +79,6 @@ jobs:
- TestSSHNoSSHConfigured
- TestSSHIsBlockedInACL
- TestSSHUserOnlyIsolation
- TestSSHAutogroupSelf
uses: ./.github/workflows/integration-test-template.yml
with:
test: ${{ matrix.test }}

View File

@@ -2,39 +2,12 @@
version: 2
before:
hooks:
- go mod tidy -compat=1.25
- go mod tidy -compat=1.24
- go mod vendor
release:
prerelease: auto
draft: true
header: |
## Upgrade
Please follow the steps outlined in the [upgrade guide](https://headscale.net/stable/setup/upgrade/) to update your existing Headscale installation.
**It's best to update from one stable version to the next** (e.g., 0.24.0 → 0.25.1 → 0.26.1) in case you are multiple releases behind. You should always pick the latest available patch release.
Be sure to check the changelog above for version-specific upgrade instructions and breaking changes.
### Backup Your Database
**Always back up your database before upgrading.** Here's how to back up a SQLite database:
```bash
# Stop headscale
systemctl stop headscale
# Backup sqlite database
cp /var/lib/headscale/db.sqlite /var/lib/headscale/db.sqlite.backup
# Backup sqlite WAL/SHM files (if they exist)
cp /var/lib/headscale/db.sqlite-wal /var/lib/headscale/db.sqlite-wal.backup
cp /var/lib/headscale/db.sqlite-shm /var/lib/headscale/db.sqlite-shm.backup
# Start headscale (migration will run automatically)
systemctl start headscale
```
builds:
- id: headscale
@@ -145,8 +118,6 @@ kos:
- "{{ .Tag }}"
- '{{ trimprefix .Tag "v" }}'
- "sha-{{ .ShortCommit }}"
creation_time: "{{.CommitTimestamp}}"
ko_data_creation_time: "{{.CommitTimestamp}}"
- id: ghcr-debug
repositories:

View File

@@ -2,51 +2,15 @@
## Next
### Changes
## 0.27.1 (2025-11-11)
**Minimum supported Tailscale client version: v1.64.0**
### Changes
- Expire nodes with a custom timestamp
[#2828](https://github.com/juanfont/headscale/pull/2828)
- Fix issue where node expiry was reset when tailscaled restarts
[#2875](https://github.com/juanfont/headscale/pull/2875)
- Fix OIDC authentication when multiple login URLs are opened
[#2861](https://github.com/juanfont/headscale/pull/2861)
- Fix node re-registration failing with expired auth keys
[#2859](https://github.com/juanfont/headscale/pull/2859)
- Remove old unused database tables and indices
[#2844](https://github.com/juanfont/headscale/pull/2844)
[#2872](https://github.com/juanfont/headscale/pull/2872)
- Ignore litestream tables during database validation
[#2843](https://github.com/juanfont/headscale/pull/2843)
- Fix exit node visibility to respect ACL rules
[#2855](https://github.com/juanfont/headscale/pull/2855)
- Fix SSH policy becoming empty when unknown user is referenced
[#2874](https://github.com/juanfont/headscale/pull/2874)
- Fix policy validation when using bypass-grpc mode
[#2854](https://github.com/juanfont/headscale/pull/2854)
- Fix autogroup:self interaction with other ACL rules
[#2842](https://github.com/juanfont/headscale/pull/2842)
- Fix flaky DERP map shuffle test
[#2848](https://github.com/juanfont/headscale/pull/2848)
- Use current stable base images for Debian and Alpine containers
[#2827](https://github.com/juanfont/headscale/pull/2827)
## 0.27.0 (2025-10-27)
**Minimum supported Tailscale client version: v1.64.0**
### Database integrity improvements
This release includes a significant database migration that addresses
longstanding issues with the database schema and data integrity that have
accumulated over the years. The migration introduces a `schema.sql` file as the
source of truth for the expected database schema, to ensure that migrations
which would cause divergence do not occur again.
This release includes a significant database migration that addresses longstanding
issues with the database schema and data integrity that have accumulated over the
years. The migration introduces a `schema.sql` file as the source of truth for
the expected database schema, to ensure that migrations which would cause
divergence do not occur again.
These issues arose from a combination of factors discovered over time: SQLite
foreign keys not being enforced for many early versions, all migrations being
@@ -58,9 +22,8 @@ enforced throughout the migration process.
We are only improving SQLite databases with this change - PostgreSQL databases
are not affected.
Please read the
[PR description](https://github.com/juanfont/headscale/pull/2617) for more
technical details about the issues and solutions.
Please read the [PR description](https://github.com/juanfont/headscale/pull/2617)
for more technical details about the issues and solutions.
**SQLite Database Backup Example:**
@@ -82,35 +45,9 @@ systemctl start headscale
### DERPMap update frequency
The default DERPMap update frequency has been changed from 24 hours to 3 hours.
If you set the `derp.update_frequency` configuration option, it is recommended
to change it to `3h` to ensure that the headscale instance gets the latest
DERPMap updates when upstream is changed.
### Autogroups
This release adds support for the three missing autogroups: `self`
(experimental), `member`, and `tagged`. Please refer to the
[documentation](https://tailscale.com/kb/1018/autogroups/) for a detailed
explanation.
`autogroup:self` is marked as experimental and should be used with caution, but
we need help testing it. Experimental here means two things: first, generating
the packet filter from policies that use `autogroup:self` is very expensive, and
it might perform poorly, or not work at all, on Headscale installations with a
large number of nodes. Second, the implementation might have bugs or edge cases
we are not aware of, meaning that nodes or users might gain _more_ access than
expected. Please report bugs.
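For anyone testing `autogroup:self`, a minimal sketch of validating a policy file before deploying it, reusing the `policy.NewPolicyManager` call that appears in the CLI diff further down; the import paths are assumed to follow the repository layout (`hscontrol/policy`, `hscontrol/types`).

```go
package main

import (
	"fmt"
	"os"

	"github.com/juanfont/headscale/hscontrol/policy"
	"github.com/juanfont/headscale/hscontrol/types"
	"tailscale.com/types/views"
)

func main() {
	// A policy file that may use autogroup:self.
	policyBytes, err := os.ReadFile("policy.json")
	if err != nil {
		panic(err)
	}

	// Parse and validate the policy without any users or nodes loaded,
	// mirroring the first validation pass in `headscale policy set`.
	if _, err := policy.NewPolicyManager(policyBytes, nil, views.Slice[types.NodeView]{}); err != nil {
		fmt.Fprintf(os.Stderr, "invalid policy: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("policy parses cleanly")
}
```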
### Node store (in-memory database)
Under the hood, we have added a new data structure to store nodes in memory. This
data structure is called `NodeStore` and aims to reduce the reading and writing
of nodes at the database layer. We have not benchmarked it, but expect it to
improve performance for read-heavy workloads. In the worst case we have merely
moved the bottleneck somewhere else; in the best case we should see a good
improvement in compute resource usage at the expense of memory usage. We are
quite excited about this change and think it will make it easier for us to
improve the code base over time and make it more correct and efficient.
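The `NodeStore` implementation itself is not part of this diff; purely to illustrate the idea described above, here is a hypothetical read-through cache that serves node lookups from memory and falls back to the database only on a miss. All names and types are invented for the sketch.

```go
package main

import (
	"fmt"
	"sync"
)

// Node is a stand-in for headscale's node type.
type Node struct {
	ID       uint64
	Hostname string
}

// nodeStore is a hypothetical in-memory cache in front of a database,
// illustrating the trade-off described above: faster reads at the
// expense of extra memory.
type nodeStore struct {
	mu    sync.RWMutex
	nodes map[uint64]Node
	load  func(id uint64) (Node, error) // database fallback
}

func (s *nodeStore) Get(id uint64) (Node, error) {
	s.mu.RLock()
	n, ok := s.nodes[id]
	s.mu.RUnlock()
	if ok {
		return n, nil // served from memory, no database read
	}
	// Miss: read from the database and remember the result.
	n, err := s.load(id)
	if err != nil {
		return Node{}, err
	}
	s.mu.Lock()
	s.nodes[id] = n
	s.mu.Unlock()
	return n, nil
}

func main() {
	store := &nodeStore{
		nodes: map[uint64]Node{},
		load: func(id uint64) (Node, error) {
			return Node{ID: id, Hostname: "from-db"}, nil
		},
	}
	n, _ := store.Get(1) // first call hits the database fallback
	fmt.Println(n.Hostname)
	n, _ = store.Get(1) // second call is served from memory
	fmt.Println(n.Hostname)
}
```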
If you set the `derp.update_frequency` configuration option, it is recommended to change
it to `3h` to ensure that the headscale instance gets the latest DERPMap updates when
upstream is changed.
### BREAKING
@@ -118,21 +55,6 @@ the code base over time and make it more correct and efficient.
[#2692](https://github.com/juanfont/headscale/pull/2692)
- Policy: Zero or empty destination port is no longer allowed
[#2606](https://github.com/juanfont/headscale/pull/2606)
- Stricter hostname validation
[#2383](https://github.com/juanfont/headscale/pull/2383)
- Hostnames must be valid DNS labels (2-63 characters, alphanumeric and
hyphens only, cannot start/end with hyphen)
- **Client Registration (New Nodes)**: Invalid hostnames are automatically
renamed to `invalid-XXXXXX` format
- `my-laptop` → accepted as-is
- `My-Laptop` → `my-laptop` (lowercased)
- `my_laptop` → `invalid-a1b2c3` (underscore not allowed)
- `test@host` → `invalid-d4e5f6` (@ not allowed)
- `laptop-🚀` → `invalid-j1k2l3` (emoji not allowed)
- **Hostinfo Updates / CLI**: Invalid hostnames are rejected with an error
- Valid names are accepted or lowercased
- Names with invalid characters, too short (<2), too long (>63), or
starting/ending with hyphen are rejected
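A small sketch of a validator implementing the rules exactly as stated above (2-63 characters, lowercase alphanumerics and hyphens, no leading or trailing hyphen); it is derived from the bullet points, not copied from headscale's implementation.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// dnsLabel enforces 2-63 characters, lowercase alphanumerics and hyphens,
// with alphanumeric first and last characters, per the rules above.
var dnsLabel = regexp.MustCompile(`^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`)

// normalizeHostname lowercases the name and reports whether the result
// is a valid DNS label under the stated rules.
func normalizeHostname(name string) (string, bool) {
	name = strings.ToLower(name)
	return name, dnsLabel.MatchString(name)
}

func main() {
	for _, name := range []string{"my-laptop", "My-Laptop", "my_laptop", "-bad-"} {
		n, ok := normalizeHostname(name)
		fmt.Println(name, "->", n, ok)
	}
}
```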
### Changes
@@ -145,8 +67,8 @@ the code base over time and make it more correct and efficient.
[#2765](https://github.com/juanfont/headscale/pull/2765)
- DERPmap update frequency default changed from 24h to 3h
[#2741](https://github.com/juanfont/headscale/pull/2741)
- DERPmap update mechanism has been improved with retry, and is now failing
conservatively, preserving the old map upon failure.
- DERPmap update mechanism has been improved with retry,
and is now failing conservatively, preserving the old map upon failure.
[#2741](https://github.com/juanfont/headscale/pull/2741)
- Add support for `autogroup:member`, `autogroup:tagged`
[#2572](https://github.com/juanfont/headscale/pull/2572)
@@ -155,6 +77,8 @@ the code base over time and make it more correct and efficient.
- Remove policy v1 code [#2600](https://github.com/juanfont/headscale/pull/2600)
- Refactor Debian/Ubuntu packaging and drop support for Ubuntu 20.04.
[#2614](https://github.com/juanfont/headscale/pull/2614)
- Support client verify for DERP
[#2046](https://github.com/juanfont/headscale/pull/2046)
- Remove redundant check regarding `noise` config
[#2658](https://github.com/juanfont/headscale/pull/2658)
- Refactor OpenID Connect documentation
@@ -166,16 +90,9 @@ the code base over time and make it more correct and efficient.
- OIDC: Use group claim from UserInfo
[#2663](https://github.com/juanfont/headscale/pull/2663)
- OIDC: Update user with claims from UserInfo _before_ comparing with allowed
groups, email and domain
[#2663](https://github.com/juanfont/headscale/pull/2663)
- Policy will now reject invalid fields, making it easier to spot spelling
errors [#2764](https://github.com/juanfont/headscale/pull/2764)
- Add FAQ entry on how to recover from an invalid policy in the database
[#2776](https://github.com/juanfont/headscale/pull/2776)
- EXPERIMENTAL: Add support for `autogroup:self`
[#2789](https://github.com/juanfont/headscale/pull/2789)
- Add healthcheck command
[#2659](https://github.com/juanfont/headscale/pull/2659)
groups, email and domain [#2663](https://github.com/juanfont/headscale/pull/2663)
- Policy will now reject invalid fields, making it easier to spot spelling errors
[#2764](https://github.com/juanfont/headscale/pull/2764)
## 0.26.1 (2025-06-06)
@@ -242,7 +159,7 @@ new policy code passes all of our tests.
- Error messages should be more descriptive and informative.
- There is still work to be done here, but it is already improved with "typing"
(e.g. only Users can be put in Groups)
- All users in the policy must contain an `@` character.
- All users must contain an `@` character.
- If your user naturally contains an `@`, like an email, this will just work.
- If it's based on usernames, or other identifiers not containing an `@`, an
`@` should be appended at the end. For example, if your user is `john`, it
becomes `john@`.
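A minimal sketch of that normalization, assuming a plain username string:

```go
package main

import (
	"fmt"
	"strings"
)

// policyUser appends an "@" to identifiers that do not already contain
// one, so "john" becomes "john@" while "john@example.com" stays as-is.
func policyUser(username string) string {
	if !strings.Contains(username, "@") {
		return username + "@"
	}
	return username
}

func main() {
	fmt.Println(policyUser("john"))             // john@
	fmt.Println(policyUser("john@example.com")) // unchanged
}
```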

View File

@@ -528,4 +528,3 @@ assert.EventuallyWithT(t, func(c *assert.CollectT) {
- **Integration Tests**: Require Docker and can consume significant disk space - use headscale-integration-tester agent
- **Performance**: NodeStore optimizations are critical for scale - be careful with changes to state management
- **Quality Assurance**: Always use appropriate specialized agents for testing and validation tasks
- **NEVER create gists in the user's name**: Do not use the `create_gist` tool - present information directly in the response instead

View File

@@ -12,7 +12,7 @@ WORKDIR /go/src/tailscale
ARG TARGETARCH
RUN GOARCH=$TARGETARCH go install -v ./cmd/derper
FROM alpine:3.22
FROM alpine:3.18
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables curl
COPY --from=build-env /go/bin/* /usr/local/bin/

View File

@@ -2,12 +2,13 @@
# and are in no way endorsed by Headscale's maintainers as an
# official or supported release or distribution.
FROM docker.io/golang:1.25-trixie
FROM docker.io/golang:1.24-bookworm
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale
RUN apt-get --update install --no-install-recommends --yes less jq sqlite3 dnsutils \
RUN apt-get update \
&& apt-get install --no-install-recommends --yes less jq sqlite3 dnsutils \
&& rm -rf /var/lib/apt/lists/* \
&& apt-get clean
RUN mkdir -p /var/run/headscale

View File

@@ -36,7 +36,7 @@ RUN GOARCH=$TARGETARCH go install -tags="${BUILD_TAGS}" -ldflags="\
-X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \
-v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot
FROM alpine:3.22
FROM alpine:3.18
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables curl
COPY --from=build-env /go/bin/* /usr/local/bin/

View File

@@ -1,29 +0,0 @@
package cli
import (
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(healthCmd)
}
var healthCmd = &cobra.Command{
Use: "health",
Short: "Check the health of the Headscale server",
Long: "Check the health of the Headscale server. This command will return an exit code of 0 if the server is healthy, or 1 if it is not.",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
response, err := client.Health(ctx, &v1.HealthRequest{})
if err != nil {
ErrorOutput(err, "Error checking health", output)
}
SuccessOutput(response, "", output)
},
}

View File

@@ -15,7 +15,6 @@ import (
"github.com/samber/lo"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
"tailscale.com/types/key"
)
@@ -52,7 +51,6 @@ func init() {
nodeCmd.AddCommand(registerNodeCmd)
expireNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
expireNodeCmd.Flags().StringP("expiry", "e", "", "Set expire to (RFC3339 format, e.g. 2025-08-27T10:00:00Z), or leave empty to expire immediately.")
err = expireNodeCmd.MarkFlagRequired("identifier")
if err != nil {
log.Fatal(err.Error())
@@ -291,37 +289,12 @@ var expireNodeCmd = &cobra.Command{
)
}
expiry, err := cmd.Flags().GetString("expiry")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting expiry to string: %s", err),
output,
)
return
}
expiryTime := time.Now()
if expiry != "" {
expiryTime, err = time.Parse(time.RFC3339, expiry)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting expiry to string: %s", err),
output,
)
return
}
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
request := &v1.ExpireNodeRequest{
NodeId: identifier,
Expiry: timestamppb.New(expiryTime),
}
response, err := client.ExpireNode(ctx, request)
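Condensed from the CLI code above, expiring a node at a future RFC3339 timestamp via the gRPC API reduces to the following sketch; `HeadscaleServiceClient` is assumed to be the generated client interface for the `HeadscaleService` shown later in this diff.

```go
package main

import (
	"context"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// expireAt sends an ExpireNodeRequest with a custom future timestamp,
// mirroring the flag handling in the CLI code above. The client is
// assumed to be an already-connected gRPC client.
func expireAt(ctx context.Context, client v1.HeadscaleServiceClient, nodeID uint64, rfc3339 string) (*v1.ExpireNodeResponse, error) {
	expiryTime, err := time.Parse(time.RFC3339, rfc3339)
	if err != nil {
		return nil, err
	}
	return client.ExpireNode(ctx, &v1.ExpireNodeRequest{
		NodeId: nodeID,
		Expiry: timestamppb.New(expiryTime),
	})
}
```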

View File

@@ -127,6 +127,12 @@ var setPolicy = &cobra.Command{
ErrorOutput(err, fmt.Sprintf("Error reading the policy file: %s", err), output)
}
_, err = policy.NewPolicyManager(policyBytes, nil, views.Slice[types.NodeView]{})
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error parsing the policy file: %s", err), output)
return
}
if bypass, _ := cmd.Flags().GetBool(bypassFlag); bypass {
confirm := false
force, _ := cmd.Flags().GetBool("force")
@@ -153,17 +159,6 @@ var setPolicy = &cobra.Command{
ErrorOutput(err, fmt.Sprintf("Failed to open database: %s", err), output)
}
users, err := d.ListUsers()
if err != nil {
ErrorOutput(err, fmt.Sprintf("Failed to load users for policy validation: %s", err), output)
}
_, err = policy.NewPolicyManager(policyBytes, users, views.Slice[types.NodeView]{})
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error parsing the policy file: %s", err), output)
return
}
_, err = d.SetPolicy(string(policyBytes))
if err != nil {
ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)

View File

@@ -5,7 +5,6 @@ import (
"os"
"runtime"
"slices"
"strings"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog"
@@ -76,9 +75,8 @@ func initConfig() {
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
!versionInfo.Dirty {
githubTag := &latest.GithubTag{
Owner: "juanfont",
Repository: "headscale",
TagFilterFunc: filterPreReleasesIfStable(func() string { return versionInfo.Version }),
Owner: "juanfont",
Repository: "headscale",
}
res, err := latest.Check(githubTag, versionInfo.Version)
if err == nil && res.Outdated {
@@ -93,43 +91,6 @@ func initConfig() {
}
}
var prereleases = []string{"alpha", "beta", "rc", "dev"}
func isPreReleaseVersion(version string) bool {
for _, unstable := range prereleases {
if strings.Contains(version, unstable) {
return true
}
}
return false
}
// filterPreReleasesIfStable returns a function that filters out
// pre-release tags if the current version is stable.
// If the current version is a pre-release, it does not filter anything.
// versionFunc is a function that returns the current version string; it is
// a func for testability.
func filterPreReleasesIfStable(versionFunc func() string) func(string) bool {
return func(tag string) bool {
version := versionFunc()
// If we are on a pre-release version, then we do not filter anything
// as we want to recommend the user the latest pre-release.
if isPreReleaseVersion(version) {
return false
}
// If we are on a stable release, filter out pre-releases.
for _, ignore := range prereleases {
if strings.Contains(tag, ignore) {
return true
}
}
return false
}
}
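For context, a sketch of how this filter plugs into the update check above; it assumes the `latest` package is github.com/tcnksm/go-latest and that the function lives in the same package as `filterPreReleasesIfStable`.

```go
package cli

import (
	latest "github.com/tcnksm/go-latest" // assumed import path for the `latest` package used above
)

// checkForUpdate sketches how the filter above plugs into go-latest.
// It returns the newest matching release tag if the current version
// is outdated.
func checkForUpdate(current string) (string, bool) {
	githubTag := &latest.GithubTag{
		Owner:      "juanfont",
		Repository: "headscale",
		// Skip pre-release tags when the running version is stable.
		TagFilterFunc: filterPreReleasesIfStable(func() string { return current }),
	}
	res, err := latest.Check(githubTag, current)
	if err != nil || !res.Outdated {
		return "", false
	}
	return res.Current, true
}
```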
var rootCmd = &cobra.Command{
Use: "headscale",
Short: "headscale - a Tailscale control server",

View File

@@ -1,293 +0,0 @@
package cli
import (
"testing"
)
func TestFilterPreReleasesIfStable(t *testing.T) {
tests := []struct {
name string
currentVersion string
tag string
expectedFilter bool
description string
}{
{
name: "stable version filters alpha tag",
currentVersion: "0.23.0",
tag: "v0.24.0-alpha.1",
expectedFilter: true,
description: "When on stable release, alpha tags should be filtered",
},
{
name: "stable version filters beta tag",
currentVersion: "0.23.0",
tag: "v0.24.0-beta.2",
expectedFilter: true,
description: "When on stable release, beta tags should be filtered",
},
{
name: "stable version filters rc tag",
currentVersion: "0.23.0",
tag: "v0.24.0-rc.1",
expectedFilter: true,
description: "When on stable release, rc tags should be filtered",
},
{
name: "stable version allows stable tag",
currentVersion: "0.23.0",
tag: "v0.24.0",
expectedFilter: false,
description: "When on stable release, stable tags should not be filtered",
},
{
name: "alpha version allows alpha tag",
currentVersion: "0.23.0-alpha.1",
tag: "v0.24.0-alpha.2",
expectedFilter: false,
description: "When on alpha release, alpha tags should not be filtered",
},
{
name: "alpha version allows beta tag",
currentVersion: "0.23.0-alpha.1",
tag: "v0.24.0-beta.1",
expectedFilter: false,
description: "When on alpha release, beta tags should not be filtered",
},
{
name: "alpha version allows rc tag",
currentVersion: "0.23.0-alpha.1",
tag: "v0.24.0-rc.1",
expectedFilter: false,
description: "When on alpha release, rc tags should not be filtered",
},
{
name: "alpha version allows stable tag",
currentVersion: "0.23.0-alpha.1",
tag: "v0.24.0",
expectedFilter: false,
description: "When on alpha release, stable tags should not be filtered",
},
{
name: "beta version allows alpha tag",
currentVersion: "0.23.0-beta.1",
tag: "v0.24.0-alpha.1",
expectedFilter: false,
description: "When on beta release, alpha tags should not be filtered",
},
{
name: "beta version allows beta tag",
currentVersion: "0.23.0-beta.2",
tag: "v0.24.0-beta.3",
expectedFilter: false,
description: "When on beta release, beta tags should not be filtered",
},
{
name: "beta version allows rc tag",
currentVersion: "0.23.0-beta.1",
tag: "v0.24.0-rc.1",
expectedFilter: false,
description: "When on beta release, rc tags should not be filtered",
},
{
name: "beta version allows stable tag",
currentVersion: "0.23.0-beta.1",
tag: "v0.24.0",
expectedFilter: false,
description: "When on beta release, stable tags should not be filtered",
},
{
name: "rc version allows alpha tag",
currentVersion: "0.23.0-rc.1",
tag: "v0.24.0-alpha.1",
expectedFilter: false,
description: "When on rc release, alpha tags should not be filtered",
},
{
name: "rc version allows beta tag",
currentVersion: "0.23.0-rc.1",
tag: "v0.24.0-beta.1",
expectedFilter: false,
description: "When on rc release, beta tags should not be filtered",
},
{
name: "rc version allows rc tag",
currentVersion: "0.23.0-rc.2",
tag: "v0.24.0-rc.3",
expectedFilter: false,
description: "When on rc release, rc tags should not be filtered",
},
{
name: "rc version allows stable tag",
currentVersion: "0.23.0-rc.1",
tag: "v0.24.0",
expectedFilter: false,
description: "When on rc release, stable tags should not be filtered",
},
{
name: "stable version with patch filters alpha",
currentVersion: "0.23.1",
tag: "v0.24.0-alpha.1",
expectedFilter: true,
description: "Stable version with patch number should filter alpha tags",
},
{
name: "stable version with patch allows stable",
currentVersion: "0.23.1",
tag: "v0.24.0",
expectedFilter: false,
description: "Stable version with patch number should allow stable tags",
},
{
name: "tag with alpha substring in version number",
currentVersion: "0.23.0",
tag: "v1.0.0-alpha.1",
expectedFilter: true,
description: "Tags with alpha in version string should be filtered on stable",
},
{
name: "tag with beta substring in version number",
currentVersion: "0.23.0",
tag: "v1.0.0-beta.1",
expectedFilter: true,
description: "Tags with beta in version string should be filtered on stable",
},
{
name: "tag with rc substring in version number",
currentVersion: "0.23.0",
tag: "v1.0.0-rc.1",
expectedFilter: true,
description: "Tags with rc in version string should be filtered on stable",
},
{
name: "empty tag on stable version",
currentVersion: "0.23.0",
tag: "",
expectedFilter: false,
description: "Empty tags should not be filtered",
},
{
name: "dev version allows all tags",
currentVersion: "0.23.0-dev",
tag: "v0.24.0-alpha.1",
expectedFilter: false,
description: "Dev versions should not filter any tags (pre-release allows all)",
},
{
name: "stable version filters dev tag",
currentVersion: "0.23.0",
tag: "v0.24.0-dev",
expectedFilter: true,
description: "When on stable release, dev tags should be filtered",
},
{
name: "dev version allows dev tag",
currentVersion: "0.23.0-dev",
tag: "v0.24.0-dev.1",
expectedFilter: false,
description: "When on dev release, dev tags should not be filtered",
},
{
name: "dev version allows stable tag",
currentVersion: "0.23.0-dev",
tag: "v0.24.0",
expectedFilter: false,
description: "When on dev release, stable tags should not be filtered",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := filterPreReleasesIfStable(func() string { return tt.currentVersion })(tt.tag)
if result != tt.expectedFilter {
t.Errorf("%s: got %v, want %v\nDescription: %s\nCurrent version: %s, Tag: %s",
tt.name,
result,
tt.expectedFilter,
tt.description,
tt.currentVersion,
tt.tag,
)
}
})
}
}
func TestIsPreReleaseVersion(t *testing.T) {
tests := []struct {
name string
version string
expected bool
description string
}{
{
name: "stable version",
version: "0.23.0",
expected: false,
description: "Stable version should not be pre-release",
},
{
name: "alpha version",
version: "0.23.0-alpha.1",
expected: true,
description: "Alpha version should be pre-release",
},
{
name: "beta version",
version: "0.23.0-beta.1",
expected: true,
description: "Beta version should be pre-release",
},
{
name: "rc version",
version: "0.23.0-rc.1",
expected: true,
description: "RC version should be pre-release",
},
{
name: "version with alpha substring",
version: "0.23.0-alphabetical",
expected: true,
description: "Version containing 'alpha' should be pre-release",
},
{
name: "version with beta substring",
version: "0.23.0-betamax",
expected: true,
description: "Version containing 'beta' should be pre-release",
},
{
name: "dev version",
version: "0.23.0-dev",
expected: true,
description: "Dev version should be pre-release",
},
{
name: "empty version",
version: "",
expected: false,
description: "Empty version should not be pre-release",
},
{
name: "version with patch number",
version: "0.23.1",
expected: false,
description: "Stable version with patch should not be pre-release",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := isPreReleaseVersion(tt.version)
if result != tt.expected {
t.Errorf("%s: got %v, want %v\nDescription: %s\nVersion: %s",
tt.name,
result,
tt.expected,
tt.description,
tt.version,
)
}
})
}
}

View File

@@ -74,7 +74,7 @@ func detectGoVersion() string {
content, err := os.ReadFile(goModPath)
if err != nil {
return "1.25"
return "1.24"
}
lines := splitLines(string(content))
@@ -89,7 +89,7 @@ func detectGoVersion() string {
}
}
return "1.25"
return "1.24"
}
// splitLines splits a string into lines without using strings.Split.

View File

@@ -81,7 +81,7 @@ func extractDirectoryFromTar(tarReader io.Reader, targetDir string) error {
if err := os.MkdirAll(filepath.Dir(targetPath), 0o755); err != nil {
return fmt.Errorf("failed to create parent directories for %s: %w", targetPath, err)
}
// Create file
outFile, err := os.Create(targetPath)
if err != nil {

View File

@@ -60,9 +60,7 @@ prefixes:
v6: fd7a:115c:a1e0::/48
# Strategy used for allocation of IPs to nodes, available options:
# - sequential (default): assigns the next free IP from the previously given
# IP. A best-effort approach is used and Headscale might leave holes in the
# IP range or fill up existing holes in the IP range.
# - sequential (default): assigns the next free IP from the previously given IP.
# - random: assigns the next free IP from a pseudo-random IP generator (crypto/rand).
allocation: sequential
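To illustrate the two strategies, a conceptual sketch of picking the next free IP with the standard library; this is not headscale's allocator, only a demonstration of sequential stepping versus a crypto/rand-based draw.

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
	"net/netip"
)

// nextSequential returns the next IP after the previously given one,
// skipping addresses already in use (the "holes" mentioned above).
func nextSequential(prev netip.Addr, used map[netip.Addr]bool) netip.Addr {
	ip := prev.Next()
	for used[ip] {
		ip = ip.Next()
	}
	return ip
}

// randomHostOffset draws a pseudo-random offset via crypto/rand, the
// same source named for the "random" strategy.
func randomHostOffset(max int64) int64 {
	n, err := rand.Int(rand.Reader, big.NewInt(max))
	if err != nil {
		panic(err)
	}
	return n.Int64()
}

func main() {
	prev := netip.MustParseAddr("100.64.0.1")
	used := map[netip.Addr]bool{netip.MustParseAddr("100.64.0.2"): true}
	fmt.Println(nextSequential(prev, used)) // 100.64.0.3
	fmt.Println(randomHostOffset(1 << 16))
}
```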
@@ -393,13 +391,11 @@ unix_socket_permission: "0770"
# method: S256
# Logtail configuration
# Logtail is Tailscale's logging and auditing infrastructure; it allows the
# control panel to instruct tailscale nodes to log their activity to a remote
# server. To disable logging on the client side, please refer to:
# https://tailscale.com/kb/1011/log-mesh-traffic#opting-out-of-client-logging
# Logtail is Tailscale's logging and auditing infrastructure; it allows the control panel
# to instruct tailscale nodes to log their activity to a remote server.
logtail:
# Enable logtail for tailscale nodes of this Headscale instance.
# As there is currently no support for overriding the log server in Headscale, this is
# Enable logtail for this headscale's clients.
# As there is currently no support for overriding the log server in headscale, this is
# disabled by default. Enabling this will make your clients send logs to Tailscale Inc.
enabled: false

View File

@@ -44,15 +44,6 @@ For convenience, we also [build container images with headscale](../setup/instal
we don't officially support deploying headscale using Docker**. On our [Discord server](https://discord.gg/c84AZQhmpx)
we have a "docker-issues" channel where you can ask the community for Docker-specific help.
## What is the recommended update path? Can I skip multiple versions while updating?
Please follow the steps outlined in the [upgrade guide](../setup/upgrade.md) to update your existing Headscale
installation. It's best to update from one stable version to the next (e.g. 0.24.0 &rarr; 0.25.1 &rarr; 0.26.1) in case
you are multiple releases behind. You should always pick the latest available patch release.
Be sure to check the [changelog](https://github.com/juanfont/headscale/blob/main/CHANGELOG.md) for version-specific
upgrade instructions and breaking changes.
## Scaling / How many clients does Headscale support?
It depends. As often stated, Headscale is not enterprise software and our focus
@@ -143,35 +134,3 @@ in their output of `tailscale status`. Traffic is still filtered according to th
ping` which is always allowed in either direction.
See also <https://tailscale.com/kb/1087/device-visibility>.
## My policy is stored in the database and Headscale refuses to start due to an invalid policy. How can I recover?
Headscale checks if the policy is valid during startup and refuses to start if it detects an error. The error message
indicates which part of the policy is invalid. Follow these steps to fix your policy:
- Dump the policy to a file: `headscale policy get --bypass-grpc-and-access-database-directly > policy.json`
- Edit and fix up `policy.json`. Use the command `headscale policy check --file policy.json` to validate the policy.
- Load the modified policy: `headscale policy set --bypass-grpc-and-access-database-directly --file policy.json`
- Start Headscale as usual.
!!! warning "Full server configuration required"
The above commands to get/set the policy require a complete server configuration file including database settings. A
minimal config to [control Headscale via remote CLI](../ref/remote-cli.md) is not sufficient. You may use `headscale
-c /path/to/config.yaml` to specify the path to an alternative configuration file.
## How can I avoid sending logs to Tailscale Inc?
A Tailscale client [collects logs about its operation and connection attempts with other
clients](https://tailscale.com/kb/1011/log-mesh-traffic#client-logs) and sends them to a central log service operated by
Tailscale Inc.
Headscale, by default, instructs clients to disable log submission to the central log service. This configuration is
applied by a client once it has successfully connected to Headscale. See the configuration option `logtail.enabled` in the
[configuration file](../ref/configuration.md) for details.
Alternatively, logging can also be disabled on the client side. This is independent of Headscale and opting out of
client logging disables log submission early during client startup. The configuration is operating system specific and
is usually achieved by setting the environment variable `TS_NO_LOGS_NO_SUPPORT=true` or by passing the flag
`--no-logs-no-support` to `tailscaled`. See
<https://tailscale.com/kb/1011/log-mesh-traffic#opting-out-of-client-logging> for details.

View File

@@ -23,7 +23,7 @@ provides an overview of Headscale's features and compatibility with the Tailscale
- [x] Access control lists ([GitHub label "policy"](https://github.com/juanfont/headscale/labels/policy%20%F0%9F%93%9D))
- [x] ACL management via API
- [x] Some [Autogroups](https://tailscale.com/kb/1396/targets#autogroups), currently: `autogroup:internet`,
`autogroup:nonroot`, `autogroup:member`, `autogroup:tagged`, `autogroup:self`
`autogroup:nonroot`, `autogroup:member`, `autogroup:tagged`
- [x] [Auto approvers](https://tailscale.com/kb/1337/acl-syntax#auto-approvers) for [subnet
routers](../ref/routes.md#automatically-approve-routes-of-a-subnet-router) and [exit
nodes](../ref/routes.md#automatically-approve-an-exit-node-with-auto-approvers)

View File

@@ -194,94 +194,13 @@ Here are the ACL's to implement the same permissions as above:
"dst": ["tag:dev-app-servers:80,443"]
},
// Allow users to access their own devices using autogroup:self (see below for more details about performance impact)
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
},
// We still have to allow internal user communications, since nothing guarantees that each person
// has their own user.
{ "action": "accept", "src": ["boss@"], "dst": ["boss@:*"] },
{ "action": "accept", "src": ["dev1@"], "dst": ["dev1@:*"] },
{ "action": "accept", "src": ["dev2@"], "dst": ["dev2@:*"] },
{ "action": "accept", "src": ["admin1@"], "dst": ["admin1@:*"] },
{ "action": "accept", "src": ["intern1@"], "dst": ["intern1@:*"] }
]
}
```
## Autogroups
Headscale supports several autogroups that automatically include users, destinations, or devices with specific properties. Autogroups provide a convenient way to write ACL rules without manually listing individual users or devices.
### `autogroup:internet`
Allows access to the internet through [exit nodes](routes.md#exit-node). Can only be used in ACL destinations.
```json
{
"action": "accept",
"src": ["group:users"],
"dst": ["autogroup:internet:*"]
}
```
### `autogroup:member`
Includes all users who are direct members of the tailnet. Does not include users from shared devices.
```json
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["tag:prod-app-servers:80,443"]
}
```
### `autogroup:tagged`
Includes all devices that have at least one tag.
```json
{
"action": "accept",
"src": ["autogroup:tagged"],
"dst": ["tag:monitoring:9090"]
}
```
### `autogroup:self`
**(EXPERIMENTAL)**
!!! warning "The current implementation of `autogroup:self` is inefficient"
Includes devices where the same user is authenticated on both the source and destination. Does not include tagged devices. Can only be used in ACL destinations.
```json
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
}
```
*Using `autogroup:self` may cause performance degradation on the Headscale coordinator server in large deployments, as filter rules must be compiled per-node rather than globally and the current implementation is not very efficient.*
If you experience performance issues, consider using more specific ACL rules or limiting the use of `autogroup:self`.
```json
{
  "acls": [
    // The following rules allow internal users to communicate with their
    // own nodes in case autogroup:self is causing performance issues.
    { "action": "accept", "src": ["boss@"], "dst": ["boss@:*"] },
    { "action": "accept", "src": ["dev1@"], "dst": ["dev1@:*"] },
    { "action": "accept", "src": ["dev2@"], "dst": ["dev2@:*"] },
    { "action": "accept", "src": ["admin1@"], "dst": ["admin1@:*"] },
    { "action": "accept", "src": ["intern1@"], "dst": ["intern1@:*"] }
  ]
}
```
### `autogroup:nonroot`
Used in Tailscale SSH rules to allow access to any user except root. Can only be used in the `users` field of SSH rules.
```json
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self"],
"users": ["autogroup:nonroot"]
}
```

View File

@@ -1,7 +1,7 @@
# DNS
Headscale supports [most DNS features](../about/features.md) from Tailscale. DNS related settings can be configured
within the `dns` section of the [configuration file](./configuration.md).
within `dns` section of the [configuration file](./configuration.md).
## Setting extra DNS records

View File

@@ -67,6 +67,12 @@ headscale apikeys expire --prefix "<PREFIX>"
export HEADSCALE_CLI_API_KEY="<API_KEY_FROM_PREVIOUS_STEP>"
```
!!! bug
Headscale currently requires at least an empty configuration file when environment variables are used to
specify connection details. See [issue 2193](https://github.com/juanfont/headscale/issues/2193) for more
information.
This instructs the `headscale` binary to connect to a remote instance at `<HEADSCALE_ADDRESS>:<PORT>`, instead of
connecting to the local instance.

View File

@@ -216,39 +216,6 @@ nodes.
}
```
### Restrict access to exit nodes per user or group
A user can use _any_ of the available exit nodes with `autogroup:internet`. Alternatively, the ACL snippet below assigns
each user a specific exit node while hiding all other exit nodes. The user `alice` can only use exit node `exit1` while
user `bob` can only use exit node `exit2`.
```json title="Assign each user a dedicated exit node"
{
"hosts": {
"exit1": "100.64.0.1/32",
"exit2": "100.64.0.2/32"
},
"acls": [
{
"action": "accept",
"src": ["alice@"],
"dst": ["exit1:*"]
},
{
"action": "accept",
"src": ["bob@"],
"dst": ["exit2:*"]
}
]
}
```
!!! warning
- The above implementation is Headscale specific and will likely be removed once [support for
`via`](https://github.com/juanfont/headscale/issues/2409) is available.
- Beware that a user can also connect to any port of the exit node itself.
### Automatically approve an exit node with auto approvers
The initial setup of an exit node usually requires manual approval on the control server before it can be used by a node

View File

@@ -39,7 +39,6 @@ Registry](https://github.com/juanfont/headscale/pkgs/container/headscale). The c
--volume "$(pwd)/run:/var/run/headscale" \
--publish 127.0.0.1:8080:8080 \
--publish 127.0.0.1:9090:9090 \
--health-cmd "headscale health" \
docker.io/headscale/headscale:<VERSION> \
serve
```
@@ -67,8 +66,6 @@ Registry](https://github.com/juanfont/headscale/pkgs/container/headscale). The c
- <HEADSCALE_PATH>/lib:/var/lib/headscale
- <HEADSCALE_PATH>/run:/var/run/headscale
command: serve
healthcheck:
test: ["CMD", "headscale", "health"]
```
1. Verify headscale is running:

View File

@@ -7,7 +7,7 @@ Both are available on the [GitHub releases page](https://github.com/juanfont/hea
It is recommended to use our DEB packages to install headscale on a Debian based system as those packages configure a
local user to run headscale, provide a default configuration and ship with a systemd service file. Supported
distributions are Ubuntu 22.04 or newer, Debian 12 or newer.
distributions are Ubuntu 22.04 or newer, Debian 11 or newer.
1. Download the [latest headscale package](https://github.com/juanfont/headscale/releases/latest) for your platform (`.deb` for Ubuntu and Debian).
@@ -57,14 +57,14 @@ managed by systemd.
1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
```shell
sudo wget --output-document=/usr/bin/headscale \
sudo wget --output-document=/usr/local/bin/headscale \
https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>
```
1. Make `headscale` executable:
```shell
sudo chmod +x /usr/bin/headscale
sudo chmod +x /usr/local/bin/headscale
```
1. Add a dedicated local user to run headscale:

flake.lock (generated)
View File

@@ -20,11 +20,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1760533177,
"narHash": "sha256-OwM1sFustLHx+xmTymhucZuNhtq98fHIbfO8Swm5L8A=",
"lastModified": 1755829505,
"narHash": "sha256-4/Jd+LkQ2ssw8luQVkqVs9spDBVE6h/u/hC/tzngsPo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "35f590344ff791e6b1d6d6b8f3523467c9217caf",
"rev": "f937f8ecd1c70efd7e9f90ba13dfb400cf559de4",
"type": "github"
},
"original": {

View File

@@ -18,8 +18,8 @@
{
overlay = _: prev: let
pkgs = nixpkgs.legacyPackages.${prev.system};
buildGo = pkgs.buildGo125Module;
vendorHash = "sha256-VOi4PGZ8I+2MiwtzxpKc/4smsL5KcH/pHVkjJfAFPJ0=";
buildGo = pkgs.buildGo124Module;
vendorHash = "sha256-hIY6asY3rOIqf/5P6lFmnNCDWcqNPJaj+tqJuOvGJlo=";
in {
headscale = buildGo {
pname = "headscale";
@@ -97,10 +97,9 @@
# buildGoModule = buildGo;
# };
# The package uses buildGo125Module, not the convention.
# goreleaser = prev.goreleaser.override {
# buildGoModule = buildGo;
# };
goreleaser = prev.goreleaser.override {
buildGoModule = buildGo;
};
gotestsum = prev.gotestsum.override {
buildGoModule = buildGo;
@@ -125,7 +124,7 @@
overlays = [self.overlay];
inherit system;
};
buildDeps = with pkgs; [git go_1_25 gnumake];
buildDeps = with pkgs; [git go_1_24 gnumake];
devDeps = with pkgs;
buildDeps
++ [

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/apikey.proto

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/device.proto

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/headscale.proto
@@ -11,7 +11,6 @@ import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
@@ -22,94 +21,11 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type HealthRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *HealthRequest) Reset() {
*x = HealthRequest{}
mi := &file_headscale_v1_headscale_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *HealthRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HealthRequest) ProtoMessage() {}
func (x *HealthRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_headscale_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HealthRequest.ProtoReflect.Descriptor instead.
func (*HealthRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_headscale_proto_rawDescGZIP(), []int{0}
}
type HealthResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
DatabaseConnectivity bool `protobuf:"varint,1,opt,name=database_connectivity,json=databaseConnectivity,proto3" json:"database_connectivity,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *HealthResponse) Reset() {
*x = HealthResponse{}
mi := &file_headscale_v1_headscale_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *HealthResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HealthResponse) ProtoMessage() {}
func (x *HealthResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_headscale_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HealthResponse.ProtoReflect.Descriptor instead.
func (*HealthResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_headscale_proto_rawDescGZIP(), []int{1}
}
func (x *HealthResponse) GetDatabaseConnectivity() bool {
if x != nil {
return x.DatabaseConnectivity
}
return false
}
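Per the `google.api.http` annotation in the raw descriptor below, the removed Health RPC was also exposed over REST as `GET /api/v1/health`; a minimal sketch of probing it, with the server address as a placeholder and any authentication left deployment-specific:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// The Health RPC removed in this diff was also reachable over REST at
	// GET /api/v1/health (see the HTTP annotation in the descriptor below).
	resp, err := http.Get("https://headscale.example.com/api/v1/health")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```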
var File_headscale_v1_headscale_proto protoreflect.FileDescriptor
const file_headscale_v1_headscale_proto_rawDesc = "" +
"\n" +
"\x1cheadscale/v1/headscale.proto\x12\fheadscale.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17headscale/v1/user.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/node.proto\x1a\x19headscale/v1/apikey.proto\x1a\x19headscale/v1/policy.proto\"\x0f\n" +
"\rHealthRequest\"E\n" +
"\x0eHealthResponse\x123\n" +
"\x15database_connectivity\x18\x01 \x01(\bR\x14databaseConnectivity2\x80\x17\n" +
"\x1cheadscale/v1/headscale.proto\x12\fheadscale.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17headscale/v1/user.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/node.proto\x1a\x19headscale/v1/apikey.proto\x1a\x19headscale/v1/policy.proto2\xa3\x16\n" +
"\x10HeadscaleService\x12h\n" +
"\n" +
"CreateUser\x12\x1f.headscale.v1.CreateUserRequest\x1a .headscale.v1.CreateUserResponse\"\x17\x82\xd3\xe4\x93\x02\x11:\x01*\"\f/api/v1/user\x12\x80\x01\n" +
@@ -140,127 +56,109 @@ const file_headscale_v1_headscale_proto_rawDesc = "" +
"\vListApiKeys\x12 .headscale.v1.ListApiKeysRequest\x1a!.headscale.v1.ListApiKeysResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/apikey\x12v\n" +
"\fDeleteApiKey\x12!.headscale.v1.DeleteApiKeyRequest\x1a\".headscale.v1.DeleteApiKeyResponse\"\x1f\x82\xd3\xe4\x93\x02\x19*\x17/api/v1/apikey/{prefix}\x12d\n" +
"\tGetPolicy\x12\x1e.headscale.v1.GetPolicyRequest\x1a\x1f.headscale.v1.GetPolicyResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/policy\x12g\n" +
"\tSetPolicy\x12\x1e.headscale.v1.SetPolicyRequest\x1a\x1f.headscale.v1.SetPolicyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\x1a\x0e/api/v1/policy\x12[\n" +
"\x06Health\x12\x1b.headscale.v1.HealthRequest\x1a\x1c.headscale.v1.HealthResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/healthB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
"\tSetPolicy\x12\x1e.headscale.v1.SetPolicyRequest\x1a\x1f.headscale.v1.SetPolicyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\x1a\x0e/api/v1/policyB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
var (
file_headscale_v1_headscale_proto_rawDescOnce sync.Once
file_headscale_v1_headscale_proto_rawDescData []byte
)
func file_headscale_v1_headscale_proto_rawDescGZIP() []byte {
file_headscale_v1_headscale_proto_rawDescOnce.Do(func() {
file_headscale_v1_headscale_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_headscale_proto_rawDesc), len(file_headscale_v1_headscale_proto_rawDesc)))
})
return file_headscale_v1_headscale_proto_rawDescData
}
var file_headscale_v1_headscale_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_headscale_v1_headscale_proto_goTypes = []any{
(*HealthRequest)(nil), // 0: headscale.v1.HealthRequest
(*HealthResponse)(nil), // 1: headscale.v1.HealthResponse
(*CreateUserRequest)(nil), // 2: headscale.v1.CreateUserRequest
(*RenameUserRequest)(nil), // 3: headscale.v1.RenameUserRequest
(*DeleteUserRequest)(nil), // 4: headscale.v1.DeleteUserRequest
(*ListUsersRequest)(nil), // 5: headscale.v1.ListUsersRequest
(*CreatePreAuthKeyRequest)(nil), // 6: headscale.v1.CreatePreAuthKeyRequest
(*ExpirePreAuthKeyRequest)(nil), // 7: headscale.v1.ExpirePreAuthKeyRequest
(*ListPreAuthKeysRequest)(nil), // 8: headscale.v1.ListPreAuthKeysRequest
(*DebugCreateNodeRequest)(nil), // 9: headscale.v1.DebugCreateNodeRequest
(*GetNodeRequest)(nil), // 10: headscale.v1.GetNodeRequest
(*SetTagsRequest)(nil), // 11: headscale.v1.SetTagsRequest
(*SetApprovedRoutesRequest)(nil), // 12: headscale.v1.SetApprovedRoutesRequest
(*RegisterNodeRequest)(nil), // 13: headscale.v1.RegisterNodeRequest
(*DeleteNodeRequest)(nil), // 14: headscale.v1.DeleteNodeRequest
(*ExpireNodeRequest)(nil), // 15: headscale.v1.ExpireNodeRequest
(*RenameNodeRequest)(nil), // 16: headscale.v1.RenameNodeRequest
(*ListNodesRequest)(nil), // 17: headscale.v1.ListNodesRequest
(*MoveNodeRequest)(nil), // 18: headscale.v1.MoveNodeRequest
(*BackfillNodeIPsRequest)(nil), // 19: headscale.v1.BackfillNodeIPsRequest
(*CreateApiKeyRequest)(nil), // 20: headscale.v1.CreateApiKeyRequest
(*ExpireApiKeyRequest)(nil), // 21: headscale.v1.ExpireApiKeyRequest
(*ListApiKeysRequest)(nil), // 22: headscale.v1.ListApiKeysRequest
(*DeleteApiKeyRequest)(nil), // 23: headscale.v1.DeleteApiKeyRequest
(*GetPolicyRequest)(nil), // 24: headscale.v1.GetPolicyRequest
(*SetPolicyRequest)(nil), // 25: headscale.v1.SetPolicyRequest
(*CreateUserResponse)(nil), // 26: headscale.v1.CreateUserResponse
(*RenameUserResponse)(nil), // 27: headscale.v1.RenameUserResponse
(*DeleteUserResponse)(nil), // 28: headscale.v1.DeleteUserResponse
(*ListUsersResponse)(nil), // 29: headscale.v1.ListUsersResponse
(*CreatePreAuthKeyResponse)(nil), // 30: headscale.v1.CreatePreAuthKeyResponse
(*ExpirePreAuthKeyResponse)(nil), // 31: headscale.v1.ExpirePreAuthKeyResponse
(*ListPreAuthKeysResponse)(nil), // 32: headscale.v1.ListPreAuthKeysResponse
(*DebugCreateNodeResponse)(nil), // 33: headscale.v1.DebugCreateNodeResponse
(*GetNodeResponse)(nil), // 34: headscale.v1.GetNodeResponse
(*SetTagsResponse)(nil), // 35: headscale.v1.SetTagsResponse
(*SetApprovedRoutesResponse)(nil), // 36: headscale.v1.SetApprovedRoutesResponse
(*RegisterNodeResponse)(nil), // 37: headscale.v1.RegisterNodeResponse
(*DeleteNodeResponse)(nil), // 38: headscale.v1.DeleteNodeResponse
(*ExpireNodeResponse)(nil), // 39: headscale.v1.ExpireNodeResponse
(*RenameNodeResponse)(nil), // 40: headscale.v1.RenameNodeResponse
(*ListNodesResponse)(nil), // 41: headscale.v1.ListNodesResponse
(*MoveNodeResponse)(nil), // 42: headscale.v1.MoveNodeResponse
(*BackfillNodeIPsResponse)(nil), // 43: headscale.v1.BackfillNodeIPsResponse
(*CreateApiKeyResponse)(nil), // 44: headscale.v1.CreateApiKeyResponse
(*ExpireApiKeyResponse)(nil), // 45: headscale.v1.ExpireApiKeyResponse
(*ListApiKeysResponse)(nil), // 46: headscale.v1.ListApiKeysResponse
(*DeleteApiKeyResponse)(nil), // 47: headscale.v1.DeleteApiKeyResponse
(*GetPolicyResponse)(nil), // 48: headscale.v1.GetPolicyResponse
(*SetPolicyResponse)(nil), // 49: headscale.v1.SetPolicyResponse
(*CreateUserRequest)(nil), // 0: headscale.v1.CreateUserRequest
(*RenameUserRequest)(nil), // 1: headscale.v1.RenameUserRequest
(*DeleteUserRequest)(nil), // 2: headscale.v1.DeleteUserRequest
(*ListUsersRequest)(nil), // 3: headscale.v1.ListUsersRequest
(*CreatePreAuthKeyRequest)(nil), // 4: headscale.v1.CreatePreAuthKeyRequest
(*ExpirePreAuthKeyRequest)(nil), // 5: headscale.v1.ExpirePreAuthKeyRequest
(*ListPreAuthKeysRequest)(nil), // 6: headscale.v1.ListPreAuthKeysRequest
(*DebugCreateNodeRequest)(nil), // 7: headscale.v1.DebugCreateNodeRequest
(*GetNodeRequest)(nil), // 8: headscale.v1.GetNodeRequest
(*SetTagsRequest)(nil), // 9: headscale.v1.SetTagsRequest
(*SetApprovedRoutesRequest)(nil), // 10: headscale.v1.SetApprovedRoutesRequest
(*RegisterNodeRequest)(nil), // 11: headscale.v1.RegisterNodeRequest
(*DeleteNodeRequest)(nil), // 12: headscale.v1.DeleteNodeRequest
(*ExpireNodeRequest)(nil), // 13: headscale.v1.ExpireNodeRequest
(*RenameNodeRequest)(nil), // 14: headscale.v1.RenameNodeRequest
(*ListNodesRequest)(nil), // 15: headscale.v1.ListNodesRequest
(*MoveNodeRequest)(nil), // 16: headscale.v1.MoveNodeRequest
(*BackfillNodeIPsRequest)(nil), // 17: headscale.v1.BackfillNodeIPsRequest
(*CreateApiKeyRequest)(nil), // 18: headscale.v1.CreateApiKeyRequest
(*ExpireApiKeyRequest)(nil), // 19: headscale.v1.ExpireApiKeyRequest
(*ListApiKeysRequest)(nil), // 20: headscale.v1.ListApiKeysRequest
(*DeleteApiKeyRequest)(nil), // 21: headscale.v1.DeleteApiKeyRequest
(*GetPolicyRequest)(nil), // 22: headscale.v1.GetPolicyRequest
(*SetPolicyRequest)(nil), // 23: headscale.v1.SetPolicyRequest
(*CreateUserResponse)(nil), // 24: headscale.v1.CreateUserResponse
(*RenameUserResponse)(nil), // 25: headscale.v1.RenameUserResponse
(*DeleteUserResponse)(nil), // 26: headscale.v1.DeleteUserResponse
(*ListUsersResponse)(nil), // 27: headscale.v1.ListUsersResponse
(*CreatePreAuthKeyResponse)(nil), // 28: headscale.v1.CreatePreAuthKeyResponse
(*ExpirePreAuthKeyResponse)(nil), // 29: headscale.v1.ExpirePreAuthKeyResponse
(*ListPreAuthKeysResponse)(nil), // 30: headscale.v1.ListPreAuthKeysResponse
(*DebugCreateNodeResponse)(nil), // 31: headscale.v1.DebugCreateNodeResponse
(*GetNodeResponse)(nil), // 32: headscale.v1.GetNodeResponse
(*SetTagsResponse)(nil), // 33: headscale.v1.SetTagsResponse
(*SetApprovedRoutesResponse)(nil), // 34: headscale.v1.SetApprovedRoutesResponse
(*RegisterNodeResponse)(nil), // 35: headscale.v1.RegisterNodeResponse
(*DeleteNodeResponse)(nil), // 36: headscale.v1.DeleteNodeResponse
(*ExpireNodeResponse)(nil), // 37: headscale.v1.ExpireNodeResponse
(*RenameNodeResponse)(nil), // 38: headscale.v1.RenameNodeResponse
(*ListNodesResponse)(nil), // 39: headscale.v1.ListNodesResponse
(*MoveNodeResponse)(nil), // 40: headscale.v1.MoveNodeResponse
(*BackfillNodeIPsResponse)(nil), // 41: headscale.v1.BackfillNodeIPsResponse
(*CreateApiKeyResponse)(nil), // 42: headscale.v1.CreateApiKeyResponse
(*ExpireApiKeyResponse)(nil), // 43: headscale.v1.ExpireApiKeyResponse
(*ListApiKeysResponse)(nil), // 44: headscale.v1.ListApiKeysResponse
(*DeleteApiKeyResponse)(nil), // 45: headscale.v1.DeleteApiKeyResponse
(*GetPolicyResponse)(nil), // 46: headscale.v1.GetPolicyResponse
(*SetPolicyResponse)(nil), // 47: headscale.v1.SetPolicyResponse
}
var file_headscale_v1_headscale_proto_depIdxs = []int32{
2, // 0: headscale.v1.HeadscaleService.CreateUser:input_type -> headscale.v1.CreateUserRequest
3, // 1: headscale.v1.HeadscaleService.RenameUser:input_type -> headscale.v1.RenameUserRequest
4, // 2: headscale.v1.HeadscaleService.DeleteUser:input_type -> headscale.v1.DeleteUserRequest
5, // 3: headscale.v1.HeadscaleService.ListUsers:input_type -> headscale.v1.ListUsersRequest
6, // 4: headscale.v1.HeadscaleService.CreatePreAuthKey:input_type -> headscale.v1.CreatePreAuthKeyRequest
7, // 5: headscale.v1.HeadscaleService.ExpirePreAuthKey:input_type -> headscale.v1.ExpirePreAuthKeyRequest
8, // 6: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest
9, // 7: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest
10, // 8: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest
11, // 9: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
12, // 10: headscale.v1.HeadscaleService.SetApprovedRoutes:input_type -> headscale.v1.SetApprovedRoutesRequest
13, // 11: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
14, // 12: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
15, // 13: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
16, // 14: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
17, // 15: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
18, // 16: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
19, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
20, // 18: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
21, // 19: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
22, // 20: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
23, // 21: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
24, // 22: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
25, // 23: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
0, // 24: headscale.v1.HeadscaleService.Health:input_type -> headscale.v1.HealthRequest
26, // 25: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
27, // 26: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
28, // 27: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
29, // 28: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
30, // 29: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
31, // 30: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
32, // 31: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
33, // 32: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
34, // 33: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
35, // 34: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
36, // 35: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
37, // 36: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
38, // 37: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
39, // 38: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
40, // 39: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
41, // 40: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
42, // 41: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
43, // 42: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
44, // 43: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
45, // 44: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
46, // 45: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
47, // 46: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
48, // 47: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
49, // 48: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
1, // 49: headscale.v1.HeadscaleService.Health:output_type -> headscale.v1.HealthResponse
25, // [25:50] is the sub-list for method output_type
0, // [0:25] is the sub-list for method input_type
0, // 0: headscale.v1.HeadscaleService.CreateUser:input_type -> headscale.v1.CreateUserRequest
1, // 1: headscale.v1.HeadscaleService.RenameUser:input_type -> headscale.v1.RenameUserRequest
2, // 2: headscale.v1.HeadscaleService.DeleteUser:input_type -> headscale.v1.DeleteUserRequest
3, // 3: headscale.v1.HeadscaleService.ListUsers:input_type -> headscale.v1.ListUsersRequest
4, // 4: headscale.v1.HeadscaleService.CreatePreAuthKey:input_type -> headscale.v1.CreatePreAuthKeyRequest
5, // 5: headscale.v1.HeadscaleService.ExpirePreAuthKey:input_type -> headscale.v1.ExpirePreAuthKeyRequest
6, // 6: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest
7, // 7: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest
8, // 8: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest
9, // 9: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
10, // 10: headscale.v1.HeadscaleService.SetApprovedRoutes:input_type -> headscale.v1.SetApprovedRoutesRequest
11, // 11: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
12, // 12: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
13, // 13: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
14, // 14: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
15, // 15: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
16, // 16: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
17, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
18, // 18: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
19, // 19: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
20, // 20: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
21, // 21: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
22, // 22: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
23, // 23: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
24, // 24: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
25, // 25: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
26, // 26: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
27, // 27: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
28, // 28: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
29, // 29: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
30, // 30: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
31, // 31: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
32, // 32: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
33, // 33: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
34, // 34: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
35, // 35: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
36, // 36: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
37, // 37: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
38, // 38: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
39, // 39: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
40, // 40: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
41, // 41: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
42, // 42: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
43, // 43: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
44, // 44: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
45, // 45: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
46, // 46: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
47, // 47: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
24, // [24:48] is the sub-list for method output_type
0, // [0:24] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
@@ -282,13 +180,12 @@ func file_headscale_v1_headscale_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_headscale_proto_rawDesc), len(file_headscale_v1_headscale_proto_rawDesc)),
NumEnums: 0,
NumMessages: 2,
NumMessages: 0,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_headscale_v1_headscale_proto_goTypes,
DependencyIndexes: file_headscale_v1_headscale_proto_depIdxs,
MessageInfos: file_headscale_v1_headscale_proto_msgTypes,
}.Build()
File_headscale_v1_headscale_proto = out.File
file_headscale_v1_headscale_proto_goTypes = nil
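
For anyone decoding the index table above: protoc-gen-go flattens every cross-reference into a single depIdxs slice and appends five offsets that mark where each sub-list begins, which is why dropping the Health RPC shifts the input_type/output_type boundaries from [0:25]/[25:50] to [0:24]/[24:48]. A minimal sketch of that layout; the table below is a hypothetical miniature, not taken from the generated file:

```go
package main

import "fmt"

func main() {
	// Hypothetical miniature of a protoc-gen-go depIdxs table: two
	// methods, no message fields or extensions. Flat entries come
	// first; the trailing five values are sub-list offsets, in the
	// order output_type, input_type, extension type_name, extension
	// extendee, field type_name.
	depIdxs := []int32{
		0, 1, // input_type indices for methods 0..1
		2, 3, // output_type indices for methods 0..1
		2, // [2:4] is the sub-list for method output_type
		0, // [0:2] is the sub-list for method input_type
		0, // [0:0] is the sub-list for extension type_name
		0, // [0:0] is the sub-list for extension extendee
		0, // [0:0] is the sub-list for field type_name
	}
	n := int32(len(depIdxs))
	outStart, inStart := depIdxs[n-5], depIdxs[n-4]
	fmt.Println("inputs: ", depIdxs[inStart:outStart]) // [0 1]
	fmt.Println("outputs:", depIdxs[outStart:n-5])     // [2 3]
}
```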

View File

@@ -471,8 +471,6 @@ func local_request_HeadscaleService_DeleteNode_0(ctx context.Context, marshaler
return msg, metadata, err
}
var filter_HeadscaleService_ExpireNode_0 = &utilities.DoubleArray{Encoding: map[string]int{"node_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
func request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ExpireNodeRequest
@@ -487,12 +485,6 @@ func request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler runtim
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ExpireNode_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ExpireNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
@@ -511,12 +503,6 @@ func local_request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ExpireNode_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ExpireNode(ctx, &protoReq)
return msg, metadata, err
}
@@ -823,24 +809,6 @@ func local_request_HeadscaleService_SetPolicy_0(ctx context.Context, marshaler r
return msg, metadata, err
}
func request_HeadscaleService_Health_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq HealthRequest
metadata runtime.ServerMetadata
)
msg, err := client.Health(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_Health_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq HealthRequest
metadata runtime.ServerMetadata
)
msg, err := server.Health(ctx, &protoReq)
return msg, metadata, err
}
// RegisterHeadscaleServiceHandlerServer registers the http handlers for service HeadscaleService to "mux".
// UnaryRPC :call HeadscaleServiceServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
@@ -1327,26 +1295,6 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser
}
forward_HeadscaleService_SetPolicy_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodGet, pattern_HeadscaleService_Health_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/Health", runtime.WithHTTPPathPattern("/api/v1/health"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_HeadscaleService_Health_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_Health_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
@@ -1795,23 +1743,6 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
}
forward_HeadscaleService_SetPolicy_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodGet, pattern_HeadscaleService_Health_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/Health", runtime.WithHTTPPathPattern("/api/v1/health"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_HeadscaleService_Health_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_Health_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
@@ -1840,7 +1771,6 @@ var (
pattern_HeadscaleService_DeleteApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "apikey", "prefix"}, ""))
pattern_HeadscaleService_GetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
pattern_HeadscaleService_SetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
pattern_HeadscaleService_Health_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "health"}, ""))
)
var (
@@ -1868,5 +1798,4 @@ var (
forward_HeadscaleService_DeleteApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetPolicy_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_SetPolicy_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_Health_0 = runtime.ForwardResponseMessage
)
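
With the Health handlers and pattern gone, the REST gateway no longer serves GET /api/v1/health; requests to that path fall through to the gateway's generic 404. A quick probe illustrating the effect (host and port are assumptions, not taken from this diff):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Before this change the gateway mapped GET /api/v1/health to
	// HeadscaleService.Health; with the pattern and handlers removed,
	// the same request now returns 404.
	resp, err := http.Get("http://localhost:8080/api/v1/health") // address is an assumption
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```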

View File

@@ -43,7 +43,6 @@ const (
HeadscaleService_DeleteApiKey_FullMethodName = "/headscale.v1.HeadscaleService/DeleteApiKey"
HeadscaleService_GetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/GetPolicy"
HeadscaleService_SetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/SetPolicy"
HeadscaleService_Health_FullMethodName = "/headscale.v1.HeadscaleService/Health"
)
// HeadscaleServiceClient is the client API for HeadscaleService service.
@@ -79,8 +78,6 @@ type HeadscaleServiceClient interface {
// --- Policy start ---
GetPolicy(ctx context.Context, in *GetPolicyRequest, opts ...grpc.CallOption) (*GetPolicyResponse, error)
SetPolicy(ctx context.Context, in *SetPolicyRequest, opts ...grpc.CallOption) (*SetPolicyResponse, error)
// --- Health start ---
Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error)
}
type headscaleServiceClient struct {
@@ -331,16 +328,6 @@ func (c *headscaleServiceClient) SetPolicy(ctx context.Context, in *SetPolicyReq
return out, nil
}
func (c *headscaleServiceClient) Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(HealthResponse)
err := c.cc.Invoke(ctx, HeadscaleService_Health_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// HeadscaleServiceServer is the server API for HeadscaleService service.
// All implementations must embed UnimplementedHeadscaleServiceServer
// for forward compatibility.
@@ -374,8 +361,6 @@ type HeadscaleServiceServer interface {
// --- Policy start ---
GetPolicy(context.Context, *GetPolicyRequest) (*GetPolicyResponse, error)
SetPolicy(context.Context, *SetPolicyRequest) (*SetPolicyResponse, error)
// --- Health start ---
Health(context.Context, *HealthRequest) (*HealthResponse, error)
mustEmbedUnimplementedHeadscaleServiceServer()
}
@@ -458,9 +443,6 @@ func (UnimplementedHeadscaleServiceServer) GetPolicy(context.Context, *GetPolicy
func (UnimplementedHeadscaleServiceServer) SetPolicy(context.Context, *SetPolicyRequest) (*SetPolicyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SetPolicy not implemented")
}
func (UnimplementedHeadscaleServiceServer) Health(context.Context, *HealthRequest) (*HealthResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Health not implemented")
}
func (UnimplementedHeadscaleServiceServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
func (UnimplementedHeadscaleServiceServer) testEmbeddedByValue() {}
@@ -914,24 +896,6 @@ func _HeadscaleService_SetPolicy_Handler(srv interface{}, ctx context.Context, d
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_Health_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(HealthRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).Health(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_Health_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).Health(ctx, req.(*HealthRequest))
}
return interceptor(ctx, in, info, handler)
}
// HeadscaleService_ServiceDesc is the grpc.ServiceDesc for HeadscaleService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -1035,10 +999,6 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
MethodName: "SetPolicy",
Handler: _HeadscaleService_SetPolicy_Handler,
},
{
MethodName: "Health",
Handler: _HeadscaleService_Health_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "headscale/v1/headscale.proto",
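
Because Health is also removed from the HeadscaleServiceServer interface, implementations that embed UnimplementedHeadscaleServiceServer keep compiling unchanged; only code that invoked the RPC directly breaks. A minimal sketch, assuming the generated package's import path (not shown in this diff):

```go
package main

import (
	"context"
	"fmt"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1" // import path is an assumption
)

type server struct {
	// Embedding keeps this type compiling across interface changes
	// such as the removal of Health; that forward compatibility is
	// what the Unimplemented struct is for.
	v1.UnimplementedHeadscaleServiceServer
}

// ListUsers is one of the RPCs unaffected by this change.
func (s *server) ListUsers(ctx context.Context, req *v1.ListUsersRequest) (*v1.ListUsersResponse, error) {
	return &v1.ListUsersResponse{}, nil
}

func main() {
	// Compile-time check that *server still satisfies the interface.
	var _ v1.HeadscaleServiceServer = (*server)(nil)
	fmt.Println("server implements HeadscaleServiceServer")
}
```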

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/node.proto
@@ -729,7 +729,6 @@ func (*DeleteNodeResponse) Descriptor() ([]byte, []int) {
type ExpireNodeRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
Expiry *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expiry,proto3" json:"expiry,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -771,13 +770,6 @@ func (x *ExpireNodeRequest) GetNodeId() uint64 {
return 0
}
func (x *ExpireNodeRequest) GetExpiry() *timestamppb.Timestamp {
if x != nil {
return x.Expiry
}
return nil
}
type ExpireNodeResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
@@ -1357,10 +1349,9 @@ const file_headscale_v1_node_proto_rawDesc = "" +
"\x04node\x18\x01 \x01(\v2\x12.headscale.v1.NodeR\x04node\",\n" +
"\x11DeleteNodeRequest\x12\x17\n" +
"\anode_id\x18\x01 \x01(\x04R\x06nodeId\"\x14\n" +
"\x12DeleteNodeResponse\"`\n" +
"\x12DeleteNodeResponse\",\n" +
"\x11ExpireNodeRequest\x12\x17\n" +
"\anode_id\x18\x01 \x01(\x04R\x06nodeId\x122\n" +
"\x06expiry\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x06expiry\"<\n" +
"\anode_id\x18\x01 \x01(\x04R\x06nodeId\"<\n" +
"\x12ExpireNodeResponse\x12&\n" +
"\x04node\x18\x01 \x01(\v2\x12.headscale.v1.NodeR\x04node\"G\n" +
"\x11RenameNodeRequest\x12\x17\n" +
@@ -1448,17 +1439,16 @@ var file_headscale_v1_node_proto_depIdxs = []int32{
1, // 7: headscale.v1.GetNodeResponse.node:type_name -> headscale.v1.Node
1, // 8: headscale.v1.SetTagsResponse.node:type_name -> headscale.v1.Node
1, // 9: headscale.v1.SetApprovedRoutesResponse.node:type_name -> headscale.v1.Node
25, // 10: headscale.v1.ExpireNodeRequest.expiry:type_name -> google.protobuf.Timestamp
1, // 11: headscale.v1.ExpireNodeResponse.node:type_name -> headscale.v1.Node
1, // 12: headscale.v1.RenameNodeResponse.node:type_name -> headscale.v1.Node
1, // 13: headscale.v1.ListNodesResponse.nodes:type_name -> headscale.v1.Node
1, // 14: headscale.v1.MoveNodeResponse.node:type_name -> headscale.v1.Node
1, // 15: headscale.v1.DebugCreateNodeResponse.node:type_name -> headscale.v1.Node
16, // [16:16] is the sub-list for method output_type
16, // [16:16] is the sub-list for method input_type
16, // [16:16] is the sub-list for extension type_name
16, // [16:16] is the sub-list for extension extendee
0, // [0:16] is the sub-list for field type_name
1, // 10: headscale.v1.ExpireNodeResponse.node:type_name -> headscale.v1.Node
1, // 11: headscale.v1.RenameNodeResponse.node:type_name -> headscale.v1.Node
1, // 12: headscale.v1.ListNodesResponse.nodes:type_name -> headscale.v1.Node
1, // 13: headscale.v1.MoveNodeResponse.node:type_name -> headscale.v1.Node
1, // 14: headscale.v1.DebugCreateNodeResponse.node:type_name -> headscale.v1.Node
15, // [15:15] is the sub-list for method output_type
15, // [15:15] is the sub-list for method input_type
15, // [15:15] is the sub-list for extension type_name
15, // [15:15] is the sub-list for extension extendee
0, // [0:15] is the sub-list for field type_name
}
func init() { file_headscale_v1_node_proto_init() }
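
On the wire, ExpireNodeRequest now carries only node_id; the optional expiry Timestamp (field 2) is gone, so a node can only be expired immediately rather than at a caller-chosen future time. A client-side sketch under that assumption (import path and server address are assumptions):

```go
package main

import (
	"context"
	"fmt"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1" // import path is an assumption
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("localhost:50443", // address is an assumption
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := v1.NewHeadscaleServiceClient(conn)
	// Only NodeId remains on the request; with the expiry field
	// removed, expiration takes effect immediately.
	resp, err := client.ExpireNode(context.Background(), &v1.ExpireNodeRequest{NodeId: 42})
	if err != nil {
		panic(err)
	}
	fmt.Println("expired node:", resp.GetNode().GetName())
}
```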

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/policy.proto

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/preauthkey.proto

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/user.proto

View File

@@ -164,29 +164,6 @@
]
}
},
"/api/v1/health": {
"get": {
"summary": "--- Health start ---",
"operationId": "HeadscaleService_Health",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1HealthResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"tags": [
"HeadscaleService"
]
}
},
"/api/v1/node": {
"get": {
"operationId": "HeadscaleService_ListNodes",
@@ -406,13 +383,6 @@
"required": true,
"type": "string",
"format": "uint64"
},
{
"name": "expiry",
"in": "query",
"required": false,
"type": "string",
"format": "date-time"
}
],
"tags": [
@@ -1086,14 +1056,6 @@
}
}
},
"v1HealthResponse": {
"type": "object",
"properties": {
"databaseConnectivity": {
"type": "boolean"
}
}
},
"v1ListApiKeysResponse": {
"type": "object",
"properties": {

go.mod (136 changed lines)
View File

@@ -1,59 +1,61 @@
module github.com/juanfont/headscale
go 1.25
go 1.24.4
toolchain go1.24.6
require (
github.com/arl/statsviz v0.7.2
github.com/cenkalti/backoff/v5 v5.0.3
github.com/chasefleming/elem-go v0.31.0
github.com/coder/websocket v1.8.14
github.com/coreos/go-oidc/v3 v3.16.0
github.com/creachadair/command v0.2.0
github.com/arl/statsviz v0.6.0
github.com/cenkalti/backoff/v5 v5.0.2
github.com/chasefleming/elem-go v0.30.0
github.com/coder/websocket v1.8.13
github.com/coreos/go-oidc/v3 v3.14.1
github.com/creachadair/command v0.1.22
github.com/creachadair/flax v0.0.5
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/docker/docker v28.5.1+incompatible
github.com/docker/docker v28.2.2+incompatible
github.com/fsnotify/fsnotify v1.9.0
github.com/glebarez/sqlite v1.11.0
github.com/go-gormigrate/gormigrate/v2 v2.1.5
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced
github.com/go-gormigrate/gormigrate/v2 v2.1.4
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874
github.com/gofrs/uuid/v5 v5.3.2
github.com/google/go-cmp v0.7.0
github.com/gorilla/mux v1.8.1
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0
github.com/jagottsicher/termcolor v1.0.2
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25
github.com/ory/dockertest/v3 v3.12.0
github.com/philip-bui/grpc-zerolog v1.0.1
github.com/pkg/profile v1.7.0
github.com/prometheus/client_golang v1.23.2
github.com/prometheus/common v0.66.1
github.com/pterm/pterm v0.12.82
github.com/puzpuzpuz/xsync/v4 v4.2.0
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/common v0.65.0
github.com/pterm/pterm v0.12.81
github.com/puzpuzpuz/xsync/v4 v4.1.0
github.com/rs/zerolog v1.34.0
github.com/samber/lo v1.52.0
github.com/sasha-s/go-deadlock v0.3.6
github.com/spf13/cobra v1.10.1
github.com/spf13/viper v1.21.0
github.com/stretchr/testify v1.11.1
github.com/samber/lo v1.51.0
github.com/sasha-s/go-deadlock v0.3.5
github.com/spf13/cobra v1.9.1
github.com/spf13/viper v1.20.1
github.com/stretchr/testify v1.10.0
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
golang.org/x/crypto v0.43.0
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b
golang.org/x/net v0.46.0
golang.org/x/oauth2 v0.32.0
golang.org/x/sync v0.17.0
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4
google.golang.org/grpc v1.75.1
google.golang.org/protobuf v1.36.10
golang.org/x/crypto v0.40.0
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
golang.org/x/net v0.42.0
golang.org/x/oauth2 v0.30.0
golang.org/x/sync v0.16.0
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822
google.golang.org/grpc v1.73.0
google.golang.org/protobuf v1.36.6
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
gopkg.in/yaml.v3 v3.0.1
gorm.io/driver/postgres v1.6.0
gorm.io/gorm v1.31.0
gorm.io/gorm v1.30.0
tailscale.com v1.86.5
zgo.at/zcache/v2 v2.4.1
zgo.at/zcache/v2 v2.2.0
zombiezen.com/go/postgrestest v1.0.1
)
@@ -75,17 +77,17 @@ require (
// together, e.g:
// go get modernc.org/libc@v1.55.3 modernc.org/sqlite@v1.33.1
require (
modernc.org/libc v1.66.10 // indirect
modernc.org/libc v1.62.1 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.11.0 // indirect
modernc.org/sqlite v1.39.1
modernc.org/memory v1.10.0 // indirect
modernc.org/sqlite v1.37.0
)
require (
atomicgo.dev/cursor v0.2.0 // indirect
atomicgo.dev/keyboard v0.2.9 // indirect
atomicgo.dev/schedule v0.1.0 // indirect
dario.cat/mergo v1.0.2 // indirect
dario.cat/mergo v1.0.1 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
@@ -109,18 +111,17 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/clipperhouse/uax29/v2 v2.2.0 // indirect
github.com/containerd/console v1.0.5 // indirect
github.com/containerd/continuity v0.4.5 // indirect
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
github.com/creachadair/mds v0.25.10 // indirect
github.com/creachadair/mds v0.24.3 // indirect
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 // indirect
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/cli v28.5.1+incompatible // indirect
github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/cli v28.1.1+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/felixge/fgprof v0.9.5 // indirect
@@ -129,12 +130,13 @@ require (
github.com/gaissmai/bart v0.18.0 // indirect
github.com/glebarez/go-sqlite v1.22.0 // indirect
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-jose/go-jose/v4 v4.1.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
@@ -142,10 +144,10 @@ require (
github.com/google/go-github v17.0.0+incompatible // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gookit/color v1.6.0 // indirect
github.com/gookit/color v1.5.4 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
@@ -153,20 +155,20 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/pgx/v5 v5.7.6 // indirect
github.com/jackc/pgx/v5 v5.7.4 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jsimonetti/rtnetlink v1.4.1 // indirect
github.com/klauspost/compress v1.18.1 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/lithammer/fuzzysearch v1.1.8 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mdlayher/genetlink v1.3.2 // indirect
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 // indirect
github.com/mdlayher/sdnotify v1.0.0 // indirect
@@ -179,25 +181,27 @@ require (
github.com/moby/term v0.5.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/ncruces/go-strftime v1.0.0 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/opencontainers/runc v1.3.2 // indirect
github.com/opencontainers/runc v1.3.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 // indirect
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus-community/pro-bing v0.4.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/safchain/ethtool v0.3.0 // indirect
github.com/sagikazarmark/locafero v0.12.0 // indirect
github.com/sagikazarmark/locafero v0.9.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/afero v1.15.0 // indirect
github.com/spf13/cast v1.10.0 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.14.0 // indirect
github.com/spf13/cast v1.8.0 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect
@@ -207,7 +211,7 @@ require (
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d // indirect
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 // indirect
github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da // indirect
github.com/vishvananda/netns v0.0.5 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
@@ -215,22 +219,22 @@ require (
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
go.opentelemetry.io/otel v1.37.0 // indirect
go.opentelemetry.io/otel v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect
go.opentelemetry.io/otel/metric v1.37.0 // indirect
go.opentelemetry.io/otel/trace v1.37.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
go.opentelemetry.io/otel/metric v1.36.0 // indirect
go.opentelemetry.io/otel/sdk v1.36.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/sys v0.37.0 // indirect
golang.org/x/term v0.36.0 // indirect
golang.org/x/text v0.30.0 // indirect
golang.org/x/mod v0.26.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/term v0.33.0 // indirect
golang.org/x/text v0.27.0 // indirect
golang.org/x/time v0.11.0 // indirect
golang.org/x/tools v0.38.0 // indirect
golang.org/x/tools v0.35.0 // indirect
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 // indirect
)

go.sum (310 changed lines)
View File

@@ -8,8 +8,8 @@ atomicgo.dev/keyboard v0.2.9 h1:tOsIid3nlPLZ3lwgG8KZMp/SFmr7P0ssEN5JUsm78K8=
atomicgo.dev/keyboard v0.2.9/go.mod h1:BC4w9g00XkxH/f1HXhW2sXmJFOCWbKn9xrOunSFtExQ=
atomicgo.dev/schedule v0.1.0 h1:nTthAbhZS5YZmgYbb2+DH8uQIZcTlIrd4eYr3UQxEjs=
atomicgo.dev/schedule v0.1.0/go.mod h1:xeUa3oAkiuHYh8bKiQBRojqAMq3PXXbJujjb0hw8pEU=
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc=
@@ -37,8 +37,8 @@ github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7V
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/arl/statsviz v0.7.2 h1:xnuIfRiXE4kvxEcfGL+IE3mKH1BXNHuE+eJELIh7oOA=
github.com/arl/statsviz v0.7.2/go.mod h1:XlrbiT7xYT03xaW9JMMfD8KFUhBOESJwfyNJu83PbB0=
github.com/arl/statsviz v0.6.0 h1:jbW1QJkEYQkufd//4NDYRSNBpwJNrdzPahF7ZmoGdyE=
github.com/arl/statsviz v0.6.0/go.mod h1:0toboo+YGSUXDaS4g1D5TVS4dXs7S7YYT5J/qnW2h8s=
github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=
github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk=
github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM=
@@ -82,12 +82,12 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chasefleming/elem-go v0.31.0 h1:vZsuKmKdv6idnUbu3awMruxTiFqZ/ertFJFAyBCkVhI=
github.com/chasefleming/elem-go v0.31.0/go.mod h1:UBmmZfso2LkXA0HZInbcwsmhE/LXFClEcBPNCGeARtA=
github.com/chasefleming/elem-go v0.30.0 h1:BlhV1ekv1RbFiM8XZUQeln1Ikb4D+bu2eDO4agREvok=
github.com/chasefleming/elem-go v0.30.0/go.mod h1:hz73qILBIKnTgOujnSMtEj20/epI+f6vg71RUilJAA4=
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
@@ -99,10 +99,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/cilium/ebpf v0.17.3 h1:FnP4r16PWYSE4ux6zN+//jMcW4nMVRvuTLVTvCjyyjg=
github.com/cilium/ebpf v0.17.3/go.mod h1:G5EDHij8yiLzaqn0WjyfJHvRa+3aDlReIaLVRMvOyJk=
github.com/clipperhouse/uax29/v2 v2.2.0 h1:ChwIKnQN3kcZteTXMgb1wztSgaU+ZemkgWdohwgs8tY=
github.com/clipperhouse/uax29/v2 v2.2.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM=
github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g=
github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/qqsc=
github.com/containerd/console v1.0.5/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
@@ -116,18 +114,16 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow=
github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
github.com/coreos/go-oidc/v3 v3.14.1 h1:9ePWwfdwC4QKRlCXsJGou56adA/owXczOzwKdOumLqk=
github.com/coreos/go-oidc/v3 v3.14.1/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creachadair/command v0.2.0 h1:qTA9cMMhZePAxFoNdnk6F6nn94s1qPndIg9hJbqI9cA=
github.com/creachadair/command v0.2.0/go.mod h1:j+Ar+uYnFsHpkMeV9kGj6lJ45y9u2xqtg8FYy6cm+0o=
github.com/creachadair/command v0.1.22 h1:WmdrURwZdmPD1jm13SjKooaMoqo7mW1qI2BPCShs154=
github.com/creachadair/command v0.1.22/go.mod h1:YFc+OMGucqTpxwQg/iJnNg8BMNmRPDK60rYy8ckgKwE=
github.com/creachadair/flax v0.0.5 h1:zt+CRuXQASxwQ68e9GHAOnEgAU29nF0zYMHOCrL5wzE=
github.com/creachadair/flax v0.0.5/go.mod h1:F1PML0JZLXSNDMNiRGK2yjm5f+L9QCHchyHBldFymj8=
github.com/creachadair/mds v0.25.2 h1:xc0S0AfDq5GX9KUR5sLvi5XjA61/P6S5e0xFs1vA18Q=
github.com/creachadair/mds v0.25.2/go.mod h1:+s4CFteFRj4eq2KcGHW8Wei3u9NyzSPzNV32EvjyK/Q=
github.com/creachadair/mds v0.25.10 h1:9k9JB35D1xhOCFl0liBhagBBp8fWWkKZrA7UXsfoHtA=
github.com/creachadair/mds v0.25.10/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs=
github.com/creachadair/mds v0.24.3 h1:X7cM2ymZSyl4IVWnfyXLxRXMJ6awhbcWvtLPhfnTaqI=
github.com/creachadair/mds v0.24.3/go.mod h1:0oeHt9QWu8VfnmskOL4zi2CumjEvB29ScmtOmdrhFeU=
github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc=
github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -145,12 +141,12 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
github.com/docker/cli v28.5.1+incompatible h1:ESutzBALAD6qyCLqbQSEf1a/U8Ybms5agw59yGVc+yY=
github.com/docker/cli v28.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM=
github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k=
github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw=
github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
@@ -174,17 +170,17 @@ github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec
github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=
github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw=
github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ=
github.com/go-gormigrate/gormigrate/v2 v2.1.5 h1:1OyorA5LtdQw12cyJDEHuTrEV3GiXiIhS4/QTTa/SM8=
github.com/go-gormigrate/gormigrate/v2 v2.1.5/go.mod h1:mj9ekk/7CPF3VjopaFvWKN2v7fN3D9d3eEOAXRhi/+M=
github.com/go-gormigrate/gormigrate/v2 v2.1.4 h1:KOPEt27qy1cNzHfMZbp9YTmEuzkY4F4wrdsJW9WFk1U=
github.com/go-gormigrate/gormigrate/v2 v2.1.4/go.mod h1:y/6gPAH6QGAgP1UfHMiXcqGeJ88/GRQbfCReE1JJD5Y=
github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY=
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced h1:Q311OHjMh/u5E2TITc++WlTP5We0xNseRMkHDyvhW7I=
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY=
github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw=
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 h1:F8d1AJ6M9UQCavhwmO6ZsrYLfG8zVFWfEfMS2MXPkSY=
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
@@ -203,6 +199,8 @@ github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU=
github.com/gofrs/uuid/v5 v5.3.2 h1:2jfO8j3XgSwlz/wHqemAEugfnTlikAYHhnqQ8Xh4fE0=
github.com/gofrs/uuid/v5 v5.3.2/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
@@ -225,24 +223,22 @@ github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdF
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d h1:KJIErDwbSHjnp/SGzE5ed8Aol7JsKiI5X7yWKAtzhM0=
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a h1:rDA3FfmxwXR+BVKKdz55WwMJ1pD2hJQNW31d+l3mPk4=
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gookit/assert v0.1.1 h1:lh3GcawXe/p+cU7ESTZ5Ui3Sm/x8JWpIis4/1aF0mY0=
github.com/gookit/assert v0.1.1/go.mod h1:jS5bmIVQZTIwk42uXl4lyj4iaaxx32tqH16CFj0VX2E=
github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo=
github.com/gookit/color v1.6.0 h1:JjJXBTk1ETNyqyilJhkTXJYYigHG24TM9Xa2M1xAhRA=
github.com/gookit/color v1.6.0/go.mod h1:9ACFc7/1IpHGBW8RwuDm/0YEnhg3dwwXpoMsmtyHfjs=
github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0=
github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 h1:+epNPbD5EqgpEMm5wrl4Hqts3jZt8+kYaqUisuuIGTk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU=
@@ -259,8 +255,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
github.com/jackc/pgx/v5 v5.7.4 h1:9wKznZrhWa2QiHL+NjTSPP6yjl3451BX3imWDnokYlg=
github.com/jackc/pgx/v5 v5.7.4/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jagottsicher/termcolor v1.0.2 h1:fo0c51pQSuLBN1+yVX2ZE+hE+P7ULb/TY8eRowJnrsM=
@@ -278,10 +274,10 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jsimonetti/rtnetlink v1.4.1 h1:JfD4jthWBqZMEffc5RjgmlzpYttAVw1sdnmiNaPO3hE=
github.com/jsimonetti/rtnetlink v1.4.1/go.mod h1:xJjT7t59UIZ62GLZbv6PLLo8VFrostJMPBAheR6OM8w=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
@@ -316,8 +312,8 @@ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw=
github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o=
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg=
@@ -344,8 +340,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25 h1:9bCMuD3TcnjeqjPT2gSlha4asp8NvgcFRYExCaikCxk=
@@ -354,16 +350,16 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/opencontainers/runc v1.3.2 h1:GUwgo0Fx9M/pl2utaSYlJfdBcXAB/CZXDxe322lvJ3Y=
github.com/opencontainers/runc v1.3.2/go.mod h1:F7UQQEsxcjUNnFpT1qPLHZBKYP7yWwk6hq8suLy9cl0=
github.com/opencontainers/runc v1.3.0 h1:cvP7xbEvD0QQAs0nZKLzkVog2OPZhI/V2w3WmTmUSXI=
github.com/opencontainers/runc v1.3.0/go.mod h1:9wbWt42gV+KRxKRVVugNP6D5+PQciRbenB4fLVsqGPs=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw=
github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 h1:QTvNkZ5ylY0PGgA+Lih+GdboMLY/G9SEGLMEGVjTVA4=
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a h1:S+AGcmAESQ0pXCUNnRH7V+bOUIgkSX5qVt2cNKCrm0Q=
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/philip-bui/grpc-zerolog v1.0.1 h1:EMacvLRUd2O1K0eWod27ZP5CY1iTNkhBDLSN+Q4JEvA=
github.com/philip-bui/grpc-zerolog v1.0.1/go.mod h1:qXbiq/2X4ZUMMshsqlWyTHOcw7ns+GZmlqZZN05ZHcQ=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
@@ -380,14 +376,14 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4=
github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/pterm/pterm v0.12.27/go.mod h1:PhQ89w4i95rhgE+xedAoqous6K9X+r6aSOI2eFF7DZI=
github.com/pterm/pterm v0.12.29/go.mod h1:WI3qxgvoQFFGKGjGnJR849gU0TsEOvKn5Q8LlY1U7lg=
github.com/pterm/pterm v0.12.30/go.mod h1:MOqLIyMOgmTDz9yorcYbcw+HsgoZo3BQfg2wtl3HEFE=
@@ -395,13 +391,15 @@ github.com/pterm/pterm v0.12.31/go.mod h1:32ZAWZVXD7ZfG0s8qqHXePte42kdz8ECtRyEej
github.com/pterm/pterm v0.12.33/go.mod h1:x+h2uL+n7CP/rel9+bImHD5lF3nM9vJj80k9ybiiTTE=
github.com/pterm/pterm v0.12.36/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5bUw8T8=
github.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkGTYf8s=
github.com/pterm/pterm v0.12.82 h1:+D9wYhCaeaK0FIQoZtqbNQuNpe2lB2tajKKsTd5paVQ=
github.com/pterm/pterm v0.12.82/go.mod h1:TyuyrPjnxfwP+ccJdBTeWHtd/e0ybQHkOS/TakajZCw=
github.com/puzpuzpuz/xsync/v4 v4.2.0 h1:dlxm77dZj2c3rxq0/XNvvUKISAmovoXF4a4qM6Wvkr0=
github.com/puzpuzpuz/xsync/v4 v4.2.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/pterm/pterm v0.12.81 h1:ju+j5I2++FO1jBKMmscgh5h5DPFDFMB7epEjSoKehKA=
github.com/pterm/pterm v0.12.81/go.mod h1:TyuyrPjnxfwP+ccJdBTeWHtd/e0ybQHkOS/TakajZCw=
github.com/puzpuzpuz/xsync/v4 v4.1.0 h1:x9eHRl4QhZFIPJ17yl4KKW9xLyVWbb3/Yq4SXpjF71U=
github.com/puzpuzpuz/xsync/v4 v4.1.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
@@ -411,28 +409,29 @@ github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0=
github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs=
github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=
github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=
github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw=
github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
github.com/sasha-s/go-deadlock v0.3.6 h1:TR7sfOnZ7x00tWPfD397Peodt57KzMDo+9Ae9rMiUmw=
github.com/sasha-s/go-deadlock v0.3.6/go.mod h1:CUqNyyvMxTyjFqDT7MRg9mb4Dv/btmGTqSR+rky/UXo=
github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k=
github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk=
github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI=
github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU=
github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk=
github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
@@ -443,8 +442,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ=
@@ -465,8 +464,6 @@ github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d h1:mnqtPWYyvNiPU9l
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d/go.mod h1:9BzmlFc3OLqLzLTF/5AY+BMs+clxMqyhSGzgXIm8mNI=
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694 h1:95eIP97c88cqAFU/8nURjgI9xxPbD+Ci6mY/a79BI/w=
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694/go.mod h1:veguaG8tVg1H/JG5RfpoUW41I+O8ClPElo/fTYr8mMk=
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993 h1:FyiiAvDAxpB0DrW2GW3KOVfi3YFOtsQUEeFWbf55JJU=
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993/go.mod h1:xJkMmR3t+thnUQhA3Q4m2VSlS5pcOq+CIjmU/xfKKx4=
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97 h1:JJkDnrAhHvOCttk8z9xeZzcDlzzkRA7+Duxj9cwOyxk=
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97/go.mod h1:9jS8HxwsP2fU4ESZ7DZL+fpH/U66EVlVMzdgznH12RM=
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14=
@@ -488,8 +485,8 @@ github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM=
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -502,70 +499,79 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek=
go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g=
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b h1:18qgiDvlvH7kk8Ioa8Ov+K6xCi0GMvmGfGW0sgd/SYA=
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8=
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w=
golang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -586,8 +592,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -595,40 +601,42 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU=
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 h1:i8QOKZfYg6AbGVZzUAY3LrNWCKF8O6zFisU9Wl9RER4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI=
google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -644,8 +652,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
gorm.io/gorm v1.31.0 h1:0VlycGreVhK7RF/Bwt51Fk8v0xLiiiFdbGDPIZQ7mJY=
gorm.io/gorm v1.31.0/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs=
gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k=
@@ -654,28 +662,26 @@ honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
modernc.org/cc/v4 v4.26.5 h1:xM3bX7Mve6G8K8b+T11ReenJOT+BmVqQj0FY5T4+5Y4=
modernc.org/cc/v4 v4.26.5/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.28.1 h1:wPKYn5EC/mYTqBO373jKjvX2n+3+aK7+sICCv4Fjy1A=
modernc.org/ccgo/v4 v4.28.1/go.mod h1:uD+4RnfrVgE6ec9NGguUNdhqzNIeeomeXf6CL0GTE5Q=
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
modernc.org/cc/v4 v4.25.2 h1:T2oH7sZdGvTaie0BRNFbIYsabzCxUQg8nLqCdQ2i0ic=
modernc.org/cc/v4 v4.25.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.25.1 h1:TFSzPrAGmDsdnhT9X2UrcPMI3N/mJ9/X9ykKXwLhDsU=
modernc.org/ccgo/v4 v4.25.1/go.mod h1:njjuAYiPflywOOrm3B7kCB444ONP5pAVr8PIEoE0uDw=
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
modernc.org/libc v1.66.10 h1:yZkb3YeLx4oynyR+iUsXsybsX4Ubx7MQlSYEw4yj59A=
modernc.org/libc v1.66.10/go.mod h1:8vGSEwvoUoltr4dlywvHqjtAqHBaw0j1jI7iFBTAr2I=
modernc.org/libc v1.62.1 h1:s0+fv5E3FymN8eJVmnk0llBe6rOxCu/DEU+XygRbS8s=
modernc.org/libc v1.62.1/go.mod h1:iXhATfJQLjG3NWy56a6WVU73lWOcdYVxsvwCgoPljuo=
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
modernc.org/memory v1.10.0 h1:fzumd51yQ1DxcOxSO+S6X7+QTuVU+n8/Aj7swYjFfC4=
modernc.org/memory v1.10.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.39.1 h1:H+/wGFzuSCIEVCvXYVHX5RQglwhMOvtHSv+VtidL2r4=
modernc.org/sqlite v1.39.1/go.mod h1:9fjQZ0mB1LLP0GYrp39oOJXx/I2sxEnZtzCmEQIKvGE=
modernc.org/sqlite v1.37.0 h1:s1TMe7T3Q3ovQiK2Ouz4Jwh7dw4ZDqbebSDTlSJdfjI=
modernc.org/sqlite v1.37.0/go.mod h1:5YiWv+YviqGMuGw4V+PNplcyaJ5v+vQd7TQOgkACoJM=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
@@ -684,7 +690,7 @@ software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB
software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
tailscale.com v1.86.5 h1:yBtWFjuLYDmxVnfnvPbZNZcKADCYgNfMd0rUAOA9XCs=
tailscale.com v1.86.5/go.mod h1:Lm8dnzU2i/Emw15r6sl3FRNp/liSQ/nYw6ZSQvIdZ1M=
zgo.at/zcache/v2 v2.4.1 h1:Dfjoi8yI0Uq7NCc4lo2kaQJJmp9Mijo21gef+oJstbY=
zgo.at/zcache/v2 v2.4.1/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
zgo.at/zcache/v2 v2.2.0 h1:K29/IPjMniZfveYE+IRXfrl11tMzHkIPuyGrfVZ2fGo=
zgo.at/zcache/v2 v2.2.0/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
zombiezen.com/go/postgrestest v1.0.1 h1:aXoADQAJmZDU3+xilYVut0pHhgc0sF8ZspPW9gFNwP4=
zombiezen.com/go/postgrestest v1.0.1/go.mod h1:marlZezr+k2oSJrvXHnZUs1olHqpE9czlz8ZYkVxliQ=
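Each dependency touched in the go.sum hunks above carries a pair of lines: an `h1:` hash over the module's full file tree, and a second hash over its go.mod alone, which is why every version bump swaps two lines at once. As a rough sketch of where the `h1:` digests come from, the scheme is exposed by golang.org/x/mod (the zip path below is a hypothetical example, not a file in this repository):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// HashZip hashes every file inside a module zip; Hash1 is the "h1:"
	// scheme recorded in go.sum. The zip path is a hypothetical example.
	sum, err := dirhash.HashZip("mapstructure@v2.4.0.zip", dirhash.Hash1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sum) // prints an "h1:..." digest like the entries above
}
```

The go command runs the same computation when it checks downloaded modules against go.sum.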


@@ -380,45 +380,53 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
writer http.ResponseWriter,
req *http.Request,
) {
log.Trace().
Caller().
Str("client_address", req.RemoteAddr).
Msg("HTTP authentication invoked")
if err := func() error {
log.Trace().
Caller().
Str("client_address", req.RemoteAddr).
Msg("HTTP authentication invoked")
authHeader := req.Header.Get("Authorization")
authHeader := req.Header.Get("Authorization")
writeUnauthorized := func(statusCode int) {
writer.WriteHeader(statusCode)
if _, err := writer.Write([]byte("Unauthorized")); err != nil {
log.Error().Err(err).Msg("writing HTTP response failed")
if !strings.HasPrefix(authHeader, AuthPrefix) {
log.Error().
Caller().
Str("client_address", req.RemoteAddr).
Msg(`missing "Bearer " prefix in "Authorization" header`)
writer.WriteHeader(http.StatusUnauthorized)
_, err := writer.Write([]byte("Unauthorized"))
return err
}
}
if !strings.HasPrefix(authHeader, AuthPrefix) {
valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
if err != nil {
log.Error().
Caller().
Err(err).
Str("client_address", req.RemoteAddr).
Msg("failed to validate token")
writer.WriteHeader(http.StatusInternalServerError)
_, err := writer.Write([]byte("Unauthorized"))
return err
}
if !valid {
log.Info().
Str("client_address", req.RemoteAddr).
Msg("invalid token")
writer.WriteHeader(http.StatusUnauthorized)
_, err := writer.Write([]byte("Unauthorized"))
return err
}
return nil
}(); err != nil {
log.Error().
Caller().
Str("client_address", req.RemoteAddr).
Msg(`missing "Bearer " prefix in "Authorization" header`)
writeUnauthorized(http.StatusUnauthorized)
return
}
valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
if err != nil {
log.Info().
Caller().
Err(err).
Str("client_address", req.RemoteAddr).
Msg("failed to validate token")
writeUnauthorized(http.StatusUnauthorized)
return
}
if !valid {
log.Info().
Str("client_address", req.RemoteAddr).
Msg("invalid token")
writeUnauthorized(http.StatusUnauthorized)
Msg("Failed to write HTTP response")
return
}
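The middleware hunk above interleaves both revisions: one writes the 401 response inline at every failure point, the other funnels all failures through a single writeUnauthorized helper. A minimal standalone sketch of the helper-based shape, where validate stands in for the server's real h.state.ValidateAPIKey and the error and invalid-token cases are collapsed into one 401 (the real code distinguishes them):

```go
package main

import (
	"net/http"
	"strings"
)

const AuthPrefix = "Bearer "

// authMiddleware rejects requests without a valid bearer token. validate is
// a stand-in for the server's real API key check.
func authMiddleware(validate func(string) (bool, error), next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		writeUnauthorized := func(code int) {
			w.WriteHeader(code)
			w.Write([]byte("Unauthorized")) // error intentionally ignored in this sketch
		}

		header := r.Header.Get("Authorization")
		if !strings.HasPrefix(header, AuthPrefix) {
			writeUnauthorized(http.StatusUnauthorized)
			return
		}

		valid, err := validate(strings.TrimPrefix(header, AuthPrefix))
		if err != nil || !valid {
			writeUnauthorized(http.StatusUnauthorized)
			return
		}

		next.ServeHTTP(w, r)
	})
}
```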
@@ -446,7 +454,6 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
router.HandleFunc("/robots.txt", h.RobotsHandler).Methods(http.MethodGet)
router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
router.HandleFunc("/version", h.VersionHandler).Methods(http.MethodGet)
router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
router.HandleFunc("/register/{registration_id}", h.authProvider.RegisterHandler).
Methods(http.MethodGet)


@@ -1,7 +1,6 @@
package hscontrol
import (
"cmp"
"context"
"errors"
"fmt"
@@ -12,7 +11,6 @@ import (
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/types/change"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
"tailscale.com/tailcfg"
@@ -27,91 +25,26 @@ type AuthProvider interface {
func (h *Headscale) handleRegister(
ctx context.Context,
req tailcfg.RegisterRequest,
regReq tailcfg.RegisterRequest,
machineKey key.MachinePublic,
) (*tailcfg.RegisterResponse, error) {
// Check for logout/expiry FIRST, before checking auth key.
// Tailscale clients may send logout requests with BOTH a past expiry AND an auth key.
// A past expiry takes precedence - it's a logout regardless of other fields.
if !req.Expiry.IsZero() && req.Expiry.Before(time.Now()) {
log.Debug().
Str("node.key", req.NodeKey.ShortString()).
Time("expiry", req.Expiry).
Bool("has_auth", req.Auth != nil).
Msg("Detected logout attempt with past expiry")
node, ok := h.state.GetNodeByNodeKey(regReq.NodeKey)
// This is a logout attempt (expiry in the past)
if node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok {
log.Debug().
Uint64("node.id", node.ID().Uint64()).
Str("node.name", node.Hostname()).
Bool("is_ephemeral", node.IsEphemeral()).
Bool("has_authkey", node.AuthKey().Valid()).
Msg("Found existing node for logout, calling handleLogout")
resp, err := h.handleLogout(node, req, machineKey)
if err != nil {
return nil, fmt.Errorf("handling logout: %w", err)
}
if resp != nil {
return resp, nil
}
} else {
log.Warn().
Str("node.key", req.NodeKey.ShortString()).
Msg("Logout attempt but node not found in NodeStore")
if ok {
resp, err := h.handleExistingNode(node.AsStruct(), regReq, machineKey)
if err != nil {
return nil, fmt.Errorf("handling existing node: %w", err)
}
return resp, nil
}
// If the register request does not contain an Auth struct, it means we are logging
// out an existing node (legacy logout path for clients that send Auth=nil).
if req.Auth == nil {
// If the register request presents a NodeKey that is currently in use, we will
// check if the node needs to be sent to re-auth, or if the node is logging out.
// We do not look up nodes by [key.MachinePublic] as it might belong to multiple
// nodes, separated by user, and this path only handles expiry/logout.
if node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok {
// When tailscaled restarts, it sends RegisterRequest with Auth=nil and Expiry=zero.
// Return the current node state without modification.
// See: https://github.com/juanfont/headscale/issues/2862
if req.Expiry.IsZero() && node.Expiry().Valid() && !node.IsExpired() {
return nodeToRegisterResponse(node), nil
}
resp, err := h.handleLogout(node, req, machineKey)
if err != nil {
return nil, fmt.Errorf("handling existing node: %w", err)
}
// If resp is not nil, we have a response to return to the node.
// If resp is nil, we should proceed and see if the node is trying to re-auth.
if resp != nil {
return resp, nil
}
} else {
// If the register request is not attempting to register a node, and
// we cannot match it with an existing node, we consider that unexpected,
// as only registered nodes should attempt to log out.
log.Debug().
Str("node.key", req.NodeKey.ShortString()).
Str("machine.key", machineKey.ShortString()).
Bool("unexpected", true).
Msg("received register request with no auth, and no existing node")
}
if regReq.Followup != "" {
return h.waitForFollowup(ctx, regReq)
}
// If the [tailcfg.RegisterRequest] has a Followup URL, it means that the
// node has already started the registration process and we should wait for
// it to finish the original registration.
if req.Followup != "" {
return h.waitForFollowup(ctx, req, machineKey)
}
// Pre-authenticated keys are handled slightly differently from interactive
// logins, as they can be completed fully synchronously and we can respond to
// the node with the result while it is waiting.
if isAuthKey(req) {
resp, err := h.handleRegisterWithAuthKey(req, machineKey)
if regReq.Auth != nil && regReq.Auth.AuthKey != "" {
resp, err := h.handleRegisterWithAuthKey(regReq, machineKey)
if err != nil {
// Preserve HTTPError types so they can be handled properly by the HTTP layer
var httpErr HTTPError
@@ -125,7 +58,7 @@ func (h *Headscale) handleRegister(
return resp, nil
}
resp, err := h.handleRegisterInteractive(req, machineKey)
resp, err := h.handleRegisterInteractive(regReq, machineKey)
if err != nil {
return nil, fmt.Errorf("handling register interactive: %w", err)
}
@@ -133,34 +66,20 @@ func (h *Headscale) handleRegister(
return resp, nil
}
// handleLogout checks if the [tailcfg.RegisterRequest] is a
// logout attempt from a node. If the node is not attempting to
func (h *Headscale) handleLogout(
node types.NodeView,
req tailcfg.RegisterRequest,
func (h *Headscale) handleExistingNode(
node *types.Node,
regReq tailcfg.RegisterRequest,
machineKey key.MachinePublic,
) (*tailcfg.RegisterResponse, error) {
// Fail closed if it looks like this is an attempt to modify a node where
// the node key and the machine key the noise session was started with do
// not align.
if node.MachineKey() != machineKey {
if node.MachineKey != machineKey {
return nil, NewHTTPError(http.StatusUnauthorized, "node exists with a different machine key", nil)
}
// Note: We do NOT return early if req.Auth is set, because Tailscale clients
// may send logout requests with BOTH a past expiry AND an auth key.
// A past expiry indicates logout, regardless of whether Auth is present.
// The expiry check below will handle the logout logic.
expired := node.IsExpired()
// If the node is expired and this is not a re-authentication attempt,
// force the client to re-authenticate.
// TODO(kradalby): I wonder if this is a path we ever hit?
if node.IsExpired() {
log.Trace().Str("node.name", node.Hostname()).
Uint64("node.id", node.ID().Uint64()).
Interface("reg.req", req).
Bool("unexpected", true).
Msg("Node key expired, forcing re-authentication")
// force the client to re-authenticate
if expired && regReq.Auth == nil {
return &tailcfg.RegisterResponse{
NodeKeyExpired: true,
MachineAuthorized: false,
@@ -168,77 +87,49 @@ func (h *Headscale) handleLogout(
}, nil
}
// If we get here, the node is not currently expired and is not trying to
// authenticate.
// The node is likely logging out, but before we run that logic, we validate
// that it is not attempting to tamper with or extend its expiry.
// If it is not, we expire the node or, in the case of an ephemeral node, delete it.
if !expired && !regReq.Expiry.IsZero() {
requestExpiry := regReq.Expiry
// The client is trying to extend their key, this is not allowed.
if req.Expiry.After(time.Now()) {
return nil, NewHTTPError(http.StatusBadRequest, "extending key is not allowed", nil)
}
// If the request expiry is in the past, we consider it a logout.
// Zero expiry is handled in handleRegister() before calling this function.
if req.Expiry.Before(time.Now()) {
log.Debug().
Uint64("node.id", node.ID().Uint64()).
Str("node.name", node.Hostname()).
Bool("is_ephemeral", node.IsEphemeral()).
Bool("has_authkey", node.AuthKey().Valid()).
Time("req.expiry", req.Expiry).
Msg("Processing logout request with past expiry")
if node.IsEphemeral() {
log.Info().
Uint64("node.id", node.ID().Uint64()).
Str("node.name", node.Hostname()).
Msg("Deleting ephemeral node during logout")
c, err := h.state.DeleteNode(node)
if err != nil {
return nil, fmt.Errorf("deleting ephemeral node: %w", err)
}
h.Change(c)
return &tailcfg.RegisterResponse{
NodeKeyExpired: true,
MachineAuthorized: false,
}, nil
// The client is trying to extend their key, this is not allowed.
if requestExpiry.After(time.Now()) {
return nil, NewHTTPError(http.StatusBadRequest, "extending key is not allowed", nil)
}
log.Debug().
Uint64("node.id", node.ID().Uint64()).
Str("node.name", node.Hostname()).
Msg("Node is not ephemeral, setting expiry instead of deleting")
// If the request expiry is in the past, we consider it a logout.
if requestExpiry.Before(time.Now()) {
if node.IsEphemeral() {
c, err := h.state.DeleteNode(node.View())
if err != nil {
return nil, fmt.Errorf("deleting ephemeral node: %w", err)
}
h.Change(c)
return nil, nil
}
}
updatedNode, c, err := h.state.SetNodeExpiry(node.ID, requestExpiry)
if err != nil {
return nil, fmt.Errorf("setting node expiry: %w", err)
}
h.Change(c)
// CRITICAL: Use the updated node view for the response
// The original node object has stale expiry information
node = updatedNode.AsStruct()
}
// Update the internal state with the node's new expiry, meaning it is
// logged out.
updatedNode, c, err := h.state.SetNodeExpiry(node.ID(), req.Expiry)
if err != nil {
return nil, fmt.Errorf("setting node expiry: %w", err)
}
h.Change(c)
return nodeToRegisterResponse(updatedNode), nil
return nodeToRegisterResponse(node), nil
}
// isAuthKey reports whether the register request is a registration request
// using a pre-auth key.
func isAuthKey(req tailcfg.RegisterRequest) bool {
return req.Auth != nil && req.Auth.AuthKey != ""
}
func nodeToRegisterResponse(node types.NodeView) *tailcfg.RegisterResponse {
func nodeToRegisterResponse(node *types.Node) *tailcfg.RegisterResponse {
return &tailcfg.RegisterResponse{
// TODO(kradalby): Only send for user-owned nodes
// and not tagged nodes when tags are working.
User: node.UserView().TailscaleUser(),
Login: node.UserView().TailscaleLogin(),
User: *node.User.TailscaleUser(),
Login: *node.User.TailscaleLogin(),
NodeKeyExpired: node.IsExpired(),
// Headscale does not implement the concept of machine authorization
@@ -250,10 +141,9 @@ func nodeToRegisterResponse(node types.NodeView) *tailcfg.RegisterResponse {
func (h *Headscale) waitForFollowup(
ctx context.Context,
req tailcfg.RegisterRequest,
machineKey key.MachinePublic,
regReq tailcfg.RegisterRequest,
) (*tailcfg.RegisterResponse, error) {
fu, err := url.Parse(req.Followup)
fu, err := url.Parse(regReq.Followup)
if err != nil {
return nil, NewHTTPError(http.StatusUnauthorized, "invalid followup URL", err)
}
@@ -269,68 +159,21 @@ func (h *Headscale) waitForFollowup(
return nil, NewHTTPError(http.StatusUnauthorized, "registration timed out", err)
case node := <-reg.Registered:
if node == nil {
// registration is expired in the cache, instruct the client to try a new registration
return h.reqToNewRegisterResponse(req, machineKey)
return nil, NewHTTPError(http.StatusUnauthorized, "node not found", nil)
}
return nodeToRegisterResponse(node.View()), nil
return nodeToRegisterResponse(node), nil
}
}
// if the follow-up registration isn't found anymore, instruct the client to try a new registration
return h.reqToNewRegisterResponse(req, machineKey)
}
// reqToNewRegisterResponse refreshes the registration flow by creating a new
// registration ID and returning the corresponding AuthURL so the client can
// restart the authentication process.
func (h *Headscale) reqToNewRegisterResponse(
req tailcfg.RegisterRequest,
machineKey key.MachinePublic,
) (*tailcfg.RegisterResponse, error) {
newRegID, err := types.NewRegistrationID()
if err != nil {
return nil, NewHTTPError(http.StatusInternalServerError, "failed to generate registration ID", err)
}
// Ensure we have a valid hostname
hostname := util.EnsureHostname(
req.Hostinfo,
machineKey.String(),
req.NodeKey.String(),
)
// Ensure we have valid hostinfo
hostinfo := cmp.Or(req.Hostinfo, &tailcfg.Hostinfo{})
hostinfo.Hostname = hostname
nodeToRegister := types.NewRegisterNode(
types.Node{
Hostname: hostname,
MachineKey: machineKey,
NodeKey: req.NodeKey,
Hostinfo: hostinfo,
LastSeen: ptr.To(time.Now()),
},
)
if !req.Expiry.IsZero() {
nodeToRegister.Node.Expiry = &req.Expiry
}
log.Info().Msgf("New followup node registration using key: %s", newRegID)
h.state.SetRegistrationCacheEntry(newRegID, nodeToRegister)
return &tailcfg.RegisterResponse{
AuthURL: h.authProvider.AuthURL(newRegID),
}, nil
return nil, NewHTTPError(http.StatusNotFound, "followup registration not found", nil)
}
func (h *Headscale) handleRegisterWithAuthKey(
req tailcfg.RegisterRequest,
regReq tailcfg.RegisterRequest,
machineKey key.MachinePublic,
) (*tailcfg.RegisterResponse, error) {
node, changed, err := h.state.HandleNodeFromPreAuthKey(
req,
regReq,
machineKey,
)
if err != nil {
@@ -382,26 +225,18 @@ func (h *Headscale) handleRegisterWithAuthKey(
// h.Change(policyChange)
// }
resp := &tailcfg.RegisterResponse{
user := node.User()
return &tailcfg.RegisterResponse{
MachineAuthorized: true,
NodeKeyExpired: node.IsExpired(),
User: node.UserView().TailscaleUser(),
Login: node.UserView().TailscaleLogin(),
}
log.Trace().
Caller().
Interface("reg.resp", resp).
Interface("reg.req", req).
Str("node.name", node.Hostname()).
Uint64("node.id", node.ID().Uint64()).
Msg("RegisterResponse")
return resp, nil
User: *user.TailscaleUser(),
Login: *user.TailscaleLogin(),
}, nil
}
func (h *Headscale) handleRegisterInteractive(
req tailcfg.RegisterRequest,
regReq tailcfg.RegisterRequest,
machineKey key.MachinePublic,
) (*tailcfg.RegisterResponse, error) {
registrationId, err := types.NewRegistrationID()
@@ -409,42 +244,19 @@ func (h *Headscale) handleRegisterInteractive(
return nil, fmt.Errorf("generating registration ID: %w", err)
}
// Ensure we have a valid hostname
hostname := util.EnsureHostname(
req.Hostinfo,
machineKey.String(),
req.NodeKey.String(),
)
// Ensure we have valid hostinfo
hostinfo := cmp.Or(req.Hostinfo, &tailcfg.Hostinfo{})
if req.Hostinfo == nil {
log.Warn().
Str("machine.key", machineKey.ShortString()).
Str("node.key", req.NodeKey.ShortString()).
Str("generated.hostname", hostname).
Msg("Received registration request with nil hostinfo, generated default hostname")
} else if req.Hostinfo.Hostname == "" {
log.Warn().
Str("machine.key", machineKey.ShortString()).
Str("node.key", req.NodeKey.ShortString()).
Str("generated.hostname", hostname).
Msg("Received registration request with empty hostname, generated default")
}
hostinfo.Hostname = hostname
nodeToRegister := types.NewRegisterNode(
types.Node{
Hostname: hostname,
nodeToRegister := types.RegisterNode{
Node: types.Node{
Hostname: regReq.Hostinfo.Hostname,
MachineKey: machineKey,
NodeKey: req.NodeKey,
Hostinfo: hostinfo,
NodeKey: regReq.NodeKey,
Hostinfo: regReq.Hostinfo,
LastSeen: ptr.To(time.Now()),
},
)
Registered: make(chan *types.Node),
}
if !req.Expiry.IsZero() {
nodeToRegister.Node.Expiry = &req.Expiry
if !regReq.Expiry.IsZero() {
nodeToRegister.Node.Expiry = &regReq.Expiry
}
h.state.SetRegistrationCacheEntry(
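The handleRegister rewrite above (cut off where the next file's diff is suppressed) settles on a fixed dispatch order: a past expiry means logout before anything else is considered, then a Followup URL, then a pre-auth key, and finally the interactive flow. A trimmed sketch of that precedence, with a local struct standing in for tailcfg.RegisterRequest so it stays self-contained:

```go
package main

import (
	"fmt"
	"time"
)

// registerRequest trims tailcfg.RegisterRequest down to the fields the
// dispatch above depends on. AuthKey stands in for req.Auth.AuthKey.
type registerRequest struct {
	Expiry   time.Time
	Followup string
	AuthKey  string
}

// classify mirrors the precedence in handleRegister: a past expiry is a
// logout even when an auth key is also present in the same request.
func classify(req registerRequest) string {
	switch {
	case !req.Expiry.IsZero() && req.Expiry.Before(time.Now()):
		return "logout"
	case req.Followup != "":
		return "followup"
	case req.AuthKey != "":
		return "authkey"
	default:
		return "interactive"
	}
}

func main() {
	// Tailscale clients may send logout requests with BOTH a past expiry
	// AND an auth key; the past expiry wins.
	req := registerRequest{
		Expiry:  time.Now().Add(-time.Hour),
		AuthKey: "hypothetical-preauth-key",
	}
	fmt.Println(classify(req)) // logout
}
```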

File diff suppressed because it is too large


@@ -1,6 +1,6 @@
package capver
// Generated DO NOT EDIT
//Generated DO NOT EDIT
import "tailscale.com/tailcfg"
@@ -37,15 +37,16 @@ var tailscaleToCapVer = map[string]tailcfg.CapabilityVersion{
"v1.84.2": 116,
}
var capVerToTailscaleVer = map[tailcfg.CapabilityVersion]string{
90: "v1.64.0",
95: "v1.66.0",
97: "v1.68.0",
102: "v1.70.0",
104: "v1.72.0",
106: "v1.74.0",
109: "v1.78.0",
113: "v1.80.0",
115: "v1.82.0",
116: "v1.84.0",
90: "v1.64.0",
95: "v1.66.0",
97: "v1.68.0",
102: "v1.70.0",
104: "v1.72.0",
106: "v1.74.0",
109: "v1.78.0",
113: "v1.80.0",
115: "v1.82.0",
116: "v1.84.0",
}
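The regenerated table above changes only alignment; the version data is identical on both sides. For orientation, a small sketch of how such a table can be consumed, with a few entries inlined from the hunk (the helper name is illustrative, not headscale's API):

```go
package main

import "fmt"

// capVerToTailscaleVer mirrors a few entries of the generated table above.
var capVerToTailscaleVer = map[int]string{
	90:  "v1.64.0",
	95:  "v1.66.0",
	116: "v1.84.0",
}

// oldestRelease returns the oldest Tailscale release known to speak the
// given capability version, or "" when the table has no entry.
func oldestRelease(capVer int) string {
	return capVerToTailscaleVer[capVer]
}

func main() {
	fmt.Println(oldestRelease(116)) // v1.84.0
}
```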


@@ -932,61 +932,6 @@ AND auth_key_id NOT IN (
},
Rollback: func(db *gorm.DB) error { return nil },
},
{
// Drop all tables that once existed but are no longer in use.
// They are potentially still present from broken migrations in the past.
ID: "202510311551",
Migrate: func(tx *gorm.DB) error {
for _, oldTable := range []string{"namespaces", "machines", "shared_machines", "kvs", "pre_auth_key_acl_tags", "routes"} {
err := tx.Migrator().DropTable(oldTable)
if err != nil {
log.Trace().Str("table", oldTable).
Err(err).
Msg("Error dropping old table, continuing...")
}
}
return nil
},
Rollback: func(tx *gorm.DB) error {
return nil
},
},
{
// Drop all indices that once existed but are no longer in use.
// They are potentially still present from broken migrations in the past.
// They should all have been cleaned up by the db engine, but we are a bit
// conservative to ensure all our previous mess is cleaned up.
ID: "202511101554-drop-old-idx",
Migrate: func(tx *gorm.DB) error {
for _, oldIdx := range []struct{ name, table string }{
{"idx_namespaces_deleted_at", "namespaces"},
{"idx_routes_deleted_at", "routes"},
{"idx_shared_machines_deleted_at", "shared_machines"},
} {
err := tx.Migrator().DropIndex(oldIdx.table, oldIdx.name)
if err != nil {
log.Trace().
Str("index", oldIdx.name).
Str("table", oldIdx.table).
Err(err).
Msg("Error dropping old index, continuing...")
}
}
return nil
},
Rollback: func(tx *gorm.DB) error {
return nil
},
},
// Migrations **above** this point will be REMOVED in version **0.29.0**
// This is to clean up a lot of old migrations that are seldom used
// and carry a lot of technical debt.
// Any new migrations should be added after the comment below and follow
// the rules it sets out.
// From this point, the following rules must be followed:
// - NEVER use gorm.AutoMigrate, write the exact migration steps needed
// - AutoMigrate depends on the struct staying exactly the same, which it won't over time.
@@ -1017,17 +962,7 @@ AND auth_key_id NOT IN (
ctx, cancel := context.WithTimeout(context.Background(), contextTimeoutSecs*time.Second)
defer cancel()
opts := squibble.DigestOptions{
IgnoreTables: []string{
// Litestream tables, these are inserted by
// litestream and not part of our schema
// https://litestream.io/how-it-works
"_litestream_lock",
"_litestream_seq",
},
}
if err := squibble.Validate(ctx, sqlConn, dbSchema, &opts); err != nil {
if err := squibble.Validate(ctx, sqlConn, dbSchema); err != nil {
return nil, fmt.Errorf("validating schema: %w", err)
}
}
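The dropped migrations above follow a deliberately forgiving pattern: each legacy table or index is dropped independently and failures are only logged, since the objects may never have existed on a given installation. A minimal gormigrate-style sketch of that pattern, reusing the migration ID and a few table names from the hunk:

```go
package migrations

import (
	"log"

	"github.com/go-gormigrate/gormigrate/v2"
	"gorm.io/gorm"
)

// dropLegacyTables sketches the defensive cleanup shown above: every drop
// is attempted on its own, and an error (e.g. the table never existed) is
// logged and skipped rather than failing the whole migration.
var dropLegacyTables = &gormigrate.Migration{
	ID: "202510311551",
	Migrate: func(tx *gorm.DB) error {
		for _, oldTable := range []string{"namespaces", "machines", "kvs"} {
			if err := tx.Migrator().DropTable(oldTable); err != nil {
				log.Printf("dropping legacy table %q: %v (continuing)", oldTable, err)
			}
		}
		return nil
	},
	Rollback: func(tx *gorm.DB) error { return nil },
}
```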


@@ -325,11 +325,7 @@ func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) {
}
if changed {
// Use Updates() with Select() to only update IP fields, avoiding overwriting
// other fields like Expiry. We need Select() because Updates() alone skips
// zero values, but we DO want to update IPv4/IPv6 to nil when removing them.
// See issue #2862.
err := tx.Model(node).Select("ipv4", "ipv6").Updates(node).Error
err := tx.Save(node).Error
if err != nil {
return fmt.Errorf("saving node(%d) after adding IPs: %w", node.ID, err)
}
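The comment in this hunk names the actual bug class: gorm's Updates() skips zero values, so writing nil into IPv4/IPv6 silently changes nothing unless Select() forces those columns to be written, whereas Save() rewrites every column and can clobber unrelated fields such as Expiry. A trimmed sketch of the safe form (the Node struct is a stand-in for the real types.Node):

```go
package db

import (
	"net/netip"

	"gorm.io/gorm"
)

// Node is a trimmed stand-in for the real types.Node.
type Node struct {
	ID   uint64
	IPv4 *netip.Addr
	IPv6 *netip.Addr
}

// clearIPs removes both addresses. Updates() alone would skip the nil
// fields and leave the old addresses in place; Select() forces the ipv4
// and ipv6 columns to be written even when the values are nil.
func clearIPs(tx *gorm.DB, node *Node) error {
	node.IPv4 = nil
	node.IPv6 = nil

	return tx.Model(node).Select("ipv4", "ipv6").Updates(node).Error
}
```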


@@ -5,20 +5,19 @@ import (
"errors"
"fmt"
"net/netip"
"regexp"
"slices"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/types/change"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/types/ptr"
)
@@ -28,8 +27,6 @@ const (
NodeGivenNameTrimSize = 2
)
var invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+")
var (
ErrNodeNotFound = errors.New("node not found")
ErrNodeRouteIsNotAvailable = errors.New("route is not available on node")
@@ -233,17 +230,6 @@ func SetApprovedRoutes(
return nil
}
// When approving exit routes, ensure both IPv4 and IPv6 are included.
// If either 0.0.0.0/0 or ::/0 is being approved, both should be approved.
hasIPv4Exit := slices.Contains(routes, tsaddr.AllIPv4())
hasIPv6Exit := slices.Contains(routes, tsaddr.AllIPv6())
if hasIPv4Exit && !hasIPv6Exit {
routes = append(routes, tsaddr.AllIPv6())
} else if hasIPv6Exit && !hasIPv4Exit {
routes = append(routes, tsaddr.AllIPv4())
}
b, err := json.Marshal(routes)
if err != nil {
return err
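The exit-route block in this hunk pairs the two exit-node prefixes: approving either 0.0.0.0/0 or ::/0 implies approving the other, so IPv4 and IPv6 exit routing stay in sync. A self-contained sketch of that normalization using the standard library in place of tsaddr:

```go
package main

import (
	"fmt"
	"net/netip"
	"slices"
)

var (
	allIPv4 = netip.MustParsePrefix("0.0.0.0/0") // tsaddr.AllIPv4()
	allIPv6 = netip.MustParsePrefix("::/0")      // tsaddr.AllIPv6()
)

// pairExitRoutes mirrors the hunk above: if exactly one of the exit-node
// prefixes is being approved, the other is appended as well.
func pairExitRoutes(routes []netip.Prefix) []netip.Prefix {
	hasV4 := slices.Contains(routes, allIPv4)
	hasV6 := slices.Contains(routes, allIPv6)

	switch {
	case hasV4 && !hasV6:
		return append(routes, allIPv6)
	case hasV6 && !hasV4:
		return append(routes, allIPv4)
	default:
		return routes
	}
}

func main() {
	fmt.Println(pairExitRoutes([]netip.Prefix{allIPv4})) // [0.0.0.0/0 ::/0]
}
```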
@@ -275,10 +261,6 @@ func SetLastSeen(tx *gorm.DB, nodeID types.NodeID, lastSeen time.Time) error {
func RenameNode(tx *gorm.DB,
nodeID types.NodeID, newName string,
) error {
if err := util.ValidateHostname(newName); err != nil {
return fmt.Errorf("renaming node: %w", err)
}
// Check if the new name is unique
var count int64
if err := tx.Model(&types.Node{}).Where("given_name = ? AND id != ?", newName, nodeID).Count(&count).Error; err != nil {
@@ -396,14 +378,6 @@ func RegisterNodeForTest(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *n
node.IPv4 = ipv4
node.IPv6 = ipv6
var err error
node.Hostname, err = util.NormaliseHostname(node.Hostname)
if err != nil {
newHostname := util.InvalidString()
log.Info().Err(err).Str("invalid-hostname", node.Hostname).Str("new-hostname", newHostname).Msgf("Invalid hostname, replacing")
node.Hostname = newHostname
}
if node.GivenName == "" {
givenName, err := EnsureUniqueGivenName(tx, node.Hostname)
if err != nil {
@@ -452,11 +426,15 @@ func NodeSetMachineKey(
}).Error
}
func generateGivenName(suppliedName string, randomSuffix bool) (string, error) {
// Strip invalid DNS characters for givenName
suppliedName = strings.ToLower(suppliedName)
suppliedName = invalidDNSRegex.ReplaceAllString(suppliedName, "")
// NodeSave saves a node object to the database; prefer to use a specific save method rather
// than this. It is intended to be used when we are changing or.
// TODO(kradalby): Remove this func, just use Save.
func NodeSave(tx *gorm.DB, node *types.Node) error {
return tx.Save(node).Error
}
func generateGivenName(suppliedName string, randomSuffix bool) (string, error) {
suppliedName = util.ConvertWithFQDNRules(suppliedName)
if len(suppliedName) > util.LabelHostnameLength {
return "", types.ErrHostnameTooLong
}
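One side of this hunk normalizes the supplied name with a lowercase pass plus the invalidDNSRegex declared earlier in the file, while the other delegates to util.ConvertWithFQDNRules. A small sketch of the regex-based variant (the function name is illustrative):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// invalidDNSRegex matches runs of characters that cannot appear in a DNS
// label, as declared in the hunk earlier in this file.
var invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+")

// sanitizeGivenName lowercases the name and strips everything the regex
// rejects, matching the regex-based side of the diff.
func sanitizeGivenName(name string) string {
	return invalidDNSRegex.ReplaceAllString(strings.ToLower(name), "")
}

func main() {
	fmt.Println(sanitizeGivenName("My Laptop_01")) // mylaptop01
}
```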
@@ -516,6 +494,41 @@ func EnsureUniqueGivenName(
return givenName, nil
}
// ExpireExpiredNodes checks for nodes that have expired since the last check
// and returns a time to be used for the next check, a StateUpdate
// containing the expired nodes, and a boolean indicating if any nodes were found.
func ExpireExpiredNodes(tx *gorm.DB,
lastCheck time.Time,
) (time.Time, []change.ChangeSet, bool) {
// use the time of the start of the function to ensure we
// don't miss some nodes by returning it _after_ we have
// checked everything.
started := time.Now()
expired := make([]*tailcfg.PeerChange, 0)
var updates []change.ChangeSet
nodes, err := ListNodes(tx)
if err != nil {
return time.Unix(0, 0), nil, false
}
for _, node := range nodes {
if node.IsExpired() && node.Expiry.After(lastCheck) {
expired = append(expired, &tailcfg.PeerChange{
NodeID: tailcfg.NodeID(node.ID),
KeyExpiry: node.Expiry,
})
updates = append(updates, change.KeyExpiry(node.ID))
}
}
if len(expired) > 0 {
return started, updates, true
}
return started, nil, false
}
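ExpireExpiredNodes above returns the scan's start time as the next lastCheck precisely so a polling caller cannot miss nodes that expire while the scan is running. A hedged sketch of such a caller, assuming it lives in the same package; the apply callback and the 30-second interval are hypothetical:

```go
package db

import (
	"time"

	"github.com/juanfont/headscale/hscontrol/types/change"
	"gorm.io/gorm"
)

// expireLoop polls for expired nodes. The returned start-of-scan time
// becomes the next lastCheck, so nodes that expire mid-scan are caught on
// the following tick instead of being skipped.
func expireLoop(conn *gorm.DB, apply func([]change.ChangeSet)) {
	lastCheck := time.Unix(0, 0)
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	for range ticker.C {
		next, updates, found := ExpireExpiredNodes(conn, lastCheck)
		if found {
			apply(updates)
		}
		lastCheck = next
	}
}
```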
// EphemeralGarbageCollector is a garbage collector that will delete nodes after
// a certain amount of time.
// It is used to delete ephemeral nodes that have disconnected and should be


@@ -476,7 +476,7 @@ func TestAutoApproveRoutes(t *testing.T) {
require.NoError(t, err)
}
newRoutes2, changed2 := policy.ApproveRoutesWithPolicy(pm, nodeTagged.View(), nodeTagged.ApprovedRoutes, tt.routes)
newRoutes2, changed2 := policy.ApproveRoutesWithPolicy(pm, nodeTagged.View(), node.ApprovedRoutes, tt.routes)
if changed2 {
err = SetApprovedRoutes(adb.DB, nodeTagged.ID, newRoutes2)
require.NoError(t, err)
@@ -490,7 +490,7 @@ func TestAutoApproveRoutes(t *testing.T) {
if len(expectedRoutes1) == 0 {
expectedRoutes1 = nil
}
if diff := cmp.Diff(expectedRoutes1, node1ByID.AllApprovedRoutes(), util.Comparers...); diff != "" {
if diff := cmp.Diff(expectedRoutes1, node1ByID.SubnetRoutes(), util.Comparers...); diff != "" {
t.Errorf("unexpected enabled routes (-want +got):\n%s", diff)
}
@@ -501,7 +501,7 @@ func TestAutoApproveRoutes(t *testing.T) {
if len(expectedRoutes2) == 0 {
expectedRoutes2 = nil
}
if diff := cmp.Diff(expectedRoutes2, node2ByID.AllApprovedRoutes(), util.Comparers...); diff != "" {
if diff := cmp.Diff(expectedRoutes2, node2ByID.SubnetRoutes(), util.Comparers...); diff != "" {
t.Errorf("unexpected enabled routes (-want +got):\n%s", diff)
}
})
@@ -640,7 +640,7 @@ func TestListEphemeralNodes(t *testing.T) {
assert.Equal(t, nodeEph.Hostname, ephemeralNodes[0].Hostname)
}
func TestNodeNaming(t *testing.T) {
func TestRenameNode(t *testing.T) {
db, err := newSQLiteTestDB()
if err != nil {
t.Fatalf("creating db: %s", err)
@@ -672,26 +672,6 @@ func TestNodeNaming(t *testing.T) {
Hostinfo: &tailcfg.Hostinfo{},
}
// Using non-ASCII characters in the hostname can
// break your network, so they should be replaced when registering
// a node.
// https://github.com/juanfont/headscale/issues/2343
nodeInvalidHostname := types.Node{
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "我的电脑",
UserID: user2.ID,
RegisterMethod: util.RegisterMethodAuthKey,
}
nodeShortHostname := types.Node{
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "a",
UserID: user2.ID,
RegisterMethod: util.RegisterMethodAuthKey,
}
err = db.DB.Save(&node).Error
require.NoError(t, err)
@@ -704,11 +684,7 @@ func TestNodeNaming(t *testing.T) {
return err
}
_, err = RegisterNodeForTest(tx, node2, nil, nil)
if err != nil {
return err
}
_, err = RegisterNodeForTest(tx, nodeInvalidHostname, ptr.To(mpp("100.64.0.66/32").Addr()), nil)
_, err = RegisterNodeForTest(tx, nodeShortHostname, ptr.To(mpp("100.64.0.67/32").Addr()), nil)
return err
})
require.NoError(t, err)
@@ -716,12 +692,10 @@ func TestNodeNaming(t *testing.T) {
nodes, err := db.ListNodes()
require.NoError(t, err)
assert.Len(t, nodes, 4)
assert.Len(t, nodes, 2)
t.Logf("node1 %s %s", nodes[0].Hostname, nodes[0].GivenName)
t.Logf("node2 %s %s", nodes[1].Hostname, nodes[1].GivenName)
t.Logf("node3 %s %s", nodes[2].Hostname, nodes[2].GivenName)
t.Logf("node4 %s %s", nodes[3].Hostname, nodes[3].GivenName)
assert.Equal(t, nodes[0].Hostname, nodes[0].GivenName)
assert.NotEqual(t, nodes[1].Hostname, nodes[1].GivenName)
@@ -733,10 +707,6 @@ func TestNodeNaming(t *testing.T) {
assert.Len(t, nodes[1].Hostname, 4)
assert.Len(t, nodes[0].GivenName, 4)
assert.Len(t, nodes[1].GivenName, 13)
assert.Contains(t, nodes[2].Hostname, "invalid-") // invalid chars
assert.Contains(t, nodes[2].GivenName, "invalid-")
assert.Contains(t, nodes[3].Hostname, "invalid-") // too short
assert.Contains(t, nodes[3].GivenName, "invalid-")
// Nodes can be renamed to a unique name
err = db.Write(func(tx *gorm.DB) error {
@@ -746,7 +716,7 @@ func TestNodeNaming(t *testing.T) {
nodes, err = db.ListNodes()
require.NoError(t, err)
assert.Len(t, nodes, 4)
assert.Len(t, nodes, 2)
assert.Equal(t, "test", nodes[0].Hostname)
assert.Equal(t, "newname", nodes[0].GivenName)
@@ -758,7 +728,7 @@ func TestNodeNaming(t *testing.T) {
nodes, err = db.ListNodes()
require.NoError(t, err)
assert.Len(t, nodes, 4)
assert.Len(t, nodes, 2)
assert.Equal(t, "test", nodes[0].Hostname)
assert.Equal(t, "newname", nodes[0].GivenName)
assert.Equal(t, "test", nodes[1].GivenName)
@@ -768,149 +738,6 @@ func TestNodeNaming(t *testing.T) {
return RenameNode(tx, nodes[0].ID, "test")
})
assert.ErrorContains(t, err, "name is not unique")
// Rename invalid chars
err = db.Write(func(tx *gorm.DB) error {
return RenameNode(tx, nodes[2].ID, "我的电脑")
})
assert.ErrorContains(t, err, "invalid characters")
// Rename too short
err = db.Write(func(tx *gorm.DB) error {
return RenameNode(tx, nodes[3].ID, "a")
})
assert.ErrorContains(t, err, "at least 2 characters")
// Rename with emoji
err = db.Write(func(tx *gorm.DB) error {
return RenameNode(tx, nodes[0].ID, "hostname-with-💩")
})
assert.ErrorContains(t, err, "invalid characters")
// Rename with only emoji
err = db.Write(func(tx *gorm.DB) error {
return RenameNode(tx, nodes[0].ID, "🚀")
})
assert.ErrorContains(t, err, "invalid characters")
}
func TestRenameNodeComprehensive(t *testing.T) {
db, err := newSQLiteTestDB()
if err != nil {
t.Fatalf("creating db: %s", err)
}
user, err := db.CreateUser(types.User{Name: "test"})
require.NoError(t, err)
node := types.Node{
ID: 0,
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "testnode",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &tailcfg.Hostinfo{},
}
err = db.DB.Save(&node).Error
require.NoError(t, err)
err = db.DB.Transaction(func(tx *gorm.DB) error {
_, err := RegisterNodeForTest(tx, node, nil, nil)
return err
})
require.NoError(t, err)
nodes, err := db.ListNodes()
require.NoError(t, err)
assert.Len(t, nodes, 1)
tests := []struct {
name string
newName string
wantErr string
}{
{
name: "uppercase_rejected",
newName: "User2-Host",
wantErr: "must be lowercase",
},
{
name: "underscore_rejected",
newName: "test_node",
wantErr: "invalid characters",
},
{
name: "at_sign_uppercase_rejected",
newName: "Test@Host",
wantErr: "must be lowercase",
},
{
name: "at_sign_rejected",
newName: "test@host",
wantErr: "invalid characters",
},
{
name: "chinese_chars_with_dash_rejected",
newName: "server-北京-01",
wantErr: "invalid characters",
},
{
name: "chinese_only_rejected",
newName: "我的电脑",
wantErr: "invalid characters",
},
{
name: "emoji_with_text_rejected",
newName: "laptop-🚀",
wantErr: "invalid characters",
},
{
name: "mixed_chinese_emoji_rejected",
newName: "测试💻机器",
wantErr: "invalid characters",
},
{
name: "only_emojis_rejected",
newName: "🎉🎊",
wantErr: "invalid characters",
},
{
name: "only_at_signs_rejected",
newName: "@@@",
wantErr: "invalid characters",
},
{
name: "starts_with_dash_rejected",
newName: "-test",
wantErr: "cannot start or end with a hyphen",
},
{
name: "ends_with_dash_rejected",
newName: "test-",
wantErr: "cannot start or end with a hyphen",
},
{
name: "too_long_hostname_rejected",
newName: "this-is-a-very-long-hostname-that-exceeds-sixty-three-characters-limit",
wantErr: "must not exceed 63 characters",
},
{
name: "too_short_hostname_rejected",
newName: "a",
wantErr: "at least 2 characters",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := db.Write(func(tx *gorm.DB) error {
return RenameNode(tx, nodes[0].ID, tt.newName)
})
assert.ErrorContains(t, err, tt.wantErr)
})
}
}
func TestListPeers(t *testing.T) {

View File

@@ -145,12 +145,11 @@ func (hsdb *HSDatabase) ExpirePreAuthKey(k *types.PreAuthKey) error {
// UsePreAuthKey marks a PreAuthKey as used.
func UsePreAuthKey(tx *gorm.DB, k *types.PreAuthKey) error {
err := tx.Model(k).Update("used", true).Error
if err != nil {
k.Used = true
if err := tx.Save(k).Error; err != nil {
return fmt.Errorf("failed to update key used status in the database: %w", err)
}
k.Used = true
return nil
}

View File

@@ -1,45 +0,0 @@
PRAGMA foreign_keys=OFF;
BEGIN TRANSACTION;
CREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));
INSERT INTO migrations VALUES('202312101416');
INSERT INTO migrations VALUES('202312101430');
INSERT INTO migrations VALUES('202402151347');
INSERT INTO migrations VALUES('2024041121742');
INSERT INTO migrations VALUES('202406021630');
INSERT INTO migrations VALUES('202409271400');
INSERT INTO migrations VALUES('202407191627');
INSERT INTO migrations VALUES('202408181235');
INSERT INTO migrations VALUES('202501221827');
INSERT INTO migrations VALUES('202501311657');
INSERT INTO migrations VALUES('202502070949');
INSERT INTO migrations VALUES('202502131714');
INSERT INTO migrations VALUES('202502171819');
INSERT INTO migrations VALUES('202505091439');
INSERT INTO migrations VALUES('202505141324');
CREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);
CREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);
CREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);
CREATE TABLE IF NOT EXISTS "nodes" (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`last_seen` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));
CREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);
DELETE FROM sqlite_sequence;
INSERT INTO sqlite_sequence VALUES('nodes',0);
CREATE INDEX `idx_users_deleted_at` ON `users`(`deleted_at`);
CREATE UNIQUE INDEX `idx_api_keys_prefix` ON `api_keys`(`prefix`);
CREATE INDEX `idx_policies_deleted_at` ON `policies`(`deleted_at`);
CREATE UNIQUE INDEX idx_provider_identifier ON users (provider_identifier) WHERE provider_identifier IS NOT NULL;
CREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identifier);
CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;
-- Create all the old tables we have had and ensure they are clean up.
CREATE TABLE `namespaces` (`id` text,`deleted_at` datetime,PRIMARY KEY (`id`));
CREATE TABLE `machines` (`id` text,PRIMARY KEY (`id`));
CREATE TABLE `kvs` (`id` text,PRIMARY KEY (`id`));
CREATE TABLE `shared_machines` (`id` text,`deleted_at` datetime,PRIMARY KEY (`id`));
CREATE TABLE `pre_auth_key_acl_tags` (`id` text,PRIMARY KEY (`id`));
CREATE TABLE `routes` (`id` text,`deleted_at` datetime,PRIMARY KEY (`id`));
CREATE INDEX `idx_routes_deleted_at` ON `routes`(`deleted_at`);
CREATE INDEX `idx_namespaces_deleted_at` ON `namespaces`(`deleted_at`);
CREATE INDEX `idx_shared_machines_deleted_at` ON `shared_machines`(`deleted_at`);
COMMIT;

View File

@@ -1,14 +0,0 @@
CREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));
CREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);
CREATE INDEX `idx_users_deleted_at` ON `users`(`deleted_at`);
CREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);
CREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);
CREATE UNIQUE INDEX `idx_api_keys_prefix` ON `api_keys`(`prefix`);
CREATE TABLE IF NOT EXISTS "nodes" (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`last_seen` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));
CREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);
CREATE INDEX `idx_policies_deleted_at` ON `policies`(`deleted_at`);
CREATE UNIQUE INDEX idx_provider_identifier ON users (provider_identifier) WHERE provider_identifier IS NOT NULL;
CREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identifier);
CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;
CREATE TABLE _litestream_seq (id INTEGER PRIMARY KEY, seq INTEGER);
CREATE TABLE _litestream_lock (id INTEGER);

View File

@@ -1,134 +0,0 @@
package db
import (
"database/sql"
"testing"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gorm.io/gorm"
)
// TestUserUpdatePreservesUnchangedFields verifies that updating a user
// preserves fields that aren't modified. This test validates the fix
// for using Updates() instead of Save() in UpdateUser-like operations.
func TestUserUpdatePreservesUnchangedFields(t *testing.T) {
database := dbForTest(t)
// Create a user with all fields set
initialUser := types.User{
Name: "testuser",
DisplayName: "Test User Display",
Email: "test@example.com",
ProviderIdentifier: sql.NullString{
String: "provider-123",
Valid: true,
},
}
createdUser, err := database.CreateUser(initialUser)
require.NoError(t, err)
require.NotNil(t, createdUser)
// Verify initial state
assert.Equal(t, "testuser", createdUser.Name)
assert.Equal(t, "Test User Display", createdUser.DisplayName)
assert.Equal(t, "test@example.com", createdUser.Email)
assert.True(t, createdUser.ProviderIdentifier.Valid)
assert.Equal(t, "provider-123", createdUser.ProviderIdentifier.String)
// Simulate what UpdateUser does: load user, modify one field, save
_, err = Write(database.DB, func(tx *gorm.DB) (*types.User, error) {
user, err := GetUserByID(tx, types.UserID(createdUser.ID))
if err != nil {
return nil, err
}
// Modify ONLY DisplayName
user.DisplayName = "Updated Display Name"
// This is the line being tested - currently uses Save() which writes ALL fields, potentially overwriting unchanged ones
err = tx.Save(user).Error
if err != nil {
return nil, err
}
return user, nil
})
require.NoError(t, err)
// Read user back from database
updatedUser, err := Read(database.DB, func(rx *gorm.DB) (*types.User, error) {
return GetUserByID(rx, types.UserID(createdUser.ID))
})
require.NoError(t, err)
// Verify that DisplayName was updated
assert.Equal(t, "Updated Display Name", updatedUser.DisplayName)
// CRITICAL: Verify that other fields were NOT overwritten
// With Save(), these assertions should pass because the user object
// was loaded from DB and has all fields populated.
// But if Updates() is used, these will also pass (and it's safer).
assert.Equal(t, "testuser", updatedUser.Name, "Name should be preserved")
assert.Equal(t, "test@example.com", updatedUser.Email, "Email should be preserved")
assert.True(t, updatedUser.ProviderIdentifier.Valid, "ProviderIdentifier should be preserved")
assert.Equal(t, "provider-123", updatedUser.ProviderIdentifier.String, "ProviderIdentifier value should be preserved")
}
// TestUserUpdateWithUpdatesMethod tests that using Updates() instead of Save()
// works correctly and only updates modified fields.
func TestUserUpdateWithUpdatesMethod(t *testing.T) {
database := dbForTest(t)
// Create a user
initialUser := types.User{
Name: "testuser",
DisplayName: "Original Display",
Email: "original@example.com",
ProviderIdentifier: sql.NullString{
String: "provider-abc",
Valid: true,
},
}
createdUser, err := database.CreateUser(initialUser)
require.NoError(t, err)
// Update using Updates() method
_, err = Write(database.DB, func(tx *gorm.DB) (*types.User, error) {
user, err := GetUserByID(tx, types.UserID(createdUser.ID))
if err != nil {
return nil, err
}
// Modify multiple fields
user.DisplayName = "New Display"
user.Email = "new@example.com"
// Use Updates() instead of Save()
err = tx.Updates(user).Error
if err != nil {
return nil, err
}
return user, nil
})
require.NoError(t, err)
// Verify changes
updatedUser, err := Read(database.DB, func(rx *gorm.DB) (*types.User, error) {
return GetUserByID(rx, types.UserID(createdUser.ID))
})
require.NoError(t, err)
// Verify updated fields
assert.Equal(t, "New Display", updatedUser.DisplayName)
assert.Equal(t, "new@example.com", updatedUser.Email)
// Verify preserved fields
assert.Equal(t, "testuser", updatedUser.Name)
assert.True(t, updatedUser.ProviderIdentifier.Valid)
assert.Equal(t, "provider-abc", updatedUser.ProviderIdentifier.String)
}
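Both tests hinge on a GORM distinction: `Save` writes every column of the struct, while `Updates` skips zero-value fields. A standalone sketch of the difference (the `User` model and `db` handle here are illustrative, not headscale's):

```go
type User struct {
	ID    uint
	Name  string
	Email string
}

// Save writes ALL columns: the zero-value Email in this struct would
// overwrite whatever email is stored for user 1.
db.Save(&User{ID: 1, Name: "alice"})

// Updates writes only non-zero fields: Email is left untouched.
db.Model(&User{ID: 1}).Updates(User{Name: "alice"})
```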

View File

@@ -26,7 +26,8 @@ func (hsdb *HSDatabase) CreateUser(user types.User) (*types.User, error) {
// CreateUser creates a new User. Returns error if could not be created
// or another user already exists.
func CreateUser(tx *gorm.DB, user types.User) (*types.User, error) {
if err := util.ValidateHostname(user.Name); err != nil {
err := util.ValidateUsername(user.Name)
if err != nil {
return nil, err
}
if err := tx.Create(&user).Error; err != nil {
@@ -92,7 +93,8 @@ func RenameUser(tx *gorm.DB, uid types.UserID, newName string) error {
if err != nil {
return err
}
if err = util.ValidateHostname(newName); err != nil {
err = util.ValidateUsername(newName)
if err != nil {
return err
}
@@ -102,8 +104,7 @@ func RenameUser(tx *gorm.DB, uid types.UserID, newName string) error {
oldUser.Name = newName
err = tx.Updates(&oldUser).Error
if err != nil {
if err := tx.Save(&oldUser).Error; err != nil {
return err
}

View File

@@ -12,7 +12,6 @@ import (
"net/url"
"os"
"reflect"
"slices"
"sync"
"time"
@@ -127,17 +126,7 @@ func shuffleDERPMap(dm *tailcfg.DERPMap) {
return
}
// Collect region IDs and sort them to ensure deterministic iteration order.
// Map iteration order is non-deterministic in Go, which would cause the
// shuffle to be non-deterministic even with a fixed seed.
ids := make([]int, 0, len(dm.Regions))
for id := range dm.Regions {
ids = append(ids, id)
}
slices.Sort(ids)
for _, id := range ids {
region := dm.Regions[id]
for id, region := range dm.Regions {
if len(region.Nodes) == 0 {
continue
}
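The removed comment is worth illustrating: Go randomizes map iteration order, so a seeded shuffle over `dm.Regions` stays deterministic only if the keys are sorted first. A standalone example (not from the patch):

```go
package main

import (
	"fmt"
	"math/rand"
	"slices"
)

func main() {
	regions := map[int]string{10: "sea", 2: "sfo", 4: "fra"}

	// Sorting the keys pins the iteration order, so the seeded RNG
	// consumes values in the same sequence on every run.
	ids := make([]int, 0, len(regions))
	for id := range regions {
		ids = append(ids, id)
	}
	slices.Sort(ids)

	r := rand.New(rand.NewSource(42))
	for _, id := range ids {
		fmt.Println(id, regions[id], r.Intn(100))
	}
}
```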

View File

@@ -83,9 +83,9 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
RegionCode: "sea",
RegionName: "Seattle",
Nodes: []*tailcfg.DERPNode{
{Name: "10d", RegionID: 10, HostName: "derp10d.tailscale.com"},
{Name: "10c", RegionID: 10, HostName: "derp10c.tailscale.com"},
{Name: "10b", RegionID: 10, HostName: "derp10b.tailscale.com"},
{Name: "10c", RegionID: 10, HostName: "derp10c.tailscale.com"},
{Name: "10d", RegionID: 10, HostName: "derp10d.tailscale.com"},
},
},
2: {
@@ -93,9 +93,9 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
RegionCode: "sfo",
RegionName: "San Francisco",
Nodes: []*tailcfg.DERPNode{
{Name: "2d", RegionID: 2, HostName: "derp2d.tailscale.com"},
{Name: "2e", RegionID: 2, HostName: "derp2e.tailscale.com"},
{Name: "2f", RegionID: 2, HostName: "derp2f.tailscale.com"},
{Name: "2e", RegionID: 2, HostName: "derp2e.tailscale.com"},
{Name: "2d", RegionID: 2, HostName: "derp2d.tailscale.com"},
},
},
},
@@ -169,74 +169,6 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
},
},
},
{
name: "same dataset with another base domain",
baseDomain: "another.example.com",
derpMap: &tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{
4: {
RegionID: 4,
RegionCode: "fra",
RegionName: "Frankfurt",
Nodes: []*tailcfg.DERPNode{
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
},
},
},
},
expected: &tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{
4: {
RegionID: 4,
RegionCode: "fra",
RegionName: "Frankfurt",
Nodes: []*tailcfg.DERPNode{
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
},
},
},
},
},
{
name: "same dataset with yet another base domain",
baseDomain: "yetanother.example.com",
derpMap: &tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{
4: {
RegionID: 4,
RegionCode: "fra",
RegionName: "Frankfurt",
Nodes: []*tailcfg.DERPNode{
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
},
},
},
},
expected: &tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{
4: {
RegionID: 4,
RegionCode: "fra",
RegionName: "Frankfurt",
Nodes: []*tailcfg.DERPNode{
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
},
},
},
},
},
}
for _, tt := range tests {
@@ -253,6 +185,7 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
}
})
}
}
func TestShuffleDERPMapEdgeCases(t *testing.T) {

View File

@@ -416,12 +416,9 @@ func (api headscaleV1APIServer) ExpireNode(
ctx context.Context,
request *v1.ExpireNodeRequest,
) (*v1.ExpireNodeResponse, error) {
expiry := time.Now()
if request.GetExpiry() != nil {
expiry = request.GetExpiry().AsTime()
}
now := time.Now()
node, nodeChange, err := api.h.state.SetNodeExpiry(types.NodeID(request.GetNodeId()), expiry)
node, nodeChange, err := api.h.state.SetNodeExpiry(types.NodeID(request.GetNodeId()), now)
if err != nil {
return nil, err
}
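The optional expiry lets an API client schedule a future expiration instead of expiring a node immediately. A hedged client-side sketch, using the `v1` request type from this hunk and the standard `timestamppb` helper (`client` is assumed to be a connected HeadscaleService gRPC client):

```go
// Expire node 1 a day from now; omit Expiry to expire immediately.
resp, err := client.ExpireNode(ctx, &v1.ExpireNodeRequest{
	NodeId: 1,
	Expiry: timestamppb.New(time.Now().Add(24 * time.Hour)),
})
```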
@@ -744,7 +741,7 @@ func (api headscaleV1APIServer) DebugCreateNode(
hostinfo := tailcfg.Hostinfo{
RoutableIPs: routes,
OS: "TestOS",
Hostname: request.GetName(),
Hostname: "DebugTestNode",
}
registrationId, err := types.RegistrationIDFromString(request.GetKey())
@@ -752,8 +749,8 @@ func (api headscaleV1APIServer) DebugCreateNode(
return nil, err
}
newNode := types.NewRegisterNode(
types.Node{
newNode := types.RegisterNode{
Node: types.Node{
NodeKey: key.NewNode().Public(),
MachineKey: key.NewMachine().Public(),
Hostname: request.GetName(),
@@ -764,7 +761,8 @@ func (api headscaleV1APIServer) DebugCreateNode(
Hostinfo: &hostinfo,
},
)
Registered: make(chan *types.Node),
}
log.Debug().
Caller().
@@ -776,24 +774,4 @@ func (api headscaleV1APIServer) DebugCreateNode(
return &v1.DebugCreateNodeResponse{Node: newNode.Node.Proto()}, nil
}
func (api headscaleV1APIServer) Health(
ctx context.Context,
request *v1.HealthRequest,
) (*v1.HealthResponse, error) {
var healthErr error
response := &v1.HealthResponse{}
if err := api.h.state.PingDB(ctx); err != nil {
healthErr = fmt.Errorf("database ping failed: %w", err)
} else {
response.DatabaseConnectivity = true
}
if healthErr != nil {
log.Error().Err(healthErr).Msg("Health check failed")
}
return response, healthErr
}
func (api headscaleV1APIServer) mustEmbedUnimplementedHeadscaleServiceServer() {}

View File

@@ -201,24 +201,6 @@ func (h *Headscale) RobotsHandler(
}
}
// VersionHandler returns version information about the Headscale server.
// It listens on /version.
func (h *Headscale) VersionHandler(
writer http.ResponseWriter,
req *http.Request,
) {
writer.Header().Set("Content-Type", "application/json")
writer.WriteHeader(http.StatusOK)
versionInfo := types.GetVersionInfo()
if err := json.NewEncoder(writer).Encode(versionInfo); err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write version response")
}
}
var codeStyleRegisterWebAPI = styles.Props{
styles.Display: "block",
styles.Padding: "20px",

View File

@@ -88,9 +88,16 @@ func generateMapResponse(nodeID types.NodeID, version tailcfg.CapabilityVersion,
// TODO(kradalby): This can potentially be a peer update of the old and new subnet router.
mapResp, err = mapper.fullMapResponse(nodeID, version)
} else {
// Trust the change type for online/offline status to avoid race conditions
// between NodeStore updates and change processing
onlineStatus := c.Change == change.NodeCameOnline
// CRITICAL FIX: Read actual online status from NodeStore when available,
// fall back to deriving from change type for unit tests or when NodeStore is empty
var onlineStatus bool
if node, found := mapper.state.GetNodeByID(c.NodeID); found && node.IsOnline().Valid() {
// Use actual NodeStore status when available (production case)
onlineStatus = node.IsOnline().Get()
} else {
// Fall back to deriving from change type (unit test case or initial setup)
onlineStatus = c.Change == change.NodeCameOnline
}
mapResp, err = mapper.peerChangedPatchResponse(nodeID, []*tailcfg.PeerChange{
{
@@ -101,33 +108,11 @@ func generateMapResponse(nodeID types.NodeID, version tailcfg.CapabilityVersion,
}
case change.NodeNewOrUpdate:
// If the node is the one being updated, we send a self update that preserves peer information
// to ensure the node sees changes to its own properties (e.g., hostname/DNS name changes)
// without losing its view of peer status during rapid reconnection cycles
if c.IsSelfUpdate(nodeID) {
mapResp, err = mapper.selfMapResponse(nodeID, version)
} else {
mapResp, err = mapper.peerChangeResponse(nodeID, version, c.NodeID)
}
mapResp, err = mapper.fullMapResponse(nodeID, version)
case change.NodeRemove:
mapResp, err = mapper.peerRemovedResponse(nodeID, c.NodeID)
case change.NodeKeyExpiry:
// If the node is the one whose key is expiring, we send a "full" self update
// as nodes appear to ignore patch updates about themselves.
if c.IsSelfUpdate(nodeID) {
mapResp, err = mapper.selfMapResponse(nodeID, version)
// mapResp, err = mapper.fullMapResponse(nodeID, version)
} else {
mapResp, err = mapper.peerChangedPatchResponse(nodeID, []*tailcfg.PeerChange{
{
NodeID: c.NodeID.NodeID(),
KeyExpiry: c.NodeExpiry,
},
})
}
default:
// The following change types always fall through to this default case:
// change.Full, change.Policy

View File

@@ -73,6 +73,7 @@ func (b *LockFreeBatcher) AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse
// Use the worker pool for controlled concurrency instead of direct generation
initialMap, err := b.MapResponseFromChange(id, change.FullSelf(id))
if err != nil {
log.Error().Uint64("node.id", id.Uint64()).Err(err).Msg("Initial map generation failed")
nodeConn.removeConnectionByChannel(c)
@@ -601,7 +602,7 @@ func (mc *multiChannelNodeConn) send(data *tailcfg.MapResponse) error {
mc.updateCount.Add(1)
log.Debug().Uint64("node.id", mc.id.Uint64()).
log.Info().Uint64("node.id", mc.id.Uint64()).
Int("successful_sends", successCount).
Int("failed_connections", len(failedConnections)).
Int("remaining_connections", len(mc.connections)).

View File

@@ -1028,9 +1028,7 @@ func TestBatcherWorkQueueBatching(t *testing.T) {
// Add multiple changes rapidly to test batching
batcher.AddWork(change.DERPSet)
// Use a valid expiry time for testing since test nodes don't have expiry set
testExpiry := time.Now().Add(24 * time.Hour)
batcher.AddWork(change.KeyExpiry(testNodes[1].n.ID, testExpiry))
batcher.AddWork(change.KeyExpiry(testNodes[1].n.ID))
batcher.AddWork(change.DERPSet)
batcher.AddWork(change.NodeAdded(testNodes[1].n.ID))
batcher.AddWork(change.DERPSet)
@@ -1280,9 +1278,7 @@ func TestBatcherWorkerChannelSafety(t *testing.T) {
// Add node-specific work occasionally
if i%10 == 0 {
// Use a valid expiry time for testing since test nodes don't have expiry set
testExpiry := time.Now().Add(24 * time.Hour)
batcher.AddWork(change.KeyExpiry(testNode.n.ID, testExpiry))
batcher.AddWork(change.KeyExpiry(testNode.n.ID))
}
// Rapid removal creates race between worker and removal
@@ -1497,9 +1493,7 @@ func TestBatcherConcurrentClients(t *testing.T) {
if i%7 == 0 && len(allNodes) > 0 {
// Node-specific changes using real nodes
node := allNodes[i%len(allNodes)]
// Use a valid expiry time for testing since test nodes don't have expiry set
testExpiry := time.Now().Add(24 * time.Hour)
batcher.AddWork(change.KeyExpiry(node.n.ID, testExpiry))
batcher.AddWork(change.KeyExpiry(node.n.ID))
}
// Small delay to allow some batching

View File

@@ -28,7 +28,6 @@ type debugType string
const (
fullResponseDebug debugType = "full"
selfResponseDebug debugType = "self"
patchResponseDebug debugType = "patch"
removeResponseDebug debugType = "remove"
changeResponseDebug debugType = "change"
@@ -69,17 +68,24 @@ func (b *MapResponseBuilder) WithCapabilityVersion(capVer tailcfg.CapabilityVers
// WithSelfNode adds the requesting node to the response.
func (b *MapResponseBuilder) WithSelfNode() *MapResponseBuilder {
nv, ok := b.mapper.state.GetNodeByID(b.nodeID)
nodeView, ok := b.mapper.state.GetNodeByID(b.nodeID)
if !ok {
b.addError(errors.New("node not found"))
return b
}
// Always use batcher's view of online status for self node
// The batcher respects grace periods for logout scenarios
node := nodeView.AsStruct()
// if b.mapper.batcher != nil {
// node.IsOnline = ptr.To(b.mapper.batcher.IsConnected(b.nodeID))
// }
_, matchers := b.mapper.state.Filter()
tailnode, err := tailNode(
nv, b.capVer, b.mapper.state,
node.View(), b.capVer, b.mapper.state,
func(id types.NodeID) []netip.Prefix {
return policy.ReduceRoutes(nv, b.mapper.state.GetNodePrimaryRoutes(id), matchers)
return policy.ReduceRoutes(node.View(), b.mapper.state.GetNodePrimaryRoutes(id), matchers)
},
b.mapper.cfg)
if err != nil {
@@ -180,21 +186,14 @@ func (b *MapResponseBuilder) WithPacketFilters() *MapResponseBuilder {
return b
}
// FilterForNode returns rules already reduced to only those relevant for this node.
// For autogroup:self policies, it returns per-node compiled rules.
// For global policies, it returns the global filter reduced for this node.
filter, err := b.mapper.state.FilterForNode(node)
if err != nil {
b.addError(err)
return b
}
filter, _ := b.mapper.state.Filter()
// CapVer 81: 2023-11-17: MapResponse.PacketFilters (incremental packet filter updates)
// Currently, we do not send incremental packet filters, however using the
// new PacketFilters field and "base" allows us to send a full update when we
// have to send an empty list, avoiding the hack in the else block.
b.resp.PacketFilters = map[string][]tailcfg.FilterRule{
"base": filter,
"base": policy.ReduceFilterRules(node, filter),
}
return b
@@ -233,19 +232,12 @@ func (b *MapResponseBuilder) buildTailPeers(peers views.Slice[types.NodeView]) (
return nil, errors.New("node not found")
}
// Get unreduced matchers for peer relationship determination.
// MatchersForNode returns unreduced matchers that include all rules where the node
// could be either source or destination. This is different from FilterForNode which
// returns reduced rules for packet filtering (only rules where node is destination).
matchers, err := b.mapper.state.MatchersForNode(node)
if err != nil {
return nil, err
}
filter, matchers := b.mapper.state.Filter()
// If there are filter rules present, see if there are any nodes that cannot
// access each-other at all and remove them from the peers.
var changedViews views.Slice[types.NodeView]
if len(matchers) > 0 {
if len(filter) > 0 {
changedViews = policy.ReduceNodes(node, peers, matchers)
} else {
changedViews = peers

View File

@@ -158,26 +158,6 @@ func (m *mapper) fullMapResponse(
Build()
}
func (m *mapper) selfMapResponse(
nodeID types.NodeID,
capVer tailcfg.CapabilityVersion,
) (*tailcfg.MapResponse, error) {
ma, err := m.NewMapResponseBuilder(nodeID).
WithDebugType(selfResponseDebug).
WithCapabilityVersion(capVer).
WithSelfNode().
Build()
if err != nil {
return nil, err
}
// Set the peers to nil, to ensure the node does not think
// it's getting a new list.
ma.Peers = nil
return ma, err
}
func (m *mapper) derpMapResponse(
nodeID types.NodeID,
) (*tailcfg.MapResponse, error) {
@@ -210,6 +190,7 @@ func (m *mapper) peerChangeResponse(
return m.NewMapResponseBuilder(nodeID).
WithDebugType(changeResponseDebug).
WithCapabilityVersion(capVer).
WithSelfNode().
WithUserProfiles(peers).
WithPeerChanges(peers).
Build()

View File

@@ -108,12 +108,11 @@ func TestTailNode(t *testing.T) {
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{
tsaddr.AllIPv4(),
tsaddr.AllIPv6(),
netip.MustParsePrefix("192.168.0.0/24"),
netip.MustParsePrefix("172.0.0.0/10"),
},
},
ApprovedRoutes: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6(), netip.MustParsePrefix("192.168.0.0/24")},
ApprovedRoutes: []netip.Prefix{tsaddr.AllIPv4(), netip.MustParsePrefix("192.168.0.0/24")},
CreatedAt: created,
},
dnsConfig: &tailcfg.DNSConfig{},
@@ -151,7 +150,6 @@ func TestTailNode(t *testing.T) {
Hostinfo: hiview(tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{
tsaddr.AllIPv4(),
tsaddr.AllIPv6(),
netip.MustParsePrefix("192.168.0.0/24"),
netip.MustParsePrefix("172.0.0.0/10"),
},

View File

@@ -213,8 +213,7 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
return
}
stateCookieName := getCookieName("state", state)
cookieState, err := req.Cookie(stateCookieName)
cookieState, err := req.Cookie("state")
if err != nil {
httpError(writer, NewHTTPError(http.StatusBadRequest, "state not found", err))
return
@@ -236,13 +235,8 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
httpError(writer, err)
return
}
if idToken.Nonce == "" {
httpError(writer, NewHTTPError(http.StatusBadRequest, "nonce not found in IDToken", err))
return
}
nonceCookieName := getCookieName("nonce", idToken.Nonce)
nonce, err := req.Cookie(nonceCookieName)
nonce, err := req.Cookie("nonce")
if err != nil {
httpError(writer, NewHTTPError(http.StatusBadRequest, "nonce not found", err))
return
@@ -337,12 +331,6 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
verb := "Reauthenticated"
newNode, err := a.handleRegistration(user, *registrationId, nodeExpiry)
if err != nil {
if errors.Is(err, db.ErrNodeNotFoundRegistrationCache) {
log.Debug().Caller().Str("registration_id", registrationId.String()).Msg("registration session expired before authorization completed")
httpError(writer, NewHTTPError(http.StatusGone, "login session expired, try again", err))
return
}
httpError(writer, err)
return
}
@@ -590,11 +578,6 @@ func renderOIDCCallbackTemplate(
return &content, nil
}
// getCookieName generates a unique cookie name based on a cookie value.
func getCookieName(baseName, value string) string {
return fmt.Sprintf("%s_%s", baseName, value[:6])
}
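Because the name embeds the first six characters of the value, each login attempt gets its own cookie, so concurrent flows in one browser no longer clobber each other. Illustrative call with a made-up value:

```go
name := getCookieName("state", "xK3b9qWz0pT") // "state_xK3b9q"
```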
func setCSRFCookie(w http.ResponseWriter, r *http.Request, name string) (string, error) {
val, err := util.GenerateRandomStringURLSafe(64)
if err != nil {
@@ -603,7 +586,7 @@ func setCSRFCookie(w http.ResponseWriter, r *http.Request, name string) (string,
c := &http.Cookie{
Path: "/oidc/callback",
Name: getCookieName(name, val),
Name: name,
Value: val,
MaxAge: int(time.Hour.Seconds()),
Secure: r.TLS != nil,

View File

@@ -7,7 +7,6 @@ import (
"github.com/juanfont/headscale/hscontrol/util"
"go4.org/netipx"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
)
@@ -92,12 +91,3 @@ func (m *Match) SrcsOverlapsPrefixes(prefixes ...netip.Prefix) bool {
func (m *Match) DestsOverlapsPrefixes(prefixes ...netip.Prefix) bool {
return slices.ContainsFunc(prefixes, m.dests.OverlapsPrefix)
}
// DestsIsTheInternet reports if the destination is equal to "the internet"
// which is an IPSet that represents "autogroup:internet" and is special
// cased for exit nodes.
func (m Match) DestsIsTheInternet() bool {
return m.dests.Equal(util.TheInternet()) ||
m.dests.ContainsPrefix(tsaddr.AllIPv4()) ||
m.dests.ContainsPrefix(tsaddr.AllIPv6())
}

View File

@@ -13,12 +13,6 @@ import (
type PolicyManager interface {
// Filter returns the current filter rules for the entire tailnet and the associated matchers.
Filter() ([]tailcfg.FilterRule, []matcher.Match)
// FilterForNode returns filter rules for a specific node, handling autogroup:self
FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error)
// MatchersForNode returns matchers for peer relationship determination (unreduced)
MatchersForNode(node types.NodeView) ([]matcher.Match, error)
// BuildPeerMap constructs peer relationship maps for the given nodes
BuildPeerMap(nodes views.Slice[types.NodeView]) map[types.NodeID][]types.NodeView
SSHPolicy(types.NodeView) (*tailcfg.SSHPolicy, error)
SetPolicy([]byte) (bool, error)
SetUsers(users []types.User) (bool, error)

View File

@@ -10,6 +10,7 @@ import (
"github.com/rs/zerolog/log"
"github.com/samber/lo"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/views"
)
@@ -78,6 +79,66 @@ func BuildPeerMap(
return ret
}
// ReduceFilterRules takes a node and a set of rules and removes all rules and destinations
// that are not relevant to that particular node.
func ReduceFilterRules(node types.NodeView, rules []tailcfg.FilterRule) []tailcfg.FilterRule {
ret := []tailcfg.FilterRule{}
for _, rule := range rules {
// record if the rule is actually relevant for the given node.
var dests []tailcfg.NetPortRange
DEST_LOOP:
for _, dest := range rule.DstPorts {
expanded, err := util.ParseIPSet(dest.IP, nil)
// Fail closed: if we can't parse it, we should not allow
// access.
if err != nil {
continue DEST_LOOP
}
if node.InIPSet(expanded) {
dests = append(dests, dest)
continue DEST_LOOP
}
// If the node exposes routes, ensure they are not removed
// when the filters are reduced.
if node.Hostinfo().Valid() {
routableIPs := node.Hostinfo().RoutableIPs()
if routableIPs.Len() > 0 {
for _, routableIP := range routableIPs.All() {
if expanded.OverlapsPrefix(routableIP) {
dests = append(dests, dest)
continue DEST_LOOP
}
}
}
}
// Also check approved subnet routes - nodes should have access
// to subnets they're approved to route traffic for.
subnetRoutes := node.SubnetRoutes()
for _, subnetRoute := range subnetRoutes {
if expanded.OverlapsPrefix(subnetRoute) {
dests = append(dests, dest)
continue DEST_LOOP
}
}
}
if len(dests) > 0 {
ret = append(ret, tailcfg.FilterRule{
SrcIPs: rule.SrcIPs,
DstPorts: dests,
IPProto: rule.IPProto,
})
}
}
return ret
}
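Usage mirrors the `WithPacketFilters` hunk earlier in this diff: reduce the tailnet-wide filter to the rules relevant for one node before placing it in that node's map response (`node` and the builder state come from the caller):

```go
filter, _ := b.mapper.state.Filter()
b.resp.PacketFilters = map[string][]tailcfg.FilterRule{
	"base": policy.ReduceFilterRules(node, filter),
}
```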
// ApproveRoutesWithPolicy checks if the node can approve the announced routes
// and returns the new list of approved routes.
// The approved routes will include:

File diff suppressed because it is too large

View File

@@ -1,71 +0,0 @@
package policyutil
import (
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"tailscale.com/tailcfg"
)
// ReduceFilterRules takes a node and a set of global filter rules and removes all rules
// and destinations that are not relevant to that particular node.
//
// IMPORTANT: This function is designed for global filters only. Per-node filters
// (from autogroup:self policies) are already node-specific and should not be passed
// to this function. Use PolicyManager.FilterForNode() instead, which handles both cases.
func ReduceFilterRules(node types.NodeView, rules []tailcfg.FilterRule) []tailcfg.FilterRule {
ret := []tailcfg.FilterRule{}
for _, rule := range rules {
// record if the rule is actually relevant for the given node.
var dests []tailcfg.NetPortRange
DEST_LOOP:
for _, dest := range rule.DstPorts {
expanded, err := util.ParseIPSet(dest.IP, nil)
// Fail closed: if we can't parse it, we should not allow
// access.
if err != nil {
continue DEST_LOOP
}
if node.InIPSet(expanded) {
dests = append(dests, dest)
continue DEST_LOOP
}
// If the node exposes routes, ensure they are not removed
// when the filters are reduced.
if node.Hostinfo().Valid() {
routableIPs := node.Hostinfo().RoutableIPs()
if routableIPs.Len() > 0 {
for _, routableIP := range routableIPs.All() {
if expanded.OverlapsPrefix(routableIP) {
dests = append(dests, dest)
continue DEST_LOOP
}
}
}
}
// Also check approved subnet routes - nodes should have access
// to subnets they're approved to route traffic for.
subnetRoutes := node.SubnetRoutes()
for _, subnetRoute := range subnetRoutes {
if expanded.OverlapsPrefix(subnetRoute) {
dests = append(dests, dest)
continue DEST_LOOP
}
}
}
if len(dests) > 0 {
ret = append(ret, tailcfg.FilterRule{
SrcIPs: rule.SrcIPs,
DstPorts: dests,
IPProto: rule.IPProto,
})
}
}
return ret
}

View File

@@ -1,841 +0,0 @@
package policyutil_test
import (
"encoding/json"
"fmt"
"net/netip"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/policy/policyutil"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/require"
"gorm.io/gorm"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/util/must"
)
var ap = func(ipStr string) *netip.Addr {
ip := netip.MustParseAddr(ipStr)
return &ip
}
var p = func(prefStr string) netip.Prefix {
ip := netip.MustParsePrefix(prefStr)
return ip
}
// hsExitNodeDestForTest is the list of destination IP ranges that are allowed when
// we use headscale "autogroup:internet".
var hsExitNodeDestForTest = []tailcfg.NetPortRange{
{IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "64.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "96.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "100.0.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "100.128.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "101.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "102.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "104.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "112.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "168.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "169.0.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "169.128.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "169.192.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "169.224.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "169.240.0.0/13", Ports: tailcfg.PortRangeAny},
{IP: "169.248.0.0/14", Ports: tailcfg.PortRangeAny},
{IP: "169.252.0.0/15", Ports: tailcfg.PortRangeAny},
{IP: "169.255.0.0/16", Ports: tailcfg.PortRangeAny},
{IP: "170.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny},
{IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny},
{IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny},
{IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny},
{IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "224.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "2000::/3", Ports: tailcfg.PortRangeAny},
}
func TestTheInternet(t *testing.T) {
internetSet := util.TheInternet()
internetPrefs := internetSet.Prefixes()
for i := range internetPrefs {
if internetPrefs[i].String() != hsExitNodeDestForTest[i].IP {
t.Errorf(
"prefix from internet set %q != hsExit list %q",
internetPrefs[i].String(),
hsExitNodeDestForTest[i].IP,
)
}
}
if len(internetPrefs) != len(hsExitNodeDestForTest) {
t.Fatalf(
"expected same length of prefixes, internet: %d, hsExit: %d",
len(internetPrefs),
len(hsExitNodeDestForTest),
)
}
}
func TestReduceFilterRules(t *testing.T) {
users := types.Users{
types.User{Model: gorm.Model{ID: 1}, Name: "mickael"},
types.User{Model: gorm.Model{ID: 2}, Name: "user1"},
types.User{Model: gorm.Model{ID: 3}, Name: "user2"},
types.User{Model: gorm.Model{ID: 4}, Name: "user100"},
types.User{Model: gorm.Model{ID: 5}, Name: "user3"},
}
tests := []struct {
name string
node *types.Node
peers types.Nodes
pol string
want []tailcfg.FilterRule
}{
{
name: "host1-can-reach-host2-no-rules",
pol: `
{
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"100.64.0.1"
],
"dst": [
"100.64.0.2:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"),
User: users[0],
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"),
User: users[0],
},
},
want: []tailcfg.FilterRule{},
},
{
name: "1604-subnet-routers-are-preserved",
pol: `
{
"groups": {
"group:admins": [
"user1@"
]
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:admins"
],
"dst": [
"group:admins:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:admins"
],
"dst": [
"10.33.0.0/16:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{
netip.MustParsePrefix("10.33.0.0/16"),
},
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.1/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::1/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{
IP: "10.33.0.0/16",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-the-client",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"autogroup:internet:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[2],
},
// "internal" exit node
&types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
},
},
want: []tailcfg.FilterRule{},
},
{
name: "1786-reducing-breaks-exit-nodes-the-exit",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"autogroup:internet:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[2],
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: hsExitNodeDestForTest,
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-the-example-from-issue",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"0.0.0.0/5:*",
"8.0.0.0/7:*",
"11.0.0.0/8:*",
"12.0.0.0/6:*",
"16.0.0.0/4:*",
"32.0.0.0/3:*",
"64.0.0.0/2:*",
"128.0.0.0/3:*",
"160.0.0.0/5:*",
"168.0.0.0/6:*",
"172.0.0.0/12:*",
"172.32.0.0/11:*",
"172.64.0.0/10:*",
"172.128.0.0/9:*",
"173.0.0.0/8:*",
"174.0.0.0/7:*",
"176.0.0.0/4:*",
"192.0.0.0/9:*",
"192.128.0.0/11:*",
"192.160.0.0/13:*",
"192.169.0.0/16:*",
"192.170.0.0/15:*",
"192.172.0.0/14:*",
"192.176.0.0/12:*",
"192.192.0.0/10:*",
"193.0.0.0/8:*",
"194.0.0.0/7:*",
"196.0.0.0/6:*",
"200.0.0.0/5:*",
"208.0.0.0/4:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[2],
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "64.0.0.0/2", Ports: tailcfg.PortRangeAny},
{IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "168.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny},
{IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny},
{IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny},
{IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny},
{IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-app-connector-like",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"8.0.0.0/8:*",
"16.0.0.0/8:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/16"), netip.MustParsePrefix("16.0.0.0/16")},
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[2],
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "8.0.0.0/8",
Ports: tailcfg.PortRangeAny,
},
{
IP: "16.0.0.0/8",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-app-connector-like2",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"8.0.0.0/16:*",
"16.0.0.0/16:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/8"), netip.MustParsePrefix("16.0.0.0/8")},
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[2],
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "8.0.0.0/16",
Ports: tailcfg.PortRangeAny,
},
{
IP: "16.0.0.0/16",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1817-reduce-breaks-32-mask",
pol: `
{
"tagOwners": {
"tag:access-servers": ["user100@"],
},
"groups": {
"group:access": [
"user1@"
]
},
"hosts": {
"dns1": "172.16.0.21/32",
"vlan1": "172.16.0.0/24"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:access"
],
"dst": [
"tag:access-servers:*",
"dns1:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("172.16.0.0/24")},
},
ForcedTags: []string{"tag:access-servers"},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "fd7a:115c:a1e0::1/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
{
IP: "172.16.0.21/32",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "2365-only-route-policy",
pol: `
{
"hosts": {
"router": "100.64.0.1/32",
"node": "100.64.0.2/32"
},
"acls": [
{
"action": "accept",
"src": [
"*"
],
"dst": [
"router:8000"
]
},
{
"action": "accept",
"src": [
"node"
],
"dst": [
"172.26.0.0/16:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[3],
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{p("172.16.0.0/24"), p("10.10.11.0/24"), p("10.10.12.0/24")},
},
ApprovedRoutes: []netip.Prefix{p("172.16.0.0/24"), p("10.10.11.0/24"), p("10.10.12.0/24")},
},
},
want: []tailcfg.FilterRule{},
},
}
for _, tt := range tests {
for idx, pmf := range policy.PolicyManagerFuncsForTest([]byte(tt.pol)) {
t.Run(fmt.Sprintf("%s-index%d", tt.name, idx), func(t *testing.T) {
var pm policy.PolicyManager
var err error
pm, err = pmf(users, append(tt.peers, tt.node).ViewSlice())
require.NoError(t, err)
got, _ := pm.Filter()
t.Logf("full filter:\n%s", must.Get(json.MarshalIndent(got, "", " ")))
got = policyutil.ReduceFilterRules(tt.node.View(), got)
if diff := cmp.Diff(tt.want, got); diff != "" {
log.Trace().Interface("got", got).Msg("result")
t.Errorf("TestReduceFilterRules() unexpected result (-want +got):\n%s", diff)
}
})
}
}
}

View File

@@ -82,195 +82,6 @@ func (pol *Policy) compileFilterRules(
return rules, nil
}
// compileFilterRulesForNode compiles filter rules for a specific node.
func (pol *Policy) compileFilterRulesForNode(
users types.Users,
node types.NodeView,
nodes views.Slice[types.NodeView],
) ([]tailcfg.FilterRule, error) {
if pol == nil {
return tailcfg.FilterAllowAll, nil
}
var rules []tailcfg.FilterRule
for _, acl := range pol.ACLs {
if acl.Action != ActionAccept {
return nil, ErrInvalidAction
}
aclRules, err := pol.compileACLWithAutogroupSelf(acl, users, node, nodes)
if err != nil {
log.Trace().Err(err).Msgf("compiling ACL")
continue
}
for _, rule := range aclRules {
if rule != nil {
rules = append(rules, *rule)
}
}
}
return rules, nil
}
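// A minimal sketch (illustrative, not part of the original file) of how a
// caller could build a per-node filter table with compileFilterRulesForNode;
// error handling is elided and pol, users and nodes are assumed in scope:
//
//	perNode := make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len())
//	for _, n := range nodes.All() {
//		if rules, err := pol.compileFilterRulesForNode(users, n, nodes); err == nil {
//			perNode[n.ID()] = rules
//		}
//	}
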
// compileACLWithAutogroupSelf compiles a single ACL rule, handling
// autogroup:self per-node while supporting all other alias types normally.
// It returns a slice of filter rules because when an ACL has both autogroup:self
// and other destinations, they need to be split into separate rules with different
// source filtering logic.
func (pol *Policy) compileACLWithAutogroupSelf(
acl ACL,
users types.Users,
node types.NodeView,
nodes views.Slice[types.NodeView],
) ([]*tailcfg.FilterRule, error) {
var autogroupSelfDests []AliasWithPorts
var otherDests []AliasWithPorts
for _, dest := range acl.Destinations {
if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
autogroupSelfDests = append(autogroupSelfDests, dest)
} else {
otherDests = append(otherDests, dest)
}
}
protocols, _ := acl.Protocol.parseProtocol()
var rules []*tailcfg.FilterRule
var resolvedSrcIPs []*netipx.IPSet
for _, src := range acl.Sources {
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
return nil, fmt.Errorf("autogroup:self cannot be used in sources")
}
ips, err := src.Resolve(pol, users, nodes)
if err != nil {
log.Trace().Err(err).Msgf("resolving source ips")
continue
}
if ips != nil {
resolvedSrcIPs = append(resolvedSrcIPs, ips)
}
}
if len(resolvedSrcIPs) == 0 {
return rules, nil
}
// Handle autogroup:self destinations (if any)
if len(autogroupSelfDests) > 0 {
// Pre-filter to same-user untagged devices once - reuse for both sources and destinations
sameUserNodes := make([]types.NodeView, 0)
for _, n := range nodes.All() {
if n.User().ID == node.User().ID && !n.IsTagged() {
sameUserNodes = append(sameUserNodes, n)
}
}
if len(sameUserNodes) > 0 {
// Filter sources to only same-user untagged devices
var srcIPs netipx.IPSetBuilder
for _, ips := range resolvedSrcIPs {
for _, n := range sameUserNodes {
// Check if any of this node's IPs are in the source set
for _, nodeIP := range n.IPs() {
if ips.Contains(nodeIP) {
n.AppendToIPSet(&srcIPs)
break
}
}
}
}
srcSet, err := srcIPs.IPSet()
if err != nil {
return nil, err
}
if srcSet != nil && len(srcSet.Prefixes()) > 0 {
var destPorts []tailcfg.NetPortRange
for _, dest := range autogroupSelfDests {
for _, n := range sameUserNodes {
for _, port := range dest.Ports {
for _, ip := range n.IPs() {
destPorts = append(destPorts, tailcfg.NetPortRange{
IP: ip.String(),
Ports: port,
})
}
}
}
}
if len(destPorts) > 0 {
rules = append(rules, &tailcfg.FilterRule{
SrcIPs: ipSetToPrefixStringList(srcSet),
DstPorts: destPorts,
IPProto: protocols,
})
}
}
}
}
if len(otherDests) > 0 {
var srcIPs netipx.IPSetBuilder
for _, ips := range resolvedSrcIPs {
srcIPs.AddSet(ips)
}
srcSet, err := srcIPs.IPSet()
if err != nil {
return nil, err
}
if srcSet != nil && len(srcSet.Prefixes()) > 0 {
var destPorts []tailcfg.NetPortRange
for _, dest := range otherDests {
ips, err := dest.Resolve(pol, users, nodes)
if err != nil {
log.Trace().Err(err).Msgf("resolving destination ips")
continue
}
if ips == nil {
log.Debug().Msgf("destination resolved to nil ips: %v", dest)
continue
}
prefixes := ips.Prefixes()
for _, pref := range prefixes {
for _, port := range dest.Ports {
pr := tailcfg.NetPortRange{
IP: pref.String(),
Ports: port,
}
destPorts = append(destPorts, pr)
}
}
}
if len(destPorts) > 0 {
rules = append(rules, &tailcfg.FilterRule{
SrcIPs: ipSetToPrefixStringList(srcSet),
DstPorts: destPorts,
IPProto: protocols,
})
}
}
}
return rules, nil
}
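// Example (illustrative; group:eng and tag:router:22 are made-up names, not
// taken from any policy in this repository): given an ACL such as
//
//	{"action": "accept", "src": ["group:eng"],
//	 "dst": ["autogroup:self:*", "tag:router:22"]}
//
// this function emits up to two rules for the node: one whose sources are
// narrowed to the owner's untagged devices (the autogroup:self part) and one
// that keeps the full resolved source set (the tag:router part).
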
func sshAction(accept bool, duration time.Duration) tailcfg.SSHAction {
return tailcfg.SSHAction{
Reject: !accept,
@@ -296,29 +107,21 @@ func (pol *Policy) compileSSHPolicy(
var rules []*tailcfg.SSHRule
for index, rule := range pol.SSHs {
// Separate destinations into autogroup:self and others
// This is needed because autogroup:self requires filtering sources to same-user only,
// while other destinations should use all resolved sources
var autogroupSelfDests []Alias
var otherDests []Alias
for _, dst := range rule.Destinations {
if ag, ok := dst.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
autogroupSelfDests = append(autogroupSelfDests, dst)
} else {
otherDests = append(otherDests, dst)
var dest netipx.IPSetBuilder
		for _, dst := range rule.Destinations {
			ips, err := dst.Resolve(pol, users, nodes)
if err != nil {
log.Trace().Caller().Err(err).Msgf("resolving destination ips")
}
dest.AddSet(ips)
}
// Note: Tagged nodes can't match autogroup:self destinations, but can still match other destinations
// Resolve sources once - we'll use them differently for each destination type
srcIPs, err := rule.Sources.Resolve(pol, users, nodes)
destSet, err := dest.IPSet()
if err != nil {
log.Trace().Caller().Err(err).Msgf("SSH policy compilation failed resolving source ips for rule %+v", rule)
return nil, err
}
if srcIPs == nil || len(srcIPs.Prefixes()) == 0 {
if !node.InIPSet(destSet) {
continue
}
@@ -332,9 +135,23 @@ func (pol *Policy) compileSSHPolicy(
return nil, fmt.Errorf("parsing SSH policy, unknown action %q, index: %d: %w", rule.Action, index, err)
}
var principals []*tailcfg.SSHPrincipal
srcIPs, err := rule.Sources.Resolve(pol, users, nodes)
if err != nil {
log.Trace().Caller().Err(err).Msgf("SSH policy compilation failed resolving source ips for rule %+v", rule)
continue // Skip this rule if we can't resolve sources
}
for addr := range util.IPSetAddrIter(srcIPs) {
principals = append(principals, &tailcfg.SSHPrincipal{
NodeIP: addr.String(),
})
}
userMap := make(map[string]string, len(rule.Users))
if rule.Users.ContainsNonRoot() {
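			// In tailcfg SSH user maps, "=" is understood to map the
			// requested username to itself.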
userMap["*"] = "="
// by default, we do not allow root unless explicitly stated
userMap["root"] = ""
}
@@ -344,108 +161,11 @@ func (pol *Policy) compileSSHPolicy(
for _, u := range rule.Users.NormalUsers() {
userMap[u.String()] = u.String()
}
// Handle autogroup:self destinations (if any)
// Note: Tagged nodes can't match autogroup:self, so skip this block for tagged nodes
if len(autogroupSelfDests) > 0 && !node.IsTagged() {
// Build destination set for autogroup:self (same-user untagged devices only)
var dest netipx.IPSetBuilder
for _, n := range nodes.All() {
if n.User().ID == node.User().ID && !n.IsTagged() {
n.AppendToIPSet(&dest)
}
}
destSet, err := dest.IPSet()
if err != nil {
return nil, err
}
// Only create rule if this node is in the destination set
if node.InIPSet(destSet) {
// Filter sources to only same-user untagged devices
// Pre-filter to same-user untagged devices for efficiency
sameUserNodes := make([]types.NodeView, 0)
for _, n := range nodes.All() {
if n.User().ID == node.User().ID && !n.IsTagged() {
sameUserNodes = append(sameUserNodes, n)
}
}
var filteredSrcIPs netipx.IPSetBuilder
for _, n := range sameUserNodes {
// Check if any of this node's IPs are in the source set
for _, nodeIP := range n.IPs() {
if srcIPs.Contains(nodeIP) {
n.AppendToIPSet(&filteredSrcIPs)
break // Found this node, move to next
}
}
}
filteredSrcSet, err := filteredSrcIPs.IPSet()
if err != nil {
return nil, err
}
if filteredSrcSet != nil && len(filteredSrcSet.Prefixes()) > 0 {
var principals []*tailcfg.SSHPrincipal
for addr := range util.IPSetAddrIter(filteredSrcSet) {
principals = append(principals, &tailcfg.SSHPrincipal{
NodeIP: addr.String(),
})
}
if len(principals) > 0 {
rules = append(rules, &tailcfg.SSHRule{
Principals: principals,
SSHUsers: userMap,
Action: &action,
})
}
}
}
}
// Handle other destinations (if any)
if len(otherDests) > 0 {
// Build destination set for other destinations
var dest netipx.IPSetBuilder
for _, dst := range otherDests {
ips, err := dst.Resolve(pol, users, nodes)
if err != nil {
log.Trace().Caller().Err(err).Msgf("resolving destination ips")
continue
}
if ips != nil {
dest.AddSet(ips)
}
}
destSet, err := dest.IPSet()
if err != nil {
return nil, err
}
// Only create rule if this node is in the destination set
if node.InIPSet(destSet) {
// For non-autogroup:self destinations, use all resolved sources (no filtering)
var principals []*tailcfg.SSHPrincipal
for addr := range util.IPSetAddrIter(srcIPs) {
principals = append(principals, &tailcfg.SSHPrincipal{
NodeIP: addr.String(),
})
}
if len(principals) > 0 {
rules = append(rules, &tailcfg.SSHRule{
Principals: principals,
SSHUsers: userMap,
Action: &action,
})
}
}
}
rules = append(rules, &tailcfg.SSHRule{
Principals: principals,
SSHUsers: userMap,
Action: &action,
})
}
return &tailcfg.SSHPolicy{

View File

@@ -3,7 +3,6 @@ package v2
import (
"encoding/json"
"net/netip"
"strings"
"testing"
"time"
@@ -16,14 +15,6 @@ import (
"tailscale.com/tailcfg"
)
// aliasWithPorts creates an AliasWithPorts structure from an alias and ports.
func aliasWithPorts(alias Alias, ports ...tailcfg.PortRange) AliasWithPorts {
return AliasWithPorts{
Alias: alias,
Ports: ports,
}
}
func TestParsing(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "testuser"},
@@ -795,614 +786,8 @@ func TestSSHJSONSerialization(t *testing.T) {
assert.NotContains(t, string(jsonData), `"sshUsers": null`, "SSH users should not be null")
}
func TestCompileFilterRulesForNodeWithAutogroupSelf(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1"},
{Model: gorm.Model{ID: 2}, Name: "user2"},
}
nodes := types.Nodes{
{
User: users[0],
IPv4: ap("100.64.0.1"),
},
{
User: users[0],
IPv4: ap("100.64.0.2"),
},
{
User: users[1],
IPv4: ap("100.64.0.3"),
},
{
User: users[1],
IPv4: ap("100.64.0.4"),
},
// Tagged device for user1
{
User: users[0],
IPv4: ap("100.64.0.5"),
ForcedTags: []string{"tag:test"},
},
// Tagged device for user2
{
User: users[1],
IPv4: ap("100.64.0.6"),
ForcedTags: []string{"tag:test"},
},
}
// Test: Tailscale intended usage pattern (autogroup:member + autogroup:self)
policy2 := &Policy{
ACLs: []ACL{
{
Action: "accept",
Sources: []Alias{agp("autogroup:member")},
Destinations: []AliasWithPorts{
aliasWithPorts(agp("autogroup:self"), tailcfg.PortRangeAny),
},
},
},
}
err := policy2.validate()
if err != nil {
t.Fatalf("policy validation failed: %v", err)
}
// Test compilation for user1's first node
node1 := nodes[0].View()
rules, err := policy2.compileFilterRulesForNode(users, node1, nodes.ViewSlice())
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if len(rules) != 1 {
t.Fatalf("expected 1 rule, got %d", len(rules))
}
// Check that the rule includes:
// - Sources: only user1's untagged devices (filtered by autogroup:self semantics)
// - Destinations: only user1's untagged devices (autogroup:self)
rule := rules[0]
// Sources should ONLY include user1's untagged devices (100.64.0.1, 100.64.0.2)
expectedSourceIPs := []string{"100.64.0.1", "100.64.0.2"}
for _, expectedIP := range expectedSourceIPs {
found := false
addr := netip.MustParseAddr(expectedIP)
for _, prefix := range rule.SrcIPs {
pref := netip.MustParsePrefix(prefix)
if pref.Contains(addr) {
found = true
break
}
}
if !found {
t.Errorf("expected source IP %s to be covered by generated prefixes %v", expectedIP, rule.SrcIPs)
}
}
// Verify that other users' devices and tagged devices are not included in sources
excludedSourceIPs := []string{"100.64.0.3", "100.64.0.4", "100.64.0.5", "100.64.0.6"}
for _, excludedIP := range excludedSourceIPs {
addr := netip.MustParseAddr(excludedIP)
for _, prefix := range rule.SrcIPs {
pref := netip.MustParsePrefix(prefix)
if pref.Contains(addr) {
t.Errorf("SECURITY VIOLATION: source IP %s should not be included but found in prefix %s", excludedIP, prefix)
}
}
}
expectedDestIPs := []string{"100.64.0.1", "100.64.0.2"}
actualDestIPs := make([]string, 0, len(rule.DstPorts))
for _, dst := range rule.DstPorts {
actualDestIPs = append(actualDestIPs, dst.IP)
}
for _, expectedIP := range expectedDestIPs {
found := false
for _, actualIP := range actualDestIPs {
if actualIP == expectedIP {
found = true
break
}
}
if !found {
t.Errorf("expected destination IP %s to be included, got: %v", expectedIP, actualDestIPs)
}
}
// Verify that other users' devices and tagged devices are not in destinations
excludedDestIPs := []string{"100.64.0.3", "100.64.0.4", "100.64.0.5", "100.64.0.6"}
for _, excludedIP := range excludedDestIPs {
for _, actualIP := range actualDestIPs {
if actualIP == excludedIP {
t.Errorf("SECURITY: destination IP %s should not be included but found in destinations", excludedIP)
}
}
}
}
func TestAutogroupSelfInSourceIsRejected(t *testing.T) {
// Test that autogroup:self cannot be used in sources (per Tailscale spec)
policy := &Policy{
ACLs: []ACL{
{
Action: "accept",
Sources: []Alias{agp("autogroup:self")},
Destinations: []AliasWithPorts{
aliasWithPorts(agp("autogroup:member"), tailcfg.PortRangeAny),
},
},
},
}
err := policy.validate()
if err == nil {
t.Error("expected validation error when using autogroup:self in sources")
}
if !strings.Contains(err.Error(), "autogroup:self") {
t.Errorf("expected error message to mention autogroup:self, got: %v", err)
}
}
// TestAutogroupSelfWithSpecificUserSource verifies that when autogroup:self is in
// the destination and a specific user is in the source, only that user's devices
// are allowed (and only if they match the target user).
func TestAutogroupSelfWithSpecificUserSource(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1"},
{Model: gorm.Model{ID: 2}, Name: "user2"},
}
nodes := types.Nodes{
{User: users[0], IPv4: ap("100.64.0.1")},
{User: users[0], IPv4: ap("100.64.0.2")},
{User: users[1], IPv4: ap("100.64.0.3")},
{User: users[1], IPv4: ap("100.64.0.4")},
}
policy := &Policy{
ACLs: []ACL{
{
Action: "accept",
Sources: []Alias{up("user1@")},
Destinations: []AliasWithPorts{
aliasWithPorts(agp("autogroup:self"), tailcfg.PortRangeAny),
},
},
},
}
err := policy.validate()
require.NoError(t, err)
// For user1's node: sources should be user1's devices
node1 := nodes[0].View()
rules, err := policy.compileFilterRulesForNode(users, node1, nodes.ViewSlice())
require.NoError(t, err)
require.Len(t, rules, 1)
expectedSourceIPs := []string{"100.64.0.1", "100.64.0.2"}
for _, expectedIP := range expectedSourceIPs {
found := false
addr := netip.MustParseAddr(expectedIP)
for _, prefix := range rules[0].SrcIPs {
pref := netip.MustParsePrefix(prefix)
if pref.Contains(addr) {
found = true
break
}
}
assert.True(t, found, "expected source IP %s to be present", expectedIP)
}
actualDestIPs := make([]string, 0, len(rules[0].DstPorts))
for _, dst := range rules[0].DstPorts {
actualDestIPs = append(actualDestIPs, dst.IP)
}
assert.ElementsMatch(t, expectedSourceIPs, actualDestIPs)
node2 := nodes[2].View()
rules2, err := policy.compileFilterRulesForNode(users, node2, nodes.ViewSlice())
require.NoError(t, err)
assert.Empty(t, rules2, "user2's node should have no rules (user1@ devices can't match user2's self)")
}
// TestAutogroupSelfWithGroupSource verifies that when a group is used as source
// and autogroup:self as destination, only group members who are the same user
// as the target are allowed.
func TestAutogroupSelfWithGroupSource(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1"},
{Model: gorm.Model{ID: 2}, Name: "user2"},
{Model: gorm.Model{ID: 3}, Name: "user3"},
}
nodes := types.Nodes{
{User: users[0], IPv4: ap("100.64.0.1")},
{User: users[0], IPv4: ap("100.64.0.2")},
{User: users[1], IPv4: ap("100.64.0.3")},
{User: users[1], IPv4: ap("100.64.0.4")},
{User: users[2], IPv4: ap("100.64.0.5")},
}
policy := &Policy{
Groups: Groups{
Group("group:admins"): []Username{Username("user1@"), Username("user2@")},
},
ACLs: []ACL{
{
Action: "accept",
Sources: []Alias{gp("group:admins")},
Destinations: []AliasWithPorts{
aliasWithPorts(agp("autogroup:self"), tailcfg.PortRangeAny),
},
},
},
}
err := policy.validate()
require.NoError(t, err)
// (group:admins has user1+user2, but autogroup:self filters to same user)
node1 := nodes[0].View()
rules, err := policy.compileFilterRulesForNode(users, node1, nodes.ViewSlice())
require.NoError(t, err)
require.Len(t, rules, 1)
expectedSrcIPs := []string{"100.64.0.1", "100.64.0.2"}
for _, expectedIP := range expectedSrcIPs {
found := false
addr := netip.MustParseAddr(expectedIP)
for _, prefix := range rules[0].SrcIPs {
pref := netip.MustParsePrefix(prefix)
if pref.Contains(addr) {
found = true
break
}
}
assert.True(t, found, "expected source IP %s for user1", expectedIP)
}
node3 := nodes[4].View()
rules3, err := policy.compileFilterRulesForNode(users, node3, nodes.ViewSlice())
require.NoError(t, err)
assert.Empty(t, rules3, "user3 should have no rules")
}
// createAddr parses ip and returns a pointer to the address; test helper.
func createAddr(ip string) *netip.Addr {
addr, _ := netip.ParseAddr(ip)
return &addr
}
// TestSSHWithAutogroupSelfInDestination verifies that SSH policies work correctly
// with autogroup:self in destinations
func TestSSHWithAutogroupSelfInDestination(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1"},
{Model: gorm.Model{ID: 2}, Name: "user2"},
}
nodes := types.Nodes{
// User1's nodes
{User: users[0], IPv4: ap("100.64.0.1"), Hostname: "user1-node1"},
{User: users[0], IPv4: ap("100.64.0.2"), Hostname: "user1-node2"},
// User2's nodes
{User: users[1], IPv4: ap("100.64.0.3"), Hostname: "user2-node1"},
{User: users[1], IPv4: ap("100.64.0.4"), Hostname: "user2-node2"},
// Tagged node for user1 (should be excluded)
{User: users[0], IPv4: ap("100.64.0.5"), Hostname: "user1-tagged", ForcedTags: []string{"tag:server"}},
}
policy := &Policy{
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{agp("autogroup:member")},
Destinations: SSHDstAliases{agp("autogroup:self")},
Users: []SSHUser{"autogroup:nonroot"},
},
},
}
err := policy.validate()
require.NoError(t, err)
// Test for user1's first node
node1 := nodes[0].View()
sshPolicy, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
require.NoError(t, err)
require.NotNil(t, sshPolicy)
require.Len(t, sshPolicy.Rules, 1)
rule := sshPolicy.Rules[0]
// Principals should only include user1's untagged devices
require.Len(t, rule.Principals, 2, "should have 2 principals (user1's 2 untagged nodes)")
principalIPs := make([]string, len(rule.Principals))
for i, p := range rule.Principals {
principalIPs[i] = p.NodeIP
}
assert.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, principalIPs)
// Test for user2's first node
node3 := nodes[2].View()
sshPolicy2, err := policy.compileSSHPolicy(users, node3, nodes.ViewSlice())
require.NoError(t, err)
require.NotNil(t, sshPolicy2)
require.Len(t, sshPolicy2.Rules, 1)
rule2 := sshPolicy2.Rules[0]
// Principals should only include user2's untagged devices
require.Len(t, rule2.Principals, 2, "should have 2 principals (user2's 2 untagged nodes)")
principalIPs2 := make([]string, len(rule2.Principals))
for i, p := range rule2.Principals {
principalIPs2[i] = p.NodeIP
}
assert.ElementsMatch(t, []string{"100.64.0.3", "100.64.0.4"}, principalIPs2)
// Test for tagged node (should have no SSH rules)
node5 := nodes[4].View()
sshPolicy3, err := policy.compileSSHPolicy(users, node5, nodes.ViewSlice())
require.NoError(t, err)
if sshPolicy3 != nil {
assert.Empty(t, sshPolicy3.Rules, "tagged nodes should not get SSH rules with autogroup:self")
}
}
// TestSSHWithAutogroupSelfAndSpecificUser verifies that when a specific user
// is in the source and autogroup:self in destination, only that user's devices
// can SSH (and only if they match the target user)
func TestSSHWithAutogroupSelfAndSpecificUser(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1"},
{Model: gorm.Model{ID: 2}, Name: "user2"},
}
nodes := types.Nodes{
{User: users[0], IPv4: ap("100.64.0.1")},
{User: users[0], IPv4: ap("100.64.0.2")},
{User: users[1], IPv4: ap("100.64.0.3")},
{User: users[1], IPv4: ap("100.64.0.4")},
}
policy := &Policy{
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{up("user1@")},
Destinations: SSHDstAliases{agp("autogroup:self")},
Users: []SSHUser{"ubuntu"},
},
},
}
err := policy.validate()
require.NoError(t, err)
// For user1's node: should allow SSH from user1's devices
node1 := nodes[0].View()
sshPolicy, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
require.NoError(t, err)
require.NotNil(t, sshPolicy)
require.Len(t, sshPolicy.Rules, 1)
rule := sshPolicy.Rules[0]
require.Len(t, rule.Principals, 2, "user1 should have 2 principals")
principalIPs := make([]string, len(rule.Principals))
for i, p := range rule.Principals {
principalIPs[i] = p.NodeIP
}
assert.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, principalIPs)
// For user2's node: should have no rules (user1's devices can't match user2's self)
node3 := nodes[2].View()
sshPolicy2, err := policy.compileSSHPolicy(users, node3, nodes.ViewSlice())
require.NoError(t, err)
if sshPolicy2 != nil {
assert.Empty(t, sshPolicy2.Rules, "user2 should have no SSH rules since source is user1")
}
}
// TestSSHWithAutogroupSelfAndGroup verifies SSH with group sources and autogroup:self destinations
func TestSSHWithAutogroupSelfAndGroup(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1"},
{Model: gorm.Model{ID: 2}, Name: "user2"},
{Model: gorm.Model{ID: 3}, Name: "user3"},
}
nodes := types.Nodes{
{User: users[0], IPv4: ap("100.64.0.1")},
{User: users[0], IPv4: ap("100.64.0.2")},
{User: users[1], IPv4: ap("100.64.0.3")},
{User: users[1], IPv4: ap("100.64.0.4")},
{User: users[2], IPv4: ap("100.64.0.5")},
}
policy := &Policy{
Groups: Groups{
Group("group:admins"): []Username{Username("user1@"), Username("user2@")},
},
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{gp("group:admins")},
Destinations: SSHDstAliases{agp("autogroup:self")},
Users: []SSHUser{"root"},
},
},
}
err := policy.validate()
require.NoError(t, err)
// For user1's node: should allow SSH from user1's devices only (not user2's)
node1 := nodes[0].View()
sshPolicy, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
require.NoError(t, err)
require.NotNil(t, sshPolicy)
require.Len(t, sshPolicy.Rules, 1)
rule := sshPolicy.Rules[0]
require.Len(t, rule.Principals, 2, "user1 should have 2 principals (only user1's nodes)")
principalIPs := make([]string, len(rule.Principals))
for i, p := range rule.Principals {
principalIPs[i] = p.NodeIP
}
assert.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, principalIPs)
// For user3's node: should have no rules (not in group:admins)
node5 := nodes[4].View()
sshPolicy2, err := policy.compileSSHPolicy(users, node5, nodes.ViewSlice())
require.NoError(t, err)
if sshPolicy2 != nil {
assert.Empty(t, sshPolicy2.Rules, "user3 should have no SSH rules (not in group)")
}
}
// TestSSHWithAutogroupSelfExcludesTaggedDevices verifies that tagged devices
// are excluded from both sources and destinations when autogroup:self is used
func TestSSHWithAutogroupSelfExcludesTaggedDevices(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1"},
}
nodes := types.Nodes{
{User: users[0], IPv4: ap("100.64.0.1"), Hostname: "untagged1"},
{User: users[0], IPv4: ap("100.64.0.2"), Hostname: "untagged2"},
{User: users[0], IPv4: ap("100.64.0.3"), Hostname: "tagged1", ForcedTags: []string{"tag:server"}},
{User: users[0], IPv4: ap("100.64.0.4"), Hostname: "tagged2", ForcedTags: []string{"tag:web"}},
}
policy := &Policy{
TagOwners: TagOwners{
Tag("tag:server"): Owners{up("user1@")},
Tag("tag:web"): Owners{up("user1@")},
},
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{agp("autogroup:member")},
Destinations: SSHDstAliases{agp("autogroup:self")},
Users: []SSHUser{"admin"},
},
},
}
err := policy.validate()
require.NoError(t, err)
// For untagged node: should only get principals from other untagged nodes
node1 := nodes[0].View()
sshPolicy, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
require.NoError(t, err)
require.NotNil(t, sshPolicy)
require.Len(t, sshPolicy.Rules, 1)
rule := sshPolicy.Rules[0]
require.Len(t, rule.Principals, 2, "should only have 2 principals (untagged nodes)")
principalIPs := make([]string, len(rule.Principals))
for i, p := range rule.Principals {
principalIPs[i] = p.NodeIP
}
assert.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, principalIPs,
"should only include untagged devices")
// For tagged node: should get no SSH rules
node3 := nodes[2].View()
sshPolicy2, err := policy.compileSSHPolicy(users, node3, nodes.ViewSlice())
require.NoError(t, err)
if sshPolicy2 != nil {
assert.Empty(t, sshPolicy2.Rules, "tagged node should get no SSH rules with autogroup:self")
}
}
// TestSSHWithAutogroupSelfAndMixedDestinations tests that SSH rules can have both
// autogroup:self and other destinations (like tag:router) in the same rule, and that
// autogroup:self filtering only applies to autogroup:self destinations, not others.
func TestSSHWithAutogroupSelfAndMixedDestinations(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1"},
{Model: gorm.Model{ID: 2}, Name: "user2"},
}
nodes := types.Nodes{
{User: users[0], IPv4: ap("100.64.0.1"), Hostname: "user1-device"},
{User: users[0], IPv4: ap("100.64.0.2"), Hostname: "user1-device2"},
{User: users[1], IPv4: ap("100.64.0.3"), Hostname: "user2-device"},
{User: users[1], IPv4: ap("100.64.0.4"), Hostname: "user2-router", ForcedTags: []string{"tag:router"}},
}
policy := &Policy{
TagOwners: TagOwners{
Tag("tag:router"): Owners{up("user2@")},
},
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{agp("autogroup:member")},
Destinations: SSHDstAliases{agp("autogroup:self"), tp("tag:router")},
Users: []SSHUser{"admin"},
},
},
}
err := policy.validate()
require.NoError(t, err)
// Test 1: Compile for user1's device (should only match autogroup:self destination)
node1 := nodes[0].View()
sshPolicy1, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
require.NoError(t, err)
require.NotNil(t, sshPolicy1)
require.Len(t, sshPolicy1.Rules, 1, "user1's device should have 1 SSH rule (autogroup:self)")
// Verify autogroup:self rule has filtered sources (only same-user devices)
selfRule := sshPolicy1.Rules[0]
require.Len(t, selfRule.Principals, 2, "autogroup:self rule should only have user1's devices")
selfPrincipals := make([]string, len(selfRule.Principals))
for i, p := range selfRule.Principals {
selfPrincipals[i] = p.NodeIP
}
require.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, selfPrincipals,
"autogroup:self rule should only include same-user untagged devices")
// Test 2: Compile for router (should only match tag:router destination)
routerNode := nodes[3].View() // user2-router
sshPolicyRouter, err := policy.compileSSHPolicy(users, routerNode, nodes.ViewSlice())
require.NoError(t, err)
require.NotNil(t, sshPolicyRouter)
require.Len(t, sshPolicyRouter.Rules, 1, "router should have 1 SSH rule (tag:router)")
routerRule := sshPolicyRouter.Rules[0]
routerPrincipals := make([]string, len(routerRule.Principals))
for i, p := range routerRule.Principals {
routerPrincipals[i] = p.NodeIP
}
require.Contains(t, routerPrincipals, "100.64.0.1", "router rule should include user1's device (unfiltered sources)")
require.Contains(t, routerPrincipals, "100.64.0.2", "router rule should include user1's other device (unfiltered sources)")
require.Contains(t, routerPrincipals, "100.64.0.3", "router rule should include user2's device (unfiltered sources)")
}

View File

@@ -9,7 +9,6 @@ import (
"sync"
"github.com/juanfont/headscale/hscontrol/policy/matcher"
"github.com/juanfont/headscale/hscontrol/policy/policyutil"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog/log"
"go4.org/netipx"
@@ -39,12 +38,6 @@ type PolicyManager struct {
// Lazy map of SSH policies
sshPolicyMap map[types.NodeID]*tailcfg.SSHPolicy
// Lazy map of per-node compiled filter rules (unreduced, for autogroup:self)
compiledFilterRulesMap map[types.NodeID][]tailcfg.FilterRule
// Lazy map of per-node filter rules (reduced, for packet filters)
filterRulesMap map[types.NodeID][]tailcfg.FilterRule
usesAutogroupSelf bool
}
// NewPolicyManager creates a new PolicyManager from a policy file and a list of users and nodes.
@@ -57,13 +50,10 @@ func NewPolicyManager(b []byte, users []types.User, nodes views.Slice[types.Node
}
pm := PolicyManager{
pol: policy,
users: users,
nodes: nodes,
sshPolicyMap: make(map[types.NodeID]*tailcfg.SSHPolicy, nodes.Len()),
compiledFilterRulesMap: make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()),
filterRulesMap: make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()),
usesAutogroupSelf: policy.usesAutogroupSelf(),
pol: policy,
users: users,
nodes: nodes,
sshPolicyMap: make(map[types.NodeID]*tailcfg.SSHPolicy, nodes.Len()),
}
_, err = pm.updateLocked()
@@ -82,18 +72,8 @@ func (pm *PolicyManager) updateLocked() (bool, error) {
// policies for nodes that have changed. Particularly if the only difference is
// that nodes has been added or removed.
clear(pm.sshPolicyMap)
clear(pm.compiledFilterRulesMap)
clear(pm.filterRulesMap)
// Check if policy uses autogroup:self
pm.usesAutogroupSelf = pm.pol.usesAutogroupSelf()
var filter []tailcfg.FilterRule
var err error
// Standard compilation for all policies
filter, err = pm.pol.compileFilterRules(pm.users, pm.nodes)
filter, err := pm.pol.compileFilterRules(pm.users, pm.nodes)
if err != nil {
return false, fmt.Errorf("compiling filter rules: %w", err)
}
@@ -238,197 +218,6 @@ func (pm *PolicyManager) Filter() ([]tailcfg.FilterRule, []matcher.Match) {
return pm.filter, pm.matchers
}
// BuildPeerMap constructs peer relationship maps for the given nodes.
// For global filters, it uses the global filter matchers for all nodes.
// For autogroup:self policies (empty global filter), it builds per-node
// peer maps using each node's specific filter rules.
func (pm *PolicyManager) BuildPeerMap(nodes views.Slice[types.NodeView]) map[types.NodeID][]types.NodeView {
if pm == nil {
return nil
}
pm.mu.Lock()
defer pm.mu.Unlock()
// If we have a global filter, use it for all nodes (normal case)
if !pm.usesAutogroupSelf {
ret := make(map[types.NodeID][]types.NodeView, nodes.Len())
	// Build the map of all peers according to the matchers.
	// Compared to ReduceNodes, which recomputes the full peer list for every
	// node (n^2 checks), this walks each unordered pair once (n^2/2 checks)
	// and records the relationship for both nodes as soon as it is found.
for i := range nodes.Len() {
for j := i + 1; j < nodes.Len(); j++ {
if nodes.At(i).ID() == nodes.At(j).ID() {
continue
}
if nodes.At(i).CanAccess(pm.matchers, nodes.At(j)) || nodes.At(j).CanAccess(pm.matchers, nodes.At(i)) {
ret[nodes.At(i).ID()] = append(ret[nodes.At(i).ID()], nodes.At(j))
ret[nodes.At(j).ID()] = append(ret[nodes.At(j).ID()], nodes.At(i))
}
}
}
return ret
}
// For autogroup:self (empty global filter), build per-node peer relationships
ret := make(map[types.NodeID][]types.NodeView, nodes.Len())
// Pre-compute per-node matchers using unreduced compiled rules
// We need unreduced rules to determine peer relationships correctly.
// Reduced rules only show destinations where the node is the target,
// but peer relationships require the full bidirectional access rules.
nodeMatchers := make(map[types.NodeID][]matcher.Match, nodes.Len())
for _, node := range nodes.All() {
filter, err := pm.compileFilterRulesForNodeLocked(node)
if err != nil || len(filter) == 0 {
continue
}
nodeMatchers[node.ID()] = matcher.MatchesFromFilterRules(filter)
}
// Check each node pair for peer relationships.
// Start j at i+1 to avoid checking the same pair twice and creating duplicates.
// We check both directions (i->j and j->i) since ACLs can be asymmetric.
for i := range nodes.Len() {
nodeI := nodes.At(i)
matchersI, hasFilterI := nodeMatchers[nodeI.ID()]
for j := i + 1; j < nodes.Len(); j++ {
nodeJ := nodes.At(j)
matchersJ, hasFilterJ := nodeMatchers[nodeJ.ID()]
// Check if nodeI can access nodeJ
if hasFilterI && nodeI.CanAccess(matchersI, nodeJ) {
ret[nodeI.ID()] = append(ret[nodeI.ID()], nodeJ)
}
// Check if nodeJ can access nodeI
if hasFilterJ && nodeJ.CanAccess(matchersJ, nodeI) {
ret[nodeJ.ID()] = append(ret[nodeJ.ID()], nodeI)
}
}
}
return ret
}
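// Assumed call pattern (a sketch, not from this file): build the peer map
// once per update, then fetch each node's reduced packet filter separately.
//
//	peers := pm.BuildPeerMap(nodes)
//	for _, n := range nodes.All() {
//		rules, _ := pm.FilterForNode(n)
//		push(n, peers[n.ID()], rules) // push is a hypothetical sender
//	}
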
// compileFilterRulesForNodeLocked returns the unreduced compiled filter rules for a node
// when using autogroup:self. This is used by BuildPeerMap to determine peer relationships.
// For packet filters sent to nodes, use filterForNodeLocked which returns reduced rules.
func (pm *PolicyManager) compileFilterRulesForNodeLocked(node types.NodeView) ([]tailcfg.FilterRule, error) {
if pm == nil {
return nil, nil
}
// Check if we have cached compiled rules
if rules, ok := pm.compiledFilterRulesMap[node.ID()]; ok {
return rules, nil
}
// Compile per-node rules with autogroup:self expanded
rules, err := pm.pol.compileFilterRulesForNode(pm.users, node, pm.nodes)
if err != nil {
return nil, fmt.Errorf("compiling filter rules for node: %w", err)
}
// Cache the unreduced compiled rules
pm.compiledFilterRulesMap[node.ID()] = rules
return rules, nil
}
// filterForNodeLocked returns the filter rules for a specific node, already reduced
// to only include rules relevant to that node.
// This is a lock-free version of FilterForNode for internal use when the lock is already held.
// BuildPeerMap already holds the lock, so we need a version that doesn't re-acquire it.
func (pm *PolicyManager) filterForNodeLocked(node types.NodeView) ([]tailcfg.FilterRule, error) {
if pm == nil {
return nil, nil
}
if !pm.usesAutogroupSelf {
// For global filters, reduce to only rules relevant to this node.
// Cache the reduced filter per node for efficiency.
if rules, ok := pm.filterRulesMap[node.ID()]; ok {
return rules, nil
}
// Use policyutil.ReduceFilterRules for global filter reduction.
reducedFilter := policyutil.ReduceFilterRules(node, pm.filter)
pm.filterRulesMap[node.ID()] = reducedFilter
return reducedFilter, nil
}
// For autogroup:self, compile per-node rules then reduce them.
// Check if we have cached reduced rules for this node.
if rules, ok := pm.filterRulesMap[node.ID()]; ok {
return rules, nil
}
// Get unreduced compiled rules
compiledRules, err := pm.compileFilterRulesForNodeLocked(node)
if err != nil {
return nil, err
}
// Reduce the compiled rules to only destinations relevant to this node
reducedFilter := policyutil.ReduceFilterRules(node, compiledRules)
// Cache the reduced filter
pm.filterRulesMap[node.ID()] = reducedFilter
return reducedFilter, nil
}
// FilterForNode returns the filter rules for a specific node, already reduced
// to only include rules relevant to that node.
// If the policy uses autogroup:self, this returns node-specific compiled rules.
// Otherwise, it returns the global filter reduced for this node.
func (pm *PolicyManager) FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error) {
if pm == nil {
return nil, nil
}
pm.mu.Lock()
defer pm.mu.Unlock()
return pm.filterForNodeLocked(node)
}
// MatchersForNode returns the matchers for peer relationship determination for a specific node.
// These are UNREDUCED matchers - they include all rules where the node could be either source or destination.
// This is different from FilterForNode which returns REDUCED rules for packet filtering.
//
// For global policies: returns the global matchers (same for all nodes)
// For autogroup:self: returns node-specific matchers from unreduced compiled rules
func (pm *PolicyManager) MatchersForNode(node types.NodeView) ([]matcher.Match, error) {
if pm == nil {
return nil, nil
}
pm.mu.Lock()
defer pm.mu.Unlock()
// For global policies, return the shared global matchers
if !pm.usesAutogroupSelf {
return pm.matchers, nil
}
// For autogroup:self, get unreduced compiled rules and create matchers
compiledRules, err := pm.compileFilterRulesForNodeLocked(node)
if err != nil {
return nil, err
}
// Create matchers from unreduced rules for peer relationship determination
return matcher.MatchesFromFilterRules(compiledRules), nil
}
// SetUsers updates the users in the policy manager and updates the filter rules.
func (pm *PolicyManager) SetUsers(users []types.User) (bool, error) {
if pm == nil {
@@ -439,23 +228,7 @@ func (pm *PolicyManager) SetUsers(users []types.User) (bool, error) {
defer pm.mu.Unlock()
pm.users = users
// Clear SSH policy map when users change to force SSH policy recomputation
// This ensures that if SSH policy compilation previously failed due to missing users,
// it will be retried with the new user list
clear(pm.sshPolicyMap)
changed, err := pm.updateLocked()
if err != nil {
return false, err
}
// If SSH policies exist, force a policy change when users are updated
// This ensures nodes get updated SSH policies even if other policy hashes didn't change
if pm.pol != nil && pm.pol.SSHs != nil && len(pm.pol.SSHs) > 0 {
return true, nil
}
return changed, nil
return pm.updateLocked()
}
// SetNodes updates the nodes in the policy manager and updates the filter rules.
@@ -466,41 +239,9 @@ func (pm *PolicyManager) SetNodes(nodes views.Slice[types.NodeView]) (bool, erro
pm.mu.Lock()
defer pm.mu.Unlock()
oldNodeCount := pm.nodes.Len()
newNodeCount := nodes.Len()
// Invalidate cache entries for nodes that changed.
// For autogroup:self: invalidate all nodes belonging to affected users (peer changes).
// For global policies: invalidate only nodes whose properties changed (IPs, routes).
pm.invalidateNodeCache(nodes)
pm.nodes = nodes
nodesChanged := oldNodeCount != newNodeCount
// When nodes are added/removed, we must recompile filters because:
// 1. User/group aliases (like "user1@") resolve to node IPs
// 2. Filter compilation needs nodes to generate rules
// 3. Without nodes, filters compile to empty (0 rules)
//
// For autogroup:self: return true when nodes change even if the global filter
// hash didn't change. The global filter is empty for autogroup:self (each node
// has its own filter), so the hash never changes. But peer relationships DO
// change when nodes are added/removed, so we must signal this to trigger updates.
// For global policies: the filter must be recompiled to include the new nodes.
if nodesChanged {
// Recompile filter with the new node list
_, err := pm.updateLocked()
if err != nil {
return false, err
}
// Always return true when nodes changed, even if filter hash didn't change
// (can happen with autogroup:self or when nodes are added but don't affect rules)
return true, nil
}
return false, nil
return pm.updateLocked()
}
func (pm *PolicyManager) NodeCanHaveTag(node types.NodeView, tag string) bool {
@@ -642,162 +383,3 @@ func (pm *PolicyManager) DebugString() string {
return sb.String()
}
// invalidateAutogroupSelfCache intelligently clears only the cache entries that need to be
// invalidated when using autogroup:self policies. This is much more efficient than clearing
// the entire cache.
func (pm *PolicyManager) invalidateAutogroupSelfCache(oldNodes, newNodes views.Slice[types.NodeView]) {
// Build maps for efficient lookup
oldNodeMap := make(map[types.NodeID]types.NodeView)
for _, node := range oldNodes.All() {
oldNodeMap[node.ID()] = node
}
newNodeMap := make(map[types.NodeID]types.NodeView)
for _, node := range newNodes.All() {
newNodeMap[node.ID()] = node
}
// Track which users are affected by changes
affectedUsers := make(map[uint]struct{})
// Check for removed nodes
for nodeID, oldNode := range oldNodeMap {
if _, exists := newNodeMap[nodeID]; !exists {
affectedUsers[oldNode.User().ID] = struct{}{}
}
}
// Check for added nodes
for nodeID, newNode := range newNodeMap {
if _, exists := oldNodeMap[nodeID]; !exists {
affectedUsers[newNode.User().ID] = struct{}{}
}
}
// Check for modified nodes (user changes, tag changes, IP changes)
for nodeID, newNode := range newNodeMap {
if oldNode, exists := oldNodeMap[nodeID]; exists {
// Check if user changed
if oldNode.User().ID != newNode.User().ID {
affectedUsers[oldNode.User().ID] = struct{}{}
affectedUsers[newNode.User().ID] = struct{}{}
}
// Check if tag status changed
if oldNode.IsTagged() != newNode.IsTagged() {
affectedUsers[newNode.User().ID] = struct{}{}
}
// Check if IPs changed (simple check - could be more sophisticated)
oldIPs := oldNode.IPs()
newIPs := newNode.IPs()
if len(oldIPs) != len(newIPs) {
affectedUsers[newNode.User().ID] = struct{}{}
} else {
// Check if any IPs are different
for i, oldIP := range oldIPs {
if i >= len(newIPs) || oldIP != newIPs[i] {
affectedUsers[newNode.User().ID] = struct{}{}
break
}
}
}
}
}
// Clear cache entries for affected users only
// For autogroup:self, we need to clear all nodes belonging to affected users
// because autogroup:self rules depend on the entire user's device set
for nodeID := range pm.filterRulesMap {
// Find the user for this cached node
var nodeUserID uint
found := false
// Check in new nodes first
for _, node := range newNodes.All() {
if node.ID() == nodeID {
nodeUserID = node.User().ID
found = true
break
}
}
// If not found in new nodes, check old nodes
if !found {
for _, node := range oldNodes.All() {
if node.ID() == nodeID {
nodeUserID = node.User().ID
found = true
break
}
}
}
// If we found the user and they're affected, clear this cache entry
if found {
if _, affected := affectedUsers[nodeUserID]; affected {
delete(pm.compiledFilterRulesMap, nodeID)
delete(pm.filterRulesMap, nodeID)
}
} else {
// Node not found in either old or new list, clear it
delete(pm.compiledFilterRulesMap, nodeID)
delete(pm.filterRulesMap, nodeID)
}
}
if len(affectedUsers) > 0 {
log.Debug().
Int("affected_users", len(affectedUsers)).
Int("remaining_cache_entries", len(pm.filterRulesMap)).
Msg("Selectively cleared autogroup:self cache for affected users")
}
}
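// Worked example (mirrors the "user_changed" case in the tests): if user1
// owns nodes 1 and 2 and node 2 is reassigned to user3, affectedUsers
// becomes {user1, user3}, so the entries for node 1, node 2 and every
// user3 node are dropped while user2's entries survive.
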
// invalidateNodeCache invalidates cache entries based on what changed.
func (pm *PolicyManager) invalidateNodeCache(newNodes views.Slice[types.NodeView]) {
if pm.usesAutogroupSelf {
// For autogroup:self, a node's filter depends on its peers (same user).
// When any node in a user changes, all nodes for that user need invalidation.
pm.invalidateAutogroupSelfCache(pm.nodes, newNodes)
} else {
// For global policies, a node's filter depends only on its own properties.
// Only invalidate nodes whose properties actually changed.
pm.invalidateGlobalPolicyCache(newNodes)
}
}
// invalidateGlobalPolicyCache invalidates only nodes whose properties affecting
// ReduceFilterRules changed. For global policies, each node's filter is independent.
func (pm *PolicyManager) invalidateGlobalPolicyCache(newNodes views.Slice[types.NodeView]) {
oldNodeMap := make(map[types.NodeID]types.NodeView)
for _, node := range pm.nodes.All() {
oldNodeMap[node.ID()] = node
}
newNodeMap := make(map[types.NodeID]types.NodeView)
for _, node := range newNodes.All() {
newNodeMap[node.ID()] = node
}
// Invalidate nodes whose properties changed
for nodeID, newNode := range newNodeMap {
oldNode, existed := oldNodeMap[nodeID]
if !existed {
// New node - no cache entry yet, will be lazily calculated
continue
}
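		// HasNetworkChanges is expected to report changes to the inputs
		// ReduceFilterRules depends on, i.e. the node's IPs and its
		// announced or approved routes.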
if newNode.HasNetworkChanges(oldNode) {
delete(pm.filterRulesMap, nodeID)
}
}
// Remove deleted nodes from cache
for nodeID := range pm.filterRulesMap {
if _, exists := newNodeMap[nodeID]; !exists {
delete(pm.filterRulesMap, nodeID)
}
}
}

View File

@@ -1,8 +1,6 @@
package v2
import (
"net/netip"
"slices"
"testing"
"github.com/google/go-cmp/cmp"
@@ -68,454 +66,3 @@ func TestPolicyManager(t *testing.T) {
})
}
}
func TestInvalidateAutogroupSelfCache(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1", Email: "user1@headscale.net"},
{Model: gorm.Model{ID: 2}, Name: "user2", Email: "user2@headscale.net"},
{Model: gorm.Model{ID: 3}, Name: "user3", Email: "user3@headscale.net"},
}
policy := `{
"acls": [
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
}
]
}`
initialNodes := types.Nodes{
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
}
for i, n := range initialNodes {
n.ID = types.NodeID(i + 1)
}
pm, err := NewPolicyManager([]byte(policy), users, initialNodes.ViewSlice())
require.NoError(t, err)
// Add to cache by calling FilterForNode for each node
for _, n := range initialNodes {
_, err := pm.FilterForNode(n.View())
require.NoError(t, err)
}
require.Equal(t, len(initialNodes), len(pm.filterRulesMap))
tests := []struct {
name string
newNodes types.Nodes
expectedCleared int
description string
}{
{
name: "no_changes",
newNodes: types.Nodes{
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
},
expectedCleared: 0,
description: "No changes should clear no cache entries",
},
{
name: "node_added",
newNodes: types.Nodes{
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
node("user1-node3", "100.64.0.5", "fd7a:115c:a1e0::5", users[0], nil), // New node
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
},
expectedCleared: 2, // user1's existing nodes should be cleared
description: "Adding a node should clear cache for that user's existing nodes",
},
{
name: "node_removed",
newNodes: types.Nodes{
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
// user1-node2 removed
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
},
expectedCleared: 2, // user1's remaining node + removed node should be cleared
description: "Removing a node should clear cache for that user's remaining nodes",
},
{
name: "user_changed",
newNodes: types.Nodes{
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[2], nil), // Changed to user3
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
},
			expectedCleared: 3, // user1's remaining node + the reassigned node + user3's node should be cleared
description: "Changing a node's user should clear cache for both old and new users",
},
{
name: "ip_changed",
newNodes: types.Nodes{
node("user1-node1", "100.64.0.10", "fd7a:115c:a1e0::10", users[0], nil), // IP changed
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
},
expectedCleared: 2, // user1's nodes should be cleared
description: "Changing a node's IP should clear cache for that user's nodes",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
for i, n := range tt.newNodes {
found := false
for _, origNode := range initialNodes {
if n.Hostname == origNode.Hostname {
n.ID = origNode.ID
found = true
break
}
}
if !found {
n.ID = types.NodeID(len(initialNodes) + i + 1)
}
}
pm.filterRulesMap = make(map[types.NodeID][]tailcfg.FilterRule)
for _, n := range initialNodes {
_, err := pm.FilterForNode(n.View())
require.NoError(t, err)
}
initialCacheSize := len(pm.filterRulesMap)
require.Equal(t, len(initialNodes), initialCacheSize)
pm.invalidateAutogroupSelfCache(initialNodes.ViewSlice(), tt.newNodes.ViewSlice())
// Verify the expected number of cache entries were cleared
finalCacheSize := len(pm.filterRulesMap)
clearedEntries := initialCacheSize - finalCacheSize
require.Equal(t, tt.expectedCleared, clearedEntries, tt.description)
})
}
}
// TestInvalidateGlobalPolicyCache tests the cache invalidation logic for global policies.
func TestInvalidateGlobalPolicyCache(t *testing.T) {
mustIPPtr := func(s string) *netip.Addr {
ip := netip.MustParseAddr(s)
return &ip
}
tests := []struct {
name string
oldNodes types.Nodes
newNodes types.Nodes
initialCache map[types.NodeID][]tailcfg.FilterRule
expectedCacheAfter map[types.NodeID]bool // true = should exist, false = should not exist
}{
{
name: "node property changed - invalidates only that node",
oldNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
newNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.99")}, // Changed
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")}, // Unchanged
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
2: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: false, // Invalidated
2: true, // Preserved
},
},
{
name: "multiple nodes changed",
oldNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
&types.Node{ID: 3, IPv4: mustIPPtr("100.64.0.3")},
},
newNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.99")}, // Changed
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")}, // Unchanged
&types.Node{ID: 3, IPv4: mustIPPtr("100.64.0.88")}, // Changed
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
2: {},
3: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: false, // Invalidated
2: true, // Preserved
3: false, // Invalidated
},
},
{
name: "node deleted - removes from cache",
oldNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
newNodes: types.Nodes{
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
2: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: false, // Deleted
2: true, // Preserved
},
},
{
name: "node added - no cache invalidation needed",
oldNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
},
newNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")}, // New
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: true, // Preserved
2: false, // Not in cache (new node)
},
},
{
name: "no changes - preserves all cache",
oldNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
newNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
2: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: true,
2: true,
},
},
{
name: "routes changed - invalidates that node only",
oldNodes: types.Nodes{
&types.Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")},
},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
newNodes: types.Nodes{
&types.Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}, // Changed
},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
2: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: false, // Invalidated
2: true, // Preserved
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
pm := &PolicyManager{
nodes: tt.oldNodes.ViewSlice(),
filterRulesMap: tt.initialCache,
usesAutogroupSelf: false,
}
pm.invalidateGlobalPolicyCache(tt.newNodes.ViewSlice())
// Verify cache state
for nodeID, shouldExist := range tt.expectedCacheAfter {
_, exists := pm.filterRulesMap[nodeID]
require.Equal(t, shouldExist, exists, "node %d cache existence mismatch", nodeID)
}
})
}
}
// TestAutogroupSelfReducedVsUnreducedRules verifies that:
// 1. BuildPeerMap uses unreduced compiled rules for determining peer relationships
// 2. FilterForNode returns reduced compiled rules for packet filters
func TestAutogroupSelfReducedVsUnreducedRules(t *testing.T) {
user1 := types.User{Model: gorm.Model{ID: 1}, Name: "user1", Email: "user1@headscale.net"}
user2 := types.User{Model: gorm.Model{ID: 2}, Name: "user2", Email: "user2@headscale.net"}
users := types.Users{user1, user2}
// Create two nodes
node1 := node("node1", "100.64.0.1", "fd7a:115c:a1e0::1", user1, nil)
node1.ID = 1
node2 := node("node2", "100.64.0.2", "fd7a:115c:a1e0::2", user2, nil)
node2.ID = 2
nodes := types.Nodes{node1, node2}
// Policy with autogroup:self - all members can reach their own devices
policyStr := `{
"acls": [
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
}
]
}`
pm, err := NewPolicyManager([]byte(policyStr), users, nodes.ViewSlice())
require.NoError(t, err)
require.True(t, pm.usesAutogroupSelf, "policy should use autogroup:self")
// Test FilterForNode returns reduced rules
// For node1: should have rules where node1 is in destinations (its own IP)
filterNode1, err := pm.FilterForNode(nodes[0].View())
require.NoError(t, err)
// For node2: should have rules where node2 is in destinations (its own IP)
filterNode2, err := pm.FilterForNode(nodes[1].View())
require.NoError(t, err)
// FilterForNode should return reduced rules - verify they only contain the node's own IPs as destinations
// For node1, destinations should only be node1's IPs
node1IPs := []string{"100.64.0.1/32", "100.64.0.1", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::1"}
for _, rule := range filterNode1 {
for _, dst := range rule.DstPorts {
require.Contains(t, node1IPs, dst.IP,
"node1 filter should only contain node1's IPs as destinations")
}
}
// For node2, destinations should only be node2's IPs
node2IPs := []string{"100.64.0.2/32", "100.64.0.2", "fd7a:115c:a1e0::2/128", "fd7a:115c:a1e0::2"}
for _, rule := range filterNode2 {
for _, dst := range rule.DstPorts {
require.Contains(t, node2IPs, dst.IP,
"node2 filter should only contain node2's IPs as destinations")
}
}
// Test BuildPeerMap uses unreduced rules
peerMap := pm.BuildPeerMap(nodes.ViewSlice())
// According to the policy, user1 can reach autogroup:self (which expands to node1's own IPs for node1)
// So node1 should be able to reach itself, but since we're looking at peer relationships,
// node1 should NOT have itself in the peer map (nodes don't peer with themselves)
// node2 should also not have any peers since user2 has no rules allowing it to reach anyone
// Verify peer relationships based on unreduced rules
// With unreduced rules, BuildPeerMap can properly determine that:
// - node1 can access autogroup:self (its own IPs)
// - node2 cannot access node1
require.Empty(t, peerMap[node1.ID], "node1 should have no peers (can only reach itself)")
require.Empty(t, peerMap[node2.ID], "node2 should have no peers")
}
// TestAutogroupSelfWithOtherRules verifies that when separate ACL rules exist
// (one with autogroup:self, one with tag:router), the autogroup:self rule does
// not prevent the tag:router rule from working, i.e. autogroup:self must not
// interfere with other ACL rules.
func TestAutogroupSelfWithOtherRules(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "test-1", Email: "test-1@example.com"},
{Model: gorm.Model{ID: 2}, Name: "test-2", Email: "test-2@example.com"},
}
// test-1 has a regular device
test1Node := &types.Node{
ID: 1,
Hostname: "test-1-device",
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[0],
UserID: users[0].ID,
Hostinfo: &tailcfg.Hostinfo{},
}
// test-2 has a router device with tag:node-router
test2RouterNode := &types.Node{
ID: 2,
Hostname: "test-2-router",
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[1],
UserID: users[1].ID,
ForcedTags: []string{"tag:node-router"},
Hostinfo: &tailcfg.Hostinfo{},
}
nodes := types.Nodes{test1Node, test2RouterNode}
// This matches the exact policy from issue #2838:
// - First rule: autogroup:member -> autogroup:self (allows users to see their own devices)
// - Second rule: group:home -> tag:node-router (should allow group members to see router)
policy := `{
"groups": {
"group:home": ["test-1@example.com", "test-2@example.com"]
},
"tagOwners": {
"tag:node-router": ["group:home"]
},
"acls": [
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
},
{
"action": "accept",
"src": ["group:home"],
"dst": ["tag:node-router:*"]
}
]
}`
pm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())
require.NoError(t, err)
peerMap := pm.BuildPeerMap(nodes.ViewSlice())
// test-1 (in group:home) should see:
// 1. Their own node (from autogroup:self rule)
// 2. The router node (from group:home -> tag:node-router rule)
test1Peers := peerMap[test1Node.ID]
// Verify test-1 can see the router (group:home -> tag:node-router rule)
require.True(t, slices.ContainsFunc(test1Peers, func(n types.NodeView) bool {
return n.ID() == test2RouterNode.ID
}), "test-1 should see test-2's router via group:home -> tag:node-router rule, even when autogroup:self rule exists (issue #2838)")
// Verify that test-1 has filter rules (including autogroup:self and tag:node-router access)
rules, err := pm.FilterForNode(test1Node.View())
require.NoError(t, err)
require.NotEmpty(t, rules, "test-1 should have filter rules from both ACL rules")
}
View File
@@ -32,8 +32,6 @@ var policyJSONOpts = []json.Options{
const Wildcard = Asterix(0)
var ErrAutogroupSelfRequiresPerNodeResolution = errors.New("autogroup:self requires per-node resolution and cannot be resolved in this context")
type Asterix int
func (a Asterix) Validate() error {
@@ -487,7 +485,9 @@ const (
AutoGroupMember AutoGroup = "autogroup:member"
AutoGroupNonRoot AutoGroup = "autogroup:nonroot"
AutoGroupTagged AutoGroup = "autogroup:tagged"
AutoGroupSelf AutoGroup = "autogroup:self"
// These are not yet implemented.
AutoGroupSelf AutoGroup = "autogroup:self"
)
var autogroups = []AutoGroup{
@@ -495,7 +495,6 @@ var autogroups = []AutoGroup{
AutoGroupMember,
AutoGroupNonRoot,
AutoGroupTagged,
AutoGroupSelf,
}
func (ag AutoGroup) Validate() error {
@@ -591,12 +590,6 @@ func (ag AutoGroup) Resolve(p *Policy, users types.Users, nodes views.Slice[type
return build.IPSet()
case AutoGroupSelf:
// autogroup:self represents all devices owned by the same user.
// This cannot be resolved in the general context and should be handled
// specially during policy compilation per-node for security.
return nil, ErrAutogroupSelfRequiresPerNodeResolution
default:
return nil, fmt.Errorf("unknown autogroup %q", ag)
}
@@ -1593,11 +1586,11 @@ type Policy struct {
var (
// TODO(kradalby): Add these checks for tagOwners and autoApprovers.
autogroupForSrc = []AutoGroup{AutoGroupMember, AutoGroupTagged}
autogroupForDst = []AutoGroup{AutoGroupInternet, AutoGroupMember, AutoGroupTagged, AutoGroupSelf}
autogroupForDst = []AutoGroup{AutoGroupInternet, AutoGroupMember, AutoGroupTagged}
autogroupForSSHSrc = []AutoGroup{AutoGroupMember, AutoGroupTagged}
autogroupForSSHDst = []AutoGroup{AutoGroupMember, AutoGroupTagged, AutoGroupSelf}
autogroupForSSHDst = []AutoGroup{AutoGroupMember, AutoGroupTagged}
autogroupForSSHUser = []AutoGroup{AutoGroupNonRoot}
autogroupNotSupported = []AutoGroup{}
autogroupNotSupported = []AutoGroup{AutoGroupSelf}
)
func validateAutogroupSupported(ag *AutoGroup) error {
@@ -1621,10 +1614,6 @@ func validateAutogroupForSrc(src *AutoGroup) error {
return errors.New(`"autogroup:internet" used in source, it can only be used in ACL destinations`)
}
if src.Is(AutoGroupSelf) {
return errors.New(`"autogroup:self" used in source, it can only be used in ACL destinations`)
}
if !slices.Contains(autogroupForSrc, *src) {
return fmt.Errorf("autogroup %q is not supported for ACL sources, can be %v", *src, autogroupForSrc)
}
@@ -2123,40 +2112,3 @@ func validateProtocolPortCompatibility(protocol Protocol, destinations []AliasWi
return nil
}
// usesAutogroupSelf checks if the policy uses autogroup:self in any ACL or SSH rules.
func (p *Policy) usesAutogroupSelf() bool {
if p == nil {
return false
}
// Check ACL rules
for _, acl := range p.ACLs {
for _, src := range acl.Sources {
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
return true
}
}
for _, dest := range acl.Destinations {
if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
return true
}
}
}
// Check SSH rules
for _, ssh := range p.SSHs {
for _, src := range ssh.Sources {
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
return true
}
}
for _, dest := range ssh.Destinations {
if ag, ok := dest.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
return true
}
}
}
return false
}
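For context, a predicate like usesAutogroupSelf typically gates a per-node compilation path. A hypothetical sketch of such a caller (compilePerNodeFilters and compileGlobalFilter are illustrative names, not functions from this codebase):

```go
func compileFilters(pol *Policy, users types.Users, nodes views.Slice[types.NodeView]) ([]tailcfg.FilterRule, error) {
	if pol.usesAutogroupSelf() {
		// autogroup:self cannot be resolved once for the whole tailnet:
		// its destination set is different for every node (the owner's
		// own devices), so each node needs its own compiled filter.
		return compilePerNodeFilters(pol, users, nodes) // hypothetical
	}
	return compileGlobalFilter(pol, users, nodes) // hypothetical
}
```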
View File
@@ -459,7 +459,7 @@ func TestUnmarshalPolicy(t *testing.T) {
],
}
`,
wantErr: `AutoGroup is invalid, got: "autogroup:invalid", must be one of [autogroup:internet autogroup:member autogroup:nonroot autogroup:tagged autogroup:self]`,
wantErr: `AutoGroup is invalid, got: "autogroup:invalid", must be one of [autogroup:internet autogroup:member autogroup:nonroot autogroup:tagged]`,
},
{
name: "undefined-hostname-errors-2490",
@@ -1881,38 +1881,6 @@ func TestResolvePolicy(t *testing.T) {
mp("100.100.101.7/32"), // Multiple forced tags
},
},
{
name: "autogroup-self",
toResolve: ptr.To(AutoGroupSelf),
nodes: types.Nodes{
{
User: users["testuser"],
IPv4: ap("100.100.101.1"),
},
{
User: users["testuser2"],
IPv4: ap("100.100.101.2"),
},
{
User: users["testuser"],
ForcedTags: []string{"tag:test"},
IPv4: ap("100.100.101.3"),
},
{
User: users["testuser2"],
Hostinfo: &tailcfg.Hostinfo{
RequestTags: []string{"tag:test"},
},
IPv4: ap("100.100.101.4"),
},
},
pol: &Policy{
TagOwners: TagOwners{
Tag("tag:test"): Owners{ptr.To(Username("testuser@"))},
},
},
wantErr: "autogroup:self requires per-node resolution",
},
{
name: "autogroup-invalid",
toResolve: ptr.To(AutoGroup("autogroup:invalid")),
View File
@@ -197,12 +197,11 @@ func (m *mapSession) serveLongPoll() {
m.keepAliveTicker = time.NewTicker(m.keepAlive)
// Process the initial MapRequest to update node state (endpoints, hostinfo, etc.)
// This must be done BEFORE calling Connect() to ensure routes are properly synchronized.
// When nodes reconnect, they send their hostinfo with announced routes in the MapRequest.
// We need this data in NodeStore before Connect() sets up the primary routes, because
// SubnetRoutes() calculates the intersection of announced and approved routes. If we
// call Connect() first, SubnetRoutes() returns empty (no announced routes yet), causing
// the node to be incorrectly removed from AvailableRoutes.
// CRITICAL: This must be done BEFORE calling Connect() to ensure routes are properly
// synchronized. When nodes reconnect, they send their hostinfo with announced routes
// in the MapRequest. We need this data in NodeStore before Connect() sets up the
// primary routes, otherwise SubnetRoutes() returns empty and the node is removed
// from AvailableRoutes.
mapReqChange, err := m.h.state.UpdateNodeFromMapRequest(m.node.ID, m.req)
if err != nil {
m.errf(err, "failed to update node from initial MapRequest")
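In outline, the ordering the comment prescribes looks like this (a sketch only; the Connect call is shown schematically and its exact name and signature are assumed):

```go
// 1. Fold the MapRequest into the NodeStore first, so the node's announced
//    routes are recorded before any route selection runs.
mapReqChange, err := m.h.state.UpdateNodeFromMapRequest(m.node.ID, m.req)
if err != nil {
	m.errf(err, "failed to update node from initial MapRequest")
}

// 2. Only then mark the node connected: primary-route setup now sees the
//    announced routes, and SubnetRoutes() (announced ∩ approved) stays
//    non-empty for a reconnecting subnet router.
m.h.state.Connect(m.node.ID) // name and signature assumed
```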
View File
@@ -60,6 +60,9 @@ type DebugStringInfo struct {
// DebugOverview returns a comprehensive overview of the current state for debugging.
func (s *State) DebugOverview() string {
s.mu.RLock()
defer s.mu.RUnlock()
allNodes := s.nodeStore.ListNodes()
users, _ := s.ListAllUsers()
@@ -267,6 +270,9 @@ func (s *State) PolicyDebugString() string {
// DebugOverviewJSON returns a structured overview of the current state for debugging.
func (s *State) DebugOverviewJSON() DebugOverviewInfo {
s.mu.RLock()
defer s.mu.RUnlock()
allNodes := s.nodeStore.ListNodes()
users, _ := s.ListAllUsers()
View File
@@ -33,8 +33,8 @@ func TestNodeStoreDebugString(t *testing.T) {
store := NewNodeStore(nil, allowAllPeersFunc)
store.Start()
_ = store.PutNode(node1)
_ = store.PutNode(node2)
store.PutNode(node1)
store.PutNode(node2)
return store
},
View File
@@ -1,460 +0,0 @@
package state
import (
"net/netip"
"testing"
"time"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/types/ptr"
)
// TestEphemeralNodeDeleteWithConcurrentUpdate tests the race condition where UpdateNode and DeleteNode
// are called concurrently and may be batched together. This reproduces the issue where ephemeral nodes
// are not properly deleted during logout because UpdateNodeFromMapRequest returns a stale node view
// after the node has been deleted from the NodeStore.
func TestEphemeralNodeDeleteWithConcurrentUpdate(t *testing.T) {
// Create a simple test node
node := createTestNode(1, 1, "test-user", "test-node")
// Create NodeStore
store := NewNodeStore(nil, allowAllPeersFunc)
store.Start()
defer store.Stop()
// Put the node in the store
resultNode := store.PutNode(node)
require.True(t, resultNode.Valid(), "initial PutNode should return valid node")
// Verify node exists
retrievedNode, found := store.GetNode(node.ID)
require.True(t, found)
require.Equal(t, node.ID, retrievedNode.ID())
// Test scenario: UpdateNode is called, returns a node view from the batch,
// but in the same batch a DeleteNode removes the node.
// This simulates what happens when:
// 1. UpdateNodeFromMapRequest calls UpdateNode and gets back updatedNode
// 2. At the same time, handleLogout calls DeleteNode
// 3. They get batched together: [UPDATE, DELETE]
// 4. UPDATE modifies the node, DELETE removes it
// 5. UpdateNode returns a node view based on the state AFTER both operations
// 6. If DELETE came after UPDATE, the returned node should be invalid
done := make(chan bool, 2)
var updatedNode types.NodeView
var updateOk bool
// Goroutine 1: UpdateNode (simulates UpdateNodeFromMapRequest)
go func() {
updatedNode, updateOk = store.UpdateNode(node.ID, func(n *types.Node) {
n.LastSeen = ptr.To(time.Now())
})
done <- true
}()
// Goroutine 2: DeleteNode (simulates handleLogout for ephemeral node)
go func() {
// Small delay to increase chance of batching together
time.Sleep(1 * time.Millisecond)
store.DeleteNode(node.ID)
done <- true
}()
// Wait for both operations
<-done
<-done
// Give batching time to complete
time.Sleep(50 * time.Millisecond)
// The key assertion: if UpdateNode and DeleteNode were batched together
// with DELETE after UPDATE, then UpdateNode should return an invalid node
// OR it should return a valid node but the node should no longer exist in the store
_, found = store.GetNode(node.ID)
assert.False(t, found, "node should be deleted from NodeStore")
// If the update happened before delete in the batch, the returned node might be invalid
if updateOk {
t.Logf("UpdateNode returned ok=true, valid=%v", updatedNode.Valid())
// This is the bug scenario - UpdateNode thinks it succeeded but node is gone
if updatedNode.Valid() {
t.Logf("WARNING: UpdateNode returned valid node but node was deleted - this indicates the race condition bug")
}
} else {
t.Logf("UpdateNode correctly returned ok=false (node deleted in same batch)")
}
}
// TestUpdateNodeReturnsInvalidWhenDeletedInSameBatch specifically tests that when
// UpdateNode and DeleteNode are in the same batch with DELETE after UPDATE,
// the UpdateNode should return an invalid node view.
func TestUpdateNodeReturnsInvalidWhenDeletedInSameBatch(t *testing.T) {
node := createTestNode(2, 1, "test-user", "test-node-2")
store := NewNodeStore(nil, allowAllPeersFunc)
store.Start()
defer store.Stop()
// Put node in store
_ = store.PutNode(node)
// Simulate the exact sequence: UpdateNode gets queued, then DeleteNode gets queued,
// they batch together, and we check what UpdateNode returns
resultChan := make(chan struct {
node types.NodeView
ok bool
})
// Start UpdateNode - it will block until batch is applied
go func() {
node, ok := store.UpdateNode(node.ID, func(n *types.Node) {
n.LastSeen = ptr.To(time.Now())
})
resultChan <- struct {
node types.NodeView
ok bool
}{node, ok}
}()
// Give UpdateNode a moment to queue its work
time.Sleep(5 * time.Millisecond)
// Now queue DeleteNode - should batch with the UPDATE
store.DeleteNode(node.ID)
// Get the result from UpdateNode
result := <-resultChan
// Wait for batch to complete
time.Sleep(50 * time.Millisecond)
// Node should be deleted
_, found := store.GetNode(node.ID)
assert.False(t, found, "node should be deleted")
// The critical check: what did UpdateNode return?
// After the commit c6b09289988f34398eb3157e31ba092eb8721a9f,
// UpdateNode returns the node state from the batch.
// If DELETE came after UPDATE in the batch, the node doesn't exist anymore,
// so UpdateNode should return (invalid, false)
t.Logf("UpdateNode returned: ok=%v, valid=%v", result.ok, result.node.Valid())
// This is the expected behavior - if node was deleted in same batch,
// UpdateNode should return invalid node
if result.ok && result.node.Valid() {
t.Error("BUG: UpdateNode returned valid node even though it was deleted in same batch")
}
}
// TestPersistNodeToDBPreventsRaceCondition tests that persistNodeToDB correctly handles
// the race condition where a node is deleted after UpdateNode returns but before
// persistNodeToDB is called. This reproduces the ephemeral node deletion bug.
func TestPersistNodeToDBPreventsRaceCondition(t *testing.T) {
node := createTestNode(3, 1, "test-user", "test-node-3")
store := NewNodeStore(nil, allowAllPeersFunc)
store.Start()
defer store.Stop()
// Put node in store
_ = store.PutNode(node)
// Simulate UpdateNode being called
updatedNode, ok := store.UpdateNode(node.ID, func(n *types.Node) {
n.LastSeen = ptr.To(time.Now())
})
require.True(t, ok, "UpdateNode should succeed")
require.True(t, updatedNode.Valid(), "UpdateNode should return valid node")
// Now delete the node (simulating ephemeral logout happening concurrently)
store.DeleteNode(node.ID)
// Wait for deletion to complete
time.Sleep(50 * time.Millisecond)
// Verify node is deleted
_, found := store.GetNode(node.ID)
require.False(t, found, "node should be deleted")
// Now try to use the updatedNode from before the deletion
// In the old code, this would re-insert the node into the database
// With our fix, GetNode check in persistNodeToDB should prevent this
// Simulate what persistNodeToDB does - check if node still exists
_, exists := store.GetNode(updatedNode.ID())
if !exists {
t.Log("SUCCESS: persistNodeToDB check would prevent re-insertion of deleted node")
} else {
t.Error("BUG: Node still exists in NodeStore after deletion")
}
// The key assertion: after deletion, attempting to persist the old updatedNode
// should fail because the node no longer exists in NodeStore
assert.False(t, exists, "persistNodeToDB should detect node was deleted and refuse to persist")
}
// TestEphemeralNodeLogoutRaceCondition tests the specific race condition that occurs
// when an ephemeral node logs out. This reproduces the bug where:
// 1. UpdateNodeFromMapRequest calls UpdateNode and receives a node view
// 2. Concurrently, handleLogout is called for the ephemeral node and calls DeleteNode
// 3. UpdateNode and DeleteNode get batched together
// 4. If UpdateNode's result is used to call persistNodeToDB after the deletion,
// the node could be re-inserted into the database even though it was deleted
func TestEphemeralNodeLogoutRaceCondition(t *testing.T) {
ephemeralNode := createTestNode(4, 1, "test-user", "ephemeral-node")
ephemeralNode.AuthKey = &types.PreAuthKey{
ID: 1,
Key: "test-key",
Ephemeral: true,
}
store := NewNodeStore(nil, allowAllPeersFunc)
store.Start()
defer store.Stop()
// Put ephemeral node in store
_ = store.PutNode(ephemeralNode)
// Simulate concurrent operations:
// 1. UpdateNode (from UpdateNodeFromMapRequest during polling)
// 2. DeleteNode (from handleLogout when client sends logout request)
var updatedNode types.NodeView
var updateOk bool
done := make(chan bool, 2)
// Goroutine 1: UpdateNode (simulates UpdateNodeFromMapRequest)
go func() {
updatedNode, updateOk = store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {
n.LastSeen = ptr.To(time.Now())
})
done <- true
}()
// Goroutine 2: DeleteNode (simulates handleLogout for ephemeral node)
go func() {
time.Sleep(1 * time.Millisecond) // Slight delay to batch operations
store.DeleteNode(ephemeralNode.ID)
done <- true
}()
// Wait for both operations
<-done
<-done
// Give batching time to complete
time.Sleep(50 * time.Millisecond)
// Node should be deleted from store
_, found := store.GetNode(ephemeralNode.ID)
assert.False(t, found, "ephemeral node should be deleted from NodeStore")
// Critical assertion: if UpdateNode returned before DeleteNode completed,
// the updatedNode might be valid but the node is actually deleted.
// This is the bug - UpdateNodeFromMapRequest would get a valid node,
// then try to persist it, re-inserting the deleted ephemeral node.
if updateOk && updatedNode.Valid() {
t.Log("UpdateNode returned valid node, but node is deleted - this is the race condition")
// In the real code, this would cause persistNodeToDB to be called with updatedNode
// The fix in persistNodeToDB checks if the node still exists:
_, stillExists := store.GetNode(updatedNode.ID())
assert.False(t, stillExists, "persistNodeToDB should check NodeStore and find node deleted")
} else if !updateOk || !updatedNode.Valid() {
t.Log("UpdateNode correctly returned invalid/not-ok result (delete happened in same batch)")
}
}
// TestUpdateNodeFromMapRequestEphemeralLogoutSequence tests the exact sequence
// that causes ephemeral node logout failures:
// 1. Client sends MapRequest with updated endpoint info
// 2. UpdateNodeFromMapRequest starts processing, calls UpdateNode
// 3. Client sends logout request (past expiry)
// 4. handleLogout calls DeleteNode for ephemeral node
// 5. UpdateNode and DeleteNode batch together
// 6. UpdateNode returns a valid node (from before delete in batch)
// 7. persistNodeToDB is called with the stale valid node
// 8. Node gets re-inserted into database instead of staying deleted
func TestUpdateNodeFromMapRequestEphemeralLogoutSequence(t *testing.T) {
ephemeralNode := createTestNode(5, 1, "test-user", "ephemeral-node-5")
ephemeralNode.AuthKey = &types.PreAuthKey{
ID: 2,
Key: "test-key-2",
Ephemeral: true,
}
store := NewNodeStore(nil, allowAllPeersFunc)
store.Start()
defer store.Stop()
// Initial state: ephemeral node exists
_ = store.PutNode(ephemeralNode)
// Step 1: UpdateNodeFromMapRequest calls UpdateNode
// (simulating client sending MapRequest with endpoint updates)
updateStarted := make(chan bool)
var updatedNode types.NodeView
var updateOk bool
go func() {
updateStarted <- true
updatedNode, updateOk = store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {
n.LastSeen = ptr.To(time.Now())
endpoint := netip.MustParseAddrPort("10.0.0.1:41641")
n.Endpoints = []netip.AddrPort{endpoint}
})
}()
<-updateStarted
// Small delay to ensure UpdateNode is queued
time.Sleep(5 * time.Millisecond)
// Step 2: Logout happens - handleLogout calls DeleteNode
// (simulating client sending logout with past expiry)
store.DeleteNode(ephemeralNode.ID)
// Wait for batching to complete
time.Sleep(50 * time.Millisecond)
// Step 3: Check results
_, nodeExists := store.GetNode(ephemeralNode.ID)
assert.False(t, nodeExists, "ephemeral node must be deleted after logout")
// Step 4: Simulate what happens if we try to persist the updatedNode
if updateOk && updatedNode.Valid() {
// This is the problematic path - UpdateNode returned a valid node
// but the node was deleted in the same batch
t.Log("UpdateNode returned valid node even though node was deleted")
// The fix: persistNodeToDB must check NodeStore before persisting
_, checkExists := store.GetNode(updatedNode.ID())
if checkExists {
t.Error("BUG: Node still exists in NodeStore after deletion - should be impossible")
} else {
t.Log("SUCCESS: persistNodeToDB would detect node is deleted and refuse to persist")
}
} else {
t.Log("UpdateNode correctly indicated node was deleted (returned invalid or not-ok)")
}
// Final assertion: node must not exist
_, finalExists := store.GetNode(ephemeralNode.ID)
assert.False(t, finalExists, "ephemeral node must remain deleted")
}
// TestUpdateNodeDeletedInSameBatchReturnsInvalid specifically tests that when
// UpdateNode and DeleteNode are batched together with DELETE after UPDATE,
// UpdateNode returns ok=false to indicate the node was deleted.
func TestUpdateNodeDeletedInSameBatchReturnsInvalid(t *testing.T) {
node := createTestNode(6, 1, "test-user", "test-node-6")
store := NewNodeStore(nil, allowAllPeersFunc)
store.Start()
defer store.Stop()
// Put node in store
_ = store.PutNode(node)
// Queue UpdateNode
updateDone := make(chan struct {
node types.NodeView
ok bool
})
go func() {
updatedNode, ok := store.UpdateNode(node.ID, func(n *types.Node) {
n.LastSeen = ptr.To(time.Now())
})
updateDone <- struct {
node types.NodeView
ok bool
}{updatedNode, ok}
}()
// Small delay to ensure UpdateNode is queued
time.Sleep(5 * time.Millisecond)
// Queue DeleteNode - should batch with UpdateNode
store.DeleteNode(node.ID)
// Get UpdateNode result
result := <-updateDone
// Wait for batch to complete
time.Sleep(50 * time.Millisecond)
// Node should be deleted
_, exists := store.GetNode(node.ID)
assert.False(t, exists, "node should be deleted from store")
// UpdateNode should indicate the node was deleted
// After c6b09289988f34398eb3157e31ba092eb8721a9f, when UPDATE and DELETE
// are in the same batch with DELETE after UPDATE, UpdateNode returns
// the state after the batch is applied - which means the node doesn't exist
assert.False(t, result.ok, "UpdateNode should return ok=false when node deleted in same batch")
assert.False(t, result.node.Valid(), "UpdateNode should return invalid node when node deleted in same batch")
}
// TestPersistNodeToDBChecksNodeStoreBeforePersist verifies that persistNodeToDB
// checks if the node still exists in NodeStore before persisting to database.
// This prevents the race condition where:
// 1. UpdateNodeFromMapRequest calls UpdateNode and gets a valid node
// 2. Ephemeral node logout calls DeleteNode
// 3. UpdateNode and DeleteNode batch together
// 4. UpdateNode returns a valid node (from before delete in batch)
// 5. UpdateNodeFromMapRequest calls persistNodeToDB with the stale node
// 6. persistNodeToDB must detect the node is deleted and refuse to persist
func TestPersistNodeToDBChecksNodeStoreBeforePersist(t *testing.T) {
ephemeralNode := createTestNode(7, 1, "test-user", "ephemeral-node-7")
ephemeralNode.AuthKey = &types.PreAuthKey{
ID: 3,
Key: "test-key-3",
Ephemeral: true,
}
store := NewNodeStore(nil, allowAllPeersFunc)
store.Start()
defer store.Stop()
// Put node in store
_ = store.PutNode(ephemeralNode)
// Simulate the race:
// 1. UpdateNode is called (from UpdateNodeFromMapRequest)
updatedNode, ok := store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {
n.LastSeen = ptr.To(time.Now())
})
require.True(t, ok, "UpdateNode should succeed")
require.True(t, updatedNode.Valid(), "UpdateNode should return valid node")
// 2. Node is deleted (from handleLogout for ephemeral node)
store.DeleteNode(ephemeralNode.ID)
// Wait for deletion
time.Sleep(50 * time.Millisecond)
// 3. Verify node is deleted from store
_, exists := store.GetNode(ephemeralNode.ID)
require.False(t, exists, "node should be deleted from NodeStore")
// 4. Simulate what persistNodeToDB does - check if node still exists
// The fix in persistNodeToDB checks NodeStore before persisting:
// if !exists { return error }
// This prevents re-inserting the deleted node into the database
// Verify the node from UpdateNode is valid but node is gone from store
assert.True(t, updatedNode.Valid(), "UpdateNode returned a valid node view")
_, stillExists := store.GetNode(updatedNode.ID())
assert.False(t, stillExists, "but node should be deleted from NodeStore")
// This is the critical test: persistNodeToDB must check NodeStore
// and refuse to persist if the node doesn't exist anymore
// The actual persistNodeToDB implementation does:
// _, exists := s.nodeStore.GetNode(node.ID())
// if !exists { return error }
}
View File
@@ -10,9 +10,9 @@ import (
"tailscale.com/tailcfg"
)
// netInfoFromMapRequest determines the correct NetInfo to use.
// NetInfoFromMapRequest determines the correct NetInfo to use.
// Returns the NetInfo that should be used for this request.
func netInfoFromMapRequest(
func NetInfoFromMapRequest(
nodeID types.NodeID,
currentHostinfo *tailcfg.Hostinfo,
reqHostinfo *tailcfg.Hostinfo,
View File
@@ -61,7 +61,7 @@ func TestNetInfoFromMapRequest(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := netInfoFromMapRequest(nodeID, tt.currentHostinfo, tt.reqHostinfo)
result := NetInfoFromMapRequest(nodeID, tt.currentHostinfo, tt.reqHostinfo)
if tt.expectNetInfo == nil {
assert.Nil(t, result, "expected nil NetInfo")
@@ -100,40 +100,14 @@ func TestNetInfoPreservationInRegistrationFlow(t *testing.T) {
}
// BUG: Using the node being modified (no NetInfo) instead of existing node (has NetInfo)
buggyResult := netInfoFromMapRequest(nodeID, nodeBeingModifiedHostinfo, newRegistrationHostinfo)
buggyResult := NetInfoFromMapRequest(nodeID, nodeBeingModifiedHostinfo, newRegistrationHostinfo)
assert.Nil(t, buggyResult, "Bug: Should return nil when using wrong hostinfo reference")
// CORRECT: Using the existing node's hostinfo (has NetInfo)
correctResult := netInfoFromMapRequest(nodeID, existingNodeHostinfo, newRegistrationHostinfo)
correctResult := NetInfoFromMapRequest(nodeID, existingNodeHostinfo, newRegistrationHostinfo)
assert.NotNil(t, correctResult, "Fix: Should preserve NetInfo when using correct hostinfo reference")
assert.Equal(t, 5, correctResult.PreferredDERP, "Should preserve the DERP region from existing node")
})
t.Run("new_node_creation_for_different_user_should_preserve_netinfo", func(t *testing.T) {
// This test covers the scenario where:
// 1. A node exists for user1 with NetInfo
// 2. The same machine logs in as user2 (different user)
// 3. A NEW node is created for user2 (pre-auth key flow)
// 4. The new node should preserve NetInfo from the old node
// Existing node for user1 with NetInfo
existingNodeUser1Hostinfo := &tailcfg.Hostinfo{
Hostname: "test-node",
NetInfo: &tailcfg.NetInfo{PreferredDERP: 7},
}
// New registration request for user2 (no NetInfo yet)
newNodeUser2Hostinfo := &tailcfg.Hostinfo{
Hostname: "test-node",
OS: "linux",
// NetInfo is nil - registration request doesn't include it
}
// When creating a new node for user2, we should preserve NetInfo from user1's node
result := netInfoFromMapRequest(types.NodeID(2), existingNodeUser1Hostinfo, newNodeUser2Hostinfo)
assert.NotNil(t, result, "New node for user2 should preserve NetInfo from user1's node")
assert.Equal(t, 7, result.PreferredDERP, "Should preserve DERP region from existing node")
})
}
// Simple helper function for tests
View File
@@ -15,15 +15,14 @@ import (
)
const (
batchSize = 100
batchSize = 10
batchTimeout = 500 * time.Millisecond
)
const (
put = 1
del = 2
update = 3
rebuildPeerMaps = 4
put = 1
del = 2
update = 3
)
const prometheusNamespace = "headscale"
@@ -122,11 +121,10 @@ type Snapshot struct {
nodesByID map[types.NodeID]types.Node
// calculated from nodesByID
nodesByNodeKey map[key.NodePublic]types.NodeView
nodesByMachineKey map[key.MachinePublic]map[types.UserID]types.NodeView
peersByNode map[types.NodeID][]types.NodeView
nodesByUser map[types.UserID][]types.NodeView
allNodes []types.NodeView
nodesByNodeKey map[key.NodePublic]types.NodeView
peersByNode map[types.NodeID][]types.NodeView
nodesByUser map[types.UserID][]types.NodeView
allNodes []types.NodeView
}
// PeersFunc is a function that takes a list of nodes and returns a map
@@ -137,31 +135,26 @@ type PeersFunc func(nodes []types.NodeView) map[types.NodeID][]types.NodeView
// work represents a single operation to be performed on the NodeStore.
type work struct {
op int
nodeID types.NodeID
node types.Node
updateFn UpdateNodeFunc
result chan struct{}
nodeResult chan types.NodeView // Channel to return the resulting node after batch application
// For rebuildPeerMaps operation
rebuildResult chan struct{}
op int
nodeID types.NodeID
node types.Node
updateFn UpdateNodeFunc
result chan struct{}
}
// PutNode adds or updates a node in the store.
// If the node already exists, it will be replaced.
// If the node does not exist, it will be added.
// This is a blocking operation that waits for the write to complete.
// Returns the resulting node after all modifications in the batch have been applied.
func (s *NodeStore) PutNode(n types.Node) types.NodeView {
func (s *NodeStore) PutNode(n types.Node) {
timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("put"))
defer timer.ObserveDuration()
work := work{
op: put,
nodeID: n.ID,
node: n,
result: make(chan struct{}),
nodeResult: make(chan types.NodeView, 1),
op: put,
nodeID: n.ID,
node: n,
result: make(chan struct{}),
}
nodeStoreQueueDepth.Inc()
@@ -169,10 +162,7 @@ func (s *NodeStore) PutNode(n types.Node) types.NodeView {
<-work.result
nodeStoreQueueDepth.Dec()
resultNode := <-work.nodeResult
nodeStoreOperations.WithLabelValues("put").Inc()
return resultNode
}
// UpdateNodeFunc is a function type that takes a pointer to a Node and modifies it.
@@ -183,7 +173,6 @@ type UpdateNodeFunc func(n *types.Node)
// This is analogous to a database "transaction", or, the caller should
// rather collect all data they want to change, and then call this function.
// Fewer calls are better.
// Returns the resulting node after all modifications in the batch have been applied.
//
// TODO(kradalby): Technically we could have a version of this that modifies the node
// in the current snapshot if _we know_ that the change will not affect the peer relationships.
@@ -192,16 +181,15 @@ type UpdateNodeFunc func(n *types.Node)
// a lock around the nodesByID map to ensure that no other writes are happening
// while we are modifying the node, which means we would need to implement read-write locks
// on all read operations.
func (s *NodeStore) UpdateNode(nodeID types.NodeID, updateFn func(n *types.Node)) (types.NodeView, bool) {
func (s *NodeStore) UpdateNode(nodeID types.NodeID, updateFn func(n *types.Node)) {
timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("update"))
defer timer.ObserveDuration()
work := work{
op: update,
nodeID: nodeID,
updateFn: updateFn,
result: make(chan struct{}),
nodeResult: make(chan types.NodeView, 1),
op: update,
nodeID: nodeID,
updateFn: updateFn,
result: make(chan struct{}),
}
nodeStoreQueueDepth.Inc()
@@ -209,11 +197,7 @@ func (s *NodeStore) UpdateNode(nodeID types.NodeID, updateFn func(n *types.Node)
<-work.result
nodeStoreQueueDepth.Dec()
resultNode := <-work.nodeResult
nodeStoreOperations.WithLabelValues("update").Inc()
// Return the node and whether it exists (is valid)
return resultNode, resultNode.Valid()
}
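A minimal usage sketch of the "collect changes, then call once" pattern the doc comment recommends (identifiers as in the surrounding code; field values illustrative):

```go
// One logical change set → one UpdateNode call. The store batches queued
// writes, so bundling related field updates lands them in a single
// snapshot swap instead of three.
store.UpdateNode(nodeID, func(n *types.Node) {
	n.Hostname = "new-hostname"
	n.GivenName = "new-hostname"
	n.LastSeen = ptr.To(time.Now())
})
```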
// DeleteNode removes a node from the store by its ID.
@@ -298,39 +282,18 @@ func (s *NodeStore) applyBatch(batch []work) {
nodes := make(map[types.NodeID]types.Node)
maps.Copy(nodes, s.data.Load().nodesByID)
// Track which work items need node results
nodeResultRequests := make(map[types.NodeID][]*work)
// Track rebuildPeerMaps operations
var rebuildOps []*work
for i := range batch {
w := &batch[i]
for _, w := range batch {
switch w.op {
case put:
nodes[w.nodeID] = w.node
if w.nodeResult != nil {
nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
}
case update:
// Update the specific node identified by nodeID
if n, exists := nodes[w.nodeID]; exists {
w.updateFn(&n)
nodes[w.nodeID] = n
}
if w.nodeResult != nil {
nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
}
case del:
delete(nodes, w.nodeID)
// For delete operations, send an invalid NodeView if requested
if w.nodeResult != nil {
nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
}
case rebuildPeerMaps:
// rebuildPeerMaps doesn't modify nodes, it just forces the snapshot rebuild
// below to recalculate peer relationships using the current peersFunc
rebuildOps = append(rebuildOps, w)
}
}
@@ -340,33 +303,8 @@ func (s *NodeStore) applyBatch(batch []work) {
// Update node count gauge
nodeStoreNodesCount.Set(float64(len(nodes)))
// Send the resulting nodes to all work items that requested them
for nodeID, workItems := range nodeResultRequests {
if node, exists := nodes[nodeID]; exists {
nodeView := node.View()
for _, w := range workItems {
w.nodeResult <- nodeView
close(w.nodeResult)
}
} else {
// Node was deleted or doesn't exist
for _, w := range workItems {
w.nodeResult <- types.NodeView{} // Send invalid view
close(w.nodeResult)
}
}
}
// Signal completion for rebuildPeerMaps operations
for _, w := range rebuildOps {
close(w.rebuildResult)
}
// Signal completion for all other work items
for _, w := range batch {
if w.op != rebuildPeerMaps {
close(w.result)
}
close(w.result)
}
}
@@ -385,10 +323,9 @@ func snapshotFromNodes(nodes map[types.NodeID]types.Node, peersFunc PeersFunc) S
}
newSnap := Snapshot{
nodesByID: nodes,
allNodes: allNodes,
nodesByNodeKey: make(map[key.NodePublic]types.NodeView),
nodesByMachineKey: make(map[key.MachinePublic]map[types.UserID]types.NodeView),
nodesByID: nodes,
allNodes: allNodes,
nodesByNodeKey: make(map[key.NodePublic]types.NodeView),
// peersByNode is most likely the most expensive operation,
// it will use the list of all nodes, combined with the
@@ -402,19 +339,11 @@ func snapshotFromNodes(nodes map[types.NodeID]types.Node, peersFunc PeersFunc) S
nodesByUser: make(map[types.UserID][]types.NodeView),
}
// Build nodesByUser, nodesByNodeKey, and nodesByMachineKey maps
// Build nodesByUser and nodesByNodeKey maps
for _, n := range nodes {
nodeView := n.View()
userID := types.UserID(n.UserID)
newSnap.nodesByUser[userID] = append(newSnap.nodesByUser[userID], nodeView)
newSnap.nodesByUser[types.UserID(n.UserID)] = append(newSnap.nodesByUser[types.UserID(n.UserID)], nodeView)
newSnap.nodesByNodeKey[n.NodeKey] = nodeView
// Build machine key index
if newSnap.nodesByMachineKey[n.MachineKey] == nil {
newSnap.nodesByMachineKey[n.MachineKey] = make(map[types.UserID]types.NodeView)
}
newSnap.nodesByMachineKey[n.MachineKey][userID] = nodeView
}
return newSnap
@@ -453,40 +382,19 @@ func (s *NodeStore) GetNodeByNodeKey(nodeKey key.NodePublic) (types.NodeView, bo
return nodeView, exists
}
// GetNodeByMachineKey returns a node by its machine key and user ID. The bool indicates if the node exists.
func (s *NodeStore) GetNodeByMachineKey(machineKey key.MachinePublic, userID types.UserID) (types.NodeView, bool) {
// GetNodeByMachineKey returns a node by its machine key. The bool indicates if the node exists.
func (s *NodeStore) GetNodeByMachineKey(machineKey key.MachinePublic) (types.NodeView, bool) {
timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("get_by_machine_key"))
defer timer.ObserveDuration()
nodeStoreOperations.WithLabelValues("get_by_machine_key").Inc()
snapshot := s.data.Load()
if userMap, exists := snapshot.nodesByMachineKey[machineKey]; exists {
if node, exists := userMap[userID]; exists {
return node, true
}
}
return types.NodeView{}, false
}
// GetNodeByMachineKeyAnyUser returns the first node with the given machine key,
// regardless of which user it belongs to. This is useful for scenarios like
// transferring a node to a different user when re-authenticating with a
// different user's auth key.
// If multiple nodes exist with the same machine key (different users), the
// first one found is returned (order is not guaranteed).
func (s *NodeStore) GetNodeByMachineKeyAnyUser(machineKey key.MachinePublic) (types.NodeView, bool) {
timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("get_by_machine_key_any_user"))
defer timer.ObserveDuration()
nodeStoreOperations.WithLabelValues("get_by_machine_key_any_user").Inc()
snapshot := s.data.Load()
if userMap, exists := snapshot.nodesByMachineKey[machineKey]; exists {
// Return the first node found (order not guaranteed due to map iteration)
for _, node := range userMap {
return node, true
// We don't have a byMachineKey map, so we need to iterate
// This could be optimized by adding a byMachineKey map if this becomes a hot path
for _, node := range snapshot.nodesByID {
if node.MachineKey == machineKey {
return node.View(), true
}
}
@@ -563,22 +471,6 @@ func (s *NodeStore) ListPeers(id types.NodeID) views.Slice[types.NodeView] {
return views.SliceOf(s.data.Load().peersByNode[id])
}
// RebuildPeerMaps rebuilds the peer relationship map using the current peersFunc.
// This must be called after policy changes because peersFunc uses PolicyManager's
// filters to determine which nodes can see each other. Without rebuilding, the
// peer map would use stale filter data until the next node add/delete.
func (s *NodeStore) RebuildPeerMaps() {
result := make(chan struct{})
w := work{
op: rebuildPeerMaps,
rebuildResult: result,
}
s.writeQueue <- w
<-result
}
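For context, the call pattern this (now removed) method supported looked roughly like the following; the SetPolicy caller shape is assumed for illustration:

```go
// A policy swap invalidates the filters captured by peersFunc, so the
// cached peer relationships had to be recomputed before the next read.
if _, err := state.SetPolicy(newPolicyBytes); err != nil { // caller shape assumed
	return err
}
nodeStore.RebuildPeerMaps() // blocks until the rebuilt snapshot is visible
```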
// ListNodesByUser returns a slice of all nodes for a given user ID.
func (s *NodeStore) ListNodesByUser(uid types.UserID) views.Slice[types.NodeView] {
timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("list_by_user"))
View File
@@ -1,11 +1,7 @@
package state
import (
"context"
"fmt"
"net/netip"
"runtime"
"sync"
"testing"
"time"
@@ -253,9 +249,7 @@ func TestNodeStoreOperations(t *testing.T) {
name: "add first node",
action: func(store *NodeStore) {
node := createTestNode(1, 1, "user1", "node1")
resultNode := store.PutNode(node)
assert.True(t, resultNode.Valid(), "PutNode should return valid node")
assert.Equal(t, node.ID, resultNode.ID())
store.PutNode(node)
snapshot := store.data.Load()
assert.Len(t, snapshot.nodesByID, 1)
@@ -294,9 +288,7 @@ func TestNodeStoreOperations(t *testing.T) {
name: "add second node same user",
action: func(store *NodeStore) {
node2 := createTestNode(2, 1, "user1", "node2")
resultNode := store.PutNode(node2)
assert.True(t, resultNode.Valid(), "PutNode should return valid node")
assert.Equal(t, types.NodeID(2), resultNode.ID())
store.PutNode(node2)
snapshot := store.data.Load()
assert.Len(t, snapshot.nodesByID, 2)
@@ -316,9 +308,7 @@ func TestNodeStoreOperations(t *testing.T) {
name: "add third node different user",
action: func(store *NodeStore) {
node3 := createTestNode(3, 2, "user2", "node3")
resultNode := store.PutNode(node3)
assert.True(t, resultNode.Valid(), "PutNode should return valid node")
assert.Equal(t, types.NodeID(3), resultNode.ID())
store.PutNode(node3)
snapshot := store.data.Load()
assert.Len(t, snapshot.nodesByID, 3)
@@ -419,14 +409,10 @@ func TestNodeStoreOperations(t *testing.T) {
{
name: "update node hostname",
action: func(store *NodeStore) {
resultNode, ok := store.UpdateNode(1, func(n *types.Node) {
store.UpdateNode(1, func(n *types.Node) {
n.Hostname = "updated-node1"
n.GivenName = "updated-node1"
})
assert.True(t, ok, "UpdateNode should return true for existing node")
assert.True(t, resultNode.Valid(), "Result node should be valid")
assert.Equal(t, "updated-node1", resultNode.Hostname())
assert.Equal(t, "updated-node1", resultNode.GivenName())
snapshot := store.data.Load()
assert.Equal(t, "updated-node1", snapshot.nodesByID[1].Hostname)
@@ -450,14 +436,10 @@ func TestNodeStoreOperations(t *testing.T) {
name: "add nodes with odd-even filtering",
action: func(store *NodeStore) {
// Add nodes in sequence
n1 := store.PutNode(createTestNode(1, 1, "user1", "node1"))
assert.True(t, n1.Valid())
n2 := store.PutNode(createTestNode(2, 2, "user2", "node2"))
assert.True(t, n2.Valid())
n3 := store.PutNode(createTestNode(3, 3, "user3", "node3"))
assert.True(t, n3.Valid())
n4 := store.PutNode(createTestNode(4, 4, "user4", "node4"))
assert.True(t, n4.Valid())
store.PutNode(createTestNode(1, 1, "user1", "node1"))
store.PutNode(createTestNode(2, 2, "user2", "node2"))
store.PutNode(createTestNode(3, 3, "user3", "node3"))
store.PutNode(createTestNode(4, 4, "user4", "node4"))
snapshot := store.data.Load()
assert.Len(t, snapshot.nodesByID, 4)
@@ -496,328 +478,6 @@ func TestNodeStoreOperations(t *testing.T) {
},
},
},
{
name: "test batch modifications return correct node state",
setupFunc: func(t *testing.T) *NodeStore {
node1 := createTestNode(1, 1, "user1", "node1")
node2 := createTestNode(2, 1, "user1", "node2")
initialNodes := types.Nodes{&node1, &node2}
return NewNodeStore(initialNodes, allowAllPeersFunc)
},
steps: []testStep{
{
name: "verify initial state",
action: func(store *NodeStore) {
snapshot := store.data.Load()
assert.Len(t, snapshot.nodesByID, 2)
assert.Equal(t, "node1", snapshot.nodesByID[1].Hostname)
assert.Equal(t, "node2", snapshot.nodesByID[2].Hostname)
},
},
{
name: "concurrent updates should reflect all batch changes",
action: func(store *NodeStore) {
// Start multiple updates that will be batched together
done1 := make(chan struct{})
done2 := make(chan struct{})
done3 := make(chan struct{})
var resultNode1, resultNode2 types.NodeView
var newNode3 types.NodeView
var ok1, ok2 bool
// These should all be processed in the same batch
go func() {
resultNode1, ok1 = store.UpdateNode(1, func(n *types.Node) {
n.Hostname = "batch-updated-node1"
n.GivenName = "batch-given-1"
})
close(done1)
}()
go func() {
resultNode2, ok2 = store.UpdateNode(2, func(n *types.Node) {
n.Hostname = "batch-updated-node2"
n.GivenName = "batch-given-2"
})
close(done2)
}()
go func() {
node3 := createTestNode(3, 1, "user1", "node3")
newNode3 = store.PutNode(node3)
close(done3)
}()
// Wait for all operations to complete
<-done1
<-done2
<-done3
// Verify the returned nodes reflect the batch state
assert.True(t, ok1, "UpdateNode should succeed for node 1")
assert.True(t, ok2, "UpdateNode should succeed for node 2")
assert.True(t, resultNode1.Valid())
assert.True(t, resultNode2.Valid())
assert.True(t, newNode3.Valid())
// Check that returned nodes have the updated values
assert.Equal(t, "batch-updated-node1", resultNode1.Hostname())
assert.Equal(t, "batch-given-1", resultNode1.GivenName())
assert.Equal(t, "batch-updated-node2", resultNode2.Hostname())
assert.Equal(t, "batch-given-2", resultNode2.GivenName())
assert.Equal(t, "node3", newNode3.Hostname())
// Verify the snapshot also reflects all changes
snapshot := store.data.Load()
assert.Len(t, snapshot.nodesByID, 3)
assert.Equal(t, "batch-updated-node1", snapshot.nodesByID[1].Hostname)
assert.Equal(t, "batch-updated-node2", snapshot.nodesByID[2].Hostname)
assert.Equal(t, "node3", snapshot.nodesByID[3].Hostname)
// Verify peer relationships are updated correctly with new node
assert.Len(t, snapshot.peersByNode[1], 2) // sees nodes 2 and 3
assert.Len(t, snapshot.peersByNode[2], 2) // sees nodes 1 and 3
assert.Len(t, snapshot.peersByNode[3], 2) // sees nodes 1 and 2
},
},
{
name: "update non-existent node returns invalid view",
action: func(store *NodeStore) {
resultNode, ok := store.UpdateNode(999, func(n *types.Node) {
n.Hostname = "should-not-exist"
})
assert.False(t, ok, "UpdateNode should return false for non-existent node")
assert.False(t, resultNode.Valid(), "Result should be invalid NodeView")
},
},
{
name: "multiple updates to same node in batch all see final state",
action: func(store *NodeStore) {
// This test verifies that when multiple updates to the same node
// are batched together, each returned node reflects ALL changes
// in the batch, not just the individual update's changes.
done1 := make(chan struct{})
done2 := make(chan struct{})
done3 := make(chan struct{})
var resultNode1, resultNode2, resultNode3 types.NodeView
var ok1, ok2, ok3 bool
// These updates all modify node 1 and should be batched together
// The final state should have all three modifications applied
go func() {
resultNode1, ok1 = store.UpdateNode(1, func(n *types.Node) {
n.Hostname = "multi-update-hostname"
})
close(done1)
}()
go func() {
resultNode2, ok2 = store.UpdateNode(1, func(n *types.Node) {
n.GivenName = "multi-update-givenname"
})
close(done2)
}()
go func() {
resultNode3, ok3 = store.UpdateNode(1, func(n *types.Node) {
n.ForcedTags = []string{"tag1", "tag2"}
})
close(done3)
}()
// Wait for all operations to complete
<-done1
<-done2
<-done3
// All updates should succeed
assert.True(t, ok1, "First update should succeed")
assert.True(t, ok2, "Second update should succeed")
assert.True(t, ok3, "Third update should succeed")
// CRITICAL: Each returned node should reflect ALL changes from the batch
// not just the change from its specific update call
// resultNode1 (from hostname update) should also have the givenname and tags changes
assert.Equal(t, "multi-update-hostname", resultNode1.Hostname())
assert.Equal(t, "multi-update-givenname", resultNode1.GivenName())
assert.Equal(t, []string{"tag1", "tag2"}, resultNode1.ForcedTags().AsSlice())
// resultNode2 (from givenname update) should also have the hostname and tags changes
assert.Equal(t, "multi-update-hostname", resultNode2.Hostname())
assert.Equal(t, "multi-update-givenname", resultNode2.GivenName())
assert.Equal(t, []string{"tag1", "tag2"}, resultNode2.ForcedTags().AsSlice())
// resultNode3 (from tags update) should also have the hostname and givenname changes
assert.Equal(t, "multi-update-hostname", resultNode3.Hostname())
assert.Equal(t, "multi-update-givenname", resultNode3.GivenName())
assert.Equal(t, []string{"tag1", "tag2"}, resultNode3.ForcedTags().AsSlice())
// Verify the snapshot also has all changes
snapshot := store.data.Load()
finalNode := snapshot.nodesByID[1]
assert.Equal(t, "multi-update-hostname", finalNode.Hostname)
assert.Equal(t, "multi-update-givenname", finalNode.GivenName)
assert.Equal(t, []string{"tag1", "tag2"}, finalNode.ForcedTags)
},
},
},
},
{
name: "test UpdateNode result is immutable for database save",
setupFunc: func(t *testing.T) *NodeStore {
node1 := createTestNode(1, 1, "user1", "node1")
node2 := createTestNode(2, 1, "user1", "node2")
initialNodes := types.Nodes{&node1, &node2}
return NewNodeStore(initialNodes, allowAllPeersFunc)
},
steps: []testStep{
{
name: "verify returned node is complete and consistent",
action: func(store *NodeStore) {
// Update a node and verify the returned view is complete
resultNode, ok := store.UpdateNode(1, func(n *types.Node) {
n.Hostname = "db-save-hostname"
n.GivenName = "db-save-given"
n.ForcedTags = []string{"db-tag1", "db-tag2"}
})
assert.True(t, ok, "UpdateNode should succeed")
assert.True(t, resultNode.Valid(), "Result should be valid")
// Verify the returned node has all expected values
assert.Equal(t, "db-save-hostname", resultNode.Hostname())
assert.Equal(t, "db-save-given", resultNode.GivenName())
assert.Equal(t, []string{"db-tag1", "db-tag2"}, resultNode.ForcedTags().AsSlice())
// Convert to struct as would be done for database save
nodePtr := resultNode.AsStruct()
assert.NotNil(t, nodePtr)
assert.Equal(t, "db-save-hostname", nodePtr.Hostname)
assert.Equal(t, "db-save-given", nodePtr.GivenName)
assert.Equal(t, []string{"db-tag1", "db-tag2"}, nodePtr.ForcedTags)
// Verify the snapshot also reflects the same state
snapshot := store.data.Load()
storedNode := snapshot.nodesByID[1]
assert.Equal(t, "db-save-hostname", storedNode.Hostname)
assert.Equal(t, "db-save-given", storedNode.GivenName)
assert.Equal(t, []string{"db-tag1", "db-tag2"}, storedNode.ForcedTags)
},
},
{
name: "concurrent updates all return consistent final state for DB save",
action: func(store *NodeStore) {
// Multiple goroutines updating the same node
// All should receive the final batch state suitable for DB save
done1 := make(chan struct{})
done2 := make(chan struct{})
done3 := make(chan struct{})
var result1, result2, result3 types.NodeView
var ok1, ok2, ok3 bool
// Start concurrent updates
go func() {
result1, ok1 = store.UpdateNode(1, func(n *types.Node) {
n.Hostname = "concurrent-db-hostname"
})
close(done1)
}()
go func() {
result2, ok2 = store.UpdateNode(1, func(n *types.Node) {
n.GivenName = "concurrent-db-given"
})
close(done2)
}()
go func() {
result3, ok3 = store.UpdateNode(1, func(n *types.Node) {
n.ForcedTags = []string{"concurrent-tag"}
})
close(done3)
}()
// Wait for all to complete
<-done1
<-done2
<-done3
assert.True(t, ok1 && ok2 && ok3, "All updates should succeed")
// All results should be valid and suitable for database save
assert.True(t, result1.Valid())
assert.True(t, result2.Valid())
assert.True(t, result3.Valid())
// Convert each to struct as would be done for DB save
nodePtr1 := result1.AsStruct()
nodePtr2 := result2.AsStruct()
nodePtr3 := result3.AsStruct()
// All should have the complete final state
assert.Equal(t, "concurrent-db-hostname", nodePtr1.Hostname)
assert.Equal(t, "concurrent-db-given", nodePtr1.GivenName)
assert.Equal(t, []string{"concurrent-tag"}, nodePtr1.ForcedTags)
assert.Equal(t, "concurrent-db-hostname", nodePtr2.Hostname)
assert.Equal(t, "concurrent-db-given", nodePtr2.GivenName)
assert.Equal(t, []string{"concurrent-tag"}, nodePtr2.ForcedTags)
assert.Equal(t, "concurrent-db-hostname", nodePtr3.Hostname)
assert.Equal(t, "concurrent-db-given", nodePtr3.GivenName)
assert.Equal(t, []string{"concurrent-tag"}, nodePtr3.ForcedTags)
// Verify consistency with stored state
snapshot := store.data.Load()
storedNode := snapshot.nodesByID[1]
assert.Equal(t, nodePtr1.Hostname, storedNode.Hostname)
assert.Equal(t, nodePtr1.GivenName, storedNode.GivenName)
assert.Equal(t, nodePtr1.ForcedTags, storedNode.ForcedTags)
},
},
{
name: "verify returned node preserves all fields for DB save",
action: func(store *NodeStore) {
// Get initial state
snapshot := store.data.Load()
originalNode := snapshot.nodesByID[2]
originalIPv4 := originalNode.IPv4
originalIPv6 := originalNode.IPv6
originalCreatedAt := originalNode.CreatedAt
originalUser := originalNode.User
// Update only hostname
resultNode, ok := store.UpdateNode(2, func(n *types.Node) {
n.Hostname = "preserve-test-hostname"
})
assert.True(t, ok, "Update should succeed")
// Convert to struct for DB save
nodeForDB := resultNode.AsStruct()
// Verify all fields are preserved
assert.Equal(t, "preserve-test-hostname", nodeForDB.Hostname)
assert.Equal(t, originalIPv4, nodeForDB.IPv4)
assert.Equal(t, originalIPv6, nodeForDB.IPv6)
assert.Equal(t, originalCreatedAt, nodeForDB.CreatedAt)
assert.Equal(t, originalUser.Name, nodeForDB.User.Name)
assert.Equal(t, types.NodeID(2), nodeForDB.ID)
// These fields should be suitable for direct database save
assert.NotNil(t, nodeForDB.IPv4)
assert.NotNil(t, nodeForDB.IPv6)
assert.False(t, nodeForDB.CreatedAt.IsZero())
},
},
},
},
}
for _, tt := range tests {
@@ -839,302 +499,3 @@ type testStep struct {
name string
action func(store *NodeStore)
}
// --- Additional NodeStore concurrency, batching, race, resource, timeout, and allocation tests ---
// Helper for concurrent test nodes
func createConcurrentTestNode(id types.NodeID, hostname string) types.Node {
machineKey := key.NewMachine()
nodeKey := key.NewNode()
return types.Node{
ID: id,
Hostname: hostname,
MachineKey: machineKey.Public(),
NodeKey: nodeKey.Public(),
UserID: 1,
User: types.User{
Name: "concurrent-test-user",
},
}
}
// --- Concurrency: concurrent PutNode operations ---
func TestNodeStoreConcurrentPutNode(t *testing.T) {
const concurrentOps = 20
store := NewNodeStore(nil, allowAllPeersFunc)
store.Start()
defer store.Stop()
var wg sync.WaitGroup
results := make(chan bool, concurrentOps)
for i := 0; i < concurrentOps; i++ {
wg.Add(1)
go func(nodeID int) {
defer wg.Done()
node := createConcurrentTestNode(types.NodeID(nodeID), "concurrent-node")
resultNode := store.PutNode(node)
results <- resultNode.Valid()
}(i + 1)
}
wg.Wait()
close(results)
successCount := 0
for success := range results {
if success {
successCount++
}
}
require.Equal(t, concurrentOps, successCount, "All concurrent PutNode operations should succeed")
}
// --- Batching: concurrent ops fit in one batch ---
func TestNodeStoreBatchingEfficiency(t *testing.T) {
const batchSize = 10
const ops = 15 // more than batchSize
store := NewNodeStore(nil, allowAllPeersFunc)
store.Start()
defer store.Stop()
var wg sync.WaitGroup
results := make(chan bool, ops)
for i := 0; i < ops; i++ {
wg.Add(1)
go func(nodeID int) {
defer wg.Done()
node := createConcurrentTestNode(types.NodeID(nodeID), "batch-node")
resultNode := store.PutNode(node)
results <- resultNode.Valid()
}(i + 1)
}
wg.Wait()
close(results)
successCount := 0
for success := range results {
if success {
successCount++
}
}
require.Equal(t, ops, successCount, "All batch PutNode operations should succeed")
}
// --- Race conditions: many goroutines on same node ---
func TestNodeStoreRaceConditions(t *testing.T) {
store := NewNodeStore(nil, allowAllPeersFunc)
store.Start()
defer store.Stop()
nodeID := types.NodeID(1)
node := createConcurrentTestNode(nodeID, "race-node")
resultNode := store.PutNode(node)
require.True(t, resultNode.Valid())
const numGoroutines = 30
const opsPerGoroutine = 10
var wg sync.WaitGroup
errors := make(chan error, numGoroutines*opsPerGoroutine)
for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func(gid int) {
defer wg.Done()
for j := 0; j < opsPerGoroutine; j++ {
switch j % 3 {
case 0:
resultNode, _ := store.UpdateNode(nodeID, func(n *types.Node) {
n.Hostname = "race-updated"
})
if !resultNode.Valid() {
errors <- fmt.Errorf("UpdateNode failed in goroutine %d, op %d", gid, j)
}
case 1:
retrieved, found := store.GetNode(nodeID)
if !found || !retrieved.Valid() {
errors <- fmt.Errorf("GetNode failed in goroutine %d, op %d", gid, j)
}
case 2:
newNode := createConcurrentTestNode(nodeID, "race-put")
resultNode := store.PutNode(newNode)
if !resultNode.Valid() {
errors <- fmt.Errorf("PutNode failed in goroutine %d, op %d", gid, j)
}
}
}
}(i)
}
wg.Wait()
close(errors)
errorCount := 0
for err := range errors {
t.Error(err)
errorCount++
}
if errorCount > 0 {
t.Fatalf("Race condition test failed with %d errors", errorCount)
}
}
// --- Resource cleanup: goroutine leak detection ---
func TestNodeStoreResourceCleanup(t *testing.T) {
// initialGoroutines := runtime.NumGoroutine()
store := NewNodeStore(nil, allowAllPeersFunc)
store.Start()
defer store.Stop()
time.Sleep(50 * time.Millisecond)
afterStartGoroutines := runtime.NumGoroutine()
const ops = 100
for i := 0; i < ops; i++ {
nodeID := types.NodeID(i + 1)
node := createConcurrentTestNode(nodeID, "cleanup-node")
resultNode := store.PutNode(node)
assert.True(t, resultNode.Valid())
store.UpdateNode(nodeID, func(n *types.Node) {
n.Hostname = "cleanup-updated"
})
retrieved, found := store.GetNode(nodeID)
assert.True(t, found && retrieved.Valid())
if i%10 == 9 {
store.DeleteNode(nodeID)
}
}
runtime.GC()
time.Sleep(100 * time.Millisecond)
finalGoroutines := runtime.NumGoroutine()
if finalGoroutines > afterStartGoroutines+2 {
t.Errorf("Potential goroutine leak: started with %d, ended with %d", afterStartGoroutines, finalGoroutines)
}
}
// --- Timeout/deadlock: operations complete within reasonable time ---
func TestNodeStoreOperationTimeout(t *testing.T) {
store := NewNodeStore(nil, allowAllPeersFunc)
store.Start()
defer store.Stop()
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
const ops = 30
var wg sync.WaitGroup
putResults := make([]error, ops)
updateResults := make([]error, ops)
// Launch all PutNode operations concurrently
for i := 1; i <= ops; i++ {
nodeID := types.NodeID(i)
wg.Add(1)
go func(idx int, id types.NodeID) {
defer wg.Done()
startPut := time.Now()
fmt.Printf("[TestNodeStoreOperationTimeout] %s: PutNode(%d) starting\n", startPut.Format("15:04:05.000"), id)
node := createConcurrentTestNode(id, "timeout-node")
resultNode := store.PutNode(node)
endPut := time.Now()
fmt.Printf("[TestNodeStoreOperationTimeout] %s: PutNode(%d) finished, valid=%v, duration=%v\n", endPut.Format("15:04:05.000"), id, resultNode.Valid(), endPut.Sub(startPut))
if !resultNode.Valid() {
putResults[idx-1] = fmt.Errorf("PutNode failed for node %d", id)
}
}(i, nodeID)
}
wg.Wait()
// Launch all UpdateNode operations concurrently
wg = sync.WaitGroup{}
for i := 1; i <= ops; i++ {
nodeID := types.NodeID(i)
wg.Add(1)
go func(idx int, id types.NodeID) {
defer wg.Done()
startUpdate := time.Now()
fmt.Printf("[TestNodeStoreOperationTimeout] %s: UpdateNode(%d) starting\n", startUpdate.Format("15:04:05.000"), id)
resultNode, ok := store.UpdateNode(id, func(n *types.Node) {
n.Hostname = "timeout-updated"
})
endUpdate := time.Now()
fmt.Printf("[TestNodeStoreOperationTimeout] %s: UpdateNode(%d) finished, valid=%v, ok=%v, duration=%v\n", endUpdate.Format("15:04:05.000"), id, resultNode.Valid(), ok, endUpdate.Sub(startUpdate))
if !ok || !resultNode.Valid() {
updateResults[idx-1] = fmt.Errorf("UpdateNode failed for node %d", id)
}
}(i, nodeID)
}
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
select {
case <-done:
errorCount := 0
for _, err := range putResults {
if err != nil {
t.Error(err)
errorCount++
}
}
for _, err := range updateResults {
if err != nil {
t.Error(err)
errorCount++
}
}
if errorCount == 0 {
t.Log("All concurrent operations completed successfully within timeout")
} else {
t.Fatalf("Some concurrent operations failed: %d errors", errorCount)
}
case <-ctx.Done():
fmt.Println("[TestNodeStoreOperationTimeout] Timeout reached, test failed")
t.Fatal("Operations timed out - potential deadlock or resource issue")
}
}
// --- Edge case: update non-existent node ---
func TestNodeStoreUpdateNonExistentNode(t *testing.T) {
for i := 0; i < 10; i++ {
store := NewNodeStore(nil, allowAllPeersFunc)
store.Start()
nonExistentID := types.NodeID(999 + i)
updateCallCount := 0
fmt.Printf("[TestNodeStoreUpdateNonExistentNode] UpdateNode(%d) starting\n", nonExistentID)
resultNode, ok := store.UpdateNode(nonExistentID, func(n *types.Node) {
updateCallCount++
n.Hostname = "should-never-be-called"
})
fmt.Printf("[TestNodeStoreUpdateNonExistentNode] UpdateNode(%d) finished, valid=%v, ok=%v, updateCallCount=%d\n", nonExistentID, resultNode.Valid(), ok, updateCallCount)
assert.False(t, ok, "UpdateNode should return false for non-existent node")
assert.False(t, resultNode.Valid(), "UpdateNode should return invalid node for non-existent node")
assert.Equal(t, 0, updateCallCount, "UpdateFn should not be called for non-existent node")
store.Stop()
}
}
// --- Allocation benchmark ---
func BenchmarkNodeStoreAllocations(b *testing.B) {
store := NewNodeStore(nil, allowAllPeersFunc)
store.Start()
defer store.Stop()
b.ResetTimer()
for i := 0; i < b.N; i++ {
nodeID := types.NodeID(i + 1)
node := createConcurrentTestNode(nodeID, "bench-node")
store.PutNode(node)
store.UpdateNode(nodeID, func(n *types.Node) {
n.Hostname = "bench-updated"
})
store.GetNode(nodeID)
if i%10 == 9 {
store.DeleteNode(nodeID)
}
}
}
func TestNodeStoreAllocationStats(t *testing.T) {
res := testing.Benchmark(BenchmarkNodeStoreAllocations)
allocs := res.AllocsPerOp()
t.Logf("NodeStore allocations per op: %.2f", float64(allocs))
}
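For reference, `testing.Benchmark` returns a `testing.BenchmarkResult` carrying more counters than just allocations; a minimal sketch of reporting the headline numbers (standard-library API only):

```go
package example

import (
	"fmt"
	"testing"
	"time"
)

// report prints the headline numbers from a programmatic benchmark run.
// AllocsPerOp and AllocedBytesPerOp are derived from runtime.MemStats
// deltas collected by the testing package.
func report(res testing.BenchmarkResult) {
	fmt.Printf("%d iterations, %v/op, %d allocs/op, %d B/op\n",
		res.N, time.Duration(res.NsPerOp()), res.AllocsPerOp(), res.AllocedBytesPerOp())
}
```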

File diff suppressed because it is too large

View File

@@ -3,7 +3,6 @@ package change
import (
"errors"
"time"
"github.com/juanfont/headscale/hscontrol/types"
)
@@ -69,9 +68,6 @@ type ChangeSet struct {
// IsSubnetRouter indicates whether the node is a subnet router.
IsSubnetRouter bool
// NodeExpiry is set if the change is NodeKeyExpiry.
NodeExpiry *time.Time
}
func (c *ChangeSet) Validate() error {
@@ -130,11 +126,6 @@ func RemoveUpdatesForSelf(id types.NodeID, cs []ChangeSet) (ret []ChangeSet) {
return ret
}
// IsSelfUpdate reports whether this ChangeSet represents an update to the given node itself.
func (c ChangeSet) IsSelfUpdate(nodeID types.NodeID) bool {
return c.NodeID == nodeID
}
func (c ChangeSet) AlsoSelf() bool {
// If NodeID is 0, it means this ChangeSet is not related to a specific node,
// so we consider it as a change that should be sent to all nodes.
@@ -188,11 +179,10 @@ func NodeOffline(id types.NodeID) ChangeSet {
}
}
func KeyExpiry(id types.NodeID, expiry time.Time) ChangeSet {
func KeyExpiry(id types.NodeID) ChangeSet {
return ChangeSet{
Change: NodeKeyExpiry,
NodeID: id,
NodeExpiry: &expiry,
Change: NodeKeyExpiry,
NodeID: id,
}
}
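A minimal call-site sketch of the signature change above; the `expireNode` wrapper and the import paths are assumptions for illustration, not code from the tree:

```go
package example

import (
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/types/change"
)

// expireNode emits a key-expiry ChangeSet. Under the old signature this
// was change.KeyExpiry(id, expiry); the expiry timestamp must now be
// read from the node itself by whoever consumes the ChangeSet.
func expireNode(id types.NodeID) change.ChangeSet {
	return change.KeyExpiry(id)
}
```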

View File

@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"runtime"
"sync/atomic"
"time"
"github.com/juanfont/headscale/hscontrol/util"
@@ -187,28 +186,6 @@ func (r RegistrationID) String() string {
type RegisterNode struct {
Node Node
Registered chan *Node
closed *atomic.Bool
}
func NewRegisterNode(node Node) RegisterNode {
return RegisterNode{
Node: node,
Registered: make(chan *Node),
closed: &atomic.Bool{},
}
}
func (rn *RegisterNode) SendAndClose(node *Node) {
if rn.closed.Swap(true) {
return
}
select {
case rn.Registered <- node:
default:
}
close(rn.Registered)
}
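The removed SendAndClose above is a close-once reply channel: the atomic swap guarantees exactly one close, and the select with a default arm makes the send non-blocking when no receiver is waiting. A self-contained, generic sketch of the same pattern:

```go
package example

import "sync/atomic"

// Result is a one-shot reply channel: the first SendAndClose wins,
// later calls are no-ops, and the send is dropped rather than blocking
// if nobody is listening.
type Result[T any] struct {
	ch     chan T
	closed atomic.Bool
}

func NewResult[T any]() *Result[T] {
	return &Result[T]{ch: make(chan T)}
}

func (r *Result[T]) SendAndClose(v T) {
	if r.closed.Swap(true) { // someone already closed it
		return
	}
	select {
	case r.ch <- v: // a receiver was waiting
	default: // no receiver; drop the value
	}
	close(r.ch)
}
```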
// DefaultBatcherWorkers returns the default number of batcher workers.

View File

@@ -235,8 +235,6 @@ type Tuning struct {
BatchChangeDelay time.Duration
NodeMapSessionBufferedChanSize int
BatcherWorkers int
RegisterCacheCleanup time.Duration
RegisterCacheExpiration time.Duration
}
func validatePKCEMethod(method string) error {
@@ -340,7 +338,7 @@ func LoadConfig(path string, isFile bool) error {
viper.SetDefault("prefixes.allocation", string(IPAllocationStrategySequential))
if err := viper.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
if errors.Is(err, fs.ErrNotExist) {
log.Warn().Msg("No config file found, using defaults")
return nil
}
@@ -1004,8 +1002,6 @@ func LoadServerConfig() (*Config, error) {
}
return DefaultBatcherWorkers()
}(),
RegisterCacheCleanup: viper.GetDuration("tuning.register_cache_cleanup"),
RegisterCacheExpiration: viper.GetDuration("tuning.register_cache_expiration"),
},
}, nil
}
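The error-handling change above swaps a concrete type assertion for `errors.Is`, which also matches wrapped errors. A standalone sketch of why that is the more robust check, using `os.ReadFile` rather than viper (the helper is illustrative):

```go
package example

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// readConfig tolerates a missing file: os.ReadFile returns a
// *fs.PathError wrapping fs.ErrNotExist, which errors.Is unwraps and
// matches, while a type assertion on the outer error would miss it.
func readConfig(path string) ([]byte, error) {
	b, err := os.ReadFile(path)
	if errors.Is(err, fs.ErrNotExist) {
		fmt.Println("no config file found, using defaults")
		return nil, nil
	}
	return b, err
}
```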

View File

@@ -4,7 +4,6 @@ import (
"errors"
"fmt"
"net/netip"
"regexp"
"slices"
"sort"
"strconv"
@@ -28,8 +27,6 @@ var (
ErrHostnameTooLong = errors.New("hostname too long, must not exceed 255 ASCII chars")
ErrNodeHasNoGivenName = errors.New("node has no given name")
ErrNodeUserHasNoName = errors.New("node user has no name")
invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+")
)
type (
@@ -147,10 +144,7 @@ func (ns Nodes) ViewSlice() views.Slice[NodeView] {
// GivenNameHasBeenChanged returns whether the `givenName` can be automatically changed based on the `Hostname` of the node.
func (node *Node) GivenNameHasBeenChanged() bool {
// Strip invalid DNS characters for givenName comparison
normalised := strings.ToLower(node.Hostname)
normalised = invalidDNSRegex.ReplaceAllString(normalised, "")
return node.GivenName == normalised
return node.GivenName == util.ConvertWithFQDNRules(node.Hostname)
}
// IsExpired returns whether the node registration has expired.
@@ -269,19 +263,11 @@ func (node *Node) Prefixes() []netip.Prefix {
// node has any exit routes enabled.
// If none are enabled, it will return nil.
func (node *Node) ExitRoutes() []netip.Prefix {
var routes []netip.Prefix
for _, route := range node.AnnouncedRoutes() {
if tsaddr.IsExitRoute(route) && slices.Contains(node.ApprovedRoutes, route) {
routes = append(routes, route)
}
if slices.ContainsFunc(node.SubnetRoutes(), tsaddr.IsExitRoute) {
return tsaddr.ExitRoutes()
}
return routes
}
func (node *Node) IsExitNode() bool {
return len(node.ExitRoutes()) > 0
return nil
}
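Under the rewritten logic above, exit-node capability is all-or-nothing: `tsaddr.ExitRoutes()` yields the 0.0.0.0/0 and ::/0 pair whenever any approved route is an exit route. A standalone sketch of the semantics (the `exitRoutes` helper is illustrative):

```go
package example

import (
	"net/netip"
	"slices"

	"tailscale.com/net/tsaddr"
)

// exitRoutes mirrors the rewritten logic: if any approved route is an
// exit route, the node serves the full IPv4+IPv6 exit pair; otherwise
// it serves none.
func exitRoutes(approved []netip.Prefix) []netip.Prefix {
	if slices.ContainsFunc(approved, tsaddr.IsExitRoute) {
		return tsaddr.ExitRoutes() // [0.0.0.0/0 ::/0]
	}
	return nil
}
```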
func (node *Node) IPsAsString() []string {
@@ -319,16 +305,9 @@ func (node *Node) CanAccess(matchers []matcher.Match, node2 *Node) bool {
return true
}
// Check if the node has access to routes that might be part of a
// smaller subnet that is served from node2 as a subnet router.
if matcher.DestsOverlapsPrefixes(node2.SubnetRoutes()...) {
return true
}
// If the dst is "the internet" and node2 is an exit node, allow access.
if matcher.DestsIsTheInternet() && node2.IsExitNode() {
return true
}
}
return false
@@ -455,22 +434,16 @@ func (node *Node) AnnouncedRoutes() []netip.Prefix {
return node.Hostinfo.RoutableIPs
}
// SubnetRoutes returns the list of routes (excluding exit routes) that the node
// announces and are approved.
// SubnetRoutes returns the list of routes that the node announces and are approved.
//
// IMPORTANT: This method is used for internal data structures and should NOT be
// used for the gRPC Proto conversion. For Proto, SubnetRoutes must be populated
// manually with PrimaryRoutes to ensure it includes only routes actively served
// by the node. See the comment in Proto() method and the implementation in
// grpcv1.go/nodesToProto.
// IMPORTANT: This method is used for internal data structures and should NOT be used
// for the gRPC Proto conversion. For Proto, SubnetRoutes must be populated manually
// with PrimaryRoutes to ensure it includes only routes actively served by the node.
// See the comment in Proto() method and the implementation in grpcv1.go/nodesToProto.
func (node *Node) SubnetRoutes() []netip.Prefix {
var routes []netip.Prefix
for _, route := range node.AnnouncedRoutes() {
if tsaddr.IsExitRoute(route) {
continue
}
if slices.Contains(node.ApprovedRoutes, route) {
routes = append(routes, route)
}
@@ -484,11 +457,6 @@ func (node *Node) IsSubnetRouter() bool {
return len(node.SubnetRoutes()) > 0
}
// AllApprovedRoutes returns the combination of SubnetRoutes and ExitRoutes
func (node *Node) AllApprovedRoutes() []netip.Prefix {
return append(node.SubnetRoutes(), node.ExitRoutes()...)
}
func (node *Node) String() string {
return node.Hostname
}
@@ -563,34 +531,20 @@ func (node *Node) ApplyHostnameFromHostInfo(hostInfo *tailcfg.Hostinfo) {
return
}
newHostname := strings.ToLower(hostInfo.Hostname)
if err := util.ValidateHostname(newHostname); err != nil {
log.Warn().
Str("node.id", node.ID.String()).
Str("current_hostname", node.Hostname).
Str("rejected_hostname", hostInfo.Hostname).
Err(err).
Msg("Rejecting invalid hostname update from hostinfo")
return
}
if node.Hostname != newHostname {
if node.Hostname != hostInfo.Hostname {
log.Trace().
Str("node.id", node.ID.String()).
Str("old_hostname", node.Hostname).
Str("new_hostname", newHostname).
Str("new_hostname", hostInfo.Hostname).
Str("old_given_name", node.GivenName).
Bool("given_name_changed", node.GivenNameHasBeenChanged()).
Msg("Updating hostname from hostinfo")
if node.GivenNameHasBeenChanged() {
// Strip invalid DNS characters for givenName display
givenName := strings.ToLower(newHostname)
givenName = invalidDNSRegex.ReplaceAllString(givenName, "")
node.GivenName = givenName
node.GivenName = util.ConvertWithFQDNRules(hostInfo.Hostname)
}
node.Hostname = newHostname
node.Hostname = hostInfo.Hostname
log.Trace().
Str("node.id", node.ID.String()).
@@ -679,17 +633,11 @@ func (node Node) DebugString() string {
fmt.Fprintf(&sb, "\tApprovedRoutes: %v\n", node.ApprovedRoutes)
fmt.Fprintf(&sb, "\tAnnouncedRoutes: %v\n", node.AnnouncedRoutes())
fmt.Fprintf(&sb, "\tSubnetRoutes: %v\n", node.SubnetRoutes())
fmt.Fprintf(&sb, "\tExitRoutes: %v\n", node.ExitRoutes())
sb.WriteString("\n")
return sb.String()
}
func (v NodeView) UserView() UserView {
u := v.User()
return u.View()
}
func (v NodeView) IPs() []netip.Addr {
if !v.Valid() {
return nil
@@ -705,11 +653,27 @@ func (v NodeView) InIPSet(set *netipx.IPSet) bool {
}
func (v NodeView) CanAccess(matchers []matcher.Match, node2 NodeView) bool {
if !v.Valid() {
if !v.Valid() || !node2.Valid() {
return false
}
src := v.IPs()
allowedIPs := node2.IPs()
return v.ж.CanAccess(matchers, node2.AsStruct())
for _, matcher := range matchers {
if !matcher.SrcsContainsIPs(src...) {
continue
}
if matcher.DestsContainsIP(allowedIPs...) {
return true
}
if matcher.DestsOverlapsPrefixes(node2.SubnetRoutes()...) {
return true
}
}
return false
}
func (v NodeView) CanAccessRoute(matchers []matcher.Match, route netip.Prefix) bool {
@@ -741,13 +705,6 @@ func (v NodeView) IsSubnetRouter() bool {
return v.ж.IsSubnetRouter()
}
func (v NodeView) AllApprovedRoutes() []netip.Prefix {
if !v.Valid() {
return nil
}
return v.ж.AllApprovedRoutes()
}
func (v NodeView) AppendToIPSet(build *netipx.IPSetBuilder) {
if !v.Valid() {
return
@@ -826,13 +783,6 @@ func (v NodeView) ExitRoutes() []netip.Prefix {
return v.ж.ExitRoutes()
}
func (v NodeView) IsExitNode() bool {
if !v.Valid() {
return false
}
return v.ж.IsExitNode()
}
// RequestTags returns the ACL tags that the node is requesting.
func (v NodeView) RequestTags() []string {
if !v.Valid() || !v.Hostinfo().Valid() {
@@ -880,22 +830,3 @@ func (v NodeView) IPsAsString() []string {
}
return v.ж.IPsAsString()
}
// HasNetworkChanges checks if the node has network-related changes.
// Returns true if IPs, announced routes, or approved routes changed.
// This is primarily used for policy cache invalidation.
func (v NodeView) HasNetworkChanges(other NodeView) bool {
if !slices.Equal(v.IPs(), other.IPs()) {
return true
}
if !slices.Equal(v.AnnouncedRoutes(), other.AnnouncedRoutes()) {
return true
}
if !slices.Equal(v.SubnetRoutes(), other.SubnetRoutes()) {
return true
}
return false
}

View File

@@ -369,7 +369,7 @@ func TestApplyHostnameFromHostInfo(t *testing.T) {
},
want: Node{
GivenName: "manual-test.local",
Hostname: "newhostname.local",
Hostname: "NewHostName.Local",
},
},
{
@@ -383,245 +383,7 @@ func TestApplyHostnameFromHostInfo(t *testing.T) {
},
want: Node{
GivenName: "newhostname.local",
Hostname: "newhostname.local",
},
},
{
name: "invalid-hostname-with-emoji-rejected",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "hostname-with-💩",
},
want: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname", // Should reject and keep old hostname
},
},
{
name: "invalid-hostname-with-unicode-rejected",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "我的电脑",
},
want: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname", // Should keep old hostname
},
},
{
name: "invalid-hostname-with-special-chars-rejected",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "node-with-special!@#$%",
},
want: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname", // Should reject and keep old hostname
},
},
{
name: "invalid-hostname-too-short-rejected",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "a",
},
want: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname", // Should keep old hostname
},
},
{
name: "invalid-hostname-uppercase-accepted-lowercased",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "ValidHostName",
},
want: Node{
GivenName: "validhostname", // GivenName follows hostname when it changes
Hostname: "validhostname", // Uppercase is lowercased, not rejected
},
},
{
name: "uppercase_to_lowercase_accepted",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "User2-Host",
},
want: Node{
GivenName: "user2-host",
Hostname: "user2-host",
},
},
{
name: "at_sign_rejected",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "Test@Host",
},
want: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
},
{
name: "chinese_chars_with_dash_rejected",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "server-北京-01",
},
want: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
},
{
name: "chinese_only_rejected",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "我的电脑",
},
want: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
},
{
name: "emoji_with_text_rejected",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "laptop-🚀",
},
want: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
},
{
name: "mixed_chinese_emoji_rejected",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "测试💻机器",
},
want: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
},
{
name: "only_emojis_rejected",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "🎉🎊",
},
want: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
},
{
name: "only_at_signs_rejected",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "@@@",
},
want: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
},
{
name: "starts_with_dash_rejected",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "-test",
},
want: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
},
{
name: "ends_with_dash_rejected",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "test-",
},
want: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
},
{
name: "too_long_hostname_rejected",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: strings.Repeat("t", 65),
},
want: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
},
{
name: "underscore_rejected",
nodeBefore: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
},
change: &tailcfg.Hostinfo{
Hostname: "test_node",
},
want: Node{
GivenName: "valid-hostname",
Hostname: "valid-hostname",
Hostname: "NewHostName.Local",
},
},
}
@@ -793,179 +555,3 @@ func TestNodeRegisterMethodToV1Enum(t *testing.T) {
})
}
}
// TestHasNetworkChanges tests the NodeView method for detecting
// when a node's network properties have changed.
func TestHasNetworkChanges(t *testing.T) {
mustIPPtr := func(s string) *netip.Addr {
ip := netip.MustParseAddr(s)
return &ip
}
tests := []struct {
name string
old *Node
new *Node
changed bool
}{
{
name: "no changes",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
IPv6: mustIPPtr("fd7a:115c:a1e0::1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
IPv6: mustIPPtr("fd7a:115c:a1e0::1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
},
changed: false,
},
{
name: "IPv4 changed",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
IPv6: mustIPPtr("fd7a:115c:a1e0::1"),
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.2"),
IPv6: mustIPPtr("fd7a:115c:a1e0::1"),
},
changed: true,
},
{
name: "IPv6 changed",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
IPv6: mustIPPtr("fd7a:115c:a1e0::1"),
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
IPv6: mustIPPtr("fd7a:115c:a1e0::2"),
},
changed: true,
},
{
name: "RoutableIPs added",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{},
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}},
},
changed: true,
},
{
name: "RoutableIPs removed",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}},
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{},
},
changed: true,
},
{
name: "RoutableIPs changed",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}},
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}},
},
changed: true,
},
{
name: "SubnetRoutes added",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{},
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
},
changed: true,
},
{
name: "SubnetRoutes removed",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{},
},
changed: true,
},
{
name: "SubnetRoutes changed",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")},
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
},
changed: true,
},
{
name: "irrelevant property changed (Hostname)",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostname: "old-name",
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostname: "new-name",
},
changed: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := tt.new.View().HasNetworkChanges(tt.old.View())
if got != tt.changed {
t.Errorf("HasNetworkChanges() = %v, want %v", got, tt.changed)
}
})
}
}

View File

@@ -104,31 +104,27 @@ func (u *User) profilePicURL() string {
return u.ProfilePicURL
}
func (u *User) TailscaleUser() tailcfg.User {
return tailcfg.User{
func (u *User) TailscaleUser() *tailcfg.User {
user := tailcfg.User{
ID: tailcfg.UserID(u.ID),
DisplayName: u.Display(),
ProfilePicURL: u.profilePicURL(),
Created: u.CreatedAt,
}
return &user
}
func (u UserView) TailscaleUser() tailcfg.User {
return u.ж.TailscaleUser()
}
func (u *User) TailscaleLogin() tailcfg.Login {
return tailcfg.Login{
func (u *User) TailscaleLogin() *tailcfg.Login {
login := tailcfg.Login{
ID: tailcfg.LoginID(u.ID),
Provider: u.Provider,
LoginName: u.Username(),
DisplayName: u.Display(),
ProfilePicURL: u.profilePicURL(),
}
}
func (u UserView) TailscaleLogin() tailcfg.Login {
return u.ж.TailscaleLogin()
return &login
}
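Returning `&user` or `&login` for a value declared inside the function is safe in Go: escape analysis promotes the value to the heap once its address leaves the function. A minimal sketch (field values are illustrative):

```go
package example

import "tailscale.com/tailcfg"

// newLogin returns a pointer to a locally declared value; Go moves the
// value to the heap because its address escapes, so this is safe
// (unlike returning the address of a stack local in C).
func newLogin(id tailcfg.LoginID, name string) *tailcfg.Login {
	login := tailcfg.Login{ID: id, LoginName: name}
	return &login
}
```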
func (u *User) TailscaleUserProfile() tailcfg.UserProfile {
@@ -140,10 +136,6 @@ func (u *User) TailscaleUserProfile() tailcfg.UserProfile {
}
}
func (u UserView) TailscaleUserProfile() tailcfg.UserProfile {
return u.ж.TailscaleUserProfile()
}
func (u *User) Proto() *v1.User {
return &v1.User{
Id: uint64(u.ID),

View File

@@ -27,7 +27,7 @@ var (
invalidCharsInUserRegex = regexp.MustCompile("[^a-z0-9-.]+")
)
var ErrInvalidHostName = errors.New("invalid hostname")
var ErrInvalidUserName = errors.New("invalid user name")
// ValidateUsername checks if a username is valid.
// It must be at least 2 characters long, start with a letter, and contain
@@ -67,86 +67,42 @@ func ValidateUsername(username string) error {
return nil
}
// ValidateHostname checks if a hostname meets DNS requirements.
// This function does NOT modify the input - it only validates.
// The hostname must already be lowercase and contain only valid characters.
func ValidateHostname(name string) error {
func CheckForFQDNRules(name string) error {
// Ensure the username meets the minimum length requirement
if len(name) < 2 {
return fmt.Errorf(
"hostname %q is too short, must be at least 2 characters",
name,
)
return errors.New("name must be at least 2 characters long")
}
if len(name) > LabelHostnameLength {
return fmt.Errorf(
"hostname %q is too long, must not exceed 63 characters",
"DNS segment must not be over 63 chars. %v doesn't comply with this rule: %w",
name,
ErrInvalidUserName,
)
}
if strings.ToLower(name) != name {
return fmt.Errorf(
"hostname %q must be lowercase (try %q)",
name,
strings.ToLower(name),
)
}
if strings.HasPrefix(name, "-") || strings.HasSuffix(name, "-") {
return fmt.Errorf(
"hostname %q cannot start or end with a hyphen",
name,
)
}
if strings.HasPrefix(name, ".") || strings.HasSuffix(name, ".") {
return fmt.Errorf(
"hostname %q cannot start or end with a dot",
"DNS segment should be lowercase. %v doesn't comply with this rule: %w",
name,
ErrInvalidUserName,
)
}
if invalidDNSRegex.MatchString(name) {
return fmt.Errorf(
"hostname %q contains invalid characters, only lowercase letters, numbers, hyphens and dots are allowed",
"DNS segment should only be composed of lowercase ASCII letters numbers, hyphen and dots. %v doesn't comply with these rules: %w",
name,
ErrInvalidUserName,
)
}
return nil
}
// NormaliseHostname transforms a string into a valid DNS hostname.
// Returns error if the transformation results in an invalid hostname.
//
// Transformations applied:
// - Converts to lowercase
// - Removes invalid DNS characters
// - Truncates to 63 characters if needed
//
// After transformation, validates the result.
func NormaliseHostname(name string) (string, error) {
// Early return if already valid
if err := ValidateHostname(name); err == nil {
return name, nil
}
// Transform to lowercase
func ConvertWithFQDNRules(name string) string {
name = strings.ToLower(name)
// Strip invalid DNS characters
name = invalidDNSRegex.ReplaceAllString(name, "")
// Truncate to DNS label limit
if len(name) > LabelHostnameLength {
name = name[:LabelHostnameLength]
}
// Validate result after transformation
if err := ValidateHostname(name); err != nil {
return "", fmt.Errorf(
"hostname invalid after normalisation: %w",
err,
)
}
return name, nil
return name
}
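A minimal usage sketch of `ConvertWithFQDNRules`; the expected outputs are taken from TestConvertWithFQDNRules further down, and the import path matches the one used elsewhere in the tree:

```go
package main

import (
	"fmt"

	"github.com/juanfont/headscale/hscontrol/util"
)

func main() {
	// Expected outputs match TestConvertWithFQDNRules below.
	fmt.Println(util.ConvertWithFQDNRules("User1.Test"))        // user1.test
	fmt.Println(util.ConvertWithFQDNRules("User'1$2.Test"))     // user12.test
	fmt.Println(util.ConvertWithFQDNRules("User-MacBook-Pro"))  // user-macbook-pro
}
```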
// generateMagicDNSRootDomains generates a list of DNS entries to be included in `Routes` in `MapResponse`.

View File

@@ -2,7 +2,6 @@ package util
import (
"net/netip"
"strings"
"testing"
"github.com/stretchr/testify/assert"
@@ -10,173 +9,94 @@ import (
"tailscale.com/util/must"
)
func TestNormaliseHostname(t *testing.T) {
func TestCheckForFQDNRules(t *testing.T) {
type args struct {
name string
}
tests := []struct {
name string
args args
want string
wantErr bool
}{
{
name: "valid: lowercase user",
name: "valid: user",
args: args{name: "valid-user"},
want: "valid-user",
wantErr: false,
},
{
name: "normalise: capitalized user",
name: "invalid: capitalized user",
args: args{name: "Invalid-CapItaLIzed-user"},
want: "invalid-capitalized-user",
wantErr: false,
wantErr: true,
},
{
name: "normalise: email as user",
name: "invalid: email as user",
args: args{name: "foo.bar@example.com"},
want: "foo.barexample.com",
wantErr: false,
wantErr: true,
},
{
name: "normalise: chars in user name",
name: "invalid: chars in user name",
args: args{name: "super-user+name"},
want: "super-username",
wantErr: false,
wantErr: true,
},
{
name: "invalid: too long name truncated leaves trailing hyphen",
name: "invalid: too long name for user",
args: args{
name: "super-long-useruseruser-name-that-should-be-a-little-more-than-63-chars",
},
want: "",
wantErr: true,
},
{
name: "invalid: emoji stripped leaves trailing hyphen",
args: args{name: "hostname-with-💩"},
want: "",
wantErr: true,
},
{
name: "normalise: multiple emojis stripped",
args: args{name: "node-🎉-🚀-test"},
want: "node---test",
wantErr: false,
},
{
name: "invalid: only emoji becomes empty",
args: args{name: "💩"},
want: "",
wantErr: true,
},
{
name: "invalid: emoji at start leaves leading hyphen",
args: args{name: "🚀-rocket-node"},
want: "",
wantErr: true,
},
{
name: "invalid: emoji at end leaves trailing hyphen",
args: args{name: "node-test-🎉"},
want: "",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := NormaliseHostname(tt.args.name)
if (err != nil) != tt.wantErr {
t.Errorf("NormaliseHostname() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !tt.wantErr && got != tt.want {
t.Errorf("NormaliseHostname() = %v, want %v", got, tt.want)
if err := CheckForFQDNRules(tt.args.name); (err != nil) != tt.wantErr {
t.Errorf("CheckForFQDNRules() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestValidateHostname(t *testing.T) {
func TestConvertWithFQDNRules(t *testing.T) {
tests := []struct {
name string
hostname string
wantErr bool
errorContains string
name string
hostname string
dnsHostName string
}{
{
name: "valid lowercase",
hostname: "valid-hostname",
wantErr: false,
name: "User1.test",
hostname: "User1.Test",
dnsHostName: "user1.test",
},
{
name: "uppercase rejected",
hostname: "MyHostname",
wantErr: true,
errorContains: "must be lowercase",
name: "User'1$2.test",
hostname: "User'1$2.Test",
dnsHostName: "user12.test",
},
{
name: "too short",
hostname: "a",
wantErr: true,
errorContains: "too short",
name: "User-^_12.local.test",
hostname: "User-^_12.local.Test",
dnsHostName: "user-12.local.test",
},
{
name: "too long",
hostname: "a" + strings.Repeat("b", 63),
wantErr: true,
errorContains: "too long",
name: "User-MacBook-Pro",
hostname: "User-MacBook-Pro",
dnsHostName: "user-macbook-pro",
},
{
name: "emoji rejected",
hostname: "hostname-💩",
wantErr: true,
errorContains: "invalid characters",
name: "User-Linux-Ubuntu/Fedora",
hostname: "User-Linux-Ubuntu/Fedora",
dnsHostName: "user-linux-ubuntufedora",
},
{
name: "starts with hyphen",
hostname: "-hostname",
wantErr: true,
errorContains: "cannot start or end with a hyphen",
},
{
name: "ends with hyphen",
hostname: "hostname-",
wantErr: true,
errorContains: "cannot start or end with a hyphen",
},
{
name: "starts with dot",
hostname: ".hostname",
wantErr: true,
errorContains: "cannot start or end with a dot",
},
{
name: "ends with dot",
hostname: "hostname.",
wantErr: true,
errorContains: "cannot start or end with a dot",
},
{
name: "special characters",
hostname: "host!@#$name",
wantErr: true,
errorContains: "invalid characters",
name: "User-[Space]123",
hostname: "User-[ ]123",
dnsHostName: "user-123",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := ValidateHostname(tt.hostname)
if (err != nil) != tt.wantErr {
t.Errorf("ValidateHostname() error = %v, wantErr %v", err, tt.wantErr)
return
}
if tt.wantErr && tt.errorContains != "" {
if err == nil || !strings.Contains(err.Error(), tt.errorContains) {
t.Errorf("ValidateHostname() error = %v, should contain %q", err, tt.errorContains)
}
}
fqdnHostName := ConvertWithFQDNRules(tt.hostname)
assert.Equal(t, tt.dnsHostName, fqdnHostName)
})
}
}

View File

@@ -66,11 +66,6 @@ func MustGenerateRandomStringDNSSafe(size int) string {
return hash
}
func InvalidString() string {
hash, _ := GenerateRandomStringDNSSafe(8)
return "invalid-" + hash
}
func TailNodesToString(nodes []*tailcfg.Node) string {
temp := make([]string, len(nodes))

View File

@@ -1,7 +1,6 @@
package util
import (
"cmp"
"errors"
"fmt"
"net/netip"
@@ -12,7 +11,6 @@ import (
"strings"
"time"
"tailscale.com/tailcfg"
"tailscale.com/util/cmpver"
)
@@ -260,37 +258,3 @@ func IsCI() bool {
return false
}
// EnsureHostname (formerly SafeHostname) guarantees a valid hostname for node
// registration, providing sensible defaults if Hostinfo is nil or Hostname is
// empty. This prevents nil pointer dereferences; hostnames exceeding the
// 63-character DNS label limit (RFC 1123) are rejected during validation.
// This function never fails - it always returns a valid hostname.
//
// Strategy:
// 1. If hostinfo is nil/empty → generate default from keys
// 2. If hostname is provided → normalise it
// 3. If normalisation fails → generate invalid-<random> replacement
//
// Returns the guaranteed-valid hostname to use.
func EnsureHostname(hostinfo *tailcfg.Hostinfo, machineKey, nodeKey string) string {
if hostinfo == nil || hostinfo.Hostname == "" {
key := cmp.Or(machineKey, nodeKey)
if key == "" {
return "unknown-node"
}
keyPrefix := key
if len(key) > 8 {
keyPrefix = key[:8]
}
return fmt.Sprintf("node-%s", keyPrefix)
}
lowercased := strings.ToLower(hostinfo.Hostname)
if err := ValidateHostname(lowercased); err == nil {
return lowercased
}
return InvalidString()
}

View File

@@ -3,12 +3,10 @@ package util
import (
"errors"
"net/netip"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"tailscale.com/tailcfg"
)
func TestTailscaleVersionNewerOrEqual(t *testing.T) {
@@ -795,496 +793,3 @@ over a maximum of 30 hops:
})
}
}
func TestEnsureHostname(t *testing.T) {
t.Parallel()
tests := []struct {
name string
hostinfo *tailcfg.Hostinfo
machineKey string
nodeKey string
want string
}{
{
name: "valid_hostname",
hostinfo: &tailcfg.Hostinfo{
Hostname: "test-node",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "test-node",
},
{
name: "nil_hostinfo_with_machine_key",
hostinfo: nil,
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "node-mkey1234",
},
{
name: "nil_hostinfo_with_node_key_only",
hostinfo: nil,
machineKey: "",
nodeKey: "nkey12345678",
want: "node-nkey1234",
},
{
name: "nil_hostinfo_no_keys",
hostinfo: nil,
machineKey: "",
nodeKey: "",
want: "unknown-node",
},
{
name: "empty_hostname_with_machine_key",
hostinfo: &tailcfg.Hostinfo{
Hostname: "",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "node-mkey1234",
},
{
name: "empty_hostname_with_node_key_only",
hostinfo: &tailcfg.Hostinfo{
Hostname: "",
},
machineKey: "",
nodeKey: "nkey12345678",
want: "node-nkey1234",
},
{
name: "empty_hostname_no_keys",
hostinfo: &tailcfg.Hostinfo{
Hostname: "",
},
machineKey: "",
nodeKey: "",
want: "unknown-node",
},
{
name: "hostname_exactly_63_chars",
hostinfo: &tailcfg.Hostinfo{
Hostname: "123456789012345678901234567890123456789012345678901234567890123",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "123456789012345678901234567890123456789012345678901234567890123",
},
{
name: "hostname_64_chars_truncated",
hostinfo: &tailcfg.Hostinfo{
Hostname: "1234567890123456789012345678901234567890123456789012345678901234",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "hostname_very_long_truncated",
hostinfo: &tailcfg.Hostinfo{
Hostname: "test-node-with-very-long-hostname-that-exceeds-dns-label-limits-of-63-characters-and-should-be-truncated",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "hostname_with_special_chars",
hostinfo: &tailcfg.Hostinfo{
Hostname: "node-with-special!@#$%",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "hostname_with_unicode",
hostinfo: &tailcfg.Hostinfo{
Hostname: "node-ñoño-测试",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "short_machine_key",
hostinfo: &tailcfg.Hostinfo{
Hostname: "",
},
machineKey: "short",
nodeKey: "nkey12345678",
want: "node-short",
},
{
name: "short_node_key",
hostinfo: &tailcfg.Hostinfo{
Hostname: "",
},
machineKey: "",
nodeKey: "short",
want: "node-short",
},
{
name: "hostname_with_emoji_replaced",
hostinfo: &tailcfg.Hostinfo{
Hostname: "hostname-with-💩",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "hostname_only_emoji_replaced",
hostinfo: &tailcfg.Hostinfo{
Hostname: "🚀",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "hostname_with_multiple_emojis_replaced",
hostinfo: &tailcfg.Hostinfo{
Hostname: "node-🎉-🚀-test",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "uppercase_to_lowercase",
hostinfo: &tailcfg.Hostinfo{
Hostname: "User2-Host",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "user2-host",
},
{
name: "underscore_removed",
hostinfo: &tailcfg.Hostinfo{
Hostname: "test_node",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "at_sign_invalid",
hostinfo: &tailcfg.Hostinfo{
Hostname: "Test@Host",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "chinese_chars_with_dash_invalid",
hostinfo: &tailcfg.Hostinfo{
Hostname: "server-北京-01",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "chinese_only_invalid",
hostinfo: &tailcfg.Hostinfo{
Hostname: "我的电脑",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "emoji_with_text_invalid",
hostinfo: &tailcfg.Hostinfo{
Hostname: "laptop-🚀",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "mixed_chinese_emoji_invalid",
hostinfo: &tailcfg.Hostinfo{
Hostname: "测试💻机器",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "only_emojis_invalid",
hostinfo: &tailcfg.Hostinfo{
Hostname: "🎉🎊",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "only_at_signs_invalid",
hostinfo: &tailcfg.Hostinfo{
Hostname: "@@@",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "starts_with_dash_invalid",
hostinfo: &tailcfg.Hostinfo{
Hostname: "-test",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "ends_with_dash_invalid",
hostinfo: &tailcfg.Hostinfo{
Hostname: "test-",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
{
name: "very_long_hostname_truncated",
hostinfo: &tailcfg.Hostinfo{
Hostname: strings.Repeat("t", 70),
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
want: "invalid-",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got := EnsureHostname(tt.hostinfo, tt.machineKey, tt.nodeKey)
// For invalid hostnames, we just check the prefix since the random part varies
if strings.HasPrefix(tt.want, "invalid-") {
if !strings.HasPrefix(got, "invalid-") {
t.Errorf("EnsureHostname() = %v, want prefix %v", got, tt.want)
}
} else if got != tt.want {
t.Errorf("EnsureHostname() = %v, want %v", got, tt.want)
}
})
}
}
func TestEnsureHostnameWithHostinfo(t *testing.T) {
t.Parallel()
tests := []struct {
name string
hostinfo *tailcfg.Hostinfo
machineKey string
nodeKey string
wantHostname string
checkHostinfo func(*testing.T, *tailcfg.Hostinfo)
}{
{
name: "valid_hostinfo_unchanged",
hostinfo: &tailcfg.Hostinfo{
Hostname: "test-node",
OS: "linux",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
wantHostname: "test-node",
checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) {
if hi == nil {
t.Error("hostinfo should not be nil")
}
if hi.Hostname != "test-node" {
t.Errorf("hostname = %v, want test-node", hi.Hostname)
}
if hi.OS != "linux" {
t.Errorf("OS = %v, want linux", hi.OS)
}
},
},
{
name: "nil_hostinfo_creates_default",
hostinfo: nil,
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
wantHostname: "node-mkey1234",
},
{
name: "empty_hostname_updated",
hostinfo: &tailcfg.Hostinfo{
Hostname: "",
OS: "darwin",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
wantHostname: "node-mkey1234",
},
{
name: "long_hostname_rejected",
hostinfo: &tailcfg.Hostinfo{
Hostname: "test-node-with-very-long-hostname-that-exceeds-dns-label-limits-of-63-characters",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
wantHostname: "invalid-",
},
{
name: "nil_hostinfo_node_key_only",
hostinfo: nil,
machineKey: "",
nodeKey: "nkey12345678",
wantHostname: "node-nkey1234",
checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) {
if hi == nil {
t.Error("hostinfo should not be nil")
}
if hi.Hostname != "node-nkey1234" {
t.Errorf("hostname = %v, want node-nkey1234", hi.Hostname)
}
},
},
{
name: "nil_hostinfo_no_keys",
hostinfo: nil,
machineKey: "",
nodeKey: "",
wantHostname: "unknown-node",
checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) {
if hi == nil {
t.Error("hostinfo should not be nil")
}
if hi.Hostname != "unknown-node" {
t.Errorf("hostname = %v, want unknown-node", hi.Hostname)
}
},
},
{
name: "empty_hostname_no_keys",
hostinfo: &tailcfg.Hostinfo{
Hostname: "",
},
machineKey: "",
nodeKey: "",
wantHostname: "unknown-node",
checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) {
if hi == nil {
t.Error("hostinfo should not be nil")
}
if hi.Hostname != "unknown-node" {
t.Errorf("hostname = %v, want unknown-node", hi.Hostname)
}
},
},
{
name: "preserves_other_fields",
hostinfo: &tailcfg.Hostinfo{
Hostname: "test",
OS: "windows",
OSVersion: "10.0.19044",
DeviceModel: "test-device",
BackendLogID: "log123",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
wantHostname: "test",
checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) {
if hi == nil {
t.Error("hostinfo should not be nil")
}
if hi.Hostname != "test" {
t.Errorf("hostname = %v, want test", hi.Hostname)
}
if hi.OS != "windows" {
t.Errorf("OS = %v, want windows", hi.OS)
}
if hi.OSVersion != "10.0.19044" {
t.Errorf("OSVersion = %v, want 10.0.19044", hi.OSVersion)
}
if hi.DeviceModel != "test-device" {
t.Errorf("DeviceModel = %v, want test-device", hi.DeviceModel)
}
if hi.BackendLogID != "log123" {
t.Errorf("BackendLogID = %v, want log123", hi.BackendLogID)
}
},
},
{
name: "exactly_63_chars_unchanged",
hostinfo: &tailcfg.Hostinfo{
Hostname: "123456789012345678901234567890123456789012345678901234567890123",
},
machineKey: "mkey12345678",
nodeKey: "nkey12345678",
wantHostname: "123456789012345678901234567890123456789012345678901234567890123",
checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) {
if hi == nil {
t.Error("hostinfo should not be nil")
}
if len(hi.Hostname) != 63 {
t.Errorf("hostname length = %v, want 63", len(hi.Hostname))
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
gotHostname := EnsureHostname(tt.hostinfo, tt.machineKey, tt.nodeKey)
// For invalid hostnames, we just check the prefix since the random part varies
if strings.HasPrefix(tt.wantHostname, "invalid-") {
if !strings.HasPrefix(gotHostname, "invalid-") {
t.Errorf("EnsureHostname() = %v, want prefix %v", gotHostname, tt.wantHostname)
}
} else if gotHostname != tt.wantHostname {
t.Errorf("EnsureHostname() hostname = %v, want %v", gotHostname, tt.wantHostname)
}
})
}
}
func TestEnsureHostname_DNSLabelLimit(t *testing.T) {
t.Parallel()
testCases := []string{
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
"cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
"dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd",
}
for i, hostname := range testCases {
t.Run(hostname[:8], func(t *testing.T) { // distinct prefix as the subtest name
hostinfo := &tailcfg.Hostinfo{Hostname: hostname}
result := EnsureHostname(hostinfo, "mkey", "nkey")
if len(result) > 63 {
t.Errorf("test case %d: hostname length = %d, want <= 63", i, len(result))
}
})
}
}
func TestEnsureHostname_Idempotent(t *testing.T) {
t.Parallel()
originalHostinfo := &tailcfg.Hostinfo{
Hostname: "test-node",
OS: "linux",
}
hostname1 := EnsureHostname(originalHostinfo, "mkey", "nkey")
hostname2 := EnsureHostname(originalHostinfo, "mkey", "nkey")
if hostname1 != hostname2 {
t.Errorf("hostnames not equal: %v != %v", hostname1, hostname2)
}
}

File diff suppressed because it is too large

View File

@@ -1,657 +0,0 @@
package integration
import (
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"testing"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/encoding/protojson"
)
// TestAPIAuthenticationBypass tests that the API authentication middleware
// properly blocks unauthorized requests and does not leak sensitive data.
// This test reproduces the security issue described in:
// - https://github.com/juanfont/headscale/issues/2809
// - https://github.com/juanfont/headscale/pull/2810
//
// The bug: When authentication fails, the middleware writes "Unauthorized"
// but doesn't return early, allowing the handler to execute and append
// sensitive data to the response.
func TestAPIAuthenticationBypass(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
Users: []string{"user1", "user2", "user3"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("apiauthbypass"))
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
// Create an API key using the CLI
var validAPIKey string
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
apiKeyOutput, err := headscale.Execute(
[]string{
"headscale",
"apikeys",
"create",
"--expiration",
"24h",
},
)
assert.NoError(ct, err)
assert.NotEmpty(ct, apiKeyOutput)
validAPIKey = strings.TrimSpace(apiKeyOutput)
}, 20*time.Second, 1*time.Second)
// Get the API endpoint
endpoint := headscale.GetEndpoint()
apiURL := fmt.Sprintf("%s/api/v1/user", endpoint)
// Create HTTP client
client := &http.Client{
Timeout: 10 * time.Second,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec
},
}
t.Run("HTTP_NoAuthHeader", func(t *testing.T) {
// Test 1: Request without any Authorization header
// Expected: Should return 401 with ONLY "Unauthorized" text, no user data
req, err := http.NewRequest("GET", apiURL, nil)
require.NoError(t, err)
resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
// Should return 401 Unauthorized
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode,
"Expected 401 status code for request without auth header")
bodyStr := string(body)
// Should contain "Unauthorized" message
assert.Contains(t, bodyStr, "Unauthorized",
"Response should contain 'Unauthorized' message")
// Should NOT contain user data after "Unauthorized"
// This is the security bypass - if users array is present, auth was bypassed
var jsonCheck map[string]interface{}
jsonErr := json.Unmarshal(body, &jsonCheck)
// If we can unmarshal JSON and it contains "users", that's the bypass
if jsonErr == nil {
assert.NotContains(t, jsonCheck, "users",
"SECURITY ISSUE: Response should NOT contain 'users' data when unauthorized")
assert.NotContains(t, jsonCheck, "user",
"SECURITY ISSUE: Response should NOT contain 'user' data when unauthorized")
}
// Additional check: response should not contain "user1", "user2", "user3"
assert.NotContains(t, bodyStr, "user1",
"SECURITY ISSUE: Response should NOT leak user 'user1' data")
assert.NotContains(t, bodyStr, "user2",
"SECURITY ISSUE: Response should NOT leak user 'user2' data")
assert.NotContains(t, bodyStr, "user3",
"SECURITY ISSUE: Response should NOT leak user 'user3' data")
// Response should be minimal, just "Unauthorized"
// Allow some variation in response format but body should be small
assert.Less(t, len(bodyStr), 100,
"SECURITY ISSUE: Unauthorized response body should be minimal, got: %s", bodyStr)
})
t.Run("HTTP_InvalidAuthHeader", func(t *testing.T) {
// Test 2: Request with invalid Authorization header (missing "Bearer " prefix)
// Expected: Should return 401 with ONLY "Unauthorized" text, no user data
req, err := http.NewRequest("GET", apiURL, nil)
require.NoError(t, err)
req.Header.Set("Authorization", "InvalidToken")
resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode,
"Expected 401 status code for invalid auth header format")
bodyStr := string(body)
assert.Contains(t, bodyStr, "Unauthorized")
// Should not leak user data
assert.NotContains(t, bodyStr, "user1",
"SECURITY ISSUE: Response should NOT leak user data")
assert.NotContains(t, bodyStr, "user2",
"SECURITY ISSUE: Response should NOT leak user data")
assert.NotContains(t, bodyStr, "user3",
"SECURITY ISSUE: Response should NOT leak user data")
assert.Less(t, len(bodyStr), 100,
"SECURITY ISSUE: Unauthorized response should be minimal")
})
t.Run("HTTP_InvalidBearerToken", func(t *testing.T) {
// Test 3: Request with Bearer prefix but invalid token
// Expected: Should return 401 with ONLY "Unauthorized" text, no user data
// Note: Both malformed and properly formatted invalid tokens should return 401
req, err := http.NewRequest("GET", apiURL, nil)
require.NoError(t, err)
req.Header.Set("Authorization", "Bearer invalid-token-12345")
resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode,
"Expected 401 status code for invalid bearer token")
bodyStr := string(body)
assert.Contains(t, bodyStr, "Unauthorized")
// Should not leak user data
assert.NotContains(t, bodyStr, "user1",
"SECURITY ISSUE: Response should NOT leak user data")
assert.NotContains(t, bodyStr, "user2",
"SECURITY ISSUE: Response should NOT leak user data")
assert.NotContains(t, bodyStr, "user3",
"SECURITY ISSUE: Response should NOT leak user data")
assert.Less(t, len(bodyStr), 100,
"SECURITY ISSUE: Unauthorized response should be minimal")
})
t.Run("HTTP_ValidAPIKey", func(t *testing.T) {
// Test 4: Request with valid API key
// Expected: Should return 200 with user data (this is the authorized case)
req, err := http.NewRequest("GET", apiURL, nil)
require.NoError(t, err)
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", validAPIKey))
resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
// Should succeed with valid auth
assert.Equal(t, http.StatusOK, resp.StatusCode,
"Expected 200 status code with valid API key")
// Should be able to parse as protobuf JSON
var response v1.ListUsersResponse
err = protojson.Unmarshal(body, &response)
assert.NoError(t, err, "Response should be valid protobuf JSON with valid API key")
// Should contain our test users
users := response.GetUsers()
assert.Len(t, users, 3, "Should have 3 users")
userNames := make([]string, len(users))
for i, u := range users {
userNames[i] = u.GetName()
}
assert.Contains(t, userNames, "user1")
assert.Contains(t, userNames, "user2")
assert.Contains(t, userNames, "user3")
})
}
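The underlying bug class is easy to reproduce: middleware that writes the 401 but forgets to return, so the wrapped handler still runs and appends its payload to the same response. A minimal sketch of the pattern this test guards against (`validToken` is a hypothetical placeholder):

```go
package example

import "net/http"

// authMiddleware sketches the bug class: without the early return, the
// response becomes "Unauthorized" followed by the handler's JSON.
func authMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !validToken(r.Header.Get("Authorization")) {
			http.Error(w, "Unauthorized", http.StatusUnauthorized)
			return // the missing return was the bypass: next.ServeHTTP still ran
		}
		next.ServeHTTP(w, r)
	})
}

// validToken stands in for real API-key validation.
func validToken(h string) bool { return false }
```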
// TestAPIAuthenticationBypassCurl tests the same security issue using curl
// from inside a container, which is closer to how the issue was discovered.
func TestAPIAuthenticationBypassCurl(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
Users: []string{"testuser1", "testuser2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("apiauthcurl"))
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
// Create a valid API key
apiKeyOutput, err := headscale.Execute(
[]string{
"headscale",
"apikeys",
"create",
"--expiration",
"24h",
},
)
require.NoError(t, err)
validAPIKey := strings.TrimSpace(apiKeyOutput)
endpoint := headscale.GetEndpoint()
apiURL := fmt.Sprintf("%s/api/v1/user", endpoint)
t.Run("Curl_NoAuth", func(t *testing.T) {
// Execute curl from inside the headscale container without auth
curlOutput, err := headscale.Execute(
[]string{
"curl",
"-s",
"-w",
"\nHTTP_CODE:%{http_code}",
apiURL,
},
)
require.NoError(t, err)
// Parse the output
lines := strings.Split(curlOutput, "\n")
var httpCode string
var responseBody string
for _, line := range lines {
if strings.HasPrefix(line, "HTTP_CODE:") {
httpCode = strings.TrimPrefix(line, "HTTP_CODE:")
} else {
responseBody += line
}
}
// Should return 401
assert.Equal(t, "401", httpCode,
"Curl without auth should return 401")
// Should contain Unauthorized
assert.Contains(t, responseBody, "Unauthorized",
"Response should contain 'Unauthorized'")
// Should NOT leak user data
assert.NotContains(t, responseBody, "testuser1",
"SECURITY ISSUE: Should not leak user data")
assert.NotContains(t, responseBody, "testuser2",
"SECURITY ISSUE: Should not leak user data")
// Response should be small (just "Unauthorized")
assert.Less(t, len(responseBody), 100,
"SECURITY ISSUE: Unauthorized response should be minimal, got: %s", responseBody)
})
t.Run("Curl_InvalidAuth", func(t *testing.T) {
// Execute curl with invalid auth header
curlOutput, err := headscale.Execute(
[]string{
"curl",
"-s",
"-H",
"Authorization: InvalidToken",
"-w",
"\nHTTP_CODE:%{http_code}",
apiURL,
},
)
require.NoError(t, err)
lines := strings.Split(curlOutput, "\n")
var httpCode string
var responseBody string
for _, line := range lines {
if strings.HasPrefix(line, "HTTP_CODE:") {
httpCode = strings.TrimPrefix(line, "HTTP_CODE:")
} else {
responseBody += line
}
}
assert.Equal(t, "401", httpCode)
assert.Contains(t, responseBody, "Unauthorized")
assert.NotContains(t, responseBody, "testuser1",
"SECURITY ISSUE: Should not leak user data")
assert.NotContains(t, responseBody, "testuser2",
"SECURITY ISSUE: Should not leak user data")
})
t.Run("Curl_ValidAuth", func(t *testing.T) {
// Execute curl with valid API key
curlOutput, err := headscale.Execute(
[]string{
"curl",
"-s",
"-H",
fmt.Sprintf("Authorization: Bearer %s", validAPIKey),
"-w",
"\nHTTP_CODE:%{http_code}",
apiURL,
},
)
require.NoError(t, err)
lines := strings.Split(curlOutput, "\n")
var httpCode string
var responseBody string
for _, line := range lines {
if strings.HasPrefix(line, "HTTP_CODE:") {
httpCode = strings.TrimPrefix(line, "HTTP_CODE:")
} else {
responseBody += line
}
}
// Should succeed
assert.Equal(t, "200", httpCode,
"Curl with valid API key should return 200")
// Should contain user data
var response v1.ListUsersResponse
err = protojson.Unmarshal([]byte(responseBody), &response)
assert.NoError(t, err, "Response should be valid protobuf JSON")
users := response.GetUsers()
assert.Len(t, users, 2, "Should have 2 users")
})
}
// TestGRPCAuthenticationBypass tests that the gRPC authentication interceptor
// properly blocks unauthorized requests.
// This test verifies that the gRPC API does not have the same bypass issue
// as the HTTP API middleware.
func TestGRPCAuthenticationBypass(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
Users: []string{"grpcuser1", "grpcuser2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
// We need TLS for remote gRPC connections
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithTestName("grpcauthtest"),
hsic.WithTLS(),
hsic.WithConfigEnv(map[string]string{
// Enable gRPC on the standard port
"HEADSCALE_GRPC_LISTEN_ADDR": "0.0.0.0:50443",
}),
)
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
// Create a valid API key
apiKeyOutput, err := headscale.Execute(
[]string{
"headscale",
"apikeys",
"create",
"--expiration",
"24h",
},
)
require.NoError(t, err)
validAPIKey := strings.TrimSpace(apiKeyOutput)
// Get the gRPC endpoint
// For gRPC, we need to use the hostname and port 50443
grpcAddress := fmt.Sprintf("%s:50443", headscale.GetHostname())
t.Run("gRPC_NoAPIKey", func(t *testing.T) {
// Test 1: Try to use CLI without API key (should fail)
// When HEADSCALE_CLI_ADDRESS is set but HEADSCALE_CLI_API_KEY is not set,
// the CLI should fail immediately
_, err := headscale.Execute(
[]string{
"sh", "-c",
fmt.Sprintf("HEADSCALE_CLI_ADDRESS=%s HEADSCALE_CLI_INSECURE=true headscale users list --output json 2>&1", grpcAddress),
},
)
// Should fail - CLI exits when API key is missing
assert.Error(t, err,
"gRPC connection without API key should fail")
})
t.Run("gRPC_InvalidAPIKey", func(t *testing.T) {
// Test 2: Try to use CLI with invalid API key (should fail with auth error)
output, err := headscale.Execute(
[]string{
"sh", "-c",
fmt.Sprintf("HEADSCALE_CLI_ADDRESS=%s HEADSCALE_CLI_API_KEY=invalid-key-12345 HEADSCALE_CLI_INSECURE=true headscale users list --output json 2>&1", grpcAddress),
},
)
// Should fail with authentication error
assert.Error(t, err,
"gRPC connection with invalid API key should fail")
// Should contain authentication error message
outputStr := strings.ToLower(output)
assert.True(t,
strings.Contains(outputStr, "unauthenticated") ||
strings.Contains(outputStr, "invalid token") ||
strings.Contains(outputStr, "failed to validate token") ||
strings.Contains(outputStr, "authentication"),
"Error should indicate authentication failure, got: %s", output)
// Should NOT leak user data
assert.NotContains(t, output, "grpcuser1",
"SECURITY ISSUE: gRPC should not leak user data with invalid auth")
assert.NotContains(t, output, "grpcuser2",
"SECURITY ISSUE: gRPC should not leak user data with invalid auth")
})
t.Run("gRPC_ValidAPIKey", func(t *testing.T) {
// Test 3: Use CLI with valid API key (should succeed)
output, err := headscale.Execute(
[]string{
"sh", "-c",
fmt.Sprintf("HEADSCALE_CLI_ADDRESS=%s HEADSCALE_CLI_API_KEY=%s HEADSCALE_CLI_INSECURE=true headscale users list --output json", grpcAddress, validAPIKey),
},
)
// Should succeed
assert.NoError(t, err,
"gRPC connection with valid API key should succeed, output: %s", output)
// CLI outputs the users array directly, not wrapped in ListUsersResponse
// Parse as JSON array (CLI uses json.Marshal, not protojson)
var users []*v1.User
err = json.Unmarshal([]byte(output), &users)
assert.NoError(t, err, "Response should be valid JSON array")
assert.Len(t, users, 2, "Should have 2 users")
userNames := make([]string, len(users))
for i, u := range users {
userNames[i] = u.GetName()
}
assert.Contains(t, userNames, "grpcuser1")
assert.Contains(t, userNames, "grpcuser2")
})
}
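// For context, the server-side check the three subtests above exercise can be
// pictured as a standard gRPC unary interceptor that rejects a request before
// the handler runs. This is an illustrative sketch only: validateToken is a
// hypothetical stand-in for headscale's API-key lookup, the real interceptor
// in hscontrol differs in detail, and it assumes google.golang.org/grpc,
// .../grpc/codes, .../grpc/metadata and .../grpc/status are imported.
func exampleAuthInterceptor(validateToken func(string) bool) grpc.UnaryServerInterceptor {
	return func(
		ctx context.Context,
		req any,
		info *grpc.UnaryServerInfo,
		handler grpc.UnaryHandler,
	) (any, error) {
		md, ok := metadata.FromIncomingContext(ctx)
		if !ok {
			return nil, status.Error(codes.Unauthenticated, "missing metadata")
		}
		auth := md.Get("authorization")
		if len(auth) == 0 || !strings.HasPrefix(auth[0], "Bearer ") {
			return nil, status.Error(codes.Unauthenticated, "missing bearer token")
		}
		if !validateToken(strings.TrimPrefix(auth[0], "Bearer ")) {
			return nil, status.Error(codes.Unauthenticated, "failed to validate token")
		}
		// Only authenticated requests ever reach the actual RPC handler.
		return handler(ctx, req)
	}
}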
// TestCLIWithConfigAuthenticationBypass tests that the headscale CLI
// with --config flag does not have authentication bypass issues when
// connecting to a remote server.
// Note: when using --config with the local unix socket, no auth is needed.
// This test focuses on remote gRPC connections which require API keys.
func TestCLIWithConfigAuthenticationBypass(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
Users: []string{"cliuser1", "cliuser2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithTestName("cliconfigauth"),
hsic.WithTLS(),
hsic.WithConfigEnv(map[string]string{
"HEADSCALE_GRPC_LISTEN_ADDR": "0.0.0.0:50443",
}),
)
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
// Create a valid API key
apiKeyOutput, err := headscale.Execute(
[]string{
"headscale",
"apikeys",
"create",
"--expiration",
"24h",
},
)
require.NoError(t, err)
validAPIKey := strings.TrimSpace(apiKeyOutput)
grpcAddress := fmt.Sprintf("%s:50443", headscale.GetHostname())
// Create a config file for testing
configWithoutKey := fmt.Sprintf(`
cli:
address: %s
timeout: 5s
insecure: true
`, grpcAddress)
configWithInvalidKey := fmt.Sprintf(`
cli:
address: %s
api_key: invalid-key-12345
timeout: 5s
insecure: true
`, grpcAddress)
configWithValidKey := fmt.Sprintf(`
cli:
address: %s
api_key: %s
timeout: 5s
insecure: true
`, grpcAddress, validAPIKey)
t.Run("CLI_Config_NoAPIKey", func(t *testing.T) {
// Create config file without API key
err := headscale.WriteFile("/tmp/config_no_key.yaml", []byte(configWithoutKey))
require.NoError(t, err)
// Try to use CLI with config that has no API key
_, err = headscale.Execute(
[]string{
"headscale",
"--config", "/tmp/config_no_key.yaml",
"users", "list",
"--output", "json",
},
)
// Should fail
assert.Error(t, err,
"CLI with config missing API key should fail")
})
t.Run("CLI_Config_InvalidAPIKey", func(t *testing.T) {
// Create config file with invalid API key
err := headscale.WriteFile("/tmp/config_invalid_key.yaml", []byte(configWithInvalidKey))
require.NoError(t, err)
// Try to use CLI with invalid API key
output, err := headscale.Execute(
[]string{
"sh", "-c",
"headscale --config /tmp/config_invalid_key.yaml users list --output json 2>&1",
},
)
// Should fail
assert.Error(t, err,
"CLI with invalid API key should fail")
// Should indicate authentication failure
outputStr := strings.ToLower(output)
assert.True(t,
strings.Contains(outputStr, "unauthenticated") ||
strings.Contains(outputStr, "invalid token") ||
strings.Contains(outputStr, "failed to validate token") ||
strings.Contains(outputStr, "authentication"),
"Error should indicate authentication failure, got: %s", output)
// Should NOT leak user data
assert.NotContains(t, output, "cliuser1",
"SECURITY ISSUE: CLI should not leak user data with invalid auth")
assert.NotContains(t, output, "cliuser2",
"SECURITY ISSUE: CLI should not leak user data with invalid auth")
})
t.Run("CLI_Config_ValidAPIKey", func(t *testing.T) {
// Create config file with valid API key
err := headscale.WriteFile("/tmp/config_valid_key.yaml", []byte(configWithValidKey))
require.NoError(t, err)
// Use CLI with valid API key
output, err := headscale.Execute(
[]string{
"headscale",
"--config", "/tmp/config_valid_key.yaml",
"users", "list",
"--output", "json",
},
)
// Should succeed
assert.NoError(t, err,
"CLI with valid API key should succeed")
// CLI outputs the users array directly, not wrapped in ListUsersResponse
// Parse as JSON array (CLI uses json.Marshal, not protojson)
var users []*v1.User
err = json.Unmarshal([]byte(output), &users)
assert.NoError(t, err, "Response should be valid JSON array")
assert.Len(t, users, 2, "Should have 2 users")
userNames := make([]string, len(users))
for i, u := range users {
userNames[i] = u.GetName()
}
assert.Contains(t, userNames, "cliuser1")
assert.Contains(t, userNames, "cliuser2")
})
}
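// Both valid-key subtests above parse the CLI output as a bare JSON array,
// while the HTTP API tests parse a protobuf JSON object. The two hypothetical
// helpers below only illustrate that distinction; they are not part of the
// suite.
func parseUsersFromCLI(output string) ([]*v1.User, error) {
	// The CLI renders with json.Marshal, so the users arrive as a bare array.
	var users []*v1.User
	if err := json.Unmarshal([]byte(output), &users); err != nil {
		return nil, err
	}
	return users, nil
}

func parseUsersFromAPI(body string) ([]*v1.User, error) {
	// The HTTP API returns protobuf JSON, so protojson is required; it accepts
	// proto field names and string-encoded 64-bit integers.
	var response v1.ListUsersResponse
	if err := protojson.Unmarshal([]byte(body), &response); err != nil {
		return nil, err
	}
	return response.GetUsers(), nil
}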

View File

@@ -28,7 +28,7 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
opts := []hsic.Option{
@@ -43,25 +43,31 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
}
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, opts...)
requireNoErrHeadscaleEnv(t, err)
assertNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
assertNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
assertNoErrListClientIPs(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
assertNoErrSync(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
assertNoErrGetHeadscale(t, err)
expectedNodes := collectExpectedNodeIDs(t, allClients)
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected", 120*time.Second)
expectedNodes := make([]types.NodeID, 0, len(allClients))
for _, client := range allClients {
status := client.MustStatus()
nodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)
assertNoErr(t, err)
expectedNodes = append(expectedNodes, types.NodeID(nodeID))
}
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected", 30*time.Second)
// Validate that all nodes have NetInfo and DERP servers before logout
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP before logout", 3*time.Minute)
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP before logout", 1*time.Minute)
// assertClientsState(t, allClients)
@@ -74,22 +80,15 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
clientIPs[client] = ips
}
var listNodes []*v1.Node
var nodeCountBeforeLogout int
assert.EventuallyWithT(t, func(c *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, listNodes, len(allClients))
for _, node := range listNodes {
assertLastSeenSetWithCollect(c, node)
}
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list before logout")
nodeCountBeforeLogout = len(listNodes)
listNodes, err := headscale.ListNodes()
assert.Len(t, allClients, len(listNodes))
nodeCountBeforeLogout := len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)
for _, node := range listNodes {
assertLastSeenSet(t, node)
}
for _, client := range allClients {
err := client.Logout()
if err != nil {
@@ -98,20 +97,19 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
}
err = scenario.WaitForTailscaleLogout()
requireNoErrLogout(t, err)
assertNoErrLogout(t, err)
// After taking down all nodes, verify all systems show nodes offline
requireAllClientsOnline(t, headscale, expectedNodes, false, "all nodes should have logged out", 120*time.Second)
t.Logf("all clients logged out")
t.Logf("Validating node persistence after logout at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes after logout")
assert.Len(ct, listNodes, nodeCountBeforeLogout, "Node count should match before logout count - expected %d nodes, got %d", nodeCountBeforeLogout, len(listNodes))
}, 30*time.Second, 2*time.Second, "validating node persistence after logout (nodes should remain in database)")
assert.NoError(ct, err)
assert.Len(ct, listNodes, nodeCountBeforeLogout, "Node count should match before logout count")
}, 20*time.Second, 1*time.Second)
for _, node := range listNodes {
assertLastSeenSet(t, node)
@@ -127,7 +125,7 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
}
userMap, err := headscale.MapUsers()
require.NoError(t, err)
assertNoErr(t, err)
for _, userName := range spec.Users {
key, err := scenario.CreatePreAuthKey(userMap[userName].GetId(), true, false)
@@ -141,13 +139,12 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
}
}
t.Logf("Validating node persistence after relogin at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes after relogin")
assert.Len(ct, listNodes, nodeCountBeforeLogout, "Node count should remain unchanged after relogin - expected %d nodes, got %d", nodeCountBeforeLogout, len(listNodes))
}, 60*time.Second, 2*time.Second, "validating node count stability after same-user auth key relogin")
assert.NoError(ct, err)
assert.Len(ct, listNodes, nodeCountBeforeLogout, "Node count should match after HTTPS reconnection")
}, 30*time.Second, 2*time.Second)
for _, node := range listNodes {
assertLastSeenSet(t, node)
@@ -155,15 +152,11 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected to batcher", 120*time.Second)
// Wait for Tailscale sync before validating NetInfo to ensure proper state propagation
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// Validate that all nodes have NetInfo and DERP servers after reconnection
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP after reconnection", 3*time.Minute)
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP after reconnection", 1*time.Minute)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
assertNoErrSync(t, err)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
@@ -195,24 +188,78 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
}
}
assert.EventuallyWithT(t, func(c *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, listNodes, nodeCountBeforeLogout)
for _, node := range listNodes {
assertLastSeenSetWithCollect(c, node)
}
}, 10*time.Second, 200*time.Millisecond, "Waiting for node list after relogin")
listNodes, err = headscale.ListNodes()
require.Len(t, listNodes, nodeCountBeforeLogout)
for _, node := range listNodes {
assertLastSeenSet(t, node)
}
})
}
}
// requireAllClientsNetInfoAndDERP validates that all nodes have NetInfo in the database
// and a valid DERP server based on the NetInfo. This function follows the pattern of
// requireAllClientsOnline by using hsic.DebugNodeStore to get the database state.
func requireAllClientsNetInfoAndDERP(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID, message string, timeout time.Duration) {
t.Helper()
startTime := time.Now()
t.Logf("requireAllClientsNetInfoAndDERP: Starting validation at %s - %s", startTime.Format(TimestampFormat), message)
require.EventuallyWithT(t, func(c *assert.CollectT) {
// Get nodestore state
nodeStore, err := headscale.DebugNodeStore()
assert.NoError(c, err, "Failed to get nodestore debug info")
if err != nil {
return
}
// Validate node counts first
expectedCount := len(expectedNodes)
assert.Equal(c, expectedCount, len(nodeStore), "NodeStore total nodes mismatch")
// Check each expected node
for _, nodeID := range expectedNodes {
node, exists := nodeStore[nodeID]
assert.True(c, exists, "Node %d not found in nodestore", nodeID)
if !exists {
continue
}
// Validate that the node has Hostinfo
assert.NotNil(c, node.Hostinfo, "Node %d (%s) should have Hostinfo", nodeID, node.Hostname)
if node.Hostinfo == nil {
continue
}
// Validate that the node has NetInfo
assert.NotNil(c, node.Hostinfo.NetInfo, "Node %d (%s) should have NetInfo in Hostinfo", nodeID, node.Hostname)
if node.Hostinfo.NetInfo == nil {
continue
}
// Validate that the node has a valid DERP server (PreferredDERP should be > 0)
preferredDERP := node.Hostinfo.NetInfo.PreferredDERP
assert.Greater(c, preferredDERP, 0, "Node %d (%s) should have a valid DERP server (PreferredDERP > 0), got %d", nodeID, node.Hostname, preferredDERP)
t.Logf("Node %d (%s) has valid NetInfo with DERP server %d", nodeID, node.Hostname, preferredDERP)
}
}, timeout, 2*time.Second, message)
endTime := time.Now()
duration := endTime.Sub(startTime)
t.Logf("requireAllClientsNetInfoAndDERP: Completed validation at %s - Duration: %v - %s", endTime.Format(TimestampFormat), duration, message)
}
func assertLastSeenSet(t *testing.T, node *v1.Node) {
assert.NotNil(t, node)
assert.NotNil(t, node.GetLastSeen())
}
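// collectExpectedNodeIDs is called throughout these tests but its definition
// is not shown in this diff. Judging from the inline loop it replaces above, a
// sketch would look roughly like this (TailscaleClient is the suite's client
// interface; treat the details as assumptions):
func collectExpectedNodeIDsSketch(t *testing.T, clients []TailscaleClient) []types.NodeID {
	t.Helper()
	expectedNodes := make([]types.NodeID, 0, len(clients))
	for _, client := range clients {
		// Self.ID is the client's stable node ID; headscale IDs parse as uint64.
		status := client.MustStatus()
		nodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)
		require.NoError(t, err)
		expectedNodes = append(expectedNodes, types.NodeID(nodeID))
	}
	return expectedNodes
}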
// This test will first log in two sets of nodes to two sets of users, then
// it will log out all nodes and log them in as user1 using a pre-auth key.
// This should create new nodes for user1 while preserving the original nodes for user2.
// Pre-auth key re-authentication with a different user creates new nodes rather than transferring existing ones.
// it will log out all users from user2 and log them in as user1.
// This should leave us with all nodes connected to user1, while user2
// still has nodes, but they are not connected.
func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
IntegrationSkip(t)
@@ -222,8 +269,7 @@ func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{},
@@ -231,36 +277,22 @@ func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
hsic.WithTLS(),
hsic.WithDERPAsIP(),
)
requireNoErrHeadscaleEnv(t, err)
assertNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
assertNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
assertNoErrSync(t, err)
// assertClientsState(t, allClients)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
assertNoErrGetHeadscale(t, err)
// Collect expected node IDs for validation
expectedNodes := collectExpectedNodeIDs(t, allClients)
// Validate initial connection state
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected after initial login", 120*time.Second)
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP after initial login", 3*time.Minute)
var listNodes []*v1.Node
var nodeCountBeforeLogout int
assert.EventuallyWithT(t, func(c *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, listNodes, len(allClients))
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list before logout")
nodeCountBeforeLogout = len(listNodes)
listNodes, err := headscale.ListNodes()
assert.Len(t, allClients, len(listNodes))
nodeCountBeforeLogout := len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)
for _, client := range allClients {
@@ -271,15 +303,12 @@ func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
}
err = scenario.WaitForTailscaleLogout()
requireNoErrLogout(t, err)
// Validate that all nodes are offline after logout
requireAllClientsOnline(t, headscale, expectedNodes, false, "all nodes should be offline after logout", 120*time.Second)
assertNoErrLogout(t, err)
t.Logf("all clients logged out")
userMap, err := headscale.MapUsers()
require.NoError(t, err)
assertNoErr(t, err)
// Create a new authkey for user1, to be used for all clients
key, err := scenario.CreatePreAuthKey(userMap["user1"].GetId(), true, false)
@@ -297,43 +326,28 @@ func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
}
var user1Nodes []*v1.Node
t.Logf("Validating user1 node count after relogin at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
user1Nodes, err = headscale.ListNodes("user1")
assert.NoError(ct, err, "Failed to list nodes for user1 after relogin")
assert.Len(ct, user1Nodes, len(allClients), "User1 should have all %d clients after relogin, got %d nodes", len(allClients), len(user1Nodes))
}, 60*time.Second, 2*time.Second, "validating user1 has all client nodes after auth key relogin")
assert.NoError(ct, err)
assert.Len(ct, user1Nodes, len(allClients), "User1 should have all clients after re-login")
}, 20*time.Second, 1*time.Second)
// Collect expected node IDs for user1 after relogin
expectedUser1Nodes := make([]types.NodeID, 0, len(user1Nodes))
for _, node := range user1Nodes {
expectedUser1Nodes = append(expectedUser1Nodes, types.NodeID(node.GetId()))
}
// Validate connection state after relogin as user1
requireAllClientsOnline(t, headscale, expectedUser1Nodes, true, "all user1 nodes should be connected after relogin", 120*time.Second)
requireAllClientsNetInfoAndDERP(t, headscale, expectedUser1Nodes, "all user1 nodes should have NetInfo and DERP after relogin", 3*time.Minute)
// Validate that user2 still has their original nodes after user1's re-authentication
// When nodes re-authenticate with a different user's pre-auth key, NEW nodes are created
// for the new user. The original nodes remain with the original user.
// Validate that all the old nodes are still present with user2
var user2Nodes []*v1.Node
t.Logf("Validating user2 node persistence after user1 relogin at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
user2Nodes, err = headscale.ListNodes("user2")
assert.NoError(ct, err, "Failed to list nodes for user2 after user1 relogin")
assert.Len(ct, user2Nodes, len(allClients)/2, "User2 should still have %d clients after user1 relogin, got %d nodes", len(allClients)/2, len(user2Nodes))
}, 30*time.Second, 2*time.Second, "validating user2 nodes persist after user1 relogin (should not be affected)")
assert.NoError(ct, err)
assert.Len(ct, user2Nodes, len(allClients)/2, "User2 should have half the clients")
}, 20*time.Second, 1*time.Second)
t.Logf("Validating client login states after user switch at %s", time.Now().Format(TimestampFormat))
for _, client := range allClients {
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())
assert.Equal(ct, "user1@test.no", status.User[status.Self.UserID].LoginName, "Client %s should be logged in as user1 after user switch, got %s", client.Hostname(), status.User[status.Self.UserID].LoginName)
}, 30*time.Second, 2*time.Second, fmt.Sprintf("validating %s is logged in as user1 after auth key user switch", client.Hostname()))
assert.Equal(ct, "user1@test.no", status.User[status.Self.UserID].LoginName, "Client %s should be logged in as user1", client.Hostname())
}, 30*time.Second, 2*time.Second)
}
}
@@ -348,7 +362,7 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
opts := []hsic.Option{
@@ -362,13 +376,13 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
}
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, opts...)
requireNoErrHeadscaleEnv(t, err)
assertNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
assertNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
assertNoErrSync(t, err)
// assertClientsState(t, allClients)
@@ -382,25 +396,11 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
}
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
assertNoErrGetHeadscale(t, err)
// Collect expected node IDs for validation
expectedNodes := collectExpectedNodeIDs(t, allClients)
// Validate initial connection state
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected after initial login", 120*time.Second)
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP after initial login", 3*time.Minute)
var listNodes []*v1.Node
var nodeCountBeforeLogout int
assert.EventuallyWithT(t, func(c *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, listNodes, len(allClients))
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list before logout")
nodeCountBeforeLogout = len(listNodes)
listNodes, err := headscale.ListNodes()
assert.Len(t, allClients, len(listNodes))
nodeCountBeforeLogout := len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)
for _, client := range allClients {
@@ -411,10 +411,7 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
}
err = scenario.WaitForTailscaleLogout()
requireNoErrLogout(t, err)
// Validate that all nodes are offline after logout
requireAllClientsOnline(t, headscale, expectedNodes, false, "all nodes should be offline after logout", 120*time.Second)
assertNoErrLogout(t, err)
t.Logf("all clients logged out")
@@ -428,7 +425,7 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
}
userMap, err := headscale.MapUsers()
require.NoError(t, err)
assertNoErr(t, err)
for _, userName := range spec.Users {
key, err := scenario.CreatePreAuthKey(userMap[userName].GetId(), true, false)
@@ -446,8 +443,7 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
"expire",
key.GetKey(),
})
require.NoError(t, err)
require.NoError(t, err)
assertNoErr(t, err)
err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())
assert.ErrorContains(t, err, "authkey expired")
@@ -455,4 +451,3 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
})
}
}

File diff suppressed because it is too large

View File

@@ -1,19 +1,15 @@
package integration
import (
"fmt"
"net/netip"
"slices"
"testing"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/integrationutil"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
@@ -37,16 +33,16 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
hsic.WithDERPAsIP(),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
assertNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
assertNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
assertNoErrListClientIPs(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
assertNoErrSync(t, err)
// assertClientsState(t, allClients)
@@ -58,7 +54,7 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
}
func TestAuthWebFlowLogoutAndReloginSameUser(t *testing.T) {
func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
@@ -67,7 +63,7 @@ func TestAuthWebFlowLogoutAndReloginSameUser(t *testing.T) {
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
assertNoErr(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnvWithLoginURL(
@@ -76,16 +72,16 @@ func TestAuthWebFlowLogoutAndReloginSameUser(t *testing.T) {
hsic.WithDERPAsIP(),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
assertNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
assertNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
assertNoErrListClientIPs(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
assertNoErrSync(t, err)
// assertClientsState(t, allClients)
@@ -97,22 +93,15 @@ func TestAuthWebFlowLogoutAndReloginSameUser(t *testing.T) {
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
// Collect expected node IDs for validation
expectedNodes := collectExpectedNodeIDs(t, allClients)
// Validate initial connection state
validateInitialConnection(t, headscale, expectedNodes)
assertNoErrGetHeadscale(t, err)
var listNodes []*v1.Node
t.Logf("Validating initial node count after web auth at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes after web authentication")
assert.Len(ct, listNodes, len(allClients), "Expected %d nodes after web auth, got %d", len(allClients), len(listNodes))
}, 30*time.Second, 2*time.Second, "validating node count matches client count after web authentication")
assert.NoError(ct, err)
assert.Len(ct, listNodes, len(allClients), "Node count should match client count after login")
}, 20*time.Second, 1*time.Second)
nodeCountBeforeLogout := len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)
@@ -133,10 +122,7 @@ func TestAuthWebFlowLogoutAndReloginSameUser(t *testing.T) {
}
err = scenario.WaitForTailscaleLogout()
requireNoErrLogout(t, err)
// Validate that all nodes are offline after logout
validateLogoutComplete(t, headscale, expectedNodes)
assertNoErrLogout(t, err)
t.Logf("all clients logged out")
@@ -149,20 +135,8 @@ func TestAuthWebFlowLogoutAndReloginSameUser(t *testing.T) {
t.Logf("all clients logged in again")
t.Logf("Validating node persistence after logout at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes after web flow logout")
assert.Len(ct, listNodes, nodeCountBeforeLogout, "Node count should remain unchanged after logout - expected %d nodes, got %d", nodeCountBeforeLogout, len(listNodes))
}, 60*time.Second, 2*time.Second, "validating node persistence in database after web flow logout")
t.Logf("node count first login: %d, after relogin: %d", nodeCountBeforeLogout, len(listNodes))
// Validate connection state after relogin
validateReloginComplete(t, headscale, expectedNodes)
allIps, err = scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
assertNoErrListClientIPs(t, err)
allAddrs = lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
@@ -171,6 +145,14 @@ func TestAuthWebFlowLogoutAndReloginSameUser(t *testing.T) {
success = pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err)
assert.Len(ct, listNodes, nodeCountBeforeLogout, "Node count should match before logout count after re-login")
}, 20*time.Second, 1*time.Second)
t.Logf("node count first login: %d, after relogin: %d", nodeCountBeforeLogout, len(listNodes))
for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
@@ -198,166 +180,3 @@ func TestAuthWebFlowLogoutAndReloginSameUser(t *testing.T) {
t.Logf("all clients IPs are the same")
}
// TestAuthWebFlowLogoutAndReloginNewUser tests the scenario where multiple Tailscale clients
// initially authenticate using the web-based authentication flow (where users visit a URL
// in their browser to authenticate), then all clients log out and log back in as a different user.
//
// This test validates the "user switching" behavior in headscale's web authentication flow:
// - Multiple clients authenticate via web flow, each to their respective users (user1, user2)
// - All clients log out simultaneously
// - All clients log back in via web flow, but this time they all authenticate as user1
// - The test verifies that user1 ends up with all the client nodes
// - The test verifies that user2's original nodes still exist in the database but are offline
// - The test verifies network connectivity works after the user switch
//
// This scenario is important for organizations that need to reassign devices between users
// or consolidate multiple user accounts. It ensures that headscale properly handles
// the security implications of user switching while maintaining node persistence in the database.
//
// The test uses headscale's web authentication flow, which is the most user-friendly method
// where authentication happens through a web browser rather than pre-shared keys or OIDC.
func TestAuthWebFlowLogoutAndReloginNewUser(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnvWithLoginURL(
nil,
hsic.WithTestName("webflowrelnewuser"),
hsic.WithDERPAsIP(),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
// Collect expected node IDs for validation
expectedNodes := collectExpectedNodeIDs(t, allClients)
// Validate initial connection state
validateInitialConnection(t, headscale, expectedNodes)
var listNodes []*v1.Node
t.Logf("Validating initial node count after web auth at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes after initial web authentication")
assert.Len(ct, listNodes, len(allClients), "Expected %d nodes after web auth, got %d", len(allClients), len(listNodes))
}, 30*time.Second, 2*time.Second, "validating node count matches client count after initial web authentication")
nodeCountBeforeLogout := len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)
// Log out all clients
for _, client := range allClients {
err := client.Logout()
if err != nil {
t.Fatalf("failed to logout client %s: %s", client.Hostname(), err)
}
}
err = scenario.WaitForTailscaleLogout()
requireNoErrLogout(t, err)
// Validate that all nodes are offline after logout
validateLogoutComplete(t, headscale, expectedNodes)
t.Logf("all clients logged out")
// Log all clients back in as user1 using web flow
// We manually iterate over all clients and authenticate each one as user1
// This tests the cross-user re-authentication behavior where ALL clients
// (including those originally from user2) are registered to user1.
for _, client := range allClients {
loginURL, err := client.LoginWithURL(headscale.GetEndpoint())
if err != nil {
t.Fatalf("failed to get login URL for client %s: %s", client.Hostname(), err)
}
body, err := doLoginURL(client.Hostname(), loginURL)
if err != nil {
t.Fatalf("failed to complete login for client %s: %s", client.Hostname(), err)
}
// Register all clients as user1 (this is where cross-user registration happens)
// This simulates: headscale nodes register --user user1 --key <key>
scenario.runHeadscaleRegister("user1", body)
}
// Wait for all clients to reach running state
for _, client := range allClients {
err := client.WaitForRunning(integrationutil.PeerSyncTimeout())
if err != nil {
t.Fatalf("%s tailscale node has not reached running: %s", client.Hostname(), err)
}
}
t.Logf("all clients logged back in as user1")
var user1Nodes []*v1.Node
t.Logf("Validating user1 node count after relogin at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
user1Nodes, err = headscale.ListNodes("user1")
assert.NoError(ct, err, "Failed to list nodes for user1 after web flow relogin")
assert.Len(ct, user1Nodes, len(allClients), "User1 should have all %d clients after web flow relogin, got %d nodes", len(allClients), len(user1Nodes))
}, 60*time.Second, 2*time.Second, "validating user1 has all client nodes after web flow user switch relogin")
// Collect expected node IDs for user1 after relogin
expectedUser1Nodes := make([]types.NodeID, 0, len(user1Nodes))
for _, node := range user1Nodes {
expectedUser1Nodes = append(expectedUser1Nodes, types.NodeID(node.GetId()))
}
// Validate connection state after relogin as user1
validateReloginComplete(t, headscale, expectedUser1Nodes)
// Validate that user2's old nodes still exist in database (but are expired/offline)
// When CLI registration creates new nodes for user1, user2's old nodes remain
var user2Nodes []*v1.Node
t.Logf("Validating user2 old nodes remain in database after CLI registration to user1 at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
user2Nodes, err = headscale.ListNodes("user2")
assert.NoError(ct, err, "Failed to list nodes for user2 after CLI registration to user1")
assert.Len(ct, user2Nodes, len(allClients)/2, "User2 should still have %d old nodes (likely expired) after CLI registration to user1, got %d nodes", len(allClients)/2, len(user2Nodes))
}, 30*time.Second, 2*time.Second, "validating user2 old nodes remain in database after CLI registration to user1")
t.Logf("Validating client login states after web flow user switch at %s", time.Now().Format(TimestampFormat))
for _, client := range allClients {
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())
assert.Equal(ct, "user1@test.no", status.User[status.Self.UserID].LoginName, "Client %s should be logged in as user1 after web flow user switch, got %s", client.Hostname(), status.User[status.Self.UserID].LoginName)
}, 30*time.Second, 2*time.Second, fmt.Sprintf("validating %s is logged in as user1 after web flow user switch", client.Hostname()))
}
// Test connectivity after user switch
allIps, err = scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d after web flow user switch", success, len(allClients)*len(allIps))
}
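// The validate* helpers invoked above are not shown in this diff. Based on the
// equivalent direct require* calls in the auth-key tests, hypothetical
// sketches could look like this (timeouts and messages are assumptions):
func validateInitialConnectionSketch(t *testing.T, headscale ControlServer, nodes []types.NodeID) {
	t.Helper()
	requireAllClientsOnline(t, headscale, nodes, true, "all clients should be connected after initial login", 120*time.Second)
	requireAllClientsNetInfoAndDERP(t, headscale, nodes, "all clients should have NetInfo and DERP after initial login", 3*time.Minute)
}

func validateLogoutCompleteSketch(t *testing.T, headscale ControlServer, nodes []types.NodeID) {
	t.Helper()
	requireAllClientsOnline(t, headscale, nodes, false, "all nodes should be offline after logout", 120*time.Second)
}

func validateReloginCompleteSketch(t *testing.T, headscale ControlServer, nodes []types.NodeID) {
	t.Helper()
	requireAllClientsOnline(t, headscale, nodes, true, "all nodes should be connected after relogin", 120*time.Second)
	requireAllClientsNetInfoAndDERP(t, headscale, nodes, "all nodes should have NetInfo and DERP after relogin", 3*time.Minute)
}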

Some files were not shown because too many files have changed in this diff