Mirror of https://github.com/juanfont/headscale.git (synced 2026-03-04 04:10:03 +01:00)

Compare commits: `dependabot`...`copilot/de` (43 commits)
| SHA1 |
|---|
| 5fd393c507 |
| aafe727cb9 |
| 2ac534dd30 |
| c09556dd22 |
| f9bb88ad24 |
| 456a5d5cce |
| ddbd3e14ba |
| 0a43aab8f5 |
| 4bd614a559 |
| 19a33394f6 |
| 84fe3de251 |
| 450a7b15ec |
| 64b7142e22 |
| 52d27d58f0 |
| e68e2288f7 |
| c808587de0 |
| 2bf1200483 |
| 66826232ff |
| 1cdea7ed9b |
| 2c9e98d3f5 |
| 8becb7e54a |
| ed38d00aaa |
| 8010cc574e |
| c97d0ff23d |
| 047dbda136 |
| 2a1392fb5b |
| 46477b8021 |
| c87471136b |
| e7a28a14af |
| 4912769ab3 |
| c07cc491bf |
| c2a58a304d |
| fddc7117e4 |
| 881a6b9227 |
| 3fbde7a1b6 |
| c4a8c038cd |
| 022098fe4e |
| bd35fcf338 |
| 2d680b5ebb |
| ed3a9c8d6d |
| 4de56c40d8 |
| 40b3d54c1f |
| 30d12dafed |
.github/workflows/build.yml (2 changed lines, vendored)
@@ -94,6 +94,8 @@ jobs:
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}

- name: Run go cross compile
env:
CGO_ENABLED: 0
run:
env ${{ matrix.env }} nix develop --command -- go build -o "headscale"
./cmd/headscale

@@ -62,6 +62,7 @@ jobs:
'**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
- name: Run Integration Test
if: always() && steps.changed-files.outputs.files == 'true'
run:
nix develop --command -- hi run --stats --ts-memory-limit=300 --hs-memory-limit=1500 "^${{ inputs.test }}$" \
--timeout=120m \
.github/workflows/test-integration.yaml (14 changed lines, vendored)
@@ -23,6 +23,12 @@ jobs:
- TestPolicyUpdateWhileRunningWithCLIInDatabase
- TestACLAutogroupMember
- TestACLAutogroupTagged
- TestACLAutogroupSelf
- TestACLPolicyPropagationOverTime
- TestAPIAuthenticationBypass
- TestAPIAuthenticationBypassCurl
- TestGRPCAuthenticationBypass
- TestCLIWithConfigAuthenticationBypass
- TestAuthKeyLogoutAndReloginSameUser
- TestAuthKeyLogoutAndReloginNewUser
- TestAuthKeyLogoutAndReloginSameUserExpiredKey
@@ -31,8 +37,11 @@ jobs:
- TestOIDC024UserCreation
- TestOIDCAuthenticationWithPKCE
- TestOIDCReloginSameNodeNewUser
- TestOIDCFollowUpUrl
- TestOIDCReloginSameNodeSameUser
- TestAuthWebFlowAuthenticationPingAll
- TestAuthWebFlowLogoutAndRelogin
- TestAuthWebFlowLogoutAndReloginSameUser
- TestAuthWebFlowLogoutAndReloginNewUser
- TestUserCommand
- TestPreAuthKeyCommand
- TestPreAuthKeyCommandWithoutExpiry
@@ -53,6 +62,7 @@ jobs:
- TestDERPServerScenario
- TestDERPServerWebsocketScenario
- TestPingAllByIP
- TestPingAllByIPRandomClientPort
- TestPingAllByIPPublicDERP
- TestEphemeral
- TestEphemeralInAlternateTimezone
@@ -61,6 +71,7 @@ jobs:
- TestTaildrop
- TestUpdateHostnameFromClient
- TestExpireNode
- TestSetNodeExpiryInFuture
- TestNodeOnlineStatus
- TestPingAllByIPManyUpDown
- Test2118DeletingOnlineNodePanics
@@ -79,6 +90,7 @@ jobs:
- TestSSHNoSSHConfigured
- TestSSHIsBlockedInACL
- TestSSHUserOnlyIsolation
- TestSSHAutogroupSelf
uses: ./.github/workflows/integration-test-template.yml
with:
test: ${{ matrix.test }}
@@ -2,12 +2,39 @@
version: 2
before:
hooks:
- go mod tidy -compat=1.24
- go mod tidy -compat=1.25
- go mod vendor

release:
prerelease: auto
draft: true
header: |
## Upgrade

Please follow the steps outlined in the [upgrade guide](https://headscale.net/stable/setup/upgrade/) to update your existing Headscale installation.

**It's best to update from one stable version to the next** (e.g., 0.24.0 → 0.25.1 → 0.26.1) in case you are multiple releases behind. You should always pick the latest available patch release.

Be sure to check the changelog above for version-specific upgrade instructions and breaking changes.

### Backup Your Database

**Always backup your database before upgrading.** Here's how to backup a SQLite database:

```bash
# Stop headscale
systemctl stop headscale

# Backup sqlite database
cp /var/lib/headscale/db.sqlite /var/lib/headscale/db.sqlite.backup

# Backup sqlite WAL/SHM files (if they exist)
cp /var/lib/headscale/db.sqlite-wal /var/lib/headscale/db.sqlite-wal.backup
cp /var/lib/headscale/db.sqlite-shm /var/lib/headscale/db.sqlite-shm.backup

# Start headscale (migration will run automatically)
systemctl start headscale
```

builds:
- id: headscale
@@ -118,6 +145,8 @@ kos:
- "{{ .Tag }}"
- '{{ trimprefix .Tag "v" }}'
- "sha-{{ .ShortCommit }}"
creation_time: "{{.CommitTimestamp}}"
ko_data_creation_time: "{{.CommitTimestamp}}"

- id: ghcr-debug
repositories:
CHANGELOG.md (88 changed lines)
@@ -2,15 +2,22 @@

## Next

### Changes

- Expire nodes with a custom timestamp
[#2828](https://github.com/juanfont/headscale/pull/2828)

## 0.27.0 (2025-10-27)

**Minimum supported Tailscale client version: v1.64.0**

### Database integrity improvements

This release includes a significant database migration that addresses longstanding
issues with the database schema and data integrity that has accumulated over the
years. The migration introduces a `schema.sql` file as the source of truth for
the expected database schema to ensure new migrations that will cause divergence
does not occur again.
This release includes a significant database migration that addresses
longstanding issues with the database schema and data integrity that has
accumulated over the years. The migration introduces a `schema.sql` file as the
source of truth for the expected database schema to ensure new migrations that
will cause divergence does not occur again.

These issues arose from a combination of factors discovered over time: SQLite
foreign keys not being enforced for many early versions, all migrations being
@@ -22,8 +29,9 @@ enforced throughout the migration process.
We are only improving SQLite databases with this change - PostgreSQL databases
are not affected.

Please read the [PR description](https://github.com/juanfont/headscale/pull/2617)
for more technical details about the issues and solutions.
Please read the
[PR description](https://github.com/juanfont/headscale/pull/2617) for more
technical details about the issues and solutions.

**SQLite Database Backup Example:**

@@ -45,9 +53,35 @@ systemctl start headscale
### DERPMap update frequency

The default DERPMap update frequency has been changed from 24 hours to 3 hours.
If you set the `derp.update_frequency` configuration option, it is recommended to change
it to `3h` to ensure that the headscale instance gets the latest DERPMap updates when
upstream is changed.
If you set the `derp.update_frequency` configuration option, it is recommended
to change it to `3h` to ensure that the headscale instance gets the latest
DERPMap updates when upstream is changed.

### Autogroups

This release adds support for the three missing autogroups: `self`
(experimental), `member`, and `tagged`. Please refer to the
[documentation](https://tailscale.com/kb/1018/autogroups/) for a detailed
explanation.

`autogroup:self` is marked as experimental and should be used with caution, but
we need help testing it. Experimental here means two things; first, generating
the packet filter from policies that use `autogroup:self` is very expensive, and
it might perform, or straight up not work on Headscale installations with a
large number of nodes. Second, the implementation might have bugs or edge cases
we are not aware of, meaning that nodes or users might gain _more_ access than
expected. Please report bugs.

### Node store (in memory database)

Under the hood, we have added a new datastructure to store nodes in memory. This
datastructure is called `NodeStore` and aims to reduce the reading and writing
of nodes to the database layer. We have not benchmarked it, but expect it to
improve performance for read heavy workloads. We think of it as, "worst case" we
have moved the bottle neck somewhere else, and "best case" we should see a good
improvement in compute resource usage at the expense of memory usage. We are
quite excited for this change and think it will make it easier for us to improve
the code base over time and make it more correct and efficient.
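As a rough illustration of the idea described above (and not Headscale's actual `NodeStore` code), a read-optimised in-memory node map might look like this minimal, hypothetical Go sketch; the type names and fields are placeholders:

```go
package store

import "sync"

// Node is a hypothetical, trimmed-down view of a node record.
type Node struct {
	ID       uint64
	Hostname string
}

// NodeStore keeps nodes in memory so that read-heavy paths do not
// have to hit the database for every lookup.
type NodeStore struct {
	mu    sync.RWMutex
	nodes map[uint64]Node
}

func New() *NodeStore {
	return &NodeStore{nodes: make(map[uint64]Node)}
}

// Get serves reads from memory under a shared lock.
func (s *NodeStore) Get(id uint64) (Node, bool) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	n, ok := s.nodes[id]
	return n, ok
}

// Put updates memory; persisting to the database can happen separately,
// trading memory usage for fewer database round trips.
func (s *NodeStore) Put(n Node) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.nodes[n.ID] = n
}
```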
### BREAKING

@@ -55,6 +89,20 @@ upstream is changed.
[#2692](https://github.com/juanfont/headscale/pull/2692)
- Policy: Zero or empty destination port is no longer allowed
[#2606](https://github.com/juanfont/headscale/pull/2606)
- Stricter hostname validation [#2383](https://github.com/juanfont/headscale/pull/2383)
- Hostnames must be valid DNS labels (2-63 characters, alphanumeric and
hyphens only, cannot start/end with hyphen)
- **Client Registration (New Nodes)**: Invalid hostnames are automatically
renamed to `invalid-XXXXXX` format
- `my-laptop` → accepted as-is
- `My-Laptop` → `my-laptop` (lowercased)
- `my_laptop` → `invalid-a1b2c3` (underscore not allowed)
- `test@host` → `invalid-d4e5f6` (@ not allowed)
- `laptop-🚀` → `invalid-j1k2l3` (emoji not allowed)
- **Hostinfo Updates / CLI**: Invalid hostnames are rejected with an error
- Valid names are accepted or lowercased
- Names with invalid characters, too short (<2), too long (>63), or
starting/ending with hyphen are rejected
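A small Go sketch of the DNS-label rules listed above (2-63 characters, alphanumeric and hyphens, no leading or trailing hyphen). This only illustrates the stated rules, it is not Headscale's actual validation code, and the `invalid-XXXXXX` renaming is not reproduced here:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// dnsLabel matches 2-63 character labels of lowercase letters, digits and
// hyphens that neither start nor end with a hyphen.
var dnsLabel = regexp.MustCompile(`^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`)

// validHostname lowercases the input and checks it against the rules
// described in the 0.27.0 breaking changes.
func validHostname(name string) (string, bool) {
	name = strings.ToLower(name)
	return name, dnsLabel.MatchString(name)
}

func main() {
	for _, h := range []string{"My-Laptop", "my_laptop", "test@host", "-edge-"} {
		n, ok := validHostname(h)
		fmt.Printf("%-12s -> %-12s valid=%v\n", h, n, ok)
	}
}
```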
### Changes

@@ -67,8 +115,8 @@ upstream is changed.
[#2765](https://github.com/juanfont/headscale/pull/2765)
- DERPmap update frequency default changed from 24h to 3h
[#2741](https://github.com/juanfont/headscale/pull/2741)
- DERPmap update mechanism has been improved with retry,
and is now failing conservatively, preserving the old map upon failure.
- DERPmap update mechanism has been improved with retry, and is now failing
conservatively, preserving the old map upon failure.
[#2741](https://github.com/juanfont/headscale/pull/2741)
- Add support for `autogroup:member`, `autogroup:tagged`
[#2572](https://github.com/juanfont/headscale/pull/2572)
@@ -77,8 +125,6 @@ upstream is changed.
- Remove policy v1 code [#2600](https://github.com/juanfont/headscale/pull/2600)
- Refactor Debian/Ubuntu packaging and drop support for Ubuntu 20.04.
[#2614](https://github.com/juanfont/headscale/pull/2614)
- Support client verify for DERP
[#2046](https://github.com/juanfont/headscale/pull/2046)
- Remove redundant check regarding `noise` config
[#2658](https://github.com/juanfont/headscale/pull/2658)
- Refactor OpenID Connect documentation
@@ -90,9 +136,15 @@ upstream is changed.
- OIDC: Use group claim from UserInfo
[#2663](https://github.com/juanfont/headscale/pull/2663)
- OIDC: Update user with claims from UserInfo _before_ comparing with allowed
groups, email and domain [#2663](https://github.com/juanfont/headscale/pull/2663)
- Policy will now reject invalid fields, making it easier to spot spelling errors
[#2764](https://github.com/juanfont/headscale/pull/2764)
groups, email and domain
[#2663](https://github.com/juanfont/headscale/pull/2663)
- Policy will now reject invalid fields, making it easier to spot spelling
errors [#2764](https://github.com/juanfont/headscale/pull/2764)
- Add FAQ entry on how to recover from an invalid policy in the database
[#2776](https://github.com/juanfont/headscale/pull/2776)
- EXPERIMENTAL: Add support for `autogroup:self`
[#2789](https://github.com/juanfont/headscale/pull/2789)
- Add healthcheck command [#2659](https://github.com/juanfont/headscale/pull/2659)

## 0.26.1 (2025-06-06)

@@ -159,7 +211,7 @@ new policy code passes all of our tests.
- Error messages should be more descriptive and informative.
- There is still work to be here, but it is already improved with "typing"
(e.g. only Users can be put in Groups)
- All users must contain an `@` character.
- All users in the policy must contain an `@` character.
- If your user naturally contains and `@`, like an email, this will just work.
- If its based on usernames, or other identifiers not containing an `@`, an
`@` should be appended at the end. For example, if your user is `john`, it

@@ -528,3 +528,4 @@ assert.EventuallyWithT(t, func(c *assert.CollectT) {
- **Integration Tests**: Require Docker and can consume significant disk space - use headscale-integration-tester agent
- **Performance**: NodeStore optimizations are critical for scale - be careful with changes to state management
- **Quality Assurance**: Always use appropriate specialized agents for testing and validation tasks
- **NEVER create gists in the user's name**: Do not use the `create_gist` tool - present information directly in the response instead
@@ -12,7 +12,7 @@ WORKDIR /go/src/tailscale
ARG TARGETARCH
RUN GOARCH=$TARGETARCH go install -v ./cmd/derper

FROM alpine:3.18
FROM alpine:3.22
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables curl

COPY --from=build-env /go/bin/* /usr/local/bin/

@@ -2,13 +2,12 @@
# and are in no way endorsed by Headscale's maintainers as an
# official nor supported release or distribution.

FROM docker.io/golang:1.24-bookworm
FROM docker.io/golang:1.25-trixie
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale

RUN apt-get update \
&& apt-get install --no-install-recommends --yes less jq sqlite3 dnsutils \
RUN apt-get --update install --no-install-recommends --yes less jq sqlite3 dnsutils \
&& rm -rf /var/lib/apt/lists/* \
&& apt-get clean
RUN mkdir -p /var/run/headscale

@@ -36,7 +36,7 @@ RUN GOARCH=$TARGETARCH go install -tags="${BUILD_TAGS}" -ldflags="\
-X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \
-v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot

FROM alpine:3.18
FROM alpine:3.22
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables curl

COPY --from=build-env /go/bin/* /usr/local/bin/
cmd/headscale/cli/health.go (29 added lines, new file)
@@ -0,0 +1,29 @@
package cli

import (
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/spf13/cobra"
)

func init() {
rootCmd.AddCommand(healthCmd)
}

var healthCmd = &cobra.Command{
Use: "health",
Short: "Check the health of the Headscale server",
Long: "Check the health of the Headscale server. This command will return an exit code of 0 if the server is healthy, or 1 if it is not.",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()

response, err := client.Health(ctx, &v1.HealthRequest{})
if err != nil {
ErrorOutput(err, "Error checking health", output)
}

SuccessOutput(response, "", output)
},
}
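The generated `headscale.pb.go` diff further down also exposes this health check over HTTP at `/api/v1/health`, returning a `database_connectivity` field. As a hedged illustration (not part of this changeset), a monitoring script could poll that endpoint; the server URL is a placeholder and, depending on your setup, the REST gateway may require an API key:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Placeholder URL; point this at your own Headscale instance.
	resp, err := http.Get("https://headscale.example.com/api/v1/health")
	if err != nil {
		fmt.Fprintln(os.Stderr, "health request failed:", err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	var health map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&health); err != nil {
		fmt.Fprintln(os.Stderr, "decoding health response failed:", err)
		os.Exit(1)
	}

	// The field may appear as database_connectivity or databaseConnectivity,
	// depending on how the gateway serialises protobuf field names.
	ok, _ := health["database_connectivity"].(bool)
	if !ok {
		ok, _ = health["databaseConnectivity"].(bool)
	}
	fmt.Println("database connectivity:", ok)
	if !ok {
		os.Exit(1)
	}
}
```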
@@ -15,6 +15,7 @@ import (
"github.com/samber/lo"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
"tailscale.com/types/key"
)

@@ -51,6 +52,7 @@ func init() {
nodeCmd.AddCommand(registerNodeCmd)

expireNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
expireNodeCmd.Flags().StringP("expiry", "e", "", "Set expire to (RFC3339 format, e.g. 2025-08-27T10:00:00Z), or leave empty to expire immediately.")
err = expireNodeCmd.MarkFlagRequired("identifier")
if err != nil {
log.Fatal(err.Error())
@@ -289,12 +291,37 @@ var expireNodeCmd = &cobra.Command{
)
}

expiry, err := cmd.Flags().GetString("expiry")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting expiry to string: %s", err),
output,
)

return
}
expiryTime := time.Now()
if expiry != "" {
expiryTime, err = time.Parse(time.RFC3339, expiry)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting expiry to string: %s", err),
output,
)

return
}
}

ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()

request := &v1.ExpireNodeRequest{
NodeId: identifier,
Expiry: timestamppb.New(expiryTime),
}

response, err := client.ExpireNode(ctx, request)
@@ -5,6 +5,7 @@ import (
"os"
"runtime"
"slices"
"strings"

"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog"
@@ -75,8 +76,9 @@ func initConfig() {
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
!versionInfo.Dirty {
githubTag := &latest.GithubTag{
Owner: "juanfont",
Repository: "headscale",
Owner: "juanfont",
Repository: "headscale",
TagFilterFunc: filterPreReleasesIfStable(func() string { return versionInfo.Version }),
}
res, err := latest.Check(githubTag, versionInfo.Version)
if err == nil && res.Outdated {
@@ -91,6 +93,43 @@ func initConfig() {
}
}

var prereleases = []string{"alpha", "beta", "rc", "dev"}

func isPreReleaseVersion(version string) bool {
for _, unstable := range prereleases {
if strings.Contains(version, unstable) {
return true
}
}
return false
}

// filterPreReleasesIfStable returns a function that filters out
// pre-release tags if the current version is stable.
// If the current version is a pre-release, it does not filter anything.
// versionFunc is a function that returns the current version string, it is
// a func for testability.
func filterPreReleasesIfStable(versionFunc func() string) func(string) bool {
return func(tag string) bool {
version := versionFunc()

// If we are on a pre-release version, then we do not filter anything
// as we want to recommend the user the latest pre-release.
if isPreReleaseVersion(version) {
return false
}

// If we are on a stable release, filter out pre-releases.
for _, ignore := range prereleases {
if strings.Contains(tag, ignore) {
return true
}
}

return false
}
}

var rootCmd = &cobra.Command{
Use: "headscale",
Short: "headscale - a Tailscale control server",
cmd/headscale/cli/root_test.go (293 added lines, new file)
@@ -0,0 +1,293 @@
package cli

import (
"testing"
)

func TestFilterPreReleasesIfStable(t *testing.T) {
tests := []struct {
name string
currentVersion string
tag string
expectedFilter bool
description string
}{
{
name: "stable version filters alpha tag",
currentVersion: "0.23.0",
tag: "v0.24.0-alpha.1",
expectedFilter: true,
description: "When on stable release, alpha tags should be filtered",
},
{
name: "stable version filters beta tag",
currentVersion: "0.23.0",
tag: "v0.24.0-beta.2",
expectedFilter: true,
description: "When on stable release, beta tags should be filtered",
},
{
name: "stable version filters rc tag",
currentVersion: "0.23.0",
tag: "v0.24.0-rc.1",
expectedFilter: true,
description: "When on stable release, rc tags should be filtered",
},
{
name: "stable version allows stable tag",
currentVersion: "0.23.0",
tag: "v0.24.0",
expectedFilter: false,
description: "When on stable release, stable tags should not be filtered",
},
{
name: "alpha version allows alpha tag",
currentVersion: "0.23.0-alpha.1",
tag: "v0.24.0-alpha.2",
expectedFilter: false,
description: "When on alpha release, alpha tags should not be filtered",
},
{
name: "alpha version allows beta tag",
currentVersion: "0.23.0-alpha.1",
tag: "v0.24.0-beta.1",
expectedFilter: false,
description: "When on alpha release, beta tags should not be filtered",
},
{
name: "alpha version allows rc tag",
currentVersion: "0.23.0-alpha.1",
tag: "v0.24.0-rc.1",
expectedFilter: false,
description: "When on alpha release, rc tags should not be filtered",
},
{
name: "alpha version allows stable tag",
currentVersion: "0.23.0-alpha.1",
tag: "v0.24.0",
expectedFilter: false,
description: "When on alpha release, stable tags should not be filtered",
},
{
name: "beta version allows alpha tag",
currentVersion: "0.23.0-beta.1",
tag: "v0.24.0-alpha.1",
expectedFilter: false,
description: "When on beta release, alpha tags should not be filtered",
},
{
name: "beta version allows beta tag",
currentVersion: "0.23.0-beta.2",
tag: "v0.24.0-beta.3",
expectedFilter: false,
description: "When on beta release, beta tags should not be filtered",
},
{
name: "beta version allows rc tag",
currentVersion: "0.23.0-beta.1",
tag: "v0.24.0-rc.1",
expectedFilter: false,
description: "When on beta release, rc tags should not be filtered",
},
{
name: "beta version allows stable tag",
currentVersion: "0.23.0-beta.1",
tag: "v0.24.0",
expectedFilter: false,
description: "When on beta release, stable tags should not be filtered",
},
{
name: "rc version allows alpha tag",
currentVersion: "0.23.0-rc.1",
tag: "v0.24.0-alpha.1",
expectedFilter: false,
description: "When on rc release, alpha tags should not be filtered",
},
{
name: "rc version allows beta tag",
currentVersion: "0.23.0-rc.1",
tag: "v0.24.0-beta.1",
expectedFilter: false,
description: "When on rc release, beta tags should not be filtered",
},
{
name: "rc version allows rc tag",
currentVersion: "0.23.0-rc.2",
tag: "v0.24.0-rc.3",
expectedFilter: false,
description: "When on rc release, rc tags should not be filtered",
},
{
name: "rc version allows stable tag",
currentVersion: "0.23.0-rc.1",
tag: "v0.24.0",
expectedFilter: false,
description: "When on rc release, stable tags should not be filtered",
},
{
name: "stable version with patch filters alpha",
currentVersion: "0.23.1",
tag: "v0.24.0-alpha.1",
expectedFilter: true,
description: "Stable version with patch number should filter alpha tags",
},
{
name: "stable version with patch allows stable",
currentVersion: "0.23.1",
tag: "v0.24.0",
expectedFilter: false,
description: "Stable version with patch number should allow stable tags",
},
{
name: "tag with alpha substring in version number",
currentVersion: "0.23.0",
tag: "v1.0.0-alpha.1",
expectedFilter: true,
description: "Tags with alpha in version string should be filtered on stable",
},
{
name: "tag with beta substring in version number",
currentVersion: "0.23.0",
tag: "v1.0.0-beta.1",
expectedFilter: true,
description: "Tags with beta in version string should be filtered on stable",
},
{
name: "tag with rc substring in version number",
currentVersion: "0.23.0",
tag: "v1.0.0-rc.1",
expectedFilter: true,
description: "Tags with rc in version string should be filtered on stable",
},
{
name: "empty tag on stable version",
currentVersion: "0.23.0",
tag: "",
expectedFilter: false,
description: "Empty tags should not be filtered",
},
{
name: "dev version allows all tags",
currentVersion: "0.23.0-dev",
tag: "v0.24.0-alpha.1",
expectedFilter: false,
description: "Dev versions should not filter any tags (pre-release allows all)",
},
{
name: "stable version filters dev tag",
currentVersion: "0.23.0",
tag: "v0.24.0-dev",
expectedFilter: true,
description: "When on stable release, dev tags should be filtered",
},
{
name: "dev version allows dev tag",
currentVersion: "0.23.0-dev",
tag: "v0.24.0-dev.1",
expectedFilter: false,
description: "When on dev release, dev tags should not be filtered",
},
{
name: "dev version allows stable tag",
currentVersion: "0.23.0-dev",
tag: "v0.24.0",
expectedFilter: false,
description: "When on dev release, stable tags should not be filtered",
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := filterPreReleasesIfStable(func() string { return tt.currentVersion })(tt.tag)
if result != tt.expectedFilter {
t.Errorf("%s: got %v, want %v\nDescription: %s\nCurrent version: %s, Tag: %s",
tt.name,
result,
tt.expectedFilter,
tt.description,
tt.currentVersion,
tt.tag,
)
}
})
}
}

func TestIsPreReleaseVersion(t *testing.T) {
tests := []struct {
name string
version string
expected bool
description string
}{
{
name: "stable version",
version: "0.23.0",
expected: false,
description: "Stable version should not be pre-release",
},
{
name: "alpha version",
version: "0.23.0-alpha.1",
expected: true,
description: "Alpha version should be pre-release",
},
{
name: "beta version",
version: "0.23.0-beta.1",
expected: true,
description: "Beta version should be pre-release",
},
{
name: "rc version",
version: "0.23.0-rc.1",
expected: true,
description: "RC version should be pre-release",
},
{
name: "version with alpha substring",
version: "0.23.0-alphabetical",
expected: true,
description: "Version containing 'alpha' should be pre-release",
},
{
name: "version with beta substring",
version: "0.23.0-betamax",
expected: true,
description: "Version containing 'beta' should be pre-release",
},
{
name: "dev version",
version: "0.23.0-dev",
expected: true,
description: "Dev version should be pre-release",
},
{
name: "empty version",
version: "",
expected: false,
description: "Empty version should not be pre-release",
},
{
name: "version with patch number",
version: "0.23.1",
expected: false,
description: "Stable version with patch should not be pre-release",
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := isPreReleaseVersion(tt.version)
if result != tt.expected {
t.Errorf("%s: got %v, want %v\nDescription: %s\nVersion: %s",
tt.name,
result,
tt.expected,
tt.description,
tt.version,
)
}
})
}
}
@@ -74,7 +74,7 @@ func detectGoVersion() string {

content, err := os.ReadFile(goModPath)
if err != nil {
return "1.24"
return "1.25"
}

lines := splitLines(string(content))
@@ -89,7 +89,7 @@ func detectGoVersion() string {
}
}

return "1.24"
return "1.25"
}

// splitLines splits a string into lines without using strings.Split.

@@ -81,7 +81,7 @@ func extractDirectoryFromTar(tarReader io.Reader, targetDir string) error {
if err := os.MkdirAll(filepath.Dir(targetPath), 0o755); err != nil {
return fmt.Errorf("failed to create parent directories for %s: %w", targetPath, err)
}

// Create file
outFile, err := os.Create(targetPath)
if err != nil {
@@ -60,7 +60,9 @@ prefixes:
v6: fd7a:115c:a1e0::/48

# Strategy used for allocation of IPs to nodes, available options:
# - sequential (default): assigns the next free IP from the previous given IP.
# - sequential (default): assigns the next free IP from the previous given
# IP. A best-effort approach is used and Headscale might leave holes in the
# IP range or fill up existing holes in the IP range.
# - random: assigns the next free IP from a pseudo-random IP generator (crypto/rand).
allocation: sequential

@@ -391,11 +393,13 @@ unix_socket_permission: "0770"
# method: S256

# Logtail configuration
# Logtail is Tailscales logging and auditing infrastructure, it allows the control panel
# to instruct tailscale nodes to log their activity to a remote server.
# Logtail is Tailscales logging and auditing infrastructure, it allows the
# control panel to instruct tailscale nodes to log their activity to a remote
# server. To disable logging on the client side, please refer to:
# https://tailscale.com/kb/1011/log-mesh-traffic#opting-out-of-client-logging
logtail:
# Enable logtail for this headscales clients.
# As there is currently no support for overriding the log server in headscale, this is
# Enable logtail for tailscale nodes of this Headscale instance.
# As there is currently no support for overriding the log server in Headscale, this is
# disabled by default. Enabling this will make your clients send logs to Tailscale Inc.
enabled: false
@@ -44,6 +44,15 @@ For convenience, we also [build container images with headscale](../setup/instal
we don't officially support deploying headscale using Docker**. On our [Discord server](https://discord.gg/c84AZQhmpx)
we have a "docker-issues" channel where you can ask for Docker-specific help to the community.

## What is the recommended update path? Can I skip multiple versions while updating?

Please follow the steps outlined in the [upgrade guide](../setup/upgrade.md) to update your existing Headscale
installation. Its best to update from one stable version to the next (e.g. 0.24.0 → 0.25.1 → 0.26.1) in case
you are multiple releases behind. You should always pick the latest available patch release.

Be sure to check the [changelog](https://github.com/juanfont/headscale/blob/main/CHANGELOG.md) for version specific
upgrade instructions and breaking changes.

## Scaling / How many clients does Headscale support?

It depends. As often stated, Headscale is not enterprise software and our focus
@@ -134,3 +143,35 @@ in their output of `tailscale status`. Traffic is still filtered according to th
ping` which is always allowed in either direction.

See also <https://tailscale.com/kb/1087/device-visibility>.

## My policy is stored in the database and Headscale refuses to start due to an invalid policy. How can I recover?

Headscale checks if the policy is valid during startup and refuses to start if it detects an error. The error message
indicates which part of the policy is invalid. Follow these steps to fix your policy:

- Dump the policy to a file: `headscale policy get --bypass-grpc-and-access-database-directly > policy.json`
- Edit and fixup `policy.json`. Use the command `headscale policy check --file policy.json` to validate the policy.
- Load the modified policy: `headscale policy set --bypass-grpc-and-access-database-directly --file policy.json`
- Start Headscale as usual.

!!! warning "Full server configuration required"

The above commands to get/set the policy require a complete server configuration file including database settings. A
minimal config to [control Headscale via remote CLI](../ref/remote-cli.md) is not sufficient. You may use `headscale
-c /path/to/config.yaml` to specify the path to an alternative configuration file.

## How can I avoid to send logs to Tailscale Inc?

A Tailscale client [collects logs about its operation and connection attempts with other
clients](https://tailscale.com/kb/1011/log-mesh-traffic#client-logs) and sends them to a central log service operated by
Tailscale Inc.

Headscale, by default, instructs clients to disable log submission to the central log service. This configuration is
applied by a client once it successfully connected with Headscale. See the configuration option `logtail.enabled` in the
[configuration file](../ref/configuration.md) for details.

Alternatively, logging can also be disabled on the client side. This is independent of Headscale and opting out of
client logging disables log submission early during client startup. The configuration is operating system specific and
is usually achieved by setting the environment variable `TS_NO_LOGS_NO_SUPPORT=true` or by passing the flag
`--no-logs-no-support` to `tailscaled`. See
<https://tailscale.com/kb/1011/log-mesh-traffic#opting-out-of-client-logging> for details.
@@ -23,7 +23,7 @@ provides on overview of Headscale's feature and compatibility with the Tailscale
- [x] Access control lists ([GitHub label "policy"](https://github.com/juanfont/headscale/labels/policy%20%F0%9F%93%9D))
- [x] ACL management via API
- [x] Some [Autogroups](https://tailscale.com/kb/1396/targets#autogroups), currently: `autogroup:internet`,
`autogroup:nonroot`, `autogroup:member`, `autogroup:tagged`
`autogroup:nonroot`, `autogroup:member`, `autogroup:tagged`, `autogroup:self`
- [x] [Auto approvers](https://tailscale.com/kb/1337/acl-syntax#auto-approvers) for [subnet
routers](../ref/routes.md#automatically-approve-routes-of-a-subnet-router) and [exit
nodes](../ref/routes.md#automatically-approve-an-exit-node-with-auto-approvers)
@@ -194,13 +194,94 @@ Here are the ACL's to implement the same permissions as above:
"dst": ["tag:dev-app-servers:80,443"]
},

// We still have to allow internal users communications since nothing guarantees that each user have
// their own users.
{ "action": "accept", "src": ["boss@"], "dst": ["boss@:*"] },
{ "action": "accept", "src": ["dev1@"], "dst": ["dev1@:*"] },
{ "action": "accept", "src": ["dev2@"], "dst": ["dev2@:*"] },
{ "action": "accept", "src": ["admin1@"], "dst": ["admin1@:*"] },
{ "action": "accept", "src": ["intern1@"], "dst": ["intern1@:*"] }
// Allow users to access their own devices using autogroup:self (see below for more details about performance impact)
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
}
]
}
```

## Autogroups

Headscale supports several autogroups that automatically include users, destinations, or devices with specific properties. Autogroups provide a convenient way to write ACL rules without manually listing individual users or devices.

### `autogroup:internet`

Allows access to the internet through [exit nodes](routes.md#exit-node). Can only be used in ACL destinations.

```json
{
"action": "accept",
"src": ["group:users"],
"dst": ["autogroup:internet:*"]
}
```

### `autogroup:member`

Includes all users who are direct members of the tailnet. Does not include users from shared devices.

```json
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["tag:prod-app-servers:80,443"]
}
```

### `autogroup:tagged`

Includes all devices that have at least one tag.

```json
{
"action": "accept",
"src": ["autogroup:tagged"],
"dst": ["tag:monitoring:9090"]
}
```

### `autogroup:self`
**(EXPERIMENTAL)**

!!! warning "The current implementation of `autogroup:self` is inefficient"

Includes devices where the same user is authenticated on both the source and destination. Does not include tagged devices. Can only be used in ACL destinations.

```json
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
}
```
*Using `autogroup:self` may cause performance degradation on the Headscale coordinator server in large deployments, as filter rules must be compiled per-node rather than globally and the current implementation is not very efficient.*

If you experience performance issues, consider using more specific ACL rules or limiting the use of `autogroup:self`.
```json
{
// The following rules allow internal users to communicate with their
// own nodes in case autogroup:self is causing performance issues.
{ "action": "accept", "src": ["boss@"], "dst": ["boss@:*"] },
{ "action": "accept", "src": ["dev1@"], "dst": ["dev1@:*"] },
{ "action": "accept", "src": ["dev2@"], "dst": ["dev2@:*"] },
{ "action": "accept", "src": ["admin1@"], "dst": ["admin1@:*"] },
{ "action": "accept", "src": ["intern1@"], "dst": ["intern1@:*"] }
}
```

### `autogroup:nonroot`

Used in Tailscale SSH rules to allow access to any user except root. Can only be used in the `users` field of SSH rules.

```json
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self"],
"users": ["autogroup:nonroot"]
}
```
@@ -1,7 +1,7 @@
# DNS

Headscale supports [most DNS features](../about/features.md) from Tailscale. DNS related settings can be configured
within `dns` section of the [configuration file](./configuration.md).
within the `dns` section of the [configuration file](./configuration.md).

## Setting extra DNS records
@@ -67,12 +67,6 @@ headscale apikeys expire --prefix "<PREFIX>"
export HEADSCALE_CLI_API_KEY="<API_KEY_FROM_PREVIOUS_STEP>"
```

!!! bug

Headscale currently requires at least an empty configuration file when environment variables are used to
specify connection details. See [issue 2193](https://github.com/juanfont/headscale/issues/2193) for more
information.

This instructs the `headscale` binary to connect to a remote instance at `<HEADSCALE_ADDRESS>:<PORT>`, instead of
connecting to the local instance.
@@ -39,6 +39,7 @@ Registry](https://github.com/juanfont/headscale/pkgs/container/headscale). The c
--volume "$(pwd)/run:/var/run/headscale" \
--publish 127.0.0.1:8080:8080 \
--publish 127.0.0.1:9090:9090 \
--health-cmd "CMD headscale health" \
docker.io/headscale/headscale:<VERSION> \
serve
```
@@ -66,6 +67,8 @@ Registry](https://github.com/juanfont/headscale/pkgs/container/headscale). The c
- <HEADSCALE_PATH>/lib:/var/lib/headscale
- <HEADSCALE_PATH>/run:/var/run/headscale
command: serve
healthcheck:
test: ["CMD", "headscale", "health"]
```

1. Verify headscale is running:
@@ -7,7 +7,7 @@ Both are available on the [GitHub releases page](https://github.com/juanfont/hea

It is recommended to use our DEB packages to install headscale on a Debian based system as those packages configure a
local user to run headscale, provide a default configuration and ship with a systemd service file. Supported
distributions are Ubuntu 22.04 or newer, Debian 11 or newer.
distributions are Ubuntu 22.04 or newer, Debian 12 or newer.

1. Download the [latest headscale package](https://github.com/juanfont/headscale/releases/latest) for your platform (`.deb` for Ubuntu and Debian).

@@ -57,14 +57,14 @@ managed by systemd.
1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):

```shell
sudo wget --output-document=/usr/local/bin/headscale \
sudo wget --output-document=/usr/bin/headscale \
https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>
```

1. Make `headscale` executable:

```shell
sudo chmod +x /usr/local/bin/headscale
sudo chmod +x /usr/bin/headscale
```

1. Add a dedicated local user to run headscale:
flake.lock (6 changed lines, generated)
@@ -20,11 +20,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1755829505,
"narHash": "sha256-4/Jd+LkQ2ssw8luQVkqVs9spDBVE6h/u/hC/tzngsPo=",
"lastModified": 1760533177,
"narHash": "sha256-OwM1sFustLHx+xmTymhucZuNhtq98fHIbfO8Swm5L8A=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "f937f8ecd1c70efd7e9f90ba13dfb400cf559de4",
"rev": "35f590344ff791e6b1d6d6b8f3523467c9217caf",
"type": "github"
},
"original": {
flake.nix (13 changed lines)
@@ -18,8 +18,8 @@
{
overlay = _: prev: let
pkgs = nixpkgs.legacyPackages.${prev.system};
buildGo = pkgs.buildGo124Module;
vendorHash = "sha256-hIY6asY3rOIqf/5P6lFmnNCDWcqNPJaj+tqJuOvGJlo=";
buildGo = pkgs.buildGo125Module;
vendorHash = "sha256-VOi4PGZ8I+2MiwtzxpKc/4smsL5KcH/pHVkjJfAFPJ0=";
in {
headscale = buildGo {
pname = "headscale";
@@ -97,9 +97,10 @@
# buildGoModule = buildGo;
# };

goreleaser = prev.goreleaser.override {
buildGoModule = buildGo;
};
# The package uses buildGo125Module, not the convention.
# goreleaser = prev.goreleaser.override {
# buildGoModule = buildGo;
# };

gotestsum = prev.gotestsum.override {
buildGoModule = buildGo;
@@ -124,7 +125,7 @@
overlays = [self.overlay];
inherit system;
};
buildDeps = with pkgs; [git go_1_24 gnumake];
buildDeps = with pkgs; [git go_1_25 gnumake];
devDeps = with pkgs;
buildDeps
++ [
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.8
// protoc-gen-go v1.36.10
// protoc (unknown)
// source: headscale/v1/apikey.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.8
// protoc-gen-go v1.36.10
// protoc (unknown)
// source: headscale/v1/device.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.8
// protoc-gen-go v1.36.10
// protoc (unknown)
// source: headscale/v1/headscale.proto
@@ -11,6 +11,7 @@ import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)

@@ -21,11 +22,94 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type HealthRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

func (x *HealthRequest) Reset() {
*x = HealthRequest{}
mi := &file_headscale_v1_headscale_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

func (x *HealthRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*HealthRequest) ProtoMessage() {}

func (x *HealthRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_headscale_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use HealthRequest.ProtoReflect.Descriptor instead.
func (*HealthRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_headscale_proto_rawDescGZIP(), []int{0}
}

type HealthResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
DatabaseConnectivity bool `protobuf:"varint,1,opt,name=database_connectivity,json=databaseConnectivity,proto3" json:"database_connectivity,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

func (x *HealthResponse) Reset() {
*x = HealthResponse{}
mi := &file_headscale_v1_headscale_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

func (x *HealthResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*HealthResponse) ProtoMessage() {}

func (x *HealthResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_headscale_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use HealthResponse.ProtoReflect.Descriptor instead.
func (*HealthResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_headscale_proto_rawDescGZIP(), []int{1}
}

func (x *HealthResponse) GetDatabaseConnectivity() bool {
if x != nil {
return x.DatabaseConnectivity
}
return false
}

var File_headscale_v1_headscale_proto protoreflect.FileDescriptor

const file_headscale_v1_headscale_proto_rawDesc = "" +
"\n" +
"\x1cheadscale/v1/headscale.proto\x12\fheadscale.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17headscale/v1/user.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/node.proto\x1a\x19headscale/v1/apikey.proto\x1a\x19headscale/v1/policy.proto2\xa3\x16\n" +
"\x1cheadscale/v1/headscale.proto\x12\fheadscale.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17headscale/v1/user.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/node.proto\x1a\x19headscale/v1/apikey.proto\x1a\x19headscale/v1/policy.proto\"\x0f\n" +
"\rHealthRequest\"E\n" +
"\x0eHealthResponse\x123\n" +
"\x15database_connectivity\x18\x01 \x01(\bR\x14databaseConnectivity2\x80\x17\n" +
"\x10HeadscaleService\x12h\n" +
"\n" +
"CreateUser\x12\x1f.headscale.v1.CreateUserRequest\x1a .headscale.v1.CreateUserResponse\"\x17\x82\xd3\xe4\x93\x02\x11:\x01*\"\f/api/v1/user\x12\x80\x01\n" +
@@ -56,109 +140,127 @@ const file_headscale_v1_headscale_proto_rawDesc = "" +
"\vListApiKeys\x12 .headscale.v1.ListApiKeysRequest\x1a!.headscale.v1.ListApiKeysResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/apikey\x12v\n" +
"\fDeleteApiKey\x12!.headscale.v1.DeleteApiKeyRequest\x1a\".headscale.v1.DeleteApiKeyResponse\"\x1f\x82\xd3\xe4\x93\x02\x19*\x17/api/v1/apikey/{prefix}\x12d\n" +
"\tGetPolicy\x12\x1e.headscale.v1.GetPolicyRequest\x1a\x1f.headscale.v1.GetPolicyResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/policy\x12g\n" +
"\tSetPolicy\x12\x1e.headscale.v1.SetPolicyRequest\x1a\x1f.headscale.v1.SetPolicyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\x1a\x0e/api/v1/policyB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
"\tSetPolicy\x12\x1e.headscale.v1.SetPolicyRequest\x1a\x1f.headscale.v1.SetPolicyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\x1a\x0e/api/v1/policy\x12[\n" +
"\x06Health\x12\x1b.headscale.v1.HealthRequest\x1a\x1c.headscale.v1.HealthResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/healthB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"

var (
file_headscale_v1_headscale_proto_rawDescOnce sync.Once
file_headscale_v1_headscale_proto_rawDescData []byte
)

func file_headscale_v1_headscale_proto_rawDescGZIP() []byte {
file_headscale_v1_headscale_proto_rawDescOnce.Do(func() {
file_headscale_v1_headscale_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_headscale_proto_rawDesc), len(file_headscale_v1_headscale_proto_rawDesc)))
})
return file_headscale_v1_headscale_proto_rawDescData
}

var file_headscale_v1_headscale_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_headscale_v1_headscale_proto_goTypes = []any{
(*CreateUserRequest)(nil), // 0: headscale.v1.CreateUserRequest
(*RenameUserRequest)(nil), // 1: headscale.v1.RenameUserRequest
(*DeleteUserRequest)(nil), // 2: headscale.v1.DeleteUserRequest
(*ListUsersRequest)(nil), // 3: headscale.v1.ListUsersRequest
(*CreatePreAuthKeyRequest)(nil), // 4: headscale.v1.CreatePreAuthKeyRequest
(*ExpirePreAuthKeyRequest)(nil), // 5: headscale.v1.ExpirePreAuthKeyRequest
(*ListPreAuthKeysRequest)(nil), // 6: headscale.v1.ListPreAuthKeysRequest
(*DebugCreateNodeRequest)(nil), // 7: headscale.v1.DebugCreateNodeRequest
(*GetNodeRequest)(nil), // 8: headscale.v1.GetNodeRequest
(*SetTagsRequest)(nil), // 9: headscale.v1.SetTagsRequest
(*SetApprovedRoutesRequest)(nil), // 10: headscale.v1.SetApprovedRoutesRequest
(*RegisterNodeRequest)(nil), // 11: headscale.v1.RegisterNodeRequest
(*DeleteNodeRequest)(nil), // 12: headscale.v1.DeleteNodeRequest
(*ExpireNodeRequest)(nil), // 13: headscale.v1.ExpireNodeRequest
(*RenameNodeRequest)(nil), // 14: headscale.v1.RenameNodeRequest
(*ListNodesRequest)(nil), // 15: headscale.v1.ListNodesRequest
(*MoveNodeRequest)(nil), // 16: headscale.v1.MoveNodeRequest
(*BackfillNodeIPsRequest)(nil), // 17: headscale.v1.BackfillNodeIPsRequest
(*CreateApiKeyRequest)(nil), // 18: headscale.v1.CreateApiKeyRequest
(*ExpireApiKeyRequest)(nil), // 19: headscale.v1.ExpireApiKeyRequest
(*ListApiKeysRequest)(nil), // 20: headscale.v1.ListApiKeysRequest
(*DeleteApiKeyRequest)(nil), // 21: headscale.v1.DeleteApiKeyRequest
(*GetPolicyRequest)(nil), // 22: headscale.v1.GetPolicyRequest
(*SetPolicyRequest)(nil), // 23: headscale.v1.SetPolicyRequest
(*CreateUserResponse)(nil), // 24: headscale.v1.CreateUserResponse
(*RenameUserResponse)(nil), // 25: headscale.v1.RenameUserResponse
(*DeleteUserResponse)(nil), // 26: headscale.v1.DeleteUserResponse
(*ListUsersResponse)(nil), // 27: headscale.v1.ListUsersResponse
(*CreatePreAuthKeyResponse)(nil), // 28: headscale.v1.CreatePreAuthKeyResponse
(*ExpirePreAuthKeyResponse)(nil), // 29: headscale.v1.ExpirePreAuthKeyResponse
(*ListPreAuthKeysResponse)(nil), // 30: headscale.v1.ListPreAuthKeysResponse
(*DebugCreateNodeResponse)(nil), // 31: headscale.v1.DebugCreateNodeResponse
(*GetNodeResponse)(nil), // 32: headscale.v1.GetNodeResponse
(*SetTagsResponse)(nil), // 33: headscale.v1.SetTagsResponse
(*SetApprovedRoutesResponse)(nil), // 34: headscale.v1.SetApprovedRoutesResponse
(*RegisterNodeResponse)(nil), // 35: headscale.v1.RegisterNodeResponse
(*DeleteNodeResponse)(nil), // 36: headscale.v1.DeleteNodeResponse
(*ExpireNodeResponse)(nil), // 37: headscale.v1.ExpireNodeResponse
(*RenameNodeResponse)(nil), // 38: headscale.v1.RenameNodeResponse
(*ListNodesResponse)(nil), // 39: headscale.v1.ListNodesResponse
(*MoveNodeResponse)(nil), // 40: headscale.v1.MoveNodeResponse
(*BackfillNodeIPsResponse)(nil), // 41: headscale.v1.BackfillNodeIPsResponse
(*CreateApiKeyResponse)(nil), // 42: headscale.v1.CreateApiKeyResponse
(*ExpireApiKeyResponse)(nil), // 43: headscale.v1.ExpireApiKeyResponse
(*ListApiKeysResponse)(nil), // 44: headscale.v1.ListApiKeysResponse
(*DeleteApiKeyResponse)(nil), // 45: headscale.v1.DeleteApiKeyResponse
(*GetPolicyResponse)(nil), // 46: headscale.v1.GetPolicyResponse
(*SetPolicyResponse)(nil), // 47: headscale.v1.SetPolicyResponse
(*HealthRequest)(nil), // 0: headscale.v1.HealthRequest
(*HealthResponse)(nil), // 1: headscale.v1.HealthResponse
(*CreateUserRequest)(nil), // 2: headscale.v1.CreateUserRequest
(*RenameUserRequest)(nil), // 3: headscale.v1.RenameUserRequest
(*DeleteUserRequest)(nil), // 4: headscale.v1.DeleteUserRequest
(*ListUsersRequest)(nil), // 5: headscale.v1.ListUsersRequest
(*CreatePreAuthKeyRequest)(nil), // 6: headscale.v1.CreatePreAuthKeyRequest
(*ExpirePreAuthKeyRequest)(nil), // 7: headscale.v1.ExpirePreAuthKeyRequest
(*ListPreAuthKeysRequest)(nil), // 8: headscale.v1.ListPreAuthKeysRequest
(*DebugCreateNodeRequest)(nil), // 9: headscale.v1.DebugCreateNodeRequest
(*GetNodeRequest)(nil), // 10: headscale.v1.GetNodeRequest
(*SetTagsRequest)(nil), // 11: headscale.v1.SetTagsRequest
(*SetApprovedRoutesRequest)(nil), // 12: headscale.v1.SetApprovedRoutesRequest
(*RegisterNodeRequest)(nil), // 13: headscale.v1.RegisterNodeRequest
(*DeleteNodeRequest)(nil), // 14: headscale.v1.DeleteNodeRequest
(*ExpireNodeRequest)(nil), // 15: headscale.v1.ExpireNodeRequest
(*RenameNodeRequest)(nil), // 16: headscale.v1.RenameNodeRequest
(*ListNodesRequest)(nil), // 17: headscale.v1.ListNodesRequest
(*MoveNodeRequest)(nil), // 18: headscale.v1.MoveNodeRequest
(*BackfillNodeIPsRequest)(nil), // 19: headscale.v1.BackfillNodeIPsRequest
(*CreateApiKeyRequest)(nil), // 20: headscale.v1.CreateApiKeyRequest
(*ExpireApiKeyRequest)(nil), // 21: headscale.v1.ExpireApiKeyRequest
(*ListApiKeysRequest)(nil), // 22: headscale.v1.ListApiKeysRequest
(*DeleteApiKeyRequest)(nil), // 23: headscale.v1.DeleteApiKeyRequest
(*GetPolicyRequest)(nil), // 24: headscale.v1.GetPolicyRequest
(*SetPolicyRequest)(nil), // 25: headscale.v1.SetPolicyRequest
(*CreateUserResponse)(nil), // 26: headscale.v1.CreateUserResponse
(*RenameUserResponse)(nil), // 27: headscale.v1.RenameUserResponse
(*DeleteUserResponse)(nil), // 28: headscale.v1.DeleteUserResponse
(*ListUsersResponse)(nil), // 29: headscale.v1.ListUsersResponse
(*CreatePreAuthKeyResponse)(nil), // 30: headscale.v1.CreatePreAuthKeyResponse
(*ExpirePreAuthKeyResponse)(nil), // 31: headscale.v1.ExpirePreAuthKeyResponse
(*ListPreAuthKeysResponse)(nil), // 32: headscale.v1.ListPreAuthKeysResponse
(*DebugCreateNodeResponse)(nil), // 33: headscale.v1.DebugCreateNodeResponse
(*GetNodeResponse)(nil), // 34: headscale.v1.GetNodeResponse
(*SetTagsResponse)(nil), // 35: headscale.v1.SetTagsResponse
(*SetApprovedRoutesResponse)(nil), // 36: headscale.v1.SetApprovedRoutesResponse
(*RegisterNodeResponse)(nil), // 37: headscale.v1.RegisterNodeResponse
(*DeleteNodeResponse)(nil), // 38: headscale.v1.DeleteNodeResponse
(*ExpireNodeResponse)(nil), // 39: headscale.v1.ExpireNodeResponse
(*RenameNodeResponse)(nil), // 40: headscale.v1.RenameNodeResponse
(*ListNodesResponse)(nil), // 41: headscale.v1.ListNodesResponse
(*MoveNodeResponse)(nil), // 42: headscale.v1.MoveNodeResponse
(*BackfillNodeIPsResponse)(nil), // 43: headscale.v1.BackfillNodeIPsResponse
(*CreateApiKeyResponse)(nil), // 44: headscale.v1.CreateApiKeyResponse
(*ExpireApiKeyResponse)(nil), // 45: headscale.v1.ExpireApiKeyResponse
(*ListApiKeysResponse)(nil), // 46: headscale.v1.ListApiKeysResponse
(*DeleteApiKeyResponse)(nil), // 47: headscale.v1.DeleteApiKeyResponse
(*GetPolicyResponse)(nil), // 48: headscale.v1.GetPolicyResponse
(*SetPolicyResponse)(nil), // 49: headscale.v1.SetPolicyResponse
}
var file_headscale_v1_headscale_proto_depIdxs = []int32{
0, // 0: headscale.v1.HeadscaleService.CreateUser:input_type -> headscale.v1.CreateUserRequest
1, // 1: headscale.v1.HeadscaleService.RenameUser:input_type -> headscale.v1.RenameUserRequest
2, // 2: headscale.v1.HeadscaleService.DeleteUser:input_type -> headscale.v1.DeleteUserRequest
3, // 3: headscale.v1.HeadscaleService.ListUsers:input_type -> headscale.v1.ListUsersRequest
4, // 4: headscale.v1.HeadscaleService.CreatePreAuthKey:input_type -> headscale.v1.CreatePreAuthKeyRequest
5, // 5: headscale.v1.HeadscaleService.ExpirePreAuthKey:input_type -> headscale.v1.ExpirePreAuthKeyRequest
6, // 6: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest
7, // 7: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest
8, // 8: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest
9, // 9: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
|
||||
10, // 10: headscale.v1.HeadscaleService.SetApprovedRoutes:input_type -> headscale.v1.SetApprovedRoutesRequest
|
||||
11, // 11: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
|
||||
12, // 12: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
|
||||
13, // 13: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
|
||||
14, // 14: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
|
||||
15, // 15: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
|
||||
16, // 16: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
|
||||
17, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
|
||||
18, // 18: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
|
||||
19, // 19: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
|
||||
20, // 20: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
|
||||
21, // 21: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
|
||||
22, // 22: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
|
||||
23, // 23: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
|
||||
24, // 24: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
|
||||
25, // 25: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
|
||||
26, // 26: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
|
||||
27, // 27: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
|
||||
28, // 28: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
29, // 29: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
30, // 30: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
31, // 31: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
|
||||
32, // 32: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
|
||||
33, // 33: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
|
||||
34, // 34: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
|
||||
35, // 35: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
|
||||
36, // 36: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
|
||||
37, // 37: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
|
||||
38, // 38: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
|
||||
39, // 39: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
|
||||
40, // 40: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
|
||||
41, // 41: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
|
||||
42, // 42: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
|
||||
43, // 43: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
|
||||
44, // 44: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
|
||||
45, // 45: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
|
||||
46, // 46: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
|
||||
47, // 47: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
|
||||
24, // [24:48] is the sub-list for method output_type
|
||||
0, // [0:24] is the sub-list for method input_type
|
||||
2, // 0: headscale.v1.HeadscaleService.CreateUser:input_type -> headscale.v1.CreateUserRequest
|
||||
3, // 1: headscale.v1.HeadscaleService.RenameUser:input_type -> headscale.v1.RenameUserRequest
|
||||
4, // 2: headscale.v1.HeadscaleService.DeleteUser:input_type -> headscale.v1.DeleteUserRequest
|
||||
5, // 3: headscale.v1.HeadscaleService.ListUsers:input_type -> headscale.v1.ListUsersRequest
|
||||
6, // 4: headscale.v1.HeadscaleService.CreatePreAuthKey:input_type -> headscale.v1.CreatePreAuthKeyRequest
|
||||
7, // 5: headscale.v1.HeadscaleService.ExpirePreAuthKey:input_type -> headscale.v1.ExpirePreAuthKeyRequest
|
||||
8, // 6: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest
|
||||
9, // 7: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest
|
||||
10, // 8: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest
|
||||
11, // 9: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
|
||||
12, // 10: headscale.v1.HeadscaleService.SetApprovedRoutes:input_type -> headscale.v1.SetApprovedRoutesRequest
|
||||
13, // 11: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
|
||||
14, // 12: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
|
||||
15, // 13: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
|
||||
16, // 14: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
|
||||
17, // 15: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
|
||||
18, // 16: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
|
||||
19, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
|
||||
20, // 18: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
|
||||
21, // 19: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
|
||||
22, // 20: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
|
||||
23, // 21: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
|
||||
24, // 22: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
|
||||
25, // 23: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
|
||||
0, // 24: headscale.v1.HeadscaleService.Health:input_type -> headscale.v1.HealthRequest
|
||||
26, // 25: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
|
||||
27, // 26: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
|
||||
28, // 27: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
|
||||
29, // 28: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
|
||||
30, // 29: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
31, // 30: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
32, // 31: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
33, // 32: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
|
||||
34, // 33: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
|
||||
35, // 34: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
|
||||
36, // 35: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
|
||||
37, // 36: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
|
||||
38, // 37: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
|
||||
39, // 38: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
|
||||
40, // 39: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
|
||||
41, // 40: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
|
||||
42, // 41: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
|
||||
43, // 42: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
|
||||
44, // 43: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
|
||||
45, // 44: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
|
||||
46, // 45: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
|
||||
47, // 46: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
|
||||
48, // 47: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
|
||||
49, // 48: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
|
||||
1, // 49: headscale.v1.HeadscaleService.Health:output_type -> headscale.v1.HealthResponse
|
||||
25, // [25:50] is the sub-list for method output_type
|
||||
0, // [0:25] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
@@ -180,12 +282,13 @@ func file_headscale_v1_headscale_proto_init() {
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_headscale_proto_rawDesc), len(file_headscale_v1_headscale_proto_rawDesc)),
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_headscale_v1_headscale_proto_goTypes,
|
||||
DependencyIndexes: file_headscale_v1_headscale_proto_depIdxs,
|
||||
MessageInfos: file_headscale_v1_headscale_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_headscale_proto = out.File
|
||||
file_headscale_v1_headscale_proto_goTypes = nil
|
||||
|
||||
@@ -471,6 +471,8 @@ func local_request_HeadscaleService_DeleteNode_0(ctx context.Context, marshaler
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
var filter_HeadscaleService_ExpireNode_0 = &utilities.DoubleArray{Encoding: map[string]int{"node_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
|
||||
|
||||
func request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq ExpireNodeRequest
|
||||
@@ -485,6 +487,12 @@ func request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler runtim
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ExpireNode_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := client.ExpireNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -503,6 +511,12 @@ func local_request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ExpireNode_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := server.ExpireNode(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
@@ -809,6 +823,24 @@ func local_request_HeadscaleService_SetPolicy_0(ctx context.Context, marshaler r
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_HeadscaleService_Health_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq HealthRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
msg, err := client.Health(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_HeadscaleService_Health_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq HealthRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
msg, err := server.Health(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
// RegisterHeadscaleServiceHandlerServer registers the http handlers for service HeadscaleService to "mux".
|
||||
// UnaryRPC :call HeadscaleServiceServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
@@ -1295,6 +1327,26 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_SetPolicy_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_HeadscaleService_Health_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/Health", runtime.WithHTTPPathPattern("/api/v1/health"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_HeadscaleService_Health_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_Health_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1743,6 +1795,23 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
|
||||
}
|
||||
forward_HeadscaleService_SetPolicy_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_HeadscaleService_Health_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/Health", runtime.WithHTTPPathPattern("/api/v1/health"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_HeadscaleService_Health_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_HeadscaleService_Health_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1771,6 +1840,7 @@ var (
pattern_HeadscaleService_DeleteApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "apikey", "prefix"}, ""))
pattern_HeadscaleService_GetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
pattern_HeadscaleService_SetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
pattern_HeadscaleService_Health_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "health"}, ""))
)

var (
@@ -1798,4 +1868,5 @@ var (
forward_HeadscaleService_DeleteApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetPolicy_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_SetPolicy_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_Health_0 = runtime.ForwardResponseMessage
)

@@ -43,6 +43,7 @@ const (
HeadscaleService_DeleteApiKey_FullMethodName = "/headscale.v1.HeadscaleService/DeleteApiKey"
HeadscaleService_GetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/GetPolicy"
HeadscaleService_SetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/SetPolicy"
HeadscaleService_Health_FullMethodName = "/headscale.v1.HeadscaleService/Health"
)

// HeadscaleServiceClient is the client API for HeadscaleService service.
@@ -78,6 +79,8 @@ type HeadscaleServiceClient interface {
// --- Policy start ---
GetPolicy(ctx context.Context, in *GetPolicyRequest, opts ...grpc.CallOption) (*GetPolicyResponse, error)
SetPolicy(ctx context.Context, in *SetPolicyRequest, opts ...grpc.CallOption) (*SetPolicyResponse, error)
// --- Health start ---
Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error)
}

type headscaleServiceClient struct {
|
||||
@@ -328,6 +331,16 @@ func (c *headscaleServiceClient) SetPolicy(ctx context.Context, in *SetPolicyReq
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(HealthResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_Health_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// HeadscaleServiceServer is the server API for HeadscaleService service.
|
||||
// All implementations must embed UnimplementedHeadscaleServiceServer
|
||||
// for forward compatibility.
|
||||
@@ -361,6 +374,8 @@ type HeadscaleServiceServer interface {
|
||||
// --- Policy start ---
|
||||
GetPolicy(context.Context, *GetPolicyRequest) (*GetPolicyResponse, error)
|
||||
SetPolicy(context.Context, *SetPolicyRequest) (*SetPolicyResponse, error)
|
||||
// --- Health start ---
|
||||
Health(context.Context, *HealthRequest) (*HealthResponse, error)
|
||||
mustEmbedUnimplementedHeadscaleServiceServer()
|
||||
}
|
||||
|
||||
@@ -443,6 +458,9 @@ func (UnimplementedHeadscaleServiceServer) GetPolicy(context.Context, *GetPolicy
|
||||
func (UnimplementedHeadscaleServiceServer) SetPolicy(context.Context, *SetPolicyRequest) (*SetPolicyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method SetPolicy not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) Health(context.Context, *HealthRequest) (*HealthResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Health not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
|
||||
func (UnimplementedHeadscaleServiceServer) testEmbeddedByValue() {}
|
||||
|
||||
@@ -896,6 +914,24 @@ func _HeadscaleService_SetPolicy_Handler(srv interface{}, ctx context.Context, d
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_Health_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(HealthRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).Health(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: HeadscaleService_Health_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).Health(ctx, req.(*HealthRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// HeadscaleService_ServiceDesc is the grpc.ServiceDesc for HeadscaleService service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
@@ -999,6 +1035,10 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
MethodName: "SetPolicy",
Handler: _HeadscaleService_SetPolicy_Handler,
},
{
MethodName: "Health",
Handler: _HeadscaleService_Health_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "headscale/v1/headscale.proto",

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.8
// protoc-gen-go v1.36.10
// protoc (unknown)
// source: headscale/v1/node.proto

@@ -729,6 +729,7 @@ func (*DeleteNodeResponse) Descriptor() ([]byte, []int) {
type ExpireNodeRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
Expiry *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expiry,proto3" json:"expiry,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -770,6 +771,13 @@ func (x *ExpireNodeRequest) GetNodeId() uint64 {
return 0
}

func (x *ExpireNodeRequest) GetExpiry() *timestamppb.Timestamp {
if x != nil {
return x.Expiry
}
return nil
}

type ExpireNodeResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
@@ -1349,9 +1357,10 @@ const file_headscale_v1_node_proto_rawDesc = "" +
"\x04node\x18\x01 \x01(\v2\x12.headscale.v1.NodeR\x04node\",\n" +
"\x11DeleteNodeRequest\x12\x17\n" +
"\anode_id\x18\x01 \x01(\x04R\x06nodeId\"\x14\n" +
"\x12DeleteNodeResponse\",\n" +
"\x12DeleteNodeResponse\"`\n" +
"\x11ExpireNodeRequest\x12\x17\n" +
"\anode_id\x18\x01 \x01(\x04R\x06nodeId\"<\n" +
"\anode_id\x18\x01 \x01(\x04R\x06nodeId\x122\n" +
"\x06expiry\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x06expiry\"<\n" +
"\x12ExpireNodeResponse\x12&\n" +
"\x04node\x18\x01 \x01(\v2\x12.headscale.v1.NodeR\x04node\"G\n" +
"\x11RenameNodeRequest\x12\x17\n" +
@@ -1439,16 +1448,17 @@ var file_headscale_v1_node_proto_depIdxs = []int32{
1, // 7: headscale.v1.GetNodeResponse.node:type_name -> headscale.v1.Node
1, // 8: headscale.v1.SetTagsResponse.node:type_name -> headscale.v1.Node
1, // 9: headscale.v1.SetApprovedRoutesResponse.node:type_name -> headscale.v1.Node
1, // 10: headscale.v1.ExpireNodeResponse.node:type_name -> headscale.v1.Node
1, // 11: headscale.v1.RenameNodeResponse.node:type_name -> headscale.v1.Node
1, // 12: headscale.v1.ListNodesResponse.nodes:type_name -> headscale.v1.Node
1, // 13: headscale.v1.MoveNodeResponse.node:type_name -> headscale.v1.Node
1, // 14: headscale.v1.DebugCreateNodeResponse.node:type_name -> headscale.v1.Node
15, // [15:15] is the sub-list for method output_type
15, // [15:15] is the sub-list for method input_type
15, // [15:15] is the sub-list for extension type_name
15, // [15:15] is the sub-list for extension extendee
0, // [0:15] is the sub-list for field type_name
25, // 10: headscale.v1.ExpireNodeRequest.expiry:type_name -> google.protobuf.Timestamp
1, // 11: headscale.v1.ExpireNodeResponse.node:type_name -> headscale.v1.Node
1, // 12: headscale.v1.RenameNodeResponse.node:type_name -> headscale.v1.Node
1, // 13: headscale.v1.ListNodesResponse.nodes:type_name -> headscale.v1.Node
1, // 14: headscale.v1.MoveNodeResponse.node:type_name -> headscale.v1.Node
1, // 15: headscale.v1.DebugCreateNodeResponse.node:type_name -> headscale.v1.Node
16, // [16:16] is the sub-list for method output_type
16, // [16:16] is the sub-list for method input_type
16, // [16:16] is the sub-list for extension type_name
16, // [16:16] is the sub-list for extension extendee
0, // [0:16] is the sub-list for field type_name
}

func init() { file_headscale_v1_node_proto_init() }

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.8
// protoc-gen-go v1.36.10
// protoc (unknown)
// source: headscale/v1/policy.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.8
// protoc-gen-go v1.36.10
// protoc (unknown)
// source: headscale/v1/preauthkey.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.8
// protoc-gen-go v1.36.10
// protoc (unknown)
// source: headscale/v1/user.proto

@@ -164,6 +164,29 @@
]
}
},
"/api/v1/health": {
"get": {
"summary": "--- Health start ---",
"operationId": "HeadscaleService_Health",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1HealthResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"tags": [
"HeadscaleService"
]
}
},
"/api/v1/node": {
"get": {
"operationId": "HeadscaleService_ListNodes",
@@ -383,6 +406,13 @@
"required": true,
"type": "string",
"format": "uint64"
},
{
"name": "expiry",
"in": "query",
"required": false,
"type": "string",
"format": "date-time"
}
],
"tags": [
@@ -1056,6 +1086,14 @@
}
}
},
"v1HealthResponse": {
"type": "object",
"properties": {
"databaseConnectivity": {
"type": "boolean"
}
}
},
"v1ListApiKeysResponse": {
"type": "object",
"properties": {

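Note (not part of the changeset above): the OpenAPI additions describe a new GET /api/v1/health route that returns a v1HealthResponse with a single databaseConnectivity boolean. A minimal sketch of probing that route from Go is shown below; the base URL and the healthResponse struct name are illustrative assumptions, not taken from this diff.

// health_probe_sketch.go — minimal sketch, assuming a headscale server
// reachable at http://localhost:8080 exposing the new /api/v1/health route.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// healthResponse mirrors the v1HealthResponse schema from the generated OpenAPI
// definition (one boolean field, databaseConnectivity).
type healthResponse struct {
	DatabaseConnectivity bool `json:"databaseConnectivity"`
}

func main() {
	baseURL := "http://localhost:8080" // assumption: local headscale instance

	resp, err := http.Get(baseURL + "/api/v1/health")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var h healthResponse
	if err := json.NewDecoder(resp.Body).Decode(&h); err != nil {
		panic(err)
	}
	// A 200 status with databaseConnectivity=true indicates a healthy server.
	fmt.Printf("status=%d databaseConnectivity=%v\n", resp.StatusCode, h.DatabaseConnectivity)
}
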
138 go.mod
@@ -1,61 +1,59 @@
|
||||
module github.com/juanfont/headscale
|
||||
|
||||
go 1.24.4
|
||||
|
||||
toolchain go1.24.6
|
||||
go 1.25
|
||||
|
||||
require (
|
||||
github.com/arl/statsviz v0.6.0
|
||||
github.com/cenkalti/backoff/v5 v5.0.2
|
||||
github.com/chasefleming/elem-go v0.30.0
|
||||
github.com/coder/websocket v1.8.13
|
||||
github.com/coreos/go-oidc/v3 v3.14.1
|
||||
github.com/creachadair/command v0.1.22
|
||||
github.com/arl/statsviz v0.7.2
|
||||
github.com/cenkalti/backoff/v5 v5.0.3
|
||||
github.com/chasefleming/elem-go v0.31.0
|
||||
github.com/coder/websocket v1.8.14
|
||||
github.com/coreos/go-oidc/v3 v3.16.0
|
||||
github.com/creachadair/command v0.2.0
|
||||
github.com/creachadair/flax v0.0.5
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
|
||||
github.com/docker/docker v28.2.2+incompatible
|
||||
github.com/docker/docker v28.5.1+incompatible
|
||||
github.com/fsnotify/fsnotify v1.9.0
|
||||
github.com/glebarez/sqlite v1.11.0
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.4
|
||||
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.5
|
||||
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced
|
||||
github.com/gofrs/uuid/v5 v5.3.2
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3
|
||||
github.com/jagottsicher/termcolor v1.0.2
|
||||
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25
|
||||
github.com/ory/dockertest/v3 v3.12.0
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1
|
||||
github.com/pkg/profile v1.7.0
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/common v0.65.0
|
||||
github.com/pterm/pterm v0.12.81
|
||||
github.com/puzpuzpuz/xsync/v4 v4.1.0
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/prometheus/common v0.66.1
|
||||
github.com/pterm/pterm v0.12.82
|
||||
github.com/puzpuzpuz/xsync/v4 v4.2.0
|
||||
github.com/rs/zerolog v1.34.0
|
||||
github.com/samber/lo v1.51.0
|
||||
github.com/sasha-s/go-deadlock v0.3.5
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/spf13/viper v1.20.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/samber/lo v1.52.0
|
||||
github.com/sasha-s/go-deadlock v0.3.6
|
||||
github.com/spf13/cobra v1.10.1
|
||||
github.com/spf13/viper v1.21.0
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33
|
||||
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694
|
||||
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993
|
||||
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97
|
||||
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
|
||||
golang.org/x/crypto v0.40.0
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
|
||||
golang.org/x/net v0.42.0
|
||||
golang.org/x/oauth2 v0.30.0
|
||||
golang.org/x/sync v0.16.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822
|
||||
google.golang.org/grpc v1.73.0
|
||||
google.golang.org/protobuf v1.36.6
|
||||
golang.org/x/crypto v0.43.0
|
||||
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b
|
||||
golang.org/x/net v0.46.0
|
||||
golang.org/x/oauth2 v0.32.0
|
||||
golang.org/x/sync v0.17.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4
|
||||
google.golang.org/grpc v1.75.1
|
||||
google.golang.org/protobuf v1.36.10
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gorm.io/driver/postgres v1.6.0
|
||||
gorm.io/gorm v1.30.0
|
||||
gorm.io/gorm v1.31.0
|
||||
tailscale.com v1.86.5
|
||||
zgo.at/zcache/v2 v2.2.0
|
||||
zgo.at/zcache/v2 v2.4.1
|
||||
zombiezen.com/go/postgrestest v1.0.1
|
||||
)
|
||||
|
||||
@@ -77,17 +75,17 @@ require (
|
||||
// together, e.g:
|
||||
// go get modernc.org/libc@v1.55.3 modernc.org/sqlite@v1.33.1
|
||||
require (
|
||||
modernc.org/libc v1.62.1 // indirect
|
||||
modernc.org/libc v1.66.10 // indirect
|
||||
modernc.org/mathutil v1.7.1 // indirect
|
||||
modernc.org/memory v1.10.0 // indirect
|
||||
modernc.org/sqlite v1.37.0
|
||||
modernc.org/memory v1.11.0 // indirect
|
||||
modernc.org/sqlite v1.39.1
|
||||
)
|
||||
|
||||
require (
|
||||
atomicgo.dev/cursor v0.2.0 // indirect
|
||||
atomicgo.dev/keyboard v0.2.9 // indirect
|
||||
atomicgo.dev/schedule v0.1.0 // indirect
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
dario.cat/mergo v1.0.2 // indirect
|
||||
filippo.io/edwards25519 v1.1.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
@@ -111,17 +109,18 @@ require (
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/clipperhouse/uax29/v2 v2.2.0 // indirect
|
||||
github.com/containerd/console v1.0.5 // indirect
|
||||
github.com/containerd/continuity v0.4.5 // indirect
|
||||
github.com/containerd/errdefs v0.3.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
|
||||
github.com/creachadair/mds v0.24.3 // indirect
|
||||
github.com/creachadair/mds v0.25.10 // indirect
|
||||
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 // indirect
|
||||
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/cli v28.1.1+incompatible // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/cli v28.5.1+incompatible // indirect
|
||||
github.com/docker/go-connections v0.6.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/felixge/fgprof v0.9.5 // indirect
|
||||
@@ -130,13 +129,12 @@ require (
|
||||
github.com/gaissmai/bart v0.18.0 // indirect
|
||||
github.com/glebarez/go-sqlite v1.22.0 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
@@ -144,10 +142,10 @@ require (
|
||||
github.com/google/go-github v17.0.0+incompatible // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect
|
||||
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a // indirect
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gookit/color v1.5.4 // indirect
|
||||
github.com/gookit/color v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
github.com/hashicorp/go-version v1.7.0 // indirect
|
||||
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
|
||||
@@ -155,20 +153,20 @@ require (
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||
github.com/jackc/pgx/v5 v5.7.4 // indirect
|
||||
github.com/jackc/pgx/v5 v5.7.6 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.2 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jsimonetti/rtnetlink v1.4.1 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/compress v1.18.1 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/lithammer/fuzzysearch v1.1.8 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.19 // indirect
|
||||
github.com/mdlayher/genetlink v1.3.2 // indirect
|
||||
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 // indirect
|
||||
github.com/mdlayher/sdnotify v1.0.0 // indirect
|
||||
@@ -181,27 +179,25 @@ require (
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/ncruces/go-strftime v0.1.9 // indirect
|
||||
github.com/ncruces/go-strftime v1.0.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/opencontainers/runc v1.3.0 // indirect
|
||||
github.com/opencontainers/runc v1.3.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a // indirect
|
||||
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus-community/pro-bing v0.4.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
||||
github.com/safchain/ethtool v0.3.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.9.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.12.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.14.0 // indirect
|
||||
github.com/spf13/cast v1.8.0 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/spf13/afero v1.15.0 // indirect
|
||||
github.com/spf13/cast v1.10.0 // indirect
|
||||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect
|
||||
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect
|
||||
@@ -211,7 +207,7 @@ require (
|
||||
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d // indirect
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 // indirect
|
||||
github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da // indirect
|
||||
github.com/vishvananda/netns v0.0.4 // indirect
|
||||
github.com/vishvananda/netns v0.0.5 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
@@ -219,22 +215,22 @@ require (
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
|
||||
go.opentelemetry.io/otel v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.36.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 // indirect
|
||||
golang.org/x/mod v0.26.0 // indirect
|
||||
golang.org/x/sys v0.34.0 // indirect
|
||||
golang.org/x/term v0.33.0 // indirect
|
||||
golang.org/x/text v0.27.0 // indirect
|
||||
golang.org/x/mod v0.29.0 // indirect
|
||||
golang.org/x/sys v0.37.0 // indirect
|
||||
golang.org/x/term v0.36.0 // indirect
|
||||
golang.org/x/text v0.30.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
golang.org/x/tools v0.35.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 // indirect
|
||||
)
|
||||
|
||||
|
||||
314 go.sum
@@ -8,8 +8,8 @@ atomicgo.dev/keyboard v0.2.9 h1:tOsIid3nlPLZ3lwgG8KZMp/SFmr7P0ssEN5JUsm78K8=
|
||||
atomicgo.dev/keyboard v0.2.9/go.mod h1:BC4w9g00XkxH/f1HXhW2sXmJFOCWbKn9xrOunSFtExQ=
|
||||
atomicgo.dev/schedule v0.1.0 h1:nTthAbhZS5YZmgYbb2+DH8uQIZcTlIrd4eYr3UQxEjs=
|
||||
atomicgo.dev/schedule v0.1.0/go.mod h1:xeUa3oAkiuHYh8bKiQBRojqAMq3PXXbJujjb0hw8pEU=
|
||||
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
|
||||
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
|
||||
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
|
||||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
|
||||
filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc=
|
||||
@@ -37,8 +37,8 @@ github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7V
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/arl/statsviz v0.6.0 h1:jbW1QJkEYQkufd//4NDYRSNBpwJNrdzPahF7ZmoGdyE=
|
||||
github.com/arl/statsviz v0.6.0/go.mod h1:0toboo+YGSUXDaS4g1D5TVS4dXs7S7YYT5J/qnW2h8s=
|
||||
github.com/arl/statsviz v0.7.2 h1:xnuIfRiXE4kvxEcfGL+IE3mKH1BXNHuE+eJELIh7oOA=
|
||||
github.com/arl/statsviz v0.7.2/go.mod h1:XlrbiT7xYT03xaW9JMMfD8KFUhBOESJwfyNJu83PbB0=
|
||||
github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM=
|
||||
@@ -82,12 +82,12 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chasefleming/elem-go v0.30.0 h1:BlhV1ekv1RbFiM8XZUQeln1Ikb4D+bu2eDO4agREvok=
|
||||
github.com/chasefleming/elem-go v0.30.0/go.mod h1:hz73qILBIKnTgOujnSMtEj20/epI+f6vg71RUilJAA4=
|
||||
github.com/chasefleming/elem-go v0.31.0 h1:vZsuKmKdv6idnUbu3awMruxTiFqZ/ertFJFAyBCkVhI=
|
||||
github.com/chasefleming/elem-go v0.31.0/go.mod h1:UBmmZfso2LkXA0HZInbcwsmhE/LXFClEcBPNCGeARtA=
|
||||
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
|
||||
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
|
||||
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
|
||||
@@ -99,8 +99,10 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
|
||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/cilium/ebpf v0.17.3 h1:FnP4r16PWYSE4ux6zN+//jMcW4nMVRvuTLVTvCjyyjg=
|
||||
github.com/cilium/ebpf v0.17.3/go.mod h1:G5EDHij8yiLzaqn0WjyfJHvRa+3aDlReIaLVRMvOyJk=
|
||||
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
|
||||
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
|
||||
github.com/clipperhouse/uax29/v2 v2.2.0 h1:ChwIKnQN3kcZteTXMgb1wztSgaU+ZemkgWdohwgs8tY=
|
||||
github.com/clipperhouse/uax29/v2 v2.2.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM=
|
||||
github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g=
|
||||
github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=
|
||||
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
|
||||
github.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/qqsc=
|
||||
github.com/containerd/console v1.0.5/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
|
||||
@@ -114,16 +116,18 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0=
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
|
||||
github.com/coreos/go-oidc/v3 v3.14.1 h1:9ePWwfdwC4QKRlCXsJGou56adA/owXczOzwKdOumLqk=
|
||||
github.com/coreos/go-oidc/v3 v3.14.1/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU=
|
||||
github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow=
|
||||
github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creachadair/command v0.1.22 h1:WmdrURwZdmPD1jm13SjKooaMoqo7mW1qI2BPCShs154=
|
||||
github.com/creachadair/command v0.1.22/go.mod h1:YFc+OMGucqTpxwQg/iJnNg8BMNmRPDK60rYy8ckgKwE=
|
||||
github.com/creachadair/command v0.2.0 h1:qTA9cMMhZePAxFoNdnk6F6nn94s1qPndIg9hJbqI9cA=
|
||||
github.com/creachadair/command v0.2.0/go.mod h1:j+Ar+uYnFsHpkMeV9kGj6lJ45y9u2xqtg8FYy6cm+0o=
|
||||
github.com/creachadair/flax v0.0.5 h1:zt+CRuXQASxwQ68e9GHAOnEgAU29nF0zYMHOCrL5wzE=
|
||||
github.com/creachadair/flax v0.0.5/go.mod h1:F1PML0JZLXSNDMNiRGK2yjm5f+L9QCHchyHBldFymj8=
|
||||
github.com/creachadair/mds v0.24.3 h1:X7cM2ymZSyl4IVWnfyXLxRXMJ6awhbcWvtLPhfnTaqI=
|
||||
github.com/creachadair/mds v0.24.3/go.mod h1:0oeHt9QWu8VfnmskOL4zi2CumjEvB29ScmtOmdrhFeU=
|
||||
github.com/creachadair/mds v0.25.2 h1:xc0S0AfDq5GX9KUR5sLvi5XjA61/P6S5e0xFs1vA18Q=
|
||||
github.com/creachadair/mds v0.25.2/go.mod h1:+s4CFteFRj4eq2KcGHW8Wei3u9NyzSPzNV32EvjyK/Q=
|
||||
github.com/creachadair/mds v0.25.10 h1:9k9JB35D1xhOCFl0liBhagBBp8fWWkKZrA7UXsfoHtA=
|
||||
github.com/creachadair/mds v0.25.10/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs=
|
||||
github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc=
|
||||
github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
@@ -141,12 +145,12 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
|
||||
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
|
||||
github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k=
|
||||
github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw=
|
||||
github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/cli v28.5.1+incompatible h1:ESutzBALAD6qyCLqbQSEf1a/U8Ybms5agw59yGVc+yY=
|
||||
github.com/docker/cli v28.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM=
|
||||
github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
|
||||
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
@@ -170,25 +174,25 @@ github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec
|
||||
github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=
|
||||
github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw=
|
||||
github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.4 h1:KOPEt27qy1cNzHfMZbp9YTmEuzkY4F4wrdsJW9WFk1U=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.4/go.mod h1:y/6gPAH6QGAgP1UfHMiXcqGeJ88/GRQbfCReE1JJD5Y=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.5 h1:1OyorA5LtdQw12cyJDEHuTrEV3GiXiIhS4/QTTa/SM8=
|
||||
github.com/go-gormigrate/gormigrate/v2 v2.1.5/go.mod h1:mj9ekk/7CPF3VjopaFvWKN2v7fN3D9d3eEOAXRhi/+M=
|
||||
github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY=
|
||||
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
|
||||
github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY=
|
||||
github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw=
|
||||
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 h1:F8d1AJ6M9UQCavhwmO6ZsrYLfG8zVFWfEfMS2MXPkSY=
|
||||
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
|
||||
github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
|
||||
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced h1:Q311OHjMh/u5E2TITc++WlTP5We0xNseRMkHDyvhW7I=
|
||||
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
|
||||
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 h1:cf60tHxREO3g1nroKr2osU3JWZsJzkfi7rEg+oAB0Lo=
|
||||
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737/go.mod h1:MIS0jDzbU/vuM9MC4YnBITCv+RYuTRq8dJzmCrFsK9g=
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
@@ -199,8 +203,6 @@ github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU=
|
||||
github.com/gofrs/uuid/v5 v5.3.2 h1:2jfO8j3XgSwlz/wHqemAEugfnTlikAYHhnqQ8Xh4fE0=
|
||||
github.com/gofrs/uuid/v5 v5.3.2/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
@@ -223,22 +225,24 @@ github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdF
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4=
|
||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
|
||||
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a h1:rDA3FfmxwXR+BVKKdz55WwMJ1pD2hJQNW31d+l3mPk4=
|
||||
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d h1:KJIErDwbSHjnp/SGzE5ed8Aol7JsKiI5X7yWKAtzhM0=
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gookit/assert v0.1.1 h1:lh3GcawXe/p+cU7ESTZ5Ui3Sm/x8JWpIis4/1aF0mY0=
|
||||
github.com/gookit/assert v0.1.1/go.mod h1:jS5bmIVQZTIwk42uXl4lyj4iaaxx32tqH16CFj0VX2E=
|
||||
github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
|
||||
github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo=
|
||||
github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0=
|
||||
github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w=
|
||||
github.com/gookit/color v1.6.0 h1:JjJXBTk1ETNyqyilJhkTXJYYigHG24TM9Xa2M1xAhRA=
|
||||
github.com/gookit/color v1.6.0/go.mod h1:9ACFc7/1IpHGBW8RwuDm/0YEnhg3dwwXpoMsmtyHfjs=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 h1:+epNPbD5EqgpEMm5wrl4Hqts3jZt8+kYaqUisuuIGTk=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
|
||||
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
|
||||
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU=
|
||||
@@ -255,8 +259,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.7.4 h1:9wKznZrhWa2QiHL+NjTSPP6yjl3451BX3imWDnokYlg=
|
||||
github.com/jackc/pgx/v5 v5.7.4/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ=
|
||||
github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
|
||||
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
|
||||
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
||||
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jagottsicher/termcolor v1.0.2 h1:fo0c51pQSuLBN1+yVX2ZE+hE+P7ULb/TY8eRowJnrsM=
|
||||
@@ -274,10 +278,10 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jsimonetti/rtnetlink v1.4.1 h1:JfD4jthWBqZMEffc5RjgmlzpYttAVw1sdnmiNaPO3hE=
|
||||
github.com/jsimonetti/rtnetlink v1.4.1/go.mod h1:xJjT7t59UIZ62GLZbv6PLLo8VFrostJMPBAheR6OM8w=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
|
||||
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
||||
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
||||
@@ -312,8 +316,8 @@ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=
|
||||
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
|
||||
github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw=
|
||||
github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o=
|
||||
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg=
|
||||
@@ -340,8 +344,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
|
||||
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
|
||||
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25 h1:9bCMuD3TcnjeqjPT2gSlha4asp8NvgcFRYExCaikCxk=
|
||||
@@ -350,16 +354,16 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
|
||||
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
||||
github.com/opencontainers/runc v1.3.0 h1:cvP7xbEvD0QQAs0nZKLzkVog2OPZhI/V2w3WmTmUSXI=
|
||||
github.com/opencontainers/runc v1.3.0/go.mod h1:9wbWt42gV+KRxKRVVugNP6D5+PQciRbenB4fLVsqGPs=
|
||||
github.com/opencontainers/runc v1.3.2 h1:GUwgo0Fx9M/pl2utaSYlJfdBcXAB/CZXDxe322lvJ3Y=
|
||||
github.com/opencontainers/runc v1.3.2/go.mod h1:F7UQQEsxcjUNnFpT1qPLHZBKYP7yWwk6hq8suLy9cl0=
|
||||
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
|
||||
github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw=
|
||||
github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||
github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a h1:S+AGcmAESQ0pXCUNnRH7V+bOUIgkSX5qVt2cNKCrm0Q=
|
||||
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 h1:QTvNkZ5ylY0PGgA+Lih+GdboMLY/G9SEGLMEGVjTVA4=
|
||||
github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1 h1:EMacvLRUd2O1K0eWod27ZP5CY1iTNkhBDLSN+Q4JEvA=
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1/go.mod h1:qXbiq/2X4ZUMMshsqlWyTHOcw7ns+GZmlqZZN05ZHcQ=
|
||||
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
|
||||
@@ -376,14 +380,14 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4=
|
||||
github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
|
||||
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
|
||||
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/pterm/pterm v0.12.27/go.mod h1:PhQ89w4i95rhgE+xedAoqous6K9X+r6aSOI2eFF7DZI=
|
||||
github.com/pterm/pterm v0.12.29/go.mod h1:WI3qxgvoQFFGKGjGnJR849gU0TsEOvKn5Q8LlY1U7lg=
|
||||
github.com/pterm/pterm v0.12.30/go.mod h1:MOqLIyMOgmTDz9yorcYbcw+HsgoZo3BQfg2wtl3HEFE=
|
||||
@@ -391,15 +395,13 @@ github.com/pterm/pterm v0.12.31/go.mod h1:32ZAWZVXD7ZfG0s8qqHXePte42kdz8ECtRyEej
|
||||
github.com/pterm/pterm v0.12.33/go.mod h1:x+h2uL+n7CP/rel9+bImHD5lF3nM9vJj80k9ybiiTTE=
|
||||
github.com/pterm/pterm v0.12.36/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5bUw8T8=
|
||||
github.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkGTYf8s=
|
||||
github.com/pterm/pterm v0.12.81 h1:ju+j5I2++FO1jBKMmscgh5h5DPFDFMB7epEjSoKehKA=
|
||||
github.com/pterm/pterm v0.12.81/go.mod h1:TyuyrPjnxfwP+ccJdBTeWHtd/e0ybQHkOS/TakajZCw=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.1.0 h1:x9eHRl4QhZFIPJ17yl4KKW9xLyVWbb3/Yq4SXpjF71U=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.1.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
|
||||
github.com/pterm/pterm v0.12.82 h1:+D9wYhCaeaK0FIQoZtqbNQuNpe2lB2tajKKsTd5paVQ=
|
||||
github.com/pterm/pterm v0.12.82/go.mod h1:TyuyrPjnxfwP+ccJdBTeWHtd/e0ybQHkOS/TakajZCw=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.2.0 h1:dlxm77dZj2c3rxq0/XNvvUKISAmovoXF4a4qM6Wvkr0=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.2.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
@@ -409,29 +411,28 @@ github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0=
|
||||
github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs=
|
||||
github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k=
|
||||
github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk=
|
||||
github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI=
|
||||
github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
|
||||
github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU=
|
||||
github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U=
|
||||
github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=
|
||||
github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=
|
||||
github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw=
|
||||
github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
|
||||
github.com/sasha-s/go-deadlock v0.3.6 h1:TR7sfOnZ7x00tWPfD397Peodt57KzMDo+9Ae9rMiUmw=
|
||||
github.com/sasha-s/go-deadlock v0.3.6/go.mod h1:CUqNyyvMxTyjFqDT7MRg9mb4Dv/btmGTqSR+rky/UXo=
|
||||
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
|
||||
github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
|
||||
github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk=
|
||||
github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
|
||||
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
|
||||
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
|
||||
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
|
||||
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
|
||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
||||
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
|
||||
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
|
||||
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
@@ -442,8 +443,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ=
|
||||
@@ -464,6 +465,8 @@ github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d h1:mnqtPWYyvNiPU9l
|
||||
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d/go.mod h1:9BzmlFc3OLqLzLTF/5AY+BMs+clxMqyhSGzgXIm8mNI=
|
||||
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694 h1:95eIP97c88cqAFU/8nURjgI9xxPbD+Ci6mY/a79BI/w=
|
||||
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694/go.mod h1:veguaG8tVg1H/JG5RfpoUW41I+O8ClPElo/fTYr8mMk=
|
||||
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993 h1:FyiiAvDAxpB0DrW2GW3KOVfi3YFOtsQUEeFWbf55JJU=
|
||||
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993/go.mod h1:xJkMmR3t+thnUQhA3Q4m2VSlS5pcOq+CIjmU/xfKKx4=
|
||||
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97 h1:JJkDnrAhHvOCttk8z9xeZzcDlzzkRA7+Duxj9cwOyxk=
|
||||
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97/go.mod h1:9jS8HxwsP2fU4ESZ7DZL+fpH/U66EVlVMzdgznH12RM=
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14=
|
||||
@@ -485,8 +488,8 @@ github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1
|
||||
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM=
|
||||
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA=
|
||||
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
|
||||
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
||||
github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
|
||||
github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
@@ -499,79 +502,70 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ
|
||||
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
|
||||
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
|
||||
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
|
||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=
|
||||
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
|
||||
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
|
||||
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
|
||||
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
|
||||
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
|
||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
||||
go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
|
||||
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek=
|
||||
go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g=
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
|
||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
|
||||
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
|
||||
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
|
||||
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b h1:18qgiDvlvH7kk8Ioa8Ov+K6xCi0GMvmGfGW0sgd/SYA=
|
||||
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
|
||||
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8=
|
||||
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w=
|
||||
golang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
|
||||
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
|
||||
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
|
||||
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
|
||||
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
|
||||
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -592,8 +586,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
|
||||
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -601,42 +595,40 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
|
||||
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
|
||||
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
|
||||
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
|
||||
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
||||
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
|
||||
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
|
||||
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
|
||||
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 h1:i8QOKZfYg6AbGVZzUAY3LrNWCKF8O6zFisU9Wl9RER4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
|
||||
google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI=
|
||||
google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
|
||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
@@ -652,8 +644,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
|
||||
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
|
||||
gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs=
|
||||
gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
|
||||
gorm.io/gorm v1.31.0 h1:0VlycGreVhK7RF/Bwt51Fk8v0xLiiiFdbGDPIZQ7mJY=
|
||||
gorm.io/gorm v1.31.0/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
|
||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k=
|
||||
@@ -662,26 +654,28 @@ honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
|
||||
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
|
||||
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
|
||||
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
||||
modernc.org/cc/v4 v4.25.2 h1:T2oH7sZdGvTaie0BRNFbIYsabzCxUQg8nLqCdQ2i0ic=
|
||||
modernc.org/cc/v4 v4.25.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.25.1 h1:TFSzPrAGmDsdnhT9X2UrcPMI3N/mJ9/X9ykKXwLhDsU=
|
||||
modernc.org/ccgo/v4 v4.25.1/go.mod h1:njjuAYiPflywOOrm3B7kCB444ONP5pAVr8PIEoE0uDw=
|
||||
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
|
||||
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
|
||||
modernc.org/cc/v4 v4.26.5 h1:xM3bX7Mve6G8K8b+T11ReenJOT+BmVqQj0FY5T4+5Y4=
|
||||
modernc.org/cc/v4 v4.26.5/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.28.1 h1:wPKYn5EC/mYTqBO373jKjvX2n+3+aK7+sICCv4Fjy1A=
|
||||
modernc.org/ccgo/v4 v4.28.1/go.mod h1:uD+4RnfrVgE6ec9NGguUNdhqzNIeeomeXf6CL0GTE5Q=
|
||||
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
|
||||
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||
modernc.org/libc v1.62.1 h1:s0+fv5E3FymN8eJVmnk0llBe6rOxCu/DEU+XygRbS8s=
|
||||
modernc.org/libc v1.62.1/go.mod h1:iXhATfJQLjG3NWy56a6WVU73lWOcdYVxsvwCgoPljuo=
|
||||
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
||||
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
||||
modernc.org/libc v1.66.10 h1:yZkb3YeLx4oynyR+iUsXsybsX4Ubx7MQlSYEw4yj59A=
|
||||
modernc.org/libc v1.66.10/go.mod h1:8vGSEwvoUoltr4dlywvHqjtAqHBaw0j1jI7iFBTAr2I=
|
||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||
modernc.org/memory v1.10.0 h1:fzumd51yQ1DxcOxSO+S6X7+QTuVU+n8/Aj7swYjFfC4=
|
||||
modernc.org/memory v1.10.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
|
||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||
modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
|
||||
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||
modernc.org/sqlite v1.37.0 h1:s1TMe7T3Q3ovQiK2Ouz4Jwh7dw4ZDqbebSDTlSJdfjI=
|
||||
modernc.org/sqlite v1.37.0/go.mod h1:5YiWv+YviqGMuGw4V+PNplcyaJ5v+vQd7TQOgkACoJM=
|
||||
modernc.org/sqlite v1.39.1 h1:H+/wGFzuSCIEVCvXYVHX5RQglwhMOvtHSv+VtidL2r4=
|
||||
modernc.org/sqlite v1.39.1/go.mod h1:9fjQZ0mB1LLP0GYrp39oOJXx/I2sxEnZtzCmEQIKvGE=
|
||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
@@ -690,7 +684,7 @@ software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB
|
||||
software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
|
||||
tailscale.com v1.86.5 h1:yBtWFjuLYDmxVnfnvPbZNZcKADCYgNfMd0rUAOA9XCs=
|
||||
tailscale.com v1.86.5/go.mod h1:Lm8dnzU2i/Emw15r6sl3FRNp/liSQ/nYw6ZSQvIdZ1M=
|
||||
zgo.at/zcache/v2 v2.2.0 h1:K29/IPjMniZfveYE+IRXfrl11tMzHkIPuyGrfVZ2fGo=
|
||||
zgo.at/zcache/v2 v2.2.0/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
|
||||
zgo.at/zcache/v2 v2.4.1 h1:Dfjoi8yI0Uq7NCc4lo2kaQJJmp9Mijo21gef+oJstbY=
|
||||
zgo.at/zcache/v2 v2.4.1/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
|
||||
zombiezen.com/go/postgrestest v1.0.1 h1:aXoADQAJmZDU3+xilYVut0pHhgc0sF8ZspPW9gFNwP4=
|
||||
zombiezen.com/go/postgrestest v1.0.1/go.mod h1:marlZezr+k2oSJrvXHnZUs1olHqpE9czlz8ZYkVxliQ=
|
||||
|
||||
@@ -380,53 +380,45 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
writer http.ResponseWriter,
req *http.Request,
) {
if err := func() error {
log.Trace().
Caller().
Str("client_address", req.RemoteAddr).
Msg("HTTP authentication invoked")
log.Trace().
Caller().
Str("client_address", req.RemoteAddr).
Msg("HTTP authentication invoked")

authHeader := req.Header.Get("Authorization")
authHeader := req.Header.Get("Authorization")

if !strings.HasPrefix(authHeader, AuthPrefix) {
log.Error().
Caller().
Str("client_address", req.RemoteAddr).
Msg(`missing "Bearer " prefix in "Authorization" header`)
writer.WriteHeader(http.StatusUnauthorized)
_, err := writer.Write([]byte("Unauthorized"))
return err
writeUnauthorized := func(statusCode int) {
writer.WriteHeader(statusCode)
if _, err := writer.Write([]byte("Unauthorized")); err != nil {
log.Error().Err(err).Msg("writing HTTP response failed")
}
}

valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
if err != nil {
log.Error().
Caller().
Err(err).
Str("client_address", req.RemoteAddr).
Msg("failed to validate token")

writer.WriteHeader(http.StatusInternalServerError)
_, err := writer.Write([]byte("Unauthorized"))
return err
}

if !valid {
log.Info().
Str("client_address", req.RemoteAddr).
Msg("invalid token")

writer.WriteHeader(http.StatusUnauthorized)
_, err := writer.Write([]byte("Unauthorized"))
return err
}

return nil
}(); err != nil {
if !strings.HasPrefix(authHeader, AuthPrefix) {
log.Error().
Caller().
Str("client_address", req.RemoteAddr).
Msg(`missing "Bearer " prefix in "Authorization" header`)
writeUnauthorized(http.StatusUnauthorized)
return
}

valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
if err != nil {
log.Info().
Caller().
Err(err).
Msg("Failed to write HTTP response")
Str("client_address", req.RemoteAddr).
Msg("failed to validate token")
writeUnauthorized(http.StatusUnauthorized)
return
}

if !valid {
log.Info().
Str("client_address", req.RemoteAddr).
Msg("invalid token")
writeUnauthorized(http.StatusUnauthorized)
return
}

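The hunk above drops the middleware's inline error-returning closure in favour of a small writeUnauthorized helper, logs response-write failures instead of bubbling them up, and answers 401 for every failure mode (missing "Bearer " prefix, validation error, invalid key). A minimal, self-contained sketch of that pattern follows; the validate callback, demo key, and listen address are stand-ins, not headscale's exact code.

package main

import (
	"log"
	"net/http"
	"strings"
)

const authPrefix = "Bearer "

// bearerAuth lets a request through only when the Authorization header
// carries "Bearer <key>" and validate accepts the key. validate is a
// stand-in for the real API-key check (h.state.ValidateAPIKey in the diff).
func bearerAuth(validate func(key string) (bool, error), next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		writeUnauthorized := func(code int) {
			w.WriteHeader(code)
			if _, err := w.Write([]byte("Unauthorized")); err != nil {
				log.Printf("writing HTTP response failed: %v", err)
			}
		}

		header := r.Header.Get("Authorization")
		if !strings.HasPrefix(header, authPrefix) {
			writeUnauthorized(http.StatusUnauthorized)
			return
		}

		// Fail closed: an error from the validator is treated like an invalid key.
		valid, err := validate(strings.TrimPrefix(header, authPrefix))
		if err != nil || !valid {
			writeUnauthorized(http.StatusUnauthorized)
			return
		}

		next.ServeHTTP(w, r)
	})
}

func main() {
	// Demo wiring only; the key and address are placeholders.
	validate := func(key string) (bool, error) { return key == "demo-api-key", nil }
	ok := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { _, _ = w.Write([]byte("ok")) })
	log.Fatal(http.ListenAndServe("127.0.0.1:8081", bearerAuth(validate, ok)))
}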
@@ -454,6 +446,7 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {

router.HandleFunc("/robots.txt", h.RobotsHandler).Methods(http.MethodGet)
router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
router.HandleFunc("/version", h.VersionHandler).Methods(http.MethodGet)
router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
router.HandleFunc("/register/{registration_id}", h.authProvider.RegisterHandler).
Methods(http.MethodGet)

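The only functional change in this hunk is the new GET /version route alongside /health. As an illustration of the kind of handler that could back such a route, here is a hypothetical versionHandler returning a JSON payload; the package name, field name, and version source are assumptions, not headscale's actual VersionHandler.

package handlers

import (
	"encoding/json"
	"net/http"
)

// versionResponse is a hypothetical payload shape for a /version endpoint.
type versionResponse struct {
	Version string `json:"version"`
}

// versionHandler returns an http.HandlerFunc that reports the given version.
func versionHandler(version string) http.HandlerFunc {
	return func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		// Encoding a small struct to the ResponseWriter; errors are unlikely
		// here, so they are deliberately ignored in this sketch.
		_ = json.NewEncoder(w).Encode(versionResponse{Version: version})
	}
}

It would be registered the same way as the routes above, e.g. router.HandleFunc("/version", versionHandler(v)).Methods(http.MethodGet).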
@@ -1,6 +1,7 @@
package hscontrol

import (
"cmp"
"context"
"errors"
"fmt"
@@ -11,6 +12,7 @@ import (

"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/types/change"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
"tailscale.com/tailcfg"
@@ -25,26 +27,84 @@ type AuthProvider interface {

func (h *Headscale) handleRegister(
ctx context.Context,
regReq tailcfg.RegisterRequest,
req tailcfg.RegisterRequest,
machineKey key.MachinePublic,
) (*tailcfg.RegisterResponse, error) {
node, ok := h.state.GetNodeByNodeKey(regReq.NodeKey)
// Check for logout/expiry FIRST, before checking auth key.
// Tailscale clients may send logout requests with BOTH a past expiry AND an auth key.
// A past expiry takes precedence - it's a logout regardless of other fields.
if !req.Expiry.IsZero() && req.Expiry.Before(time.Now()) {
log.Debug().
Str("node.key", req.NodeKey.ShortString()).
Time("expiry", req.Expiry).
Bool("has_auth", req.Auth != nil).
Msg("Detected logout attempt with past expiry")

if ok {
resp, err := h.handleExistingNode(node.AsStruct(), regReq, machineKey)
if err != nil {
return nil, fmt.Errorf("handling existing node: %w", err)
// This is a logout attempt (expiry in the past)
if node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok {
log.Debug().
Uint64("node.id", node.ID().Uint64()).
Str("node.name", node.Hostname()).
Bool("is_ephemeral", node.IsEphemeral()).
Bool("has_authkey", node.AuthKey().Valid()).
Msg("Found existing node for logout, calling handleLogout")

resp, err := h.handleLogout(node, req, machineKey)
if err != nil {
return nil, fmt.Errorf("handling logout: %w", err)
}
if resp != nil {
return resp, nil
}
} else {
log.Warn().
Str("node.key", req.NodeKey.ShortString()).
Msg("Logout attempt but node not found in NodeStore")
}

return resp, nil
}

if regReq.Followup != "" {
return h.waitForFollowup(ctx, regReq)
// If the register request does not contain a Auth struct, it means we are logging
// out an existing node (legacy logout path for clients that send Auth=nil).
if req.Auth == nil {
// If the register request present a NodeKey that is currently in use, we will
// check if the node needs to be sent to re-auth, or if the node is logging out.
// We do not look up nodes by [key.MachinePublic] as it might belong to multiple
// nodes, separated by users and this path is handling expiring/logout paths.
if node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok {
resp, err := h.handleLogout(node, req, machineKey)
if err != nil {
return nil, fmt.Errorf("handling existing node: %w", err)
}

// If resp is not nil, we have a response to return to the node.
// If resp is nil, we should proceed and see if the node is trying to re-auth.
if resp != nil {
return resp, nil
}
} else {
// If the register request is not attempting to register a node, and
// we cannot match it with an existing node, we consider that unexpected
// as only register nodes should attempt to log out.
log.Debug().
Str("node.key", req.NodeKey.ShortString()).
Str("machine.key", machineKey.ShortString()).
Bool("unexpected", true).
Msg("received register request with no auth, and no existing node")
}
}

if regReq.Auth != nil && regReq.Auth.AuthKey != "" {
resp, err := h.handleRegisterWithAuthKey(regReq, machineKey)
// If the [tailcfg.RegisterRequest] has a Followup URL, it means that the
// node has already started the registration process and we should wait for
// it to finish the original registration.
if req.Followup != "" {
return h.waitForFollowup(ctx, req, machineKey)
}

// Pre authenticated keys are handled slightly different than interactive
// logins as they can be done fully sync and we can respond to the node with
// the result as it is waiting.
if isAuthKey(req) {
resp, err := h.handleRegisterWithAuthKey(req, machineKey)
if err != nil {
// Preserve HTTPError types so they can be handled properly by the HTTP layer
var httpErr HTTPError
@@ -58,7 +118,7 @@ func (h *Headscale) handleRegister(
return resp, nil
}

resp, err := h.handleRegisterInteractive(regReq, machineKey)
resp, err := h.handleRegisterInteractive(req, machineKey)
if err != nil {
return nil, fmt.Errorf("handling register interactive: %w", err)
}
@@ -66,20 +126,34 @@ func (h *Headscale) handleRegister(
return resp, nil
}

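Two predicates drive the dispatch above: a request whose expiry is set and already in the past is a logout (checked before anything else, since clients may send an auth key alongside it), and a request whose Auth carries an AuthKey goes down the pre-authenticated path. A compact sketch of those checks, using only the tailcfg.RegisterRequest fields that appear in the diff; the helper names are illustrative and isAuthKey may differ from headscale's own.

package auth

import (
	"time"

	"tailscale.com/tailcfg"
)

// isLogout reports whether the register request is a logout attempt:
// the client set an expiry that is already in the past. This takes
// precedence over any auth key included in the same request.
func isLogout(req tailcfg.RegisterRequest, now time.Time) bool {
	return !req.Expiry.IsZero() && req.Expiry.Before(now)
}

// isAuthKey reports whether the request carries a pre-authenticated key
// and can therefore be answered synchronously.
func isAuthKey(req tailcfg.RegisterRequest) bool {
	return req.Auth != nil && req.Auth.AuthKey != ""
}

With these in place, the order in handleRegister reads: logout first, then Followup (wait for an in-flight interactive login), then the auth-key path, and finally the interactive flow.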
func (h *Headscale) handleExistingNode(
|
||||
node *types.Node,
|
||||
regReq tailcfg.RegisterRequest,
|
||||
// handleLogout checks if the [tailcfg.RegisterRequest] is a
|
||||
// logout attempt from a node. If the node is not attempting to
|
||||
func (h *Headscale) handleLogout(
|
||||
node types.NodeView,
|
||||
req tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
if node.MachineKey != machineKey {
|
||||
// Fail closed if it looks like this is an attempt to modify a node where
// the node key and the machine key the noise session was started with do
// not align.
|
||||
if node.MachineKey() != machineKey {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "node exist with different machine key", nil)
|
||||
}
|
||||
|
||||
expired := node.IsExpired()
|
||||
// Note: We do NOT return early if req.Auth is set, because Tailscale clients
|
||||
// may send logout requests with BOTH a past expiry AND an auth key.
|
||||
// A past expiry indicates logout, regardless of whether Auth is present.
|
||||
// The expiry check below will handle the logout logic.
|
||||
|
||||
// If the node is expired and this is not a re-authentication attempt,
|
||||
// force the client to re-authenticate
|
||||
if expired && regReq.Auth == nil {
|
||||
// force the client to re-authenticate.
|
||||
// TODO(kradalby): I wonder if this is a path we ever hit?
|
||||
if node.IsExpired() {
|
||||
log.Trace().Str("node.name", node.Hostname()).
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Interface("reg.req", req).
|
||||
Bool("unexpected", true).
|
||||
Msg("Node key expired, forcing re-authentication")
|
||||
return &tailcfg.RegisterResponse{
|
||||
NodeKeyExpired: true,
|
||||
MachineAuthorized: false,
|
||||
@@ -87,49 +161,76 @@ func (h *Headscale) handleExistingNode(
|
||||
}, nil
|
||||
}
|
||||
|
||||
if !expired && !regReq.Expiry.IsZero() {
|
||||
requestExpiry := regReq.Expiry
|
||||
// If we get here, the node is not currently expired and is not trying to
// re-authenticate.
// The node is likely logging out, but before we run that logic, we validate
// that the node is not attempting to tamper with or extend its expiry.
// If it is not, we expire the node or, in the case of an ephemeral node, delete it.
|
||||
|
||||
// The client is trying to extend their key, this is not allowed.
|
||||
if requestExpiry.After(time.Now()) {
|
||||
return nil, NewHTTPError(http.StatusBadRequest, "extending key is not allowed", nil)
|
||||
}
|
||||
|
||||
// If the request expiry is in the past, we consider it a logout.
|
||||
if requestExpiry.Before(time.Now()) {
|
||||
if node.IsEphemeral() {
|
||||
c, err := h.state.DeleteNode(node.View())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("deleting ephemeral node: %w", err)
|
||||
}
|
||||
|
||||
h.Change(c)
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
updatedNode, c, err := h.state.SetNodeExpiry(node.ID, requestExpiry)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting node expiry: %w", err)
|
||||
}
|
||||
|
||||
h.Change(c)
|
||||
|
||||
// CRITICAL: Use the updated node view for the response
|
||||
// The original node object has stale expiry information
|
||||
node = updatedNode.AsStruct()
|
||||
// The client is trying to extend their key, this is not allowed.
|
||||
if req.Expiry.After(time.Now()) {
|
||||
return nil, NewHTTPError(http.StatusBadRequest, "extending key is not allowed", nil)
|
||||
}
|
||||
|
||||
return nodeToRegisterResponse(node), nil
|
||||
// If the request expiry is in the past, we consider it a logout.
|
||||
if req.Expiry.Before(time.Now()) {
|
||||
log.Debug().
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Str("node.name", node.Hostname()).
|
||||
Bool("is_ephemeral", node.IsEphemeral()).
|
||||
Bool("has_authkey", node.AuthKey().Valid()).
|
||||
Time("req.expiry", req.Expiry).
|
||||
Msg("Processing logout request with past expiry")
|
||||
|
||||
if node.IsEphemeral() {
|
||||
log.Info().
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Str("node.name", node.Hostname()).
|
||||
Msg("Deleting ephemeral node during logout")
|
||||
|
||||
c, err := h.state.DeleteNode(node)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("deleting ephemeral node: %w", err)
|
||||
}
|
||||
|
||||
h.Change(c)
|
||||
|
||||
return &tailcfg.RegisterResponse{
|
||||
NodeKeyExpired: true,
|
||||
MachineAuthorized: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Str("node.name", node.Hostname()).
|
||||
Msg("Node is not ephemeral, setting expiry instead of deleting")
|
||||
}
|
||||
|
||||
// Update the internal state with the node's new expiry, meaning it is
// logged out.
|
||||
updatedNode, c, err := h.state.SetNodeExpiry(node.ID(), req.Expiry)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting node expiry: %w", err)
|
||||
}
|
||||
|
||||
h.Change(c)
|
||||
|
||||
return nodeToRegisterResponse(updatedNode), nil
|
||||
}
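The expiry handling in handleLogout reduces to a three-way rule: a zero expiry is not a logout, a future expiry is a forbidden key extension, and a past expiry is a logout (delete if ephemeral, otherwise persist the expiry). A small, hypothetical helper capturing just that rule (classifyExpiry does not exist in the codebase):

type expiryIntent int

const (
    expiryNone   expiryIntent = iota // zero expiry: not a logout request
    expiryExtend                     // future expiry: reject, extending keys is not allowed
    expiryLogout                     // past expiry: log the node out
)

func classifyExpiry(reqExpiry, now time.Time) expiryIntent {
    switch {
    case reqExpiry.IsZero():
        return expiryNone
    case reqExpiry.After(now):
        return expiryExtend
    default:
        return expiryLogout
    }
}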
|
||||
|
||||
func nodeToRegisterResponse(node *types.Node) *tailcfg.RegisterResponse {
|
||||
// isAuthKey reports whether the register request is a registration request
// using a pre-auth key.
|
||||
func isAuthKey(req tailcfg.RegisterRequest) bool {
|
||||
return req.Auth != nil && req.Auth.AuthKey != ""
|
||||
}
|
||||
|
||||
func nodeToRegisterResponse(node types.NodeView) *tailcfg.RegisterResponse {
|
||||
return &tailcfg.RegisterResponse{
|
||||
// TODO(kradalby): Only send for user-owned nodes
|
||||
// and not tagged nodes when tags is working.
|
||||
User: *node.User.TailscaleUser(),
|
||||
Login: *node.User.TailscaleLogin(),
|
||||
User: node.UserView().TailscaleUser(),
|
||||
Login: node.UserView().TailscaleLogin(),
|
||||
NodeKeyExpired: node.IsExpired(),
|
||||
|
||||
// Headscale does not implement the concept of machine authorization
|
||||
@@ -141,9 +242,10 @@ func nodeToRegisterResponse(node *types.Node) *tailcfg.RegisterResponse {
|
||||
|
||||
func (h *Headscale) waitForFollowup(
|
||||
ctx context.Context,
|
||||
regReq tailcfg.RegisterRequest,
|
||||
req tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
fu, err := url.Parse(regReq.Followup)
|
||||
fu, err := url.Parse(req.Followup)
|
||||
if err != nil {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "invalid followup URL", err)
|
||||
}
|
||||
@@ -159,21 +261,68 @@ func (h *Headscale) waitForFollowup(
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "registration timed out", err)
|
||||
case node := <-reg.Registered:
|
||||
if node == nil {
|
||||
return nil, NewHTTPError(http.StatusUnauthorized, "node not found", nil)
|
||||
// registration is expired in the cache, instruct the client to try a new registration
|
||||
return h.reqToNewRegisterResponse(req, machineKey)
|
||||
}
|
||||
return nodeToRegisterResponse(node), nil
|
||||
return nodeToRegisterResponse(node.View()), nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, NewHTTPError(http.StatusNotFound, "followup registration not found", nil)
|
||||
// if the follow-up registration isn't found anymore, instruct the client to try a new registration
|
||||
return h.reqToNewRegisterResponse(req, machineKey)
|
||||
}
|
||||
|
||||
// reqToNewRegisterResponse refreshes the registration flow by creating a new
|
||||
// registration ID and returning the corresponding AuthURL so the client can
|
||||
// restart the authentication process.
|
||||
func (h *Headscale) reqToNewRegisterResponse(
|
||||
req tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
newRegID, err := types.NewRegistrationID()
|
||||
if err != nil {
|
||||
return nil, NewHTTPError(http.StatusInternalServerError, "failed to generate registration ID", err)
|
||||
}
|
||||
|
||||
// Ensure we have a valid hostname
|
||||
hostname := util.EnsureHostname(
|
||||
req.Hostinfo,
|
||||
machineKey.String(),
|
||||
req.NodeKey.String(),
|
||||
)
|
||||
|
||||
// Ensure we have valid hostinfo
|
||||
hostinfo := cmp.Or(req.Hostinfo, &tailcfg.Hostinfo{})
|
||||
hostinfo.Hostname = hostname
|
||||
|
||||
nodeToRegister := types.NewRegisterNode(
|
||||
types.Node{
|
||||
Hostname: hostname,
|
||||
MachineKey: machineKey,
|
||||
NodeKey: req.NodeKey,
|
||||
Hostinfo: hostinfo,
|
||||
LastSeen: ptr.To(time.Now()),
|
||||
},
|
||||
)
|
||||
|
||||
if !req.Expiry.IsZero() {
|
||||
nodeToRegister.Node.Expiry = &req.Expiry
|
||||
}
|
||||
|
||||
log.Info().Msgf("New followup node registration using key: %s", newRegID)
|
||||
h.state.SetRegistrationCacheEntry(newRegID, nodeToRegister)
|
||||
|
||||
return &tailcfg.RegisterResponse{
|
||||
AuthURL: h.authProvider.AuthURL(newRegID),
|
||||
}, nil
|
||||
}
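Both reqToNewRegisterResponse above and handleRegisterInteractive below rely on util.EnsureHostname to produce a usable hostname when Hostinfo is nil or empty. Its implementation is not part of this diff; the sketch below only illustrates the contract the callers assume, and the fallback format is a guess:

// Sketch of the assumed contract, not the real util.EnsureHostname.
func ensureHostnameSketch(hi *tailcfg.Hostinfo, machineKey, nodeKey string) string {
    if hi != nil && hi.Hostname != "" {
        return hi.Hostname
    }
    // Fall back to something stable derived from the keys so the node
    // still gets a deterministic, non-empty name.
    return "node-" + machineKey[:12]
}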
|
||||
|
||||
func (h *Headscale) handleRegisterWithAuthKey(
|
||||
regReq tailcfg.RegisterRequest,
|
||||
req tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
node, changed, err := h.state.HandleNodeFromPreAuthKey(
|
||||
regReq,
|
||||
req,
|
||||
machineKey,
|
||||
)
|
||||
if err != nil {
|
||||
@@ -225,18 +374,26 @@ func (h *Headscale) handleRegisterWithAuthKey(
|
||||
// h.Change(policyChange)
|
||||
// }
|
||||
|
||||
user := node.User()
|
||||
|
||||
return &tailcfg.RegisterResponse{
|
||||
resp := &tailcfg.RegisterResponse{
|
||||
MachineAuthorized: true,
|
||||
NodeKeyExpired: node.IsExpired(),
|
||||
User: *user.TailscaleUser(),
|
||||
Login: *user.TailscaleLogin(),
|
||||
}, nil
|
||||
User: node.UserView().TailscaleUser(),
|
||||
Login: node.UserView().TailscaleLogin(),
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Caller().
|
||||
Interface("reg.resp", resp).
|
||||
Interface("reg.req", req).
|
||||
Str("node.name", node.Hostname()).
|
||||
Uint64("node.id", node.ID().Uint64()).
|
||||
Msg("RegisterResponse")
|
||||
|
||||
return resp, nil
|
||||
}
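For context, the synchronous path above is triggered by a register request that carries a pre-auth key. A sketch of the request shape (the Auth struct type name is assumed; the diff only shows that Auth is non-nil and has an AuthKey field):

req := tailcfg.RegisterRequest{
    NodeKey:  nodeKey.Public(),
    Hostinfo: &tailcfg.Hostinfo{Hostname: "my-node"},
    Auth:     &tailcfg.RegisterResponseAuth{AuthKey: "hskey-example"}, // assumed type name
}
// isAuthKey(req) is true, so the server responds immediately with
// MachineAuthorized: true instead of handing back an AuthURL.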
|
||||
|
||||
func (h *Headscale) handleRegisterInteractive(
|
||||
regReq tailcfg.RegisterRequest,
|
||||
req tailcfg.RegisterRequest,
|
||||
machineKey key.MachinePublic,
|
||||
) (*tailcfg.RegisterResponse, error) {
|
||||
registrationId, err := types.NewRegistrationID()
|
||||
@@ -244,19 +401,42 @@ func (h *Headscale) handleRegisterInteractive(
|
||||
return nil, fmt.Errorf("generating registration ID: %w", err)
|
||||
}
|
||||
|
||||
nodeToRegister := types.RegisterNode{
|
||||
Node: types.Node{
|
||||
Hostname: regReq.Hostinfo.Hostname,
|
||||
// Ensure we have a valid hostname
|
||||
hostname := util.EnsureHostname(
|
||||
req.Hostinfo,
|
||||
machineKey.String(),
|
||||
req.NodeKey.String(),
|
||||
)
|
||||
|
||||
// Ensure we have valid hostinfo
|
||||
hostinfo := cmp.Or(req.Hostinfo, &tailcfg.Hostinfo{})
|
||||
if req.Hostinfo == nil {
|
||||
log.Warn().
|
||||
Str("machine.key", machineKey.ShortString()).
|
||||
Str("node.key", req.NodeKey.ShortString()).
|
||||
Str("generated.hostname", hostname).
|
||||
Msg("Received registration request with nil hostinfo, generated default hostname")
|
||||
} else if req.Hostinfo.Hostname == "" {
|
||||
log.Warn().
|
||||
Str("machine.key", machineKey.ShortString()).
|
||||
Str("node.key", req.NodeKey.ShortString()).
|
||||
Str("generated.hostname", hostname).
|
||||
Msg("Received registration request with empty hostname, generated default")
|
||||
}
|
||||
hostinfo.Hostname = hostname
|
||||
|
||||
nodeToRegister := types.NewRegisterNode(
|
||||
types.Node{
|
||||
Hostname: hostname,
|
||||
MachineKey: machineKey,
|
||||
NodeKey: regReq.NodeKey,
|
||||
Hostinfo: regReq.Hostinfo,
|
||||
NodeKey: req.NodeKey,
|
||||
Hostinfo: hostinfo,
|
||||
LastSeen: ptr.To(time.Now()),
|
||||
},
|
||||
Registered: make(chan *types.Node),
|
||||
}
|
||||
)
|
||||
|
||||
if !regReq.Expiry.IsZero() {
|
||||
nodeToRegister.Node.Expiry = ®Req.Expiry
|
||||
if !req.Expiry.IsZero() {
|
||||
nodeToRegister.Node.Expiry = &req.Expiry
|
||||
}
|
||||
|
||||
h.state.SetRegistrationCacheEntry(
|
||||
|
||||
hscontrol/auth_test.go (new file, 3006 lines): diff suppressed because it is too large.
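Behaviourally, the waitForFollowup change earlier means a client returning with a stale Followup URL is no longer rejected; it receives a fresh AuthURL and restarts the flow. A hypothetical check of that behaviour (helper names are illustrative and not taken from the suppressed test file):

resp, err := handleRegisterForTest(t, hs, staleFollowupRequest, machineKey) // hypothetical helper
require.NoError(t, err)
require.NotEmpty(t, resp.AuthURL) // client is told to start a new registration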
@@ -1,6 +1,6 @@
|
||||
package capver
|
||||
|
||||
//Generated DO NOT EDIT
|
||||
// Generated DO NOT EDIT
|
||||
|
||||
import "tailscale.com/tailcfg"
|
||||
|
||||
@@ -37,16 +37,15 @@ var tailscaleToCapVer = map[string]tailcfg.CapabilityVersion{
|
||||
"v1.84.2": 116,
|
||||
}
|
||||
|
||||
|
||||
var capVerToTailscaleVer = map[tailcfg.CapabilityVersion]string{
|
||||
90: "v1.64.0",
|
||||
95: "v1.66.0",
|
||||
97: "v1.68.0",
|
||||
102: "v1.70.0",
|
||||
104: "v1.72.0",
|
||||
106: "v1.74.0",
|
||||
109: "v1.78.0",
|
||||
113: "v1.80.0",
|
||||
115: "v1.82.0",
|
||||
116: "v1.84.0",
|
||||
90: "v1.64.0",
|
||||
95: "v1.66.0",
|
||||
97: "v1.68.0",
|
||||
102: "v1.70.0",
|
||||
104: "v1.72.0",
|
||||
106: "v1.74.0",
|
||||
109: "v1.78.0",
|
||||
113: "v1.80.0",
|
||||
115: "v1.82.0",
|
||||
116: "v1.84.0",
|
||||
}
|
||||
|
||||
@@ -932,6 +932,26 @@ AND auth_key_id NOT IN (
|
||||
},
|
||||
Rollback: func(db *gorm.DB) error { return nil },
|
||||
},
|
||||
{
|
||||
// Drop all tables that are no longer in use and have existed before.
// They are potentially still present from broken migrations in the past.
|
||||
ID: "202510311551",
|
||||
Migrate: func(tx *gorm.DB) error {
|
||||
for _, oldTable := range []string{"namespaces", "machines", "shared_machines", "kvs", "pre_auth_key_acl_tags", "routes"} {
|
||||
err := tx.Migrator().DropTable(oldTable)
|
||||
if err != nil {
|
||||
log.Trace().Str("table", oldTable).
|
||||
Err(err).
|
||||
Msg("Error dropping old table, continuing...")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Rollback: func(tx *gorm.DB) error {
|
||||
return nil
|
||||
},
|
||||
},
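A quick way to confirm that migration 202510311551 removed the legacy tables is to query sqlite_master directly. A small sketch, assuming plain database/sql access to the SQLite file:

rows, err := db.Query(`SELECT name FROM sqlite_master
    WHERE type = 'table'
      AND name IN ('namespaces','machines','shared_machines','kvs','pre_auth_key_acl_tags','routes')`)
// After the migration has run, this query should return zero rows.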
|
||||
// From this point, the following rules must be followed:
|
||||
// - NEVER use gorm.AutoMigrate, write the exact migration steps needed
|
||||
// - AutoMigrate depends on the struct staying exactly the same, which it won't over time.
|
||||
@@ -962,7 +982,17 @@ AND auth_key_id NOT IN (
|
||||
ctx, cancel := context.WithTimeout(context.Background(), contextTimeoutSecs*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := squibble.Validate(ctx, sqlConn, dbSchema); err != nil {
|
||||
opts := squibble.DigestOptions{
|
||||
IgnoreTables: []string{
|
||||
// Litestream tables, these are inserted by
|
||||
// litestream and not part of our schema
|
||||
// https://litestream.io/how-it-works
|
||||
"_litestream_lock",
|
||||
"_litestream_seq",
|
||||
},
|
||||
}
|
||||
|
||||
if err := squibble.Validate(ctx, sqlConn, dbSchema, &opts); err != nil {
|
||||
return nil, fmt.Errorf("validating schema: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,19 +5,19 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"regexp"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/types/change"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
@@ -27,6 +27,8 @@ const (
|
||||
NodeGivenNameTrimSize = 2
|
||||
)
|
||||
|
||||
var invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+")
|
||||
|
||||
var (
|
||||
ErrNodeNotFound = errors.New("node not found")
|
||||
ErrNodeRouteIsNotAvailable = errors.New("route is not available on node")
|
||||
@@ -261,6 +263,10 @@ func SetLastSeen(tx *gorm.DB, nodeID types.NodeID, lastSeen time.Time) error {
|
||||
func RenameNode(tx *gorm.DB,
|
||||
nodeID types.NodeID, newName string,
|
||||
) error {
|
||||
if err := util.ValidateHostname(newName); err != nil {
|
||||
return fmt.Errorf("renaming node: %w", err)
|
||||
}
|
||||
|
||||
// Check if the new name is unique
|
||||
var count int64
|
||||
if err := tx.Model(&types.Node{}).Where("given_name = ? AND id != ?", newName, nodeID).Count(&count).Error; err != nil {
|
||||
@@ -378,6 +384,14 @@ func RegisterNodeForTest(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *n
|
||||
node.IPv4 = ipv4
|
||||
node.IPv6 = ipv6
|
||||
|
||||
var err error
|
||||
node.Hostname, err = util.NormaliseHostname(node.Hostname)
|
||||
if err != nil {
|
||||
newHostname := util.InvalidString()
|
||||
log.Info().Err(err).Str("invalid-hostname", node.Hostname).Str("new-hostname", newHostname).Msgf("Invalid hostname, replacing")
|
||||
node.Hostname = newHostname
|
||||
}
|
||||
|
||||
if node.GivenName == "" {
|
||||
givenName, err := EnsureUniqueGivenName(tx, node.Hostname)
|
||||
if err != nil {
|
||||
@@ -434,7 +448,10 @@ func NodeSave(tx *gorm.DB, node *types.Node) error {
|
||||
}
|
||||
|
||||
func generateGivenName(suppliedName string, randomSuffix bool) (string, error) {
|
||||
suppliedName = util.ConvertWithFQDNRules(suppliedName)
|
||||
// Strip invalid DNS characters for givenName
|
||||
suppliedName = strings.ToLower(suppliedName)
|
||||
suppliedName = invalidDNSRegex.ReplaceAllString(suppliedName, "")
|
||||
|
||||
if len(suppliedName) > util.LabelHostnameLength {
|
||||
return "", types.ErrHostnameTooLong
|
||||
}
|
||||
@@ -494,41 +511,6 @@ func EnsureUniqueGivenName(
|
||||
return givenName, nil
|
||||
}
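To make the given-name stripping above concrete: with invalidDNSRegex = [^a-z0-9-.]+, hostnames made of non-ASCII characters collapse to an empty string, which is why registration falls back to a generated "invalid-" name (see TestNodeNaming below). A runnable illustration of the stripping step only:

package main

import (
    "fmt"
    "regexp"
    "strings"
)

var invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+")

func main() {
    for _, h := range []string{"My-Laptop", "我的电脑", "server-北京-01"} {
        stripped := invalidDNSRegex.ReplaceAllString(strings.ToLower(h), "")
        fmt.Printf("%q -> %q\n", h, stripped) // "我的电脑" -> ""
    }
}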
|
||||
|
||||
// ExpireExpiredNodes checks for nodes that have expired since the last check
|
||||
// and returns a time to be used for the next check, a StateUpdate
|
||||
// containing the expired nodes, and a boolean indicating if any nodes were found.
|
||||
func ExpireExpiredNodes(tx *gorm.DB,
|
||||
lastCheck time.Time,
|
||||
) (time.Time, []change.ChangeSet, bool) {
|
||||
// use the time of the start of the function to ensure we
// don't miss some nodes by returning it _after_ we have
// checked everything.
|
||||
started := time.Now()
|
||||
|
||||
expired := make([]*tailcfg.PeerChange, 0)
|
||||
var updates []change.ChangeSet
|
||||
|
||||
nodes, err := ListNodes(tx)
|
||||
if err != nil {
|
||||
return time.Unix(0, 0), nil, false
|
||||
}
|
||||
for _, node := range nodes {
|
||||
if node.IsExpired() && node.Expiry.After(lastCheck) {
|
||||
expired = append(expired, &tailcfg.PeerChange{
|
||||
NodeID: tailcfg.NodeID(node.ID),
|
||||
KeyExpiry: node.Expiry,
|
||||
})
|
||||
updates = append(updates, change.KeyExpiry(node.ID))
|
||||
}
|
||||
}
|
||||
|
||||
if len(expired) > 0 {
|
||||
return started, updates, true
|
||||
}
|
||||
|
||||
return started, nil, false
|
||||
}
|
||||
|
||||
// EphemeralGarbageCollector is a garbage collector that will delete nodes after
|
||||
// a certain amount of time.
|
||||
// It is used to delete ephemeral nodes that have disconnected and should be
|
||||
|
||||
@@ -640,7 +640,7 @@ func TestListEphemeralNodes(t *testing.T) {
|
||||
assert.Equal(t, nodeEph.Hostname, ephemeralNodes[0].Hostname)
|
||||
}
|
||||
|
||||
func TestRenameNode(t *testing.T) {
|
||||
func TestNodeNaming(t *testing.T) {
|
||||
db, err := newSQLiteTestDB()
|
||||
if err != nil {
|
||||
t.Fatalf("creating db: %s", err)
|
||||
@@ -672,6 +672,26 @@ func TestRenameNode(t *testing.T) {
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
}
|
||||
|
||||
// Using non-ASCII characters in the hostname can
|
||||
// break your network, so they should be replaced when registering
|
||||
// a node.
|
||||
// https://github.com/juanfont/headscale/issues/2343
|
||||
nodeInvalidHostname := types.Node{
|
||||
MachineKey: key.NewMachine().Public(),
|
||||
NodeKey: key.NewNode().Public(),
|
||||
Hostname: "我的电脑",
|
||||
UserID: user2.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
}
|
||||
|
||||
nodeShortHostname := types.Node{
|
||||
MachineKey: key.NewMachine().Public(),
|
||||
NodeKey: key.NewNode().Public(),
|
||||
Hostname: "a",
|
||||
UserID: user2.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
}
|
||||
|
||||
err = db.DB.Save(&node).Error
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -684,7 +704,11 @@ func TestRenameNode(t *testing.T) {
|
||||
return err
|
||||
}
|
||||
_, err = RegisterNodeForTest(tx, node2, nil, nil)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = RegisterNodeForTest(tx, nodeInvalidHostname, ptr.To(mpp("100.64.0.66/32").Addr()), nil)
|
||||
_, err = RegisterNodeForTest(tx, nodeShortHostname, ptr.To(mpp("100.64.0.67/32").Addr()), nil)
|
||||
return err
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@@ -692,10 +716,12 @@ func TestRenameNode(t *testing.T) {
|
||||
nodes, err := db.ListNodes()
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Len(t, nodes, 2)
|
||||
assert.Len(t, nodes, 4)
|
||||
|
||||
t.Logf("node1 %s %s", nodes[0].Hostname, nodes[0].GivenName)
|
||||
t.Logf("node2 %s %s", nodes[1].Hostname, nodes[1].GivenName)
|
||||
t.Logf("node3 %s %s", nodes[2].Hostname, nodes[2].GivenName)
|
||||
t.Logf("node4 %s %s", nodes[3].Hostname, nodes[3].GivenName)
|
||||
|
||||
assert.Equal(t, nodes[0].Hostname, nodes[0].GivenName)
|
||||
assert.NotEqual(t, nodes[1].Hostname, nodes[1].GivenName)
|
||||
@@ -707,6 +733,10 @@ func TestRenameNode(t *testing.T) {
|
||||
assert.Len(t, nodes[1].Hostname, 4)
|
||||
assert.Len(t, nodes[0].GivenName, 4)
|
||||
assert.Len(t, nodes[1].GivenName, 13)
|
||||
assert.Contains(t, nodes[2].Hostname, "invalid-") // invalid chars
|
||||
assert.Contains(t, nodes[2].GivenName, "invalid-")
|
||||
assert.Contains(t, nodes[3].Hostname, "invalid-") // too short
|
||||
assert.Contains(t, nodes[3].GivenName, "invalid-")
|
||||
|
||||
// Nodes can be renamed to a unique name
|
||||
err = db.Write(func(tx *gorm.DB) error {
|
||||
@@ -716,7 +746,7 @@ func TestRenameNode(t *testing.T) {
|
||||
|
||||
nodes, err = db.ListNodes()
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, nodes, 2)
|
||||
assert.Len(t, nodes, 4)
|
||||
assert.Equal(t, "test", nodes[0].Hostname)
|
||||
assert.Equal(t, "newname", nodes[0].GivenName)
|
||||
|
||||
@@ -728,7 +758,7 @@ func TestRenameNode(t *testing.T) {
|
||||
|
||||
nodes, err = db.ListNodes()
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, nodes, 2)
|
||||
assert.Len(t, nodes, 4)
|
||||
assert.Equal(t, "test", nodes[0].Hostname)
|
||||
assert.Equal(t, "newname", nodes[0].GivenName)
|
||||
assert.Equal(t, "test", nodes[1].GivenName)
|
||||
@@ -738,6 +768,149 @@ func TestRenameNode(t *testing.T) {
|
||||
return RenameNode(tx, nodes[0].ID, "test")
|
||||
})
|
||||
assert.ErrorContains(t, err, "name is not unique")
|
||||
|
||||
// Rename invalid chars
|
||||
err = db.Write(func(tx *gorm.DB) error {
|
||||
return RenameNode(tx, nodes[2].ID, "我的电脑")
|
||||
})
|
||||
assert.ErrorContains(t, err, "invalid characters")
|
||||
|
||||
// Rename too short
|
||||
err = db.Write(func(tx *gorm.DB) error {
|
||||
return RenameNode(tx, nodes[3].ID, "a")
|
||||
})
|
||||
assert.ErrorContains(t, err, "at least 2 characters")
|
||||
|
||||
// Rename with emoji
|
||||
err = db.Write(func(tx *gorm.DB) error {
|
||||
return RenameNode(tx, nodes[0].ID, "hostname-with-💩")
|
||||
})
|
||||
assert.ErrorContains(t, err, "invalid characters")
|
||||
|
||||
// Rename with only emoji
|
||||
err = db.Write(func(tx *gorm.DB) error {
|
||||
return RenameNode(tx, nodes[0].ID, "🚀")
|
||||
})
|
||||
assert.ErrorContains(t, err, "invalid characters")
|
||||
}
|
||||
|
||||
func TestRenameNodeComprehensive(t *testing.T) {
|
||||
db, err := newSQLiteTestDB()
|
||||
if err != nil {
|
||||
t.Fatalf("creating db: %s", err)
|
||||
}
|
||||
|
||||
user, err := db.CreateUser(types.User{Name: "test"})
|
||||
require.NoError(t, err)
|
||||
|
||||
node := types.Node{
|
||||
ID: 0,
|
||||
MachineKey: key.NewMachine().Public(),
|
||||
NodeKey: key.NewNode().Public(),
|
||||
Hostname: "testnode",
|
||||
UserID: user.ID,
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
}
|
||||
|
||||
err = db.DB.Save(&node).Error
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.DB.Transaction(func(tx *gorm.DB) error {
|
||||
_, err := RegisterNodeForTest(tx, node, nil, nil)
|
||||
return err
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
nodes, err := db.ListNodes()
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, nodes, 1)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
newName string
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "uppercase_rejected",
|
||||
newName: "User2-Host",
|
||||
wantErr: "must be lowercase",
|
||||
},
|
||||
{
|
||||
name: "underscore_rejected",
|
||||
newName: "test_node",
|
||||
wantErr: "invalid characters",
|
||||
},
|
||||
{
|
||||
name: "at_sign_uppercase_rejected",
|
||||
newName: "Test@Host",
|
||||
wantErr: "must be lowercase",
|
||||
},
|
||||
{
|
||||
name: "at_sign_rejected",
|
||||
newName: "test@host",
|
||||
wantErr: "invalid characters",
|
||||
},
|
||||
{
|
||||
name: "chinese_chars_with_dash_rejected",
|
||||
newName: "server-北京-01",
|
||||
wantErr: "invalid characters",
|
||||
},
|
||||
{
|
||||
name: "chinese_only_rejected",
|
||||
newName: "我的电脑",
|
||||
wantErr: "invalid characters",
|
||||
},
|
||||
{
|
||||
name: "emoji_with_text_rejected",
|
||||
newName: "laptop-🚀",
|
||||
wantErr: "invalid characters",
|
||||
},
|
||||
{
|
||||
name: "mixed_chinese_emoji_rejected",
|
||||
newName: "测试💻机器",
|
||||
wantErr: "invalid characters",
|
||||
},
|
||||
{
|
||||
name: "only_emojis_rejected",
|
||||
newName: "🎉🎊",
|
||||
wantErr: "invalid characters",
|
||||
},
|
||||
{
|
||||
name: "only_at_signs_rejected",
|
||||
newName: "@@@",
|
||||
wantErr: "invalid characters",
|
||||
},
|
||||
{
|
||||
name: "starts_with_dash_rejected",
|
||||
newName: "-test",
|
||||
wantErr: "cannot start or end with a hyphen",
|
||||
},
|
||||
{
|
||||
name: "ends_with_dash_rejected",
|
||||
newName: "test-",
|
||||
wantErr: "cannot start or end with a hyphen",
|
||||
},
|
||||
{
|
||||
name: "too_long_hostname_rejected",
|
||||
newName: "this-is-a-very-long-hostname-that-exceeds-sixty-three-characters-limit",
|
||||
wantErr: "must not exceed 63 characters",
|
||||
},
|
||||
{
|
||||
name: "too_short_hostname_rejected",
|
||||
newName: "a",
|
||||
wantErr: "at least 2 characters",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := db.Write(func(tx *gorm.DB) error {
|
||||
return RenameNode(tx, nodes[0].ID, tt.newName)
|
||||
})
|
||||
assert.ErrorContains(t, err, tt.wantErr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestListPeers(t *testing.T) {
|
||||
|
||||
hscontrol/db/testdata/sqlite/headscale_0.26.1_dump_schema-to-0.27.0-old-table-cleanup.sql (vendored, new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
PRAGMA foreign_keys=OFF;
|
||||
BEGIN TRANSACTION;
|
||||
CREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));
|
||||
INSERT INTO migrations VALUES('202312101416');
|
||||
INSERT INTO migrations VALUES('202312101430');
|
||||
INSERT INTO migrations VALUES('202402151347');
|
||||
INSERT INTO migrations VALUES('2024041121742');
|
||||
INSERT INTO migrations VALUES('202406021630');
|
||||
INSERT INTO migrations VALUES('202409271400');
|
||||
INSERT INTO migrations VALUES('202407191627');
|
||||
INSERT INTO migrations VALUES('202408181235');
|
||||
INSERT INTO migrations VALUES('202501221827');
|
||||
INSERT INTO migrations VALUES('202501311657');
|
||||
INSERT INTO migrations VALUES('202502070949');
|
||||
INSERT INTO migrations VALUES('202502131714');
|
||||
INSERT INTO migrations VALUES('202502171819');
|
||||
INSERT INTO migrations VALUES('202505091439');
|
||||
INSERT INTO migrations VALUES('202505141324');
|
||||
CREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);
|
||||
CREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);
|
||||
CREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);
|
||||
CREATE TABLE IF NOT EXISTS "nodes" (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`last_seen` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));
|
||||
CREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);
|
||||
DELETE FROM sqlite_sequence;
|
||||
INSERT INTO sqlite_sequence VALUES('nodes',0);
|
||||
CREATE INDEX `idx_users_deleted_at` ON `users`(`deleted_at`);
|
||||
CREATE UNIQUE INDEX `idx_api_keys_prefix` ON `api_keys`(`prefix`);
|
||||
CREATE INDEX `idx_policies_deleted_at` ON `policies`(`deleted_at`);
|
||||
CREATE UNIQUE INDEX idx_provider_identifier ON users (provider_identifier) WHERE provider_identifier IS NOT NULL;
|
||||
CREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identifier);
|
||||
CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;
|
||||
|
||||
-- Create all the old tables we have had and ensure they are clean up.
|
||||
CREATE TABLE `namespaces` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `machines` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `kvs` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `shared_machines` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `pre_auth_key_acl_tags` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `routes` (`id` text,PRIMARY KEY (`id`));
|
||||
COMMIT;
|
||||
hscontrol/db/testdata/sqlite/headscale_0.26.1_schema-litestream.sql (vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
|
||||
CREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);
|
||||
CREATE INDEX `idx_users_deleted_at` ON `users`(`deleted_at`);
|
||||
CREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);
|
||||
CREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);
|
||||
CREATE UNIQUE INDEX `idx_api_keys_prefix` ON `api_keys`(`prefix`);
|
||||
CREATE TABLE IF NOT EXISTS "nodes" (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`last_seen` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));
|
||||
CREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);
|
||||
CREATE INDEX `idx_policies_deleted_at` ON `policies`(`deleted_at`);
|
||||
CREATE UNIQUE INDEX idx_provider_identifier ON users (provider_identifier) WHERE provider_identifier IS NOT NULL;
|
||||
CREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identifier);
|
||||
CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;
|
||||
CREATE TABLE _litestream_seq (id INTEGER PRIMARY KEY, seq INTEGER);
|
||||
CREATE TABLE _litestream_lock (id INTEGER);
|
||||
@@ -26,8 +26,7 @@ func (hsdb *HSDatabase) CreateUser(user types.User) (*types.User, error) {
|
||||
// CreateUser creates a new User. Returns error if could not be created
|
||||
// or another user already exists.
|
||||
func CreateUser(tx *gorm.DB, user types.User) (*types.User, error) {
|
||||
err := util.ValidateUsername(user.Name)
|
||||
if err != nil {
|
||||
if err := util.ValidateHostname(user.Name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := tx.Create(&user).Error; err != nil {
|
||||
@@ -93,8 +92,7 @@ func RenameUser(tx *gorm.DB, uid types.UserID, newName string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.ValidateUsername(newName)
|
||||
if err != nil {
|
||||
if err = util.ValidateHostname(newName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -185,7 +185,6 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestShuffleDERPMapEdgeCases(t *testing.T) {
|
||||
|
||||
@@ -416,9 +416,12 @@ func (api headscaleV1APIServer) ExpireNode(
|
||||
ctx context.Context,
|
||||
request *v1.ExpireNodeRequest,
|
||||
) (*v1.ExpireNodeResponse, error) {
|
||||
now := time.Now()
|
||||
expiry := time.Now()
|
||||
if request.GetExpiry() != nil {
|
||||
expiry = request.GetExpiry().AsTime()
|
||||
}
|
||||
|
||||
node, nodeChange, err := api.h.state.SetNodeExpiry(types.NodeID(request.GetNodeId()), now)
|
||||
node, nodeChange, err := api.h.state.SetNodeExpiry(types.NodeID(request.GetNodeId()), expiry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
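With this change ExpireNode can also schedule a future expiry instead of expiring immediately (exercised by the new TestSetNodeExpiryInFuture case). A gRPC usage sketch, assuming the generated ExpireNodeRequest exposes the node_id and expiry fields used above:

resp, err := client.ExpireNode(ctx, &v1.ExpireNodeRequest{
    NodeId: 42,
    Expiry: timestamppb.New(time.Now().Add(24 * time.Hour)), // omit to expire the node now
})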
|
||||
@@ -741,7 +744,7 @@ func (api headscaleV1APIServer) DebugCreateNode(
|
||||
hostinfo := tailcfg.Hostinfo{
|
||||
RoutableIPs: routes,
|
||||
OS: "TestOS",
|
||||
Hostname: "DebugTestNode",
|
||||
Hostname: request.GetName(),
|
||||
}
|
||||
|
||||
registrationId, err := types.RegistrationIDFromString(request.GetKey())
|
||||
@@ -749,8 +752,8 @@ func (api headscaleV1APIServer) DebugCreateNode(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newNode := types.RegisterNode{
|
||||
Node: types.Node{
|
||||
newNode := types.NewRegisterNode(
|
||||
types.Node{
|
||||
NodeKey: key.NewNode().Public(),
|
||||
MachineKey: key.NewMachine().Public(),
|
||||
Hostname: request.GetName(),
|
||||
@@ -761,8 +764,7 @@ func (api headscaleV1APIServer) DebugCreateNode(
|
||||
|
||||
Hostinfo: &hostinfo,
|
||||
},
|
||||
Registered: make(chan *types.Node),
|
||||
}
|
||||
)
|
||||
|
||||
log.Debug().
|
||||
Caller().
|
||||
@@ -774,4 +776,24 @@ func (api headscaleV1APIServer) DebugCreateNode(
|
||||
return &v1.DebugCreateNodeResponse{Node: newNode.Node.Proto()}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) Health(
|
||||
ctx context.Context,
|
||||
request *v1.HealthRequest,
|
||||
) (*v1.HealthResponse, error) {
|
||||
var healthErr error
|
||||
response := &v1.HealthResponse{}
|
||||
|
||||
if err := api.h.state.PingDB(ctx); err != nil {
|
||||
healthErr = fmt.Errorf("database ping failed: %w", err)
|
||||
} else {
|
||||
response.DatabaseConnectivity = true
|
||||
}
|
||||
|
||||
if healthErr != nil {
|
||||
log.Error().Err(healthErr).Msg("Health check failed")
|
||||
}
|
||||
|
||||
return response, healthErr
|
||||
}
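A usage sketch for the new Health RPC, assuming the generated HeadscaleService client exposes it with the usual request/response pair:

health, err := client.Health(ctx, &v1.HealthRequest{})
if err != nil {
    // A non-nil error is returned when e.g. the database ping fails.
    log.Fatal().Err(err).Msg("headscale reported unhealthy")
}
fmt.Println("database reachable:", health.GetDatabaseConnectivity())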
|
||||
|
||||
func (api headscaleV1APIServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
|
||||
|
||||
@@ -201,6 +201,24 @@ func (h *Headscale) RobotsHandler(
|
||||
}
|
||||
}
|
||||
|
||||
// VersionHandler returns version information about the Headscale server
|
||||
// Listens in /version.
|
||||
func (h *Headscale) VersionHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
writer.Header().Set("Content-Type", "application/json")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
|
||||
versionInfo := types.GetVersionInfo()
|
||||
if err := json.NewEncoder(writer).Encode(versionInfo); err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write version response")
|
||||
}
|
||||
}
|
||||
|
||||
var codeStyleRegisterWebAPI = styles.Props{
|
||||
styles.Display: "block",
|
||||
styles.Padding: "20px",
|
||||
|
||||
@@ -88,16 +88,9 @@ func generateMapResponse(nodeID types.NodeID, version tailcfg.CapabilityVersion,
|
||||
// TODO(kradalby): This can potentially be a peer update of the old and new subnet router.
|
||||
mapResp, err = mapper.fullMapResponse(nodeID, version)
|
||||
} else {
|
||||
// CRITICAL FIX: Read actual online status from NodeStore when available,
|
||||
// fall back to deriving from change type for unit tests or when NodeStore is empty
|
||||
var onlineStatus bool
|
||||
if node, found := mapper.state.GetNodeByID(c.NodeID); found && node.IsOnline().Valid() {
|
||||
// Use actual NodeStore status when available (production case)
|
||||
onlineStatus = node.IsOnline().Get()
|
||||
} else {
|
||||
// Fall back to deriving from change type (unit test case or initial setup)
|
||||
onlineStatus = c.Change == change.NodeCameOnline
|
||||
}
|
||||
// Trust the change type for online/offline status to avoid race conditions
|
||||
// between NodeStore updates and change processing
|
||||
onlineStatus := c.Change == change.NodeCameOnline
|
||||
|
||||
mapResp, err = mapper.peerChangedPatchResponse(nodeID, []*tailcfg.PeerChange{
|
||||
{
|
||||
@@ -108,11 +101,33 @@ func generateMapResponse(nodeID types.NodeID, version tailcfg.CapabilityVersion,
|
||||
}
|
||||
|
||||
case change.NodeNewOrUpdate:
|
||||
mapResp, err = mapper.fullMapResponse(nodeID, version)
|
||||
// If the node is the one being updated, we send a self update that preserves peer information
|
||||
// to ensure the node sees changes to its own properties (e.g., hostname/DNS name changes)
|
||||
// without losing its view of peer status during rapid reconnection cycles
|
||||
if c.IsSelfUpdate(nodeID) {
|
||||
mapResp, err = mapper.selfMapResponse(nodeID, version)
|
||||
} else {
|
||||
mapResp, err = mapper.peerChangeResponse(nodeID, version, c.NodeID)
|
||||
}
|
||||
|
||||
case change.NodeRemove:
|
||||
mapResp, err = mapper.peerRemovedResponse(nodeID, c.NodeID)
|
||||
|
||||
case change.NodeKeyExpiry:
|
||||
// If the node is the one whose key is expiring, we send a "full" self update
|
||||
// as nodes will ignore patch updates about themselves (?).
|
||||
if c.IsSelfUpdate(nodeID) {
|
||||
mapResp, err = mapper.selfMapResponse(nodeID, version)
|
||||
// mapResp, err = mapper.fullMapResponse(nodeID, version)
|
||||
} else {
|
||||
mapResp, err = mapper.peerChangedPatchResponse(nodeID, []*tailcfg.PeerChange{
|
||||
{
|
||||
NodeID: c.NodeID.NodeID(),
|
||||
KeyExpiry: c.NodeExpiry,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
default:
|
||||
// The following will always hit this:
|
||||
// change.Full, change.Policy
|
||||
|
||||
@@ -73,7 +73,6 @@ func (b *LockFreeBatcher) AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse
|
||||
|
||||
// Use the worker pool for controlled concurrency instead of direct generation
|
||||
initialMap, err := b.MapResponseFromChange(id, change.FullSelf(id))
|
||||
|
||||
if err != nil {
|
||||
log.Error().Uint64("node.id", id.Uint64()).Err(err).Msg("Initial map generation failed")
|
||||
nodeConn.removeConnectionByChannel(c)
|
||||
@@ -602,7 +601,7 @@ func (mc *multiChannelNodeConn) send(data *tailcfg.MapResponse) error {
|
||||
|
||||
mc.updateCount.Add(1)
|
||||
|
||||
log.Info().Uint64("node.id", mc.id.Uint64()).
|
||||
log.Debug().Uint64("node.id", mc.id.Uint64()).
|
||||
Int("successful_sends", successCount).
|
||||
Int("failed_connections", len(failedConnections)).
|
||||
Int("remaining_connections", len(mc.connections)).
|
||||
|
||||
@@ -1028,7 +1028,9 @@ func TestBatcherWorkQueueBatching(t *testing.T) {
|
||||
|
||||
// Add multiple changes rapidly to test batching
|
||||
batcher.AddWork(change.DERPSet)
|
||||
batcher.AddWork(change.KeyExpiry(testNodes[1].n.ID))
|
||||
// Use a valid expiry time for testing since test nodes don't have expiry set
|
||||
testExpiry := time.Now().Add(24 * time.Hour)
|
||||
batcher.AddWork(change.KeyExpiry(testNodes[1].n.ID, testExpiry))
|
||||
batcher.AddWork(change.DERPSet)
|
||||
batcher.AddWork(change.NodeAdded(testNodes[1].n.ID))
|
||||
batcher.AddWork(change.DERPSet)
|
||||
@@ -1278,7 +1280,9 @@ func TestBatcherWorkerChannelSafety(t *testing.T) {
|
||||
|
||||
// Add node-specific work occasionally
|
||||
if i%10 == 0 {
|
||||
batcher.AddWork(change.KeyExpiry(testNode.n.ID))
|
||||
// Use a valid expiry time for testing since test nodes don't have expiry set
|
||||
testExpiry := time.Now().Add(24 * time.Hour)
|
||||
batcher.AddWork(change.KeyExpiry(testNode.n.ID, testExpiry))
|
||||
}
|
||||
|
||||
// Rapid removal creates race between worker and removal
|
||||
@@ -1493,7 +1497,9 @@ func TestBatcherConcurrentClients(t *testing.T) {
|
||||
if i%7 == 0 && len(allNodes) > 0 {
|
||||
// Node-specific changes using real nodes
|
||||
node := allNodes[i%len(allNodes)]
|
||||
batcher.AddWork(change.KeyExpiry(node.n.ID))
|
||||
// Use a valid expiry time for testing since test nodes don't have expiry set
|
||||
testExpiry := time.Now().Add(24 * time.Hour)
|
||||
batcher.AddWork(change.KeyExpiry(node.n.ID, testExpiry))
|
||||
}
|
||||
|
||||
// Small delay to allow some batching
|
||||
|
||||
@@ -28,6 +28,7 @@ type debugType string
|
||||
|
||||
const (
|
||||
fullResponseDebug debugType = "full"
|
||||
selfResponseDebug debugType = "self"
|
||||
patchResponseDebug debugType = "patch"
|
||||
removeResponseDebug debugType = "remove"
|
||||
changeResponseDebug debugType = "change"
|
||||
@@ -68,24 +69,17 @@ func (b *MapResponseBuilder) WithCapabilityVersion(capVer tailcfg.CapabilityVers
|
||||
|
||||
// WithSelfNode adds the requesting node to the response.
|
||||
func (b *MapResponseBuilder) WithSelfNode() *MapResponseBuilder {
|
||||
nodeView, ok := b.mapper.state.GetNodeByID(b.nodeID)
|
||||
nv, ok := b.mapper.state.GetNodeByID(b.nodeID)
|
||||
if !ok {
|
||||
b.addError(errors.New("node not found"))
|
||||
return b
|
||||
}
|
||||
|
||||
// Always use batcher's view of online status for self node
|
||||
// The batcher respects grace periods for logout scenarios
|
||||
node := nodeView.AsStruct()
|
||||
// if b.mapper.batcher != nil {
|
||||
// node.IsOnline = ptr.To(b.mapper.batcher.IsConnected(b.nodeID))
|
||||
// }
|
||||
|
||||
_, matchers := b.mapper.state.Filter()
|
||||
tailnode, err := tailNode(
|
||||
node.View(), b.capVer, b.mapper.state,
|
||||
nv, b.capVer, b.mapper.state,
|
||||
func(id types.NodeID) []netip.Prefix {
|
||||
return policy.ReduceRoutes(node.View(), b.mapper.state.GetNodePrimaryRoutes(id), matchers)
|
||||
return policy.ReduceRoutes(nv, b.mapper.state.GetNodePrimaryRoutes(id), matchers)
|
||||
},
|
||||
b.mapper.cfg)
|
||||
if err != nil {
|
||||
@@ -186,14 +180,21 @@ func (b *MapResponseBuilder) WithPacketFilters() *MapResponseBuilder {
|
||||
return b
|
||||
}
|
||||
|
||||
filter, _ := b.mapper.state.Filter()
|
||||
// FilterForNode returns rules already reduced to only those relevant for this node.
|
||||
// For autogroup:self policies, it returns per-node compiled rules.
|
||||
// For global policies, it returns the global filter reduced for this node.
|
||||
filter, err := b.mapper.state.FilterForNode(node)
|
||||
if err != nil {
|
||||
b.addError(err)
|
||||
return b
|
||||
}
|
||||
|
||||
// CapVer 81: 2023-11-17: MapResponse.PacketFilters (incremental packet filter updates)
|
||||
// Currently, we do not send incremental packet filters; however, using the
// new PacketFilters field and "base" allows us to send a full update when we
// have to send an empty list, avoiding the hack in the else block.
|
||||
b.resp.PacketFilters = map[string][]tailcfg.FilterRule{
|
||||
"base": policy.ReduceFilterRules(node, filter),
|
||||
"base": filter,
|
||||
}
|
||||
|
||||
return b
|
||||
@@ -232,12 +233,19 @@ func (b *MapResponseBuilder) buildTailPeers(peers views.Slice[types.NodeView]) (
|
||||
return nil, errors.New("node not found")
|
||||
}
|
||||
|
||||
filter, matchers := b.mapper.state.Filter()
|
||||
// Get unreduced matchers for peer relationship determination.
|
||||
// MatchersForNode returns unreduced matchers that include all rules where the node
|
||||
// could be either source or destination. This is different from FilterForNode which
|
||||
// returns reduced rules for packet filtering (only rules where node is destination).
|
||||
matchers, err := b.mapper.state.MatchersForNode(node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If there are filter rules present, see if there are any nodes that cannot
|
||||
// access each-other at all and remove them from the peers.
|
||||
var changedViews views.Slice[types.NodeView]
|
||||
if len(filter) > 0 {
|
||||
if len(matchers) > 0 {
|
||||
changedViews = policy.ReduceNodes(node, peers, matchers)
|
||||
} else {
|
||||
changedViews = peers
|
||||
|
||||
@@ -158,6 +158,26 @@ func (m *mapper) fullMapResponse(
|
||||
Build()
|
||||
}
|
||||
|
||||
func (m *mapper) selfMapResponse(
|
||||
nodeID types.NodeID,
|
||||
capVer tailcfg.CapabilityVersion,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
ma, err := m.NewMapResponseBuilder(nodeID).
|
||||
WithDebugType(selfResponseDebug).
|
||||
WithCapabilityVersion(capVer).
|
||||
WithSelfNode().
|
||||
Build()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set the peers to nil to ensure the node does not think
// it's getting a new list.
|
||||
ma.Peers = nil
|
||||
|
||||
return ma, err
|
||||
}
|
||||
|
||||
func (m *mapper) derpMapResponse(
|
||||
nodeID types.NodeID,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
@@ -190,7 +210,6 @@ func (m *mapper) peerChangeResponse(
|
||||
return m.NewMapResponseBuilder(nodeID).
|
||||
WithDebugType(changeResponseDebug).
|
||||
WithCapabilityVersion(capVer).
|
||||
WithSelfNode().
|
||||
WithUserProfiles(peers).
|
||||
WithPeerChanges(peers).
|
||||
Build()
|
||||
|
||||
@@ -331,6 +331,12 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
|
||||
verb := "Reauthenticated"
|
||||
newNode, err := a.handleRegistration(user, *registrationId, nodeExpiry)
|
||||
if err != nil {
|
||||
if errors.Is(err, db.ErrNodeNotFoundRegistrationCache) {
|
||||
log.Debug().Caller().Str("registration_id", registrationId.String()).Msg("registration session expired before authorization completed")
|
||||
httpError(writer, NewHTTPError(http.StatusGone, "login session expired, try again", err))
|
||||
|
||||
return
|
||||
}
|
||||
httpError(writer, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -13,6 +13,12 @@ import (
|
||||
type PolicyManager interface {
|
||||
// Filter returns the current filter rules for the entire tailnet and the associated matchers.
|
||||
Filter() ([]tailcfg.FilterRule, []matcher.Match)
|
||||
// FilterForNode returns filter rules for a specific node, handling autogroup:self
|
||||
FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error)
|
||||
// MatchersForNode returns matchers for peer relationship determination (unreduced)
|
||||
MatchersForNode(node types.NodeView) ([]matcher.Match, error)
|
||||
// BuildPeerMap constructs peer relationship maps for the given nodes
|
||||
BuildPeerMap(nodes views.Slice[types.NodeView]) map[types.NodeID][]types.NodeView
|
||||
SSHPolicy(types.NodeView) (*tailcfg.SSHPolicy, error)
|
||||
SetPolicy([]byte) (bool, error)
|
||||
SetUsers(users []types.User) (bool, error)
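The two new PolicyManager methods split what used to be Filter plus ReduceFilterRules: FilterForNode yields packet-filter rules already reduced to the node as a destination, while MatchersForNode yields unreduced matchers for deciding which peers are visible at all. A sketch of how a map builder is expected to use them, mirroring WithPacketFilters and buildTailPeers above:

// Packet filter: rules reduced to what this node may receive.
rules, err := polMan.FilterForNode(node)
if err != nil {
    return err // fail closed
}
resp.PacketFilters = map[string][]tailcfg.FilterRule{"base": rules}

// Peer visibility: unreduced matchers, the node may be source or destination.
matchers, err := polMan.MatchersForNode(node)
if err != nil {
    return err
}
visiblePeers := policy.ReduceNodes(node, peers, matchers)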
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/samber/lo"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/views"
|
||||
)
|
||||
|
||||
@@ -79,66 +78,6 @@ func BuildPeerMap(
|
||||
return ret
|
||||
}
|
||||
|
||||
// ReduceFilterRules takes a node and a set of rules and removes all rules and destinations
|
||||
// that are not relevant to that particular node.
|
||||
func ReduceFilterRules(node types.NodeView, rules []tailcfg.FilterRule) []tailcfg.FilterRule {
|
||||
ret := []tailcfg.FilterRule{}
|
||||
|
||||
for _, rule := range rules {
|
||||
// record if the rule is actually relevant for the given node.
|
||||
var dests []tailcfg.NetPortRange
|
||||
DEST_LOOP:
|
||||
for _, dest := range rule.DstPorts {
|
||||
expanded, err := util.ParseIPSet(dest.IP, nil)
|
||||
// Fail closed, if we can't parse it, then we should not allow
|
||||
// access.
|
||||
if err != nil {
|
||||
continue DEST_LOOP
|
||||
}
|
||||
|
||||
if node.InIPSet(expanded) {
|
||||
dests = append(dests, dest)
|
||||
continue DEST_LOOP
|
||||
}
|
||||
|
||||
// If the node exposes routes, ensure they are not removed
// when the filters are reduced.
|
||||
if node.Hostinfo().Valid() {
|
||||
routableIPs := node.Hostinfo().RoutableIPs()
|
||||
if routableIPs.Len() > 0 {
|
||||
for _, routableIP := range routableIPs.All() {
|
||||
if expanded.OverlapsPrefix(routableIP) {
|
||||
dests = append(dests, dest)
|
||||
continue DEST_LOOP
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Also check approved subnet routes - nodes should have access
|
||||
// to subnets they're approved to route traffic for.
|
||||
subnetRoutes := node.SubnetRoutes()
|
||||
|
||||
for _, subnetRoute := range subnetRoutes {
|
||||
if expanded.OverlapsPrefix(subnetRoute) {
|
||||
dests = append(dests, dest)
|
||||
continue DEST_LOOP
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(dests) > 0 {
|
||||
ret = append(ret, tailcfg.FilterRule{
|
||||
SrcIPs: rule.SrcIPs,
|
||||
DstPorts: dests,
|
||||
IPProto: rule.IPProto,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// ApproveRoutesWithPolicy checks if the node can approve the announced routes
|
||||
// and returns the new list of approved routes.
|
||||
// The approved routes will include:
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package policy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"testing"
|
||||
@@ -11,12 +10,9 @@ import (
|
||||
"github.com/juanfont/headscale/hscontrol/policy/matcher"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/must"
|
||||
)
|
||||
|
||||
var ap = func(ipStr string) *netip.Addr {
|
||||
@@ -29,817 +25,6 @@ var p = func(prefStr string) netip.Prefix {
|
||||
return ip
|
||||
}
|
||||
|
||||
// hsExitNodeDestForTest is the list of destination IP ranges that are allowed when
|
||||
// we use headscale "autogroup:internet".
|
||||
var hsExitNodeDestForTest = []tailcfg.NetPortRange{
|
||||
{IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "64.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "96.0.0.0/6", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "100.0.0.0/10", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "100.128.0.0/9", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "101.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "102.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "104.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "112.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "168.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.0.0.0/9", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.128.0.0/10", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.192.0.0/11", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.224.0.0/12", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.240.0.0/13", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.248.0.0/14", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.252.0.0/15", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.255.0.0/16", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "170.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "224.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "2000::/3", Ports: tailcfg.PortRangeAny},
|
||||
}
|
||||
|
||||
func TestTheInternet(t *testing.T) {
|
||||
internetSet := util.TheInternet()
|
||||
|
||||
internetPrefs := internetSet.Prefixes()
|
||||
|
||||
for i := range internetPrefs {
|
||||
if internetPrefs[i].String() != hsExitNodeDestForTest[i].IP {
|
||||
t.Errorf(
|
||||
"prefix from internet set %q != hsExit list %q",
|
||||
internetPrefs[i].String(),
|
||||
hsExitNodeDestForTest[i].IP,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
if len(internetPrefs) != len(hsExitNodeDestForTest) {
|
||||
t.Fatalf(
|
||||
"expected same length of prefixes, internet: %d, hsExit: %d",
|
||||
len(internetPrefs),
|
||||
len(hsExitNodeDestForTest),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReduceFilterRules(t *testing.T) {
|
||||
users := types.Users{
|
||||
types.User{Model: gorm.Model{ID: 1}, Name: "mickael"},
|
||||
types.User{Model: gorm.Model{ID: 2}, Name: "user1"},
|
||||
types.User{Model: gorm.Model{ID: 3}, Name: "user2"},
|
||||
types.User{Model: gorm.Model{ID: 4}, Name: "user100"},
|
||||
types.User{Model: gorm.Model{ID: 5}, Name: "user3"},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
node *types.Node
|
||||
peers types.Nodes
|
||||
pol string
|
||||
want []tailcfg.FilterRule
|
||||
}{
|
||||
{
|
||||
name: "host1-can-reach-host2-no-rules",
|
||||
pol: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"100.64.0.1"
|
||||
],
|
||||
"dst": [
|
||||
"100.64.0.2:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"),
|
||||
User: users[0],
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"),
|
||||
User: users[0],
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{},
|
||||
},
|
||||
{
|
||||
name: "1604-subnet-routers-are-preserved",
|
||||
pol: `
|
||||
{
|
||||
"groups": {
|
||||
"group:admins": [
|
||||
"user1@"
|
||||
]
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:admins"
|
||||
],
|
||||
"dst": [
|
||||
"group:admins:*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:admins"
|
||||
],
|
||||
"dst": [
|
||||
"10.33.0.0/16:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.33.0.0/16"),
|
||||
},
|
||||
},
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0::2"),
|
||||
User: users[1],
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{
|
||||
{
|
||||
SrcIPs: []string{
|
||||
"100.64.0.1/32",
|
||||
"100.64.0.2/32",
|
||||
"fd7a:115c:a1e0::1/128",
|
||||
"fd7a:115c:a1e0::2/128",
|
||||
},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "100.64.0.1/32",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "fd7a:115c:a1e0::1/128",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
{
|
||||
SrcIPs: []string{
|
||||
"100.64.0.1/32",
|
||||
"100.64.0.2/32",
|
||||
"fd7a:115c:a1e0::1/128",
|
||||
"fd7a:115c:a1e0::2/128",
|
||||
},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "10.33.0.0/16",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "1786-reducing-breaks-exit-nodes-the-client",
|
||||
pol: `
|
||||
{
|
||||
"groups": {
|
||||
"group:team": [
|
||||
"user3@",
|
||||
"user2@",
|
||||
"user1@"
|
||||
]
|
||||
},
|
||||
"hosts": {
|
||||
"internal": "100.64.0.100/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"internal:*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"autogroup:internet:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0::2"),
|
||||
User: users[2],
|
||||
},
|
||||
// "internal" exit node
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.100"),
|
||||
IPv6: ap("fd7a:115c:a1e0::100"),
|
||||
User: users[3],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: tsaddr.ExitRoutes(),
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{},
|
||||
},
|
||||
{
|
||||
name: "1786-reducing-breaks-exit-nodes-the-exit",
|
||||
pol: `
|
||||
{
|
||||
"groups": {
|
||||
"group:team": [
|
||||
"user3@",
|
||||
"user2@",
|
||||
"user1@"
|
||||
]
|
||||
},
|
||||
"hosts": {
|
||||
"internal": "100.64.0.100/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"internal:*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"autogroup:internet:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.100"),
|
||||
IPv6: ap("fd7a:115c:a1e0::100"),
|
||||
User: users[3],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: tsaddr.ExitRoutes(),
|
||||
},
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0::2"),
|
||||
User: users[2],
|
||||
},
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "100.64.0.100/32",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "fd7a:115c:a1e0::100/128",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: hsExitNodeDestForTest,
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "1786-reducing-breaks-exit-nodes-the-example-from-issue",
|
||||
pol: `
|
||||
{
|
||||
"groups": {
|
||||
"group:team": [
|
||||
"user3@",
|
||||
"user2@",
|
||||
"user1@"
|
||||
]
|
||||
},
|
||||
"hosts": {
|
||||
"internal": "100.64.0.100/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"internal:*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"0.0.0.0/5:*",
|
||||
"8.0.0.0/7:*",
|
||||
"11.0.0.0/8:*",
|
||||
"12.0.0.0/6:*",
|
||||
"16.0.0.0/4:*",
|
||||
"32.0.0.0/3:*",
|
||||
"64.0.0.0/2:*",
|
||||
"128.0.0.0/3:*",
|
||||
"160.0.0.0/5:*",
|
||||
"168.0.0.0/6:*",
|
||||
"172.0.0.0/12:*",
|
||||
"172.32.0.0/11:*",
|
||||
"172.64.0.0/10:*",
|
||||
"172.128.0.0/9:*",
|
||||
"173.0.0.0/8:*",
|
||||
"174.0.0.0/7:*",
|
||||
"176.0.0.0/4:*",
|
||||
"192.0.0.0/9:*",
|
||||
"192.128.0.0/11:*",
|
||||
"192.160.0.0/13:*",
|
||||
"192.169.0.0/16:*",
|
||||
"192.170.0.0/15:*",
|
||||
"192.172.0.0/14:*",
|
||||
"192.176.0.0/12:*",
|
||||
"192.192.0.0/10:*",
|
||||
"193.0.0.0/8:*",
|
||||
"194.0.0.0/7:*",
|
||||
"196.0.0.0/6:*",
|
||||
"200.0.0.0/5:*",
|
||||
"208.0.0.0/4:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.100"),
|
||||
IPv6: ap("fd7a:115c:a1e0::100"),
|
||||
User: users[3],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: tsaddr.ExitRoutes(),
|
||||
},
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0::2"),
|
||||
User: users[2],
|
||||
},
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "100.64.0.100/32",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "fd7a:115c:a1e0::100/128",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "64.0.0.0/2", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "168.0.0.0/6", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "1786-reducing-breaks-exit-nodes-app-connector-like",
|
||||
pol: `
|
||||
{
|
||||
"groups": {
|
||||
"group:team": [
|
||||
"user3@",
|
||||
"user2@",
|
||||
"user1@"
|
||||
]
|
||||
},
|
||||
"hosts": {
|
||||
"internal": "100.64.0.100/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"internal:*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"8.0.0.0/8:*",
|
||||
"16.0.0.0/8:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.100"),
|
||||
IPv6: ap("fd7a:115c:a1e0::100"),
|
||||
User: users[3],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/16"), netip.MustParsePrefix("16.0.0.0/16")},
|
||||
},
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0::2"),
|
||||
User: users[2],
|
||||
},
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "100.64.0.100/32",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "fd7a:115c:a1e0::100/128",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "8.0.0.0/8",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "16.0.0.0/8",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "1786-reducing-breaks-exit-nodes-app-connector-like2",
|
||||
pol: `
|
||||
{
|
||||
"groups": {
|
||||
"group:team": [
|
||||
"user3@",
|
||||
"user2@",
|
||||
"user1@"
|
||||
]
|
||||
},
|
||||
"hosts": {
|
||||
"internal": "100.64.0.100/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"internal:*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"8.0.0.0/16:*",
|
||||
"16.0.0.0/16:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.100"),
|
||||
IPv6: ap("fd7a:115c:a1e0::100"),
|
||||
User: users[3],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/8"), netip.MustParsePrefix("16.0.0.0/8")},
|
||||
},
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0::2"),
|
||||
User: users[2],
|
||||
},
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "100.64.0.100/32",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "fd7a:115c:a1e0::100/128",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "8.0.0.0/16",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "16.0.0.0/16",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "1817-reduce-breaks-32-mask",
|
||||
pol: `
|
||||
{
|
||||
"tagOwners": {
|
||||
"tag:access-servers": ["user100@"],
|
||||
},
|
||||
"groups": {
|
||||
"group:access": [
|
||||
"user1@"
|
||||
]
|
||||
},
|
||||
"hosts": {
|
||||
"dns1": "172.16.0.21/32",
|
||||
"vlan1": "172.16.0.0/24"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:access"
|
||||
],
|
||||
"dst": [
|
||||
"tag:access-servers:*",
|
||||
"dns1:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.100"),
|
||||
IPv6: ap("fd7a:115c:a1e0::100"),
|
||||
User: users[3],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("172.16.0.0/24")},
|
||||
},
|
||||
ForcedTags: []string{"tag:access-servers"},
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "fd7a:115c:a1e0::1/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "100.64.0.100/32",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "fd7a:115c:a1e0::100/128",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "172.16.0.21/32",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "2365-only-route-policy",
|
||||
pol: `
|
||||
{
|
||||
"hosts": {
|
||||
"router": "100.64.0.1/32",
|
||||
"node": "100.64.0.2/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"*"
|
||||
],
|
||||
"dst": [
|
||||
"router:8000"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"node"
|
||||
],
|
||||
"dst": [
|
||||
"172.26.0.0/16:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0::2"),
|
||||
User: users[3],
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{p("172.16.0.0/24"), p("10.10.11.0/24"), p("10.10.12.0/24")},
|
||||
},
|
||||
ApprovedRoutes: []netip.Prefix{p("172.16.0.0/24"), p("10.10.11.0/24"), p("10.10.12.0/24")},
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
for idx, pmf := range PolicyManagerFuncsForTest([]byte(tt.pol)) {
|
||||
t.Run(fmt.Sprintf("%s-index%d", tt.name, idx), func(t *testing.T) {
|
||||
var pm PolicyManager
|
||||
var err error
|
||||
pm, err = pmf(users, append(tt.peers, tt.node).ViewSlice())
|
||||
require.NoError(t, err)
|
||||
got, _ := pm.Filter()
|
||||
t.Logf("full filter:\n%s", must.Get(json.MarshalIndent(got, "", " ")))
|
||||
got = ReduceFilterRules(tt.node.View(), got)
|
||||
|
||||
if diff := cmp.Diff(tt.want, got); diff != "" {
|
||||
log.Trace().Interface("got", got).Msg("result")
|
||||
t.Errorf("TestReduceFilterRules() unexpected result (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReduceNodes(t *testing.T) {
|
||||
type args struct {
|
||||
nodes types.Nodes
71
hscontrol/policy/policyutil/reduce.go
Normal file
@@ -0,0 +1,71 @@
package policyutil

import (
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/util"
	"tailscale.com/tailcfg"
)

// ReduceFilterRules takes a node and a set of global filter rules and removes all rules
// and destinations that are not relevant to that particular node.
//
// IMPORTANT: This function is designed for global filters only. Per-node filters
// (from autogroup:self policies) are already node-specific and should not be passed
// to this function. Use PolicyManager.FilterForNode() instead, which handles both cases.
func ReduceFilterRules(node types.NodeView, rules []tailcfg.FilterRule) []tailcfg.FilterRule {
	ret := []tailcfg.FilterRule{}

	for _, rule := range rules {
		// record if the rule is actually relevant for the given node.
		var dests []tailcfg.NetPortRange
	DEST_LOOP:
		for _, dest := range rule.DstPorts {
			expanded, err := util.ParseIPSet(dest.IP, nil)
			// Fail closed, if we can't parse it, then we should not allow
			// access.
			if err != nil {
				continue DEST_LOOP
			}

			if node.InIPSet(expanded) {
				dests = append(dests, dest)
				continue DEST_LOOP
			}

			// If the node exposes routes, ensure they are not removed
			// when the filters are reduced.
			if node.Hostinfo().Valid() {
				routableIPs := node.Hostinfo().RoutableIPs()
				if routableIPs.Len() > 0 {
					for _, routableIP := range routableIPs.All() {
						if expanded.OverlapsPrefix(routableIP) {
							dests = append(dests, dest)
							continue DEST_LOOP
						}
					}
				}
			}

			// Also check approved subnet routes - nodes should have access
			// to subnets they're approved to route traffic for.
			subnetRoutes := node.SubnetRoutes()

			for _, subnetRoute := range subnetRoutes {
				if expanded.OverlapsPrefix(subnetRoute) {
					dests = append(dests, dest)
					continue DEST_LOOP
				}
			}
		}

		if len(dests) > 0 {
			ret = append(ret, tailcfg.FilterRule{
				SrcIPs:   rule.SrcIPs,
				DstPorts: dests,
				IPProto:  rule.IPProto,
			})
		}
	}

	return ret
}
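
A minimal usage sketch (illustrative only; it mirrors how the tests below wire things up, and assumes `pm` is an already-constructed PolicyManager and `node` a types.Node):

	// Compile the global filter once, then reduce it per node before building
	// that node's map response.
	global, _ := pm.Filter()
	perNode := policyutil.ReduceFilterRules(node.View(), global)
	// perNode keeps only rules whose destinations overlap the node's own IPs,
	// its advertised RoutableIPs, or its approved subnet routes.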
841
hscontrol/policy/policyutil/reduce_test.go
Normal file
@@ -0,0 +1,841 @@
|
||||
package policyutil_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/policy/policyutil"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/must"
|
||||
)
|
||||
|
||||
var ap = func(ipStr string) *netip.Addr {
|
||||
ip := netip.MustParseAddr(ipStr)
|
||||
return &ip
|
||||
}
|
||||
|
||||
var p = func(prefStr string) netip.Prefix {
|
||||
ip := netip.MustParsePrefix(prefStr)
|
||||
return ip
|
||||
}
|
||||
|
||||
// hsExitNodeDestForTest is the list of destination IP ranges that are allowed when
|
||||
// we use headscale "autogroup:internet".
|
||||
var hsExitNodeDestForTest = []tailcfg.NetPortRange{
|
||||
{IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "64.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "96.0.0.0/6", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "100.0.0.0/10", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "100.128.0.0/9", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "101.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "102.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "104.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "112.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "168.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.0.0.0/9", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.128.0.0/10", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.192.0.0/11", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.224.0.0/12", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.240.0.0/13", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.248.0.0/14", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.252.0.0/15", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "169.255.0.0/16", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "170.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "224.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "2000::/3", Ports: tailcfg.PortRangeAny},
|
||||
}
|
||||
|
||||
func TestTheInternet(t *testing.T) {
|
||||
internetSet := util.TheInternet()
|
||||
|
||||
internetPrefs := internetSet.Prefixes()
|
||||
|
||||
for i := range internetPrefs {
|
||||
if internetPrefs[i].String() != hsExitNodeDestForTest[i].IP {
|
||||
t.Errorf(
|
||||
"prefix from internet set %q != hsExit list %q",
|
||||
internetPrefs[i].String(),
|
||||
hsExitNodeDestForTest[i].IP,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
if len(internetPrefs) != len(hsExitNodeDestForTest) {
|
||||
t.Fatalf(
|
||||
"expected same length of prefixes, internet: %d, hsExit: %d",
|
||||
len(internetPrefs),
|
||||
len(hsExitNodeDestForTest),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReduceFilterRules(t *testing.T) {
|
||||
users := types.Users{
|
||||
types.User{Model: gorm.Model{ID: 1}, Name: "mickael"},
|
||||
types.User{Model: gorm.Model{ID: 2}, Name: "user1"},
|
||||
types.User{Model: gorm.Model{ID: 3}, Name: "user2"},
|
||||
types.User{Model: gorm.Model{ID: 4}, Name: "user100"},
|
||||
types.User{Model: gorm.Model{ID: 5}, Name: "user3"},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
node *types.Node
|
||||
peers types.Nodes
|
||||
pol string
|
||||
want []tailcfg.FilterRule
|
||||
}{
|
||||
{
|
||||
name: "host1-can-reach-host2-no-rules",
|
||||
pol: `
|
||||
{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"100.64.0.1"
|
||||
],
|
||||
"dst": [
|
||||
"100.64.0.2:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"),
|
||||
User: users[0],
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"),
|
||||
User: users[0],
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{},
|
||||
},
|
||||
{
|
||||
name: "1604-subnet-routers-are-preserved",
|
||||
pol: `
|
||||
{
|
||||
"groups": {
|
||||
"group:admins": [
|
||||
"user1@"
|
||||
]
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:admins"
|
||||
],
|
||||
"dst": [
|
||||
"group:admins:*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:admins"
|
||||
],
|
||||
"dst": [
|
||||
"10.33.0.0/16:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{
|
||||
netip.MustParsePrefix("10.33.0.0/16"),
|
||||
},
|
||||
},
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0::2"),
|
||||
User: users[1],
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{
|
||||
{
|
||||
SrcIPs: []string{
|
||||
"100.64.0.1/32",
|
||||
"100.64.0.2/32",
|
||||
"fd7a:115c:a1e0::1/128",
|
||||
"fd7a:115c:a1e0::2/128",
|
||||
},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "100.64.0.1/32",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "fd7a:115c:a1e0::1/128",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
{
|
||||
SrcIPs: []string{
|
||||
"100.64.0.1/32",
|
||||
"100.64.0.2/32",
|
||||
"fd7a:115c:a1e0::1/128",
|
||||
"fd7a:115c:a1e0::2/128",
|
||||
},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "10.33.0.0/16",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "1786-reducing-breaks-exit-nodes-the-client",
|
||||
pol: `
|
||||
{
|
||||
"groups": {
|
||||
"group:team": [
|
||||
"user3@",
|
||||
"user2@",
|
||||
"user1@"
|
||||
]
|
||||
},
|
||||
"hosts": {
|
||||
"internal": "100.64.0.100/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"internal:*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"autogroup:internet:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0::2"),
|
||||
User: users[2],
|
||||
},
|
||||
// "internal" exit node
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.100"),
|
||||
IPv6: ap("fd7a:115c:a1e0::100"),
|
||||
User: users[3],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: tsaddr.ExitRoutes(),
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{},
|
||||
},
|
||||
{
|
||||
name: "1786-reducing-breaks-exit-nodes-the-exit",
|
||||
pol: `
|
||||
{
|
||||
"groups": {
|
||||
"group:team": [
|
||||
"user3@",
|
||||
"user2@",
|
||||
"user1@"
|
||||
]
|
||||
},
|
||||
"hosts": {
|
||||
"internal": "100.64.0.100/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"internal:*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"autogroup:internet:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.100"),
|
||||
IPv6: ap("fd7a:115c:a1e0::100"),
|
||||
User: users[3],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: tsaddr.ExitRoutes(),
|
||||
},
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0::2"),
|
||||
User: users[2],
|
||||
},
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "100.64.0.100/32",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "fd7a:115c:a1e0::100/128",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: hsExitNodeDestForTest,
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "1786-reducing-breaks-exit-nodes-the-example-from-issue",
|
||||
pol: `
|
||||
{
|
||||
"groups": {
|
||||
"group:team": [
|
||||
"user3@",
|
||||
"user2@",
|
||||
"user1@"
|
||||
]
|
||||
},
|
||||
"hosts": {
|
||||
"internal": "100.64.0.100/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"internal:*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"0.0.0.0/5:*",
|
||||
"8.0.0.0/7:*",
|
||||
"11.0.0.0/8:*",
|
||||
"12.0.0.0/6:*",
|
||||
"16.0.0.0/4:*",
|
||||
"32.0.0.0/3:*",
|
||||
"64.0.0.0/2:*",
|
||||
"128.0.0.0/3:*",
|
||||
"160.0.0.0/5:*",
|
||||
"168.0.0.0/6:*",
|
||||
"172.0.0.0/12:*",
|
||||
"172.32.0.0/11:*",
|
||||
"172.64.0.0/10:*",
|
||||
"172.128.0.0/9:*",
|
||||
"173.0.0.0/8:*",
|
||||
"174.0.0.0/7:*",
|
||||
"176.0.0.0/4:*",
|
||||
"192.0.0.0/9:*",
|
||||
"192.128.0.0/11:*",
|
||||
"192.160.0.0/13:*",
|
||||
"192.169.0.0/16:*",
|
||||
"192.170.0.0/15:*",
|
||||
"192.172.0.0/14:*",
|
||||
"192.176.0.0/12:*",
|
||||
"192.192.0.0/10:*",
|
||||
"193.0.0.0/8:*",
|
||||
"194.0.0.0/7:*",
|
||||
"196.0.0.0/6:*",
|
||||
"200.0.0.0/5:*",
|
||||
"208.0.0.0/4:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.100"),
|
||||
IPv6: ap("fd7a:115c:a1e0::100"),
|
||||
User: users[3],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: tsaddr.ExitRoutes(),
|
||||
},
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0::2"),
|
||||
User: users[2],
|
||||
},
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "100.64.0.100/32",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "fd7a:115c:a1e0::100/128",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "64.0.0.0/2", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "168.0.0.0/6", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||
{IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "1786-reducing-breaks-exit-nodes-app-connector-like",
|
||||
pol: `
|
||||
{
|
||||
"groups": {
|
||||
"group:team": [
|
||||
"user3@",
|
||||
"user2@",
|
||||
"user1@"
|
||||
]
|
||||
},
|
||||
"hosts": {
|
||||
"internal": "100.64.0.100/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"internal:*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"8.0.0.0/8:*",
|
||||
"16.0.0.0/8:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.100"),
|
||||
IPv6: ap("fd7a:115c:a1e0::100"),
|
||||
User: users[3],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/16"), netip.MustParsePrefix("16.0.0.0/16")},
|
||||
},
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0::2"),
|
||||
User: users[2],
|
||||
},
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "100.64.0.100/32",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "fd7a:115c:a1e0::100/128",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "8.0.0.0/8",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "16.0.0.0/8",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "1786-reducing-breaks-exit-nodes-app-connector-like2",
|
||||
pol: `
|
||||
{
|
||||
"groups": {
|
||||
"group:team": [
|
||||
"user3@",
|
||||
"user2@",
|
||||
"user1@"
|
||||
]
|
||||
},
|
||||
"hosts": {
|
||||
"internal": "100.64.0.100/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"internal:*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:team"
|
||||
],
|
||||
"dst": [
|
||||
"8.0.0.0/16:*",
|
||||
"16.0.0.0/16:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.100"),
|
||||
IPv6: ap("fd7a:115c:a1e0::100"),
|
||||
User: users[3],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/8"), netip.MustParsePrefix("16.0.0.0/8")},
|
||||
},
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0::2"),
|
||||
User: users[2],
|
||||
},
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "100.64.0.100/32",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "fd7a:115c:a1e0::100/128",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "8.0.0.0/16",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "16.0.0.0/16",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "1817-reduce-breaks-32-mask",
|
||||
pol: `
|
||||
{
|
||||
"tagOwners": {
|
||||
"tag:access-servers": ["user100@"],
|
||||
},
|
||||
"groups": {
|
||||
"group:access": [
|
||||
"user1@"
|
||||
]
|
||||
},
|
||||
"hosts": {
|
||||
"dns1": "172.16.0.21/32",
|
||||
"vlan1": "172.16.0.0/24"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"proto": "",
|
||||
"src": [
|
||||
"group:access"
|
||||
],
|
||||
"dst": [
|
||||
"tag:access-servers:*",
|
||||
"dns1:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.100"),
|
||||
IPv6: ap("fd7a:115c:a1e0::100"),
|
||||
User: users[3],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("172.16.0.0/24")},
|
||||
},
|
||||
ForcedTags: []string{"tag:access-servers"},
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.1/32", "fd7a:115c:a1e0::1/128"},
|
||||
DstPorts: []tailcfg.NetPortRange{
|
||||
{
|
||||
IP: "100.64.0.100/32",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "fd7a:115c:a1e0::100/128",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
{
|
||||
IP: "172.16.0.21/32",
|
||||
Ports: tailcfg.PortRangeAny,
|
||||
},
|
||||
},
|
||||
IPProto: []int{6, 17},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "2365-only-route-policy",
|
||||
pol: `
|
||||
{
|
||||
"hosts": {
|
||||
"router": "100.64.0.1/32",
|
||||
"node": "100.64.0.2/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"*"
|
||||
],
|
||||
"dst": [
|
||||
"router:8000"
|
||||
]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"src": [
|
||||
"node"
|
||||
],
|
||||
"dst": [
|
||||
"172.26.0.0/16:*"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
`,
|
||||
node: &types.Node{
|
||||
IPv4: ap("100.64.0.2"),
|
||||
IPv6: ap("fd7a:115c:a1e0::2"),
|
||||
User: users[3],
|
||||
},
|
||||
peers: types.Nodes{
|
||||
&types.Node{
|
||||
IPv4: ap("100.64.0.1"),
|
||||
IPv6: ap("fd7a:115c:a1e0::1"),
|
||||
User: users[1],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{p("172.16.0.0/24"), p("10.10.11.0/24"), p("10.10.12.0/24")},
|
||||
},
|
||||
ApprovedRoutes: []netip.Prefix{p("172.16.0.0/24"), p("10.10.11.0/24"), p("10.10.12.0/24")},
|
||||
},
|
||||
},
|
||||
want: []tailcfg.FilterRule{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
for idx, pmf := range policy.PolicyManagerFuncsForTest([]byte(tt.pol)) {
|
||||
t.Run(fmt.Sprintf("%s-index%d", tt.name, idx), func(t *testing.T) {
|
||||
var pm policy.PolicyManager
|
||||
var err error
|
||||
pm, err = pmf(users, append(tt.peers, tt.node).ViewSlice())
|
||||
require.NoError(t, err)
|
||||
got, _ := pm.Filter()
|
||||
t.Logf("full filter:\n%s", must.Get(json.MarshalIndent(got, "", " ")))
|
||||
got = policyutil.ReduceFilterRules(tt.node.View(), got)
|
||||
|
||||
if diff := cmp.Diff(tt.want, got); diff != "" {
|
||||
log.Trace().Interface("got", got).Msg("result")
|
||||
t.Errorf("TestReduceFilterRules() unexpected result (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -82,6 +82,159 @@ func (pol *Policy) compileFilterRules(
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
// compileFilterRulesForNode compiles filter rules for a specific node.
|
||||
func (pol *Policy) compileFilterRulesForNode(
|
||||
users types.Users,
|
||||
node types.NodeView,
|
||||
nodes views.Slice[types.NodeView],
|
||||
) ([]tailcfg.FilterRule, error) {
|
||||
if pol == nil {
|
||||
return tailcfg.FilterAllowAll, nil
|
||||
}
|
||||
|
||||
var rules []tailcfg.FilterRule
|
||||
|
||||
for _, acl := range pol.ACLs {
|
||||
if acl.Action != ActionAccept {
|
||||
return nil, ErrInvalidAction
|
||||
}
|
||||
|
||||
rule, err := pol.compileACLWithAutogroupSelf(acl, users, node, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("compiling ACL")
|
||||
continue
|
||||
}
|
||||
|
||||
if rule != nil {
|
||||
rules = append(rules, *rule)
|
||||
}
|
||||
}
|
||||
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
// compileACLWithAutogroupSelf compiles a single ACL rule, handling
|
||||
// autogroup:self per-node while supporting all other alias types normally.
|
||||
func (pol *Policy) compileACLWithAutogroupSelf(
|
||||
acl ACL,
|
||||
users types.Users,
|
||||
node types.NodeView,
|
||||
nodes views.Slice[types.NodeView],
|
||||
) (*tailcfg.FilterRule, error) {
|
||||
// Check if any destination uses autogroup:self
|
||||
hasAutogroupSelfInDst := false
|
||||
|
||||
for _, dest := range acl.Destinations {
|
||||
if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
hasAutogroupSelfInDst = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
var srcIPs netipx.IPSetBuilder
|
||||
|
||||
// Resolve sources to only include devices from the same user as the target node.
|
||||
for _, src := range acl.Sources {
|
||||
// autogroup:self is not allowed in sources
|
||||
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
return nil, fmt.Errorf("autogroup:self cannot be used in sources")
|
||||
}
|
||||
|
||||
ips, err := src.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("resolving source ips")
|
||||
continue
|
||||
}
|
||||
|
||||
if ips != nil {
|
||||
if hasAutogroupSelfInDst {
|
||||
// Instead of iterating all addresses (which could be millions),
|
||||
// check each node's IPs against the source set
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
// Check if any of this node's IPs are in the source set
|
||||
for _, nodeIP := range n.IPs() {
|
||||
if ips.Contains(nodeIP) {
|
||||
n.AppendToIPSet(&srcIPs)
|
||||
break // Found this node, move to next
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// No autogroup:self in destination, use all resolved sources
|
||||
srcIPs.AddSet(ips)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
srcSet, err := srcIPs.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if srcSet == nil || len(srcSet.Prefixes()) == 0 {
|
||||
// No sources resolved, skip this rule
|
||||
return nil, nil //nolint:nilnil
|
||||
}
|
||||
|
||||
protocols, _ := acl.Protocol.parseProtocol()
|
||||
|
||||
var destPorts []tailcfg.NetPortRange
|
||||
|
||||
for _, dest := range acl.Destinations {
|
||||
if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
for _, port := range dest.Ports {
|
||||
for _, ip := range n.IPs() {
|
||||
pr := tailcfg.NetPortRange{
|
||||
IP: ip.String(),
|
||||
Ports: port,
|
||||
}
|
||||
destPorts = append(destPorts, pr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ips, err := dest.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Err(err).Msgf("resolving destination ips")
|
||||
continue
|
||||
}
|
||||
|
||||
if ips == nil {
|
||||
log.Debug().Msgf("destination resolved to nil ips: %v", dest)
|
||||
continue
|
||||
}
|
||||
|
||||
prefixes := ips.Prefixes()
|
||||
|
||||
for _, pref := range prefixes {
|
||||
for _, port := range dest.Ports {
|
||||
pr := tailcfg.NetPortRange{
|
||||
IP: pref.String(),
|
||||
Ports: port,
|
||||
}
|
||||
destPorts = append(destPorts, pr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(destPorts) == 0 {
|
||||
// No destinations resolved, skip this rule
|
||||
return nil, nil //nolint:nilnil
|
||||
}
|
||||
|
||||
return &tailcfg.FilterRule{
|
||||
SrcIPs: ipSetToPrefixStringList(srcSet),
|
||||
DstPorts: destPorts,
|
||||
IPProto: protocols,
|
||||
}, nil
|
||||
}
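
A condensed sketch of how the per-node path above gets exercised (it mirrors TestCompileFilterRulesForNodeWithAutogroupSelf further down; `agp`, `aliasWithPorts`, `users` and `nodes` are the helpers and fixtures defined in that test, not part of this function):

	pol := &Policy{
		ACLs: []ACL{{
			Action:  "accept",
			Sources: []Alias{agp("autogroup:member")},
			Destinations: []AliasWithPorts{
				aliasWithPorts(agp("autogroup:self"), tailcfg.PortRangeAny),
			},
		}},
	}
	// For each target node, sources and destinations are narrowed to the
	// untagged devices owned by that node's user.
	rules, err := pol.compileFilterRulesForNode(users, nodes[0].View(), nodes.ViewSlice())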
|
||||
|
||||
func sshAction(accept bool, duration time.Duration) tailcfg.SSHAction {
|
||||
return tailcfg.SSHAction{
|
||||
Reject: !accept,
|
||||
@@ -107,13 +260,38 @@ func (pol *Policy) compileSSHPolicy(
|
||||
var rules []*tailcfg.SSHRule
|
||||
|
||||
for index, rule := range pol.SSHs {
|
||||
// Check if any destination uses autogroup:self
|
||||
hasAutogroupSelfInDst := false
|
||||
for _, dst := range rule.Destinations {
|
||||
if ag, ok := dst.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
hasAutogroupSelfInDst = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// If autogroup:self is used, skip tagged nodes
|
||||
if hasAutogroupSelfInDst && node.IsTagged() {
|
||||
continue
|
||||
}
|
||||
|
||||
var dest netipx.IPSetBuilder
|
||||
for _, src := range rule.Destinations {
|
||||
ips, err := src.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("resolving destination ips")
|
||||
// Handle autogroup:self specially
|
||||
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
// For autogroup:self, only include the target user's untagged devices
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
n.AppendToIPSet(&dest)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ips, err := src.Resolve(pol, users, nodes)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("resolving destination ips")
|
||||
continue
|
||||
}
|
||||
dest.AddSet(ips)
|
||||
}
|
||||
dest.AddSet(ips)
|
||||
}
|
||||
|
||||
destSet, err := dest.IPSet()
|
||||
@@ -142,6 +320,33 @@ func (pol *Policy) compileSSHPolicy(
|
||||
continue // Skip this rule if we can't resolve sources
|
||||
}
|
||||
|
||||
// If autogroup:self is in destinations, filter sources to same user only
|
||||
if hasAutogroupSelfInDst {
|
||||
var filteredSrcIPs netipx.IPSetBuilder
|
||||
// Instead of iterating all addresses, check each node's IPs
|
||||
for _, n := range nodes.All() {
|
||||
if n.User().ID == node.User().ID && !n.IsTagged() {
|
||||
// Check if any of this node's IPs are in the source set
|
||||
for _, nodeIP := range n.IPs() {
|
||||
if srcIPs.Contains(nodeIP) {
|
||||
n.AppendToIPSet(&filteredSrcIPs)
|
||||
break // Found this node, move to next
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
srcIPs, err = filteredSrcIPs.IPSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if srcIPs == nil || len(srcIPs.Prefixes()) == 0 {
|
||||
// No valid sources after filtering, skip this rule
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
for addr := range util.IPSetAddrIter(srcIPs) {
|
||||
principals = append(principals, &tailcfg.SSHPrincipal{
|
||||
NodeIP: addr.String(),
|
||||
|
||||
@@ -3,6 +3,7 @@ package v2
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/netip"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -15,6 +16,14 @@ import (
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
// aliasWithPorts creates an AliasWithPorts structure from an alias and ports.
|
||||
func aliasWithPorts(alias Alias, ports ...tailcfg.PortRange) AliasWithPorts {
|
||||
return AliasWithPorts{
|
||||
Alias: alias,
|
||||
Ports: ports,
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsing(t *testing.T) {
|
||||
users := types.Users{
|
||||
{Model: gorm.Model{ID: 1}, Name: "testuser"},
|
||||
@@ -786,8 +795,547 @@ func TestSSHJSONSerialization(t *testing.T) {
|
||||
assert.NotContains(t, string(jsonData), `"sshUsers": null`, "SSH users should not be null")
|
||||
}
|
||||
|
||||
func TestCompileFilterRulesForNodeWithAutogroupSelf(t *testing.T) {
|
||||
users := types.Users{
|
||||
{Model: gorm.Model{ID: 1}, Name: "user1"},
|
||||
{Model: gorm.Model{ID: 2}, Name: "user2"},
|
||||
}
|
||||
|
||||
nodes := types.Nodes{
|
||||
{
|
||||
User: users[0],
|
||||
IPv4: ap("100.64.0.1"),
|
||||
},
|
||||
{
|
||||
User: users[0],
|
||||
IPv4: ap("100.64.0.2"),
|
||||
},
|
||||
{
|
||||
User: users[1],
|
||||
IPv4: ap("100.64.0.3"),
|
||||
},
|
||||
{
|
||||
User: users[1],
|
||||
IPv4: ap("100.64.0.4"),
|
||||
},
|
||||
// Tagged device for user1
|
||||
{
|
||||
User: users[0],
|
||||
IPv4: ap("100.64.0.5"),
|
||||
ForcedTags: []string{"tag:test"},
|
||||
},
|
||||
// Tagged device for user2
|
||||
{
|
||||
User: users[1],
|
||||
IPv4: ap("100.64.0.6"),
|
||||
ForcedTags: []string{"tag:test"},
|
||||
},
|
||||
}
|
||||
|
||||
// Test: Tailscale intended usage pattern (autogroup:member + autogroup:self)
|
||||
policy2 := &Policy{
|
||||
ACLs: []ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []Alias{agp("autogroup:member")},
|
||||
Destinations: []AliasWithPorts{
|
||||
aliasWithPorts(agp("autogroup:self"), tailcfg.PortRangeAny),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := policy2.validate()
|
||||
if err != nil {
|
||||
t.Fatalf("policy validation failed: %v", err)
|
||||
}
|
||||
|
||||
// Test compilation for user1's first node
|
||||
node1 := nodes[0].View()
|
||||
|
||||
rules, err := policy2.compileFilterRulesForNode(users, node1, nodes.ViewSlice())
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if len(rules) != 1 {
|
||||
t.Fatalf("expected 1 rule, got %d", len(rules))
|
||||
}
|
||||
|
||||
// Check that the rule includes:
|
||||
// - Sources: only user1's untagged devices (filtered by autogroup:self semantics)
|
||||
// - Destinations: only user1's untagged devices (autogroup:self)
|
||||
rule := rules[0]
|
||||
|
||||
// Sources should ONLY include user1's untagged devices (100.64.0.1, 100.64.0.2)
|
||||
expectedSourceIPs := []string{"100.64.0.1", "100.64.0.2"}
|
||||
|
||||
for _, expectedIP := range expectedSourceIPs {
|
||||
found := false
|
||||
|
||||
addr := netip.MustParseAddr(expectedIP)
|
||||
for _, prefix := range rule.SrcIPs {
|
||||
pref := netip.MustParsePrefix(prefix)
|
||||
if pref.Contains(addr) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
t.Errorf("expected source IP %s to be covered by generated prefixes %v", expectedIP, rule.SrcIPs)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that other users' devices and tagged devices are not included in sources
|
||||
excludedSourceIPs := []string{"100.64.0.3", "100.64.0.4", "100.64.0.5", "100.64.0.6"}
|
||||
for _, excludedIP := range excludedSourceIPs {
|
||||
addr := netip.MustParseAddr(excludedIP)
|
||||
for _, prefix := range rule.SrcIPs {
|
||||
pref := netip.MustParsePrefix(prefix)
|
||||
if pref.Contains(addr) {
|
||||
t.Errorf("SECURITY VIOLATION: source IP %s should not be included but found in prefix %s", excludedIP, prefix)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
expectedDestIPs := []string{"100.64.0.1", "100.64.0.2"}
|
||||
|
||||
actualDestIPs := make([]string, 0, len(rule.DstPorts))
|
||||
for _, dst := range rule.DstPorts {
|
||||
actualDestIPs = append(actualDestIPs, dst.IP)
|
||||
}
|
||||
|
||||
for _, expectedIP := range expectedDestIPs {
|
||||
found := false
|
||||
|
||||
for _, actualIP := range actualDestIPs {
|
||||
if actualIP == expectedIP {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
t.Errorf("expected destination IP %s to be included, got: %v", expectedIP, actualDestIPs)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that other users' devices and tagged devices are not in destinations
|
||||
excludedDestIPs := []string{"100.64.0.3", "100.64.0.4", "100.64.0.5", "100.64.0.6"}
|
||||
for _, excludedIP := range excludedDestIPs {
|
||||
for _, actualIP := range actualDestIPs {
|
||||
if actualIP == excludedIP {
|
||||
t.Errorf("SECURITY: destination IP %s should not be included but found in destinations", excludedIP)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAutogroupSelfInSourceIsRejected(t *testing.T) {
|
||||
// Test that autogroup:self cannot be used in sources (per Tailscale spec)
|
||||
policy := &Policy{
|
||||
ACLs: []ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []Alias{agp("autogroup:self")},
|
||||
Destinations: []AliasWithPorts{
|
||||
aliasWithPorts(agp("autogroup:member"), tailcfg.PortRangeAny),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := policy.validate()
|
||||
if err == nil {
|
||||
t.Error("expected validation error when using autogroup:self in sources")
|
||||
}
|
||||
|
||||
if !strings.Contains(err.Error(), "autogroup:self") {
|
||||
t.Errorf("expected error message to mention autogroup:self, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAutogroupSelfWithSpecificUserSource verifies that when autogroup:self is in
|
||||
// the destination and a specific user is in the source, only that user's devices
|
||||
// are allowed (and only if they match the target user).
|
||||
func TestAutogroupSelfWithSpecificUserSource(t *testing.T) {
|
||||
users := types.Users{
|
||||
{Model: gorm.Model{ID: 1}, Name: "user1"},
|
||||
{Model: gorm.Model{ID: 2}, Name: "user2"},
|
||||
}
|
||||
|
||||
nodes := types.Nodes{
|
||||
{User: users[0], IPv4: ap("100.64.0.1")},
|
||||
{User: users[0], IPv4: ap("100.64.0.2")},
|
||||
{User: users[1], IPv4: ap("100.64.0.3")},
|
||||
{User: users[1], IPv4: ap("100.64.0.4")},
|
||||
}
|
||||
|
||||
policy := &Policy{
|
||||
ACLs: []ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []Alias{up("user1@")},
|
||||
Destinations: []AliasWithPorts{
|
||||
aliasWithPorts(agp("autogroup:self"), tailcfg.PortRangeAny),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := policy.validate()
|
||||
require.NoError(t, err)
|
||||
|
||||
// For user1's node: sources should be user1's devices
|
||||
node1 := nodes[0].View()
|
||||
rules, err := policy.compileFilterRulesForNode(users, node1, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
require.Len(t, rules, 1)
|
||||
|
||||
expectedSourceIPs := []string{"100.64.0.1", "100.64.0.2"}
|
||||
for _, expectedIP := range expectedSourceIPs {
|
||||
found := false
|
||||
addr := netip.MustParseAddr(expectedIP)
|
||||
|
||||
for _, prefix := range rules[0].SrcIPs {
|
||||
pref := netip.MustParsePrefix(prefix)
|
||||
if pref.Contains(addr) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
assert.True(t, found, "expected source IP %s to be present", expectedIP)
|
||||
}
|
||||
|
||||
actualDestIPs := make([]string, 0, len(rules[0].DstPorts))
|
||||
for _, dst := range rules[0].DstPorts {
|
||||
actualDestIPs = append(actualDestIPs, dst.IP)
|
||||
}
|
||||
|
||||
assert.ElementsMatch(t, expectedSourceIPs, actualDestIPs)
|
||||
|
||||
node2 := nodes[2].View()
|
||||
rules2, err := policy.compileFilterRulesForNode(users, node2, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, rules2, "user2's node should have no rules (user1@ devices can't match user2's self)")
|
||||
}
|
||||
|
||||
// TestAutogroupSelfWithGroupSource verifies that when a group is used as source
|
||||
// and autogroup:self as destination, only group members who are the same user
|
||||
// as the target are allowed.
|
||||
func TestAutogroupSelfWithGroupSource(t *testing.T) {
|
||||
users := types.Users{
|
||||
{Model: gorm.Model{ID: 1}, Name: "user1"},
|
||||
{Model: gorm.Model{ID: 2}, Name: "user2"},
|
||||
{Model: gorm.Model{ID: 3}, Name: "user3"},
|
||||
}
|
||||
|
||||
nodes := types.Nodes{
|
||||
{User: users[0], IPv4: ap("100.64.0.1")},
|
||||
{User: users[0], IPv4: ap("100.64.0.2")},
|
||||
{User: users[1], IPv4: ap("100.64.0.3")},
|
||||
{User: users[1], IPv4: ap("100.64.0.4")},
|
||||
{User: users[2], IPv4: ap("100.64.0.5")},
|
||||
}
|
||||
|
||||
policy := &Policy{
|
||||
Groups: Groups{
|
||||
Group("group:admins"): []Username{Username("user1@"), Username("user2@")},
|
||||
},
|
||||
ACLs: []ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []Alias{gp("group:admins")},
|
||||
Destinations: []AliasWithPorts{
|
||||
aliasWithPorts(agp("autogroup:self"), tailcfg.PortRangeAny),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := policy.validate()
|
||||
require.NoError(t, err)
|
||||
|
||||
// (group:admins has user1+user2, but autogroup:self filters to same user)
|
||||
node1 := nodes[0].View()
|
||||
rules, err := policy.compileFilterRulesForNode(users, node1, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
require.Len(t, rules, 1)
|
||||
|
||||
expectedSrcIPs := []string{"100.64.0.1", "100.64.0.2"}
|
||||
for _, expectedIP := range expectedSrcIPs {
|
||||
found := false
|
||||
addr := netip.MustParseAddr(expectedIP)
|
||||
|
||||
for _, prefix := range rules[0].SrcIPs {
|
||||
pref := netip.MustParsePrefix(prefix)
|
||||
if pref.Contains(addr) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
assert.True(t, found, "expected source IP %s for user1", expectedIP)
|
||||
}
|
||||
|
||||
node3 := nodes[4].View()
|
||||
rules3, err := policy.compileFilterRulesForNode(users, node3, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, rules3, "user3 should have no rules")
|
||||
}
|
||||
|
||||
// Helper function to create IP addresses for testing
func createAddr(ip string) *netip.Addr {
addr, _ := netip.ParseAddr(ip)
return &addr
}

// TestSSHWithAutogroupSelfInDestination verifies that SSH policies work correctly
// with autogroup:self in destinations
func TestSSHWithAutogroupSelfInDestination(t *testing.T) {
|
||||
users := types.Users{
|
||||
{Model: gorm.Model{ID: 1}, Name: "user1"},
|
||||
{Model: gorm.Model{ID: 2}, Name: "user2"},
|
||||
}
|
||||
|
||||
nodes := types.Nodes{
|
||||
// User1's nodes
|
||||
{User: users[0], IPv4: ap("100.64.0.1"), Hostname: "user1-node1"},
|
||||
{User: users[0], IPv4: ap("100.64.0.2"), Hostname: "user1-node2"},
|
||||
// User2's nodes
|
||||
{User: users[1], IPv4: ap("100.64.0.3"), Hostname: "user2-node1"},
|
||||
{User: users[1], IPv4: ap("100.64.0.4"), Hostname: "user2-node2"},
|
||||
// Tagged node for user1 (should be excluded)
|
||||
{User: users[0], IPv4: ap("100.64.0.5"), Hostname: "user1-tagged", ForcedTags: []string{"tag:server"}},
|
||||
}
|
||||
|
||||
policy := &Policy{
|
||||
SSHs: []SSH{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: SSHSrcAliases{agp("autogroup:member")},
|
||||
Destinations: SSHDstAliases{agp("autogroup:self")},
|
||||
Users: []SSHUser{"autogroup:nonroot"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := policy.validate()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test for user1's first node
|
||||
node1 := nodes[0].View()
|
||||
sshPolicy, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sshPolicy)
|
||||
require.Len(t, sshPolicy.Rules, 1)
|
||||
|
||||
rule := sshPolicy.Rules[0]
|
||||
|
||||
// Principals should only include user1's untagged devices
|
||||
require.Len(t, rule.Principals, 2, "should have 2 principals (user1's 2 untagged nodes)")
|
||||
|
||||
principalIPs := make([]string, len(rule.Principals))
|
||||
for i, p := range rule.Principals {
|
||||
principalIPs[i] = p.NodeIP
|
||||
}
|
||||
assert.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, principalIPs)
|
||||
|
||||
// Test for user2's first node
|
||||
node3 := nodes[2].View()
|
||||
sshPolicy2, err := policy.compileSSHPolicy(users, node3, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sshPolicy2)
|
||||
require.Len(t, sshPolicy2.Rules, 1)
|
||||
|
||||
rule2 := sshPolicy2.Rules[0]
|
||||
|
||||
// Principals should only include user2's untagged devices
|
||||
require.Len(t, rule2.Principals, 2, "should have 2 principals (user2's 2 untagged nodes)")
|
||||
|
||||
principalIPs2 := make([]string, len(rule2.Principals))
|
||||
for i, p := range rule2.Principals {
|
||||
principalIPs2[i] = p.NodeIP
|
||||
}
|
||||
assert.ElementsMatch(t, []string{"100.64.0.3", "100.64.0.4"}, principalIPs2)
|
||||
|
||||
// Test for tagged node (should have no SSH rules)
|
||||
node5 := nodes[4].View()
|
||||
sshPolicy3, err := policy.compileSSHPolicy(users, node5, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
if sshPolicy3 != nil {
|
||||
assert.Empty(t, sshPolicy3.Rules, "tagged nodes should not get SSH rules with autogroup:self")
|
||||
}
|
||||
}
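// Illustrative sketch (not part of this change): the SSH rule exercised above would look
// roughly like this in a HuJSON policy. The field names follow the usual Tailscale-style
// policy layout and are an assumption here, not something introduced by this change.
//
//	{
//	  "ssh": [
//	    {
//	      "action": "accept",
//	      "src": ["autogroup:member"],
//	      "dst": ["autogroup:self"],
//	      "users": ["autogroup:nonroot"]
//	    }
//	  ]
//	}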
|
||||
|
||||
// TestSSHWithAutogroupSelfAndSpecificUser verifies that when a specific user
// is in the source and autogroup:self in destination, only that user's devices
// can SSH (and only if they match the target user)
func TestSSHWithAutogroupSelfAndSpecificUser(t *testing.T) {
|
||||
users := types.Users{
|
||||
{Model: gorm.Model{ID: 1}, Name: "user1"},
|
||||
{Model: gorm.Model{ID: 2}, Name: "user2"},
|
||||
}
|
||||
|
||||
nodes := types.Nodes{
|
||||
{User: users[0], IPv4: ap("100.64.0.1")},
|
||||
{User: users[0], IPv4: ap("100.64.0.2")},
|
||||
{User: users[1], IPv4: ap("100.64.0.3")},
|
||||
{User: users[1], IPv4: ap("100.64.0.4")},
|
||||
}
|
||||
|
||||
policy := &Policy{
|
||||
SSHs: []SSH{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: SSHSrcAliases{up("user1@")},
|
||||
Destinations: SSHDstAliases{agp("autogroup:self")},
|
||||
Users: []SSHUser{"ubuntu"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := policy.validate()
|
||||
require.NoError(t, err)
|
||||
|
||||
// For user1's node: should allow SSH from user1's devices
|
||||
node1 := nodes[0].View()
|
||||
sshPolicy, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sshPolicy)
|
||||
require.Len(t, sshPolicy.Rules, 1)
|
||||
|
||||
rule := sshPolicy.Rules[0]
|
||||
require.Len(t, rule.Principals, 2, "user1 should have 2 principals")
|
||||
|
||||
principalIPs := make([]string, len(rule.Principals))
|
||||
for i, p := range rule.Principals {
|
||||
principalIPs[i] = p.NodeIP
|
||||
}
|
||||
assert.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, principalIPs)
|
||||
|
||||
// For user2's node: should have no rules (user1's devices can't match user2's self)
|
||||
node3 := nodes[2].View()
|
||||
sshPolicy2, err := policy.compileSSHPolicy(users, node3, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
if sshPolicy2 != nil {
|
||||
assert.Empty(t, sshPolicy2.Rules, "user2 should have no SSH rules since source is user1")
|
||||
}
|
||||
}
|
||||
|
||||
// TestSSHWithAutogroupSelfAndGroup verifies SSH with group sources and autogroup:self destinations
func TestSSHWithAutogroupSelfAndGroup(t *testing.T) {
|
||||
users := types.Users{
|
||||
{Model: gorm.Model{ID: 1}, Name: "user1"},
|
||||
{Model: gorm.Model{ID: 2}, Name: "user2"},
|
||||
{Model: gorm.Model{ID: 3}, Name: "user3"},
|
||||
}
|
||||
|
||||
nodes := types.Nodes{
|
||||
{User: users[0], IPv4: ap("100.64.0.1")},
|
||||
{User: users[0], IPv4: ap("100.64.0.2")},
|
||||
{User: users[1], IPv4: ap("100.64.0.3")},
|
||||
{User: users[1], IPv4: ap("100.64.0.4")},
|
||||
{User: users[2], IPv4: ap("100.64.0.5")},
|
||||
}
|
||||
|
||||
policy := &Policy{
|
||||
Groups: Groups{
|
||||
Group("group:admins"): []Username{Username("user1@"), Username("user2@")},
|
||||
},
|
||||
SSHs: []SSH{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: SSHSrcAliases{gp("group:admins")},
|
||||
Destinations: SSHDstAliases{agp("autogroup:self")},
|
||||
Users: []SSHUser{"root"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := policy.validate()
|
||||
require.NoError(t, err)
|
||||
|
||||
// For user1's node: should allow SSH from user1's devices only (not user2's)
|
||||
node1 := nodes[0].View()
|
||||
sshPolicy, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sshPolicy)
|
||||
require.Len(t, sshPolicy.Rules, 1)
|
||||
|
||||
rule := sshPolicy.Rules[0]
|
||||
require.Len(t, rule.Principals, 2, "user1 should have 2 principals (only user1's nodes)")
|
||||
|
||||
principalIPs := make([]string, len(rule.Principals))
|
||||
for i, p := range rule.Principals {
|
||||
principalIPs[i] = p.NodeIP
|
||||
}
|
||||
assert.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, principalIPs)
|
||||
|
||||
// For user3's node: should have no rules (not in group:admins)
|
||||
node5 := nodes[4].View()
|
||||
sshPolicy2, err := policy.compileSSHPolicy(users, node5, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
if sshPolicy2 != nil {
|
||||
assert.Empty(t, sshPolicy2.Rules, "user3 should have no SSH rules (not in group)")
|
||||
}
|
||||
}
|
||||
|
||||
// TestSSHWithAutogroupSelfExcludesTaggedDevices verifies that tagged devices
// are excluded from both sources and destinations when autogroup:self is used
func TestSSHWithAutogroupSelfExcludesTaggedDevices(t *testing.T) {
|
||||
users := types.Users{
|
||||
{Model: gorm.Model{ID: 1}, Name: "user1"},
|
||||
}
|
||||
|
||||
nodes := types.Nodes{
|
||||
{User: users[0], IPv4: ap("100.64.0.1"), Hostname: "untagged1"},
|
||||
{User: users[0], IPv4: ap("100.64.0.2"), Hostname: "untagged2"},
|
||||
{User: users[0], IPv4: ap("100.64.0.3"), Hostname: "tagged1", ForcedTags: []string{"tag:server"}},
|
||||
{User: users[0], IPv4: ap("100.64.0.4"), Hostname: "tagged2", ForcedTags: []string{"tag:web"}},
|
||||
}
|
||||
|
||||
policy := &Policy{
|
||||
TagOwners: TagOwners{
|
||||
Tag("tag:server"): Owners{up("user1@")},
|
||||
Tag("tag:web"): Owners{up("user1@")},
|
||||
},
|
||||
SSHs: []SSH{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: SSHSrcAliases{agp("autogroup:member")},
|
||||
Destinations: SSHDstAliases{agp("autogroup:self")},
|
||||
Users: []SSHUser{"admin"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := policy.validate()
|
||||
require.NoError(t, err)
|
||||
|
||||
// For untagged node: should only get principals from other untagged nodes
|
||||
node1 := nodes[0].View()
|
||||
sshPolicy, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sshPolicy)
|
||||
require.Len(t, sshPolicy.Rules, 1)
|
||||
|
||||
rule := sshPolicy.Rules[0]
|
||||
require.Len(t, rule.Principals, 2, "should only have 2 principals (untagged nodes)")
|
||||
|
||||
principalIPs := make([]string, len(rule.Principals))
|
||||
for i, p := range rule.Principals {
|
||||
principalIPs[i] = p.NodeIP
|
||||
}
|
||||
assert.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, principalIPs,
|
||||
"should only include untagged devices")
|
||||
|
||||
// For tagged node: should get no SSH rules
|
||||
node3 := nodes[2].View()
|
||||
sshPolicy2, err := policy.compileSSHPolicy(users, node3, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
if sshPolicy2 != nil {
|
||||
assert.Empty(t, sshPolicy2.Rules, "tagged node should get no SSH rules with autogroup:self")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/policy/matcher"
|
||||
"github.com/juanfont/headscale/hscontrol/policy/policyutil"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/rs/zerolog/log"
|
||||
"go4.org/netipx"
|
||||
@@ -38,6 +39,12 @@ type PolicyManager struct {
|
||||
|
||||
// Lazy map of SSH policies
|
||||
sshPolicyMap map[types.NodeID]*tailcfg.SSHPolicy
|
||||
|
||||
// Lazy map of per-node compiled filter rules (unreduced, for autogroup:self)
|
||||
compiledFilterRulesMap map[types.NodeID][]tailcfg.FilterRule
|
||||
// Lazy map of per-node filter rules (reduced, for packet filters)
|
||||
filterRulesMap map[types.NodeID][]tailcfg.FilterRule
|
||||
usesAutogroupSelf bool
|
||||
}
|
||||
|
||||
// NewPolicyManager creates a new PolicyManager from a policy file and a list of users and nodes.
|
||||
@@ -50,10 +57,13 @@ func NewPolicyManager(b []byte, users []types.User, nodes views.Slice[types.Node
|
||||
}
|
||||
|
||||
pm := PolicyManager{
pol: policy,
users: users,
nodes: nodes,
sshPolicyMap: make(map[types.NodeID]*tailcfg.SSHPolicy, nodes.Len()),
compiledFilterRulesMap: make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()),
filterRulesMap: make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()),
usesAutogroupSelf: policy.usesAutogroupSelf(),
}
|
||||
|
||||
_, err = pm.updateLocked()
|
||||
@@ -72,8 +82,18 @@ func (pm *PolicyManager) updateLocked() (bool, error) {
|
||||
// policies for nodes that have changed. Particularly if the only difference is
|
||||
// that nodes has been added or removed.
|
||||
clear(pm.sshPolicyMap)
|
||||
clear(pm.compiledFilterRulesMap)
|
||||
clear(pm.filterRulesMap)
|
||||
|
||||
-filter, err := pm.pol.compileFilterRules(pm.users, pm.nodes)
+// Check if policy uses autogroup:self
+pm.usesAutogroupSelf = pm.pol.usesAutogroupSelf()
+
+var filter []tailcfg.FilterRule
+
+var err error
+
+// Standard compilation for all policies
+filter, err = pm.pol.compileFilterRules(pm.users, pm.nodes)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("compiling filter rules: %w", err)
|
||||
}
|
||||
@@ -218,6 +238,197 @@ func (pm *PolicyManager) Filter() ([]tailcfg.FilterRule, []matcher.Match) {
|
||||
return pm.filter, pm.matchers
|
||||
}
|
||||
|
||||
// BuildPeerMap constructs peer relationship maps for the given nodes.
// For global filters, it uses the global filter matchers for all nodes.
// For autogroup:self policies (empty global filter), it builds per-node
// peer maps using each node's specific filter rules.
func (pm *PolicyManager) BuildPeerMap(nodes views.Slice[types.NodeView]) map[types.NodeID][]types.NodeView {
|
||||
if pm == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
|
||||
// If we have a global filter, use it for all nodes (normal case)
|
||||
if !pm.usesAutogroupSelf {
|
||||
ret := make(map[types.NodeID][]types.NodeView, nodes.Len())
|
||||
|
||||
// Build the map of all peers according to the matchers.
|
||||
// Compared to ReduceNodes, which builds the list per node, we end up with doing
|
||||
// the full work for every node O(n^2), while this will reduce the list as we see
|
||||
// relationships while building the map, making it O(n^2/2) in the end, but with less work per node.
|
||||
for i := range nodes.Len() {
|
||||
for j := i + 1; j < nodes.Len(); j++ {
|
||||
if nodes.At(i).ID() == nodes.At(j).ID() {
|
||||
continue
|
||||
}
|
||||
|
||||
if nodes.At(i).CanAccess(pm.matchers, nodes.At(j)) || nodes.At(j).CanAccess(pm.matchers, nodes.At(i)) {
|
||||
ret[nodes.At(i).ID()] = append(ret[nodes.At(i).ID()], nodes.At(j))
|
||||
ret[nodes.At(j).ID()] = append(ret[nodes.At(j).ID()], nodes.At(i))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// For autogroup:self (empty global filter), build per-node peer relationships
|
||||
ret := make(map[types.NodeID][]types.NodeView, nodes.Len())
|
||||
|
||||
// Pre-compute per-node matchers using unreduced compiled rules
|
||||
// We need unreduced rules to determine peer relationships correctly.
|
||||
// Reduced rules only show destinations where the node is the target,
|
||||
// but peer relationships require the full bidirectional access rules.
|
||||
nodeMatchers := make(map[types.NodeID][]matcher.Match, nodes.Len())
|
||||
for _, node := range nodes.All() {
|
||||
filter, err := pm.compileFilterRulesForNodeLocked(node)
|
||||
if err != nil || len(filter) == 0 {
|
||||
continue
|
||||
}
|
||||
nodeMatchers[node.ID()] = matcher.MatchesFromFilterRules(filter)
|
||||
}
|
||||
|
||||
// Check each node pair for peer relationships.
|
||||
// Start j at i+1 to avoid checking the same pair twice and creating duplicates.
|
||||
// We check both directions (i->j and j->i) since ACLs can be asymmetric.
|
||||
for i := range nodes.Len() {
|
||||
nodeI := nodes.At(i)
|
||||
matchersI, hasFilterI := nodeMatchers[nodeI.ID()]
|
||||
|
||||
for j := i + 1; j < nodes.Len(); j++ {
|
||||
nodeJ := nodes.At(j)
|
||||
matchersJ, hasFilterJ := nodeMatchers[nodeJ.ID()]
|
||||
|
||||
// Check if nodeI can access nodeJ
|
||||
if hasFilterI && nodeI.CanAccess(matchersI, nodeJ) {
|
||||
ret[nodeI.ID()] = append(ret[nodeI.ID()], nodeJ)
|
||||
}
|
||||
|
||||
// Check if nodeJ can access nodeI
|
||||
if hasFilterJ && nodeJ.CanAccess(matchersJ, nodeI) {
|
||||
ret[nodeJ.ID()] = append(ret[nodeJ.ID()], nodeI)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ret
|
||||
}
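// Usage sketch (not part of this change): a caller that needs the peers each node may
// see, for example when building map responses, could consume BuildPeerMap like this.
// The variable names are hypothetical.
//
//	peerMap := pm.BuildPeerMap(allNodes)
//	for _, n := range allNodes.All() {
//		peers := peerMap[n.ID()]
//		// peers holds only the nodes n is allowed to communicate with under the
//		// current policy, including per-node autogroup:self relationships.
//		_ = peers
//	}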
|
||||
|
||||
// compileFilterRulesForNodeLocked returns the unreduced compiled filter rules for a node
// when using autogroup:self. This is used by BuildPeerMap to determine peer relationships.
// For packet filters sent to nodes, use filterForNodeLocked which returns reduced rules.
func (pm *PolicyManager) compileFilterRulesForNodeLocked(node types.NodeView) ([]tailcfg.FilterRule, error) {
|
||||
if pm == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Check if we have cached compiled rules
|
||||
if rules, ok := pm.compiledFilterRulesMap[node.ID()]; ok {
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
// Compile per-node rules with autogroup:self expanded
|
||||
rules, err := pm.pol.compileFilterRulesForNode(pm.users, node, pm.nodes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("compiling filter rules for node: %w", err)
|
||||
}
|
||||
|
||||
// Cache the unreduced compiled rules
|
||||
pm.compiledFilterRulesMap[node.ID()] = rules
|
||||
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
// filterForNodeLocked returns the filter rules for a specific node, already reduced
// to only include rules relevant to that node.
// It is the internal variant of FilterForNode for use when pm.mu is already held;
// BuildPeerMap already holds the lock, so it needs a version that doesn't re-acquire it.
func (pm *PolicyManager) filterForNodeLocked(node types.NodeView) ([]tailcfg.FilterRule, error) {
|
||||
if pm == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if !pm.usesAutogroupSelf {
|
||||
// For global filters, reduce to only rules relevant to this node.
|
||||
// Cache the reduced filter per node for efficiency.
|
||||
if rules, ok := pm.filterRulesMap[node.ID()]; ok {
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
// Use policyutil.ReduceFilterRules for global filter reduction.
|
||||
reducedFilter := policyutil.ReduceFilterRules(node, pm.filter)
|
||||
|
||||
pm.filterRulesMap[node.ID()] = reducedFilter
|
||||
return reducedFilter, nil
|
||||
}
|
||||
|
||||
// For autogroup:self, compile per-node rules then reduce them.
|
||||
// Check if we have cached reduced rules for this node.
|
||||
if rules, ok := pm.filterRulesMap[node.ID()]; ok {
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
// Get unreduced compiled rules
|
||||
compiledRules, err := pm.compileFilterRulesForNodeLocked(node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Reduce the compiled rules to only destinations relevant to this node
|
||||
reducedFilter := policyutil.ReduceFilterRules(node, compiledRules)
|
||||
|
||||
// Cache the reduced filter
|
||||
pm.filterRulesMap[node.ID()] = reducedFilter
|
||||
|
||||
return reducedFilter, nil
|
||||
}
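// Sketch (not part of this change): "reduced" here means the rules are narrowed with
// policyutil.ReduceFilterRules so that only destinations relevant to the node remain;
// the packet filter a node receives does not list destinations it does not host.
//
//	reduced := policyutil.ReduceFilterRules(node, allRules) // allRules is hypothetical
//	// len(reduced) <= len(allRules); the surviving DstPorts entries are the ones
//	// that apply to this node.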
|
||||
|
||||
// FilterForNode returns the filter rules for a specific node, already reduced
// to only include rules relevant to that node.
// If the policy uses autogroup:self, this returns node-specific compiled rules.
// Otherwise, it returns the global filter reduced for this node.
func (pm *PolicyManager) FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error) {
|
||||
if pm == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
|
||||
return pm.filterForNodeLocked(node)
|
||||
}
|
||||
|
||||
// MatchersForNode returns the matchers for peer relationship determination for a specific node.
// These are UNREDUCED matchers - they include all rules where the node could be either source or destination.
// This is different from FilterForNode which returns REDUCED rules for packet filtering.
//
// For global policies: returns the global matchers (same for all nodes)
// For autogroup:self: returns node-specific matchers from unreduced compiled rules
func (pm *PolicyManager) MatchersForNode(node types.NodeView) ([]matcher.Match, error) {
|
||||
if pm == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
|
||||
// For global policies, return the shared global matchers
|
||||
if !pm.usesAutogroupSelf {
|
||||
return pm.matchers, nil
|
||||
}
|
||||
|
||||
// For autogroup:self, get unreduced compiled rules and create matchers
|
||||
compiledRules, err := pm.compileFilterRulesForNodeLocked(node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create matchers from unreduced rules for peer relationship determination
|
||||
return matcher.MatchesFromFilterRules(compiledRules), nil
|
||||
}
|
||||
|
||||
// SetUsers updates the users in the policy manager and updates the filter rules.
|
||||
func (pm *PolicyManager) SetUsers(users []types.User) (bool, error) {
|
||||
if pm == nil {
|
||||
@@ -228,7 +439,23 @@ func (pm *PolicyManager) SetUsers(users []types.User) (bool, error) {
|
||||
defer pm.mu.Unlock()
|
||||
pm.users = users
|
||||
|
||||
return pm.updateLocked()
|
||||
// Clear SSH policy map when users change to force SSH policy recomputation
|
||||
// This ensures that if SSH policy compilation previously failed due to missing users,
|
||||
// it will be retried with the new user list
|
||||
clear(pm.sshPolicyMap)
|
||||
|
||||
changed, err := pm.updateLocked()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// If SSH policies exist, force a policy change when users are updated
|
||||
// This ensures nodes get updated SSH policies even if other policy hashes didn't change
|
||||
if pm.pol != nil && pm.pol.SSHs != nil && len(pm.pol.SSHs) > 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return changed, nil
|
||||
}
|
||||
|
||||
// SetNodes updates the nodes in the policy manager and updates the filter rules.
|
||||
@@ -239,9 +466,41 @@ func (pm *PolicyManager) SetNodes(nodes views.Slice[types.NodeView]) (bool, erro
|
||||
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
|
||||
oldNodeCount := pm.nodes.Len()
|
||||
newNodeCount := nodes.Len()
|
||||
|
||||
// Invalidate cache entries for nodes that changed.
|
||||
// For autogroup:self: invalidate all nodes belonging to affected users (peer changes).
|
||||
// For global policies: invalidate only nodes whose properties changed (IPs, routes).
|
||||
pm.invalidateNodeCache(nodes)
|
||||
|
||||
pm.nodes = nodes
|
||||
|
||||
-return pm.updateLocked()
+nodesChanged := oldNodeCount != newNodeCount
|
||||
|
||||
// When nodes are added/removed, we must recompile filters because:
// 1. User/group aliases (like "user1@") resolve to node IPs
// 2. Filter compilation needs nodes to generate rules
// 3. Without nodes, filters compile to empty (0 rules)
//
// For autogroup:self: return true when nodes change even if the global filter
// hash didn't change. The global filter is empty for autogroup:self (each node
// has its own filter), so the hash never changes. But peer relationships DO
// change when nodes are added/removed, so we must signal this to trigger updates.
// For global policies: the filter must be recompiled to include the new nodes.
if nodesChanged {
|
||||
// Recompile filter with the new node list
|
||||
_, err := pm.updateLocked()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Always return true when nodes changed, even if filter hash didn't change
|
||||
// (can happen with autogroup:self or when nodes are added but don't affect rules)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
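// Usage sketch (not part of this change): callers should treat the boolean returned by
// SetNodes as "something policy-relevant changed" and push updated filters/peer maps to
// connected nodes when it is true. The notify helper below is hypothetical.
//
//	changed, err := pm.SetNodes(newNodes)
//	if err != nil {
//		return err
//	}
//	if changed {
//		notifyConnectedNodes() // hypothetical: resend packet filters and peer maps
//	}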
|
||||
|
||||
func (pm *PolicyManager) NodeCanHaveTag(node types.NodeView, tag string) bool {
|
||||
@@ -383,3 +642,162 @@ func (pm *PolicyManager) DebugString() string {
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// invalidateAutogroupSelfCache intelligently clears only the cache entries that need to be
// invalidated when using autogroup:self policies. This is much more efficient than clearing
// the entire cache.
func (pm *PolicyManager) invalidateAutogroupSelfCache(oldNodes, newNodes views.Slice[types.NodeView]) {
|
||||
// Build maps for efficient lookup
|
||||
oldNodeMap := make(map[types.NodeID]types.NodeView)
|
||||
for _, node := range oldNodes.All() {
|
||||
oldNodeMap[node.ID()] = node
|
||||
}
|
||||
|
||||
newNodeMap := make(map[types.NodeID]types.NodeView)
|
||||
for _, node := range newNodes.All() {
|
||||
newNodeMap[node.ID()] = node
|
||||
}
|
||||
|
||||
// Track which users are affected by changes
|
||||
affectedUsers := make(map[uint]struct{})
|
||||
|
||||
// Check for removed nodes
|
||||
for nodeID, oldNode := range oldNodeMap {
|
||||
if _, exists := newNodeMap[nodeID]; !exists {
|
||||
affectedUsers[oldNode.User().ID] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Check for added nodes
|
||||
for nodeID, newNode := range newNodeMap {
|
||||
if _, exists := oldNodeMap[nodeID]; !exists {
|
||||
affectedUsers[newNode.User().ID] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Check for modified nodes (user changes, tag changes, IP changes)
|
||||
for nodeID, newNode := range newNodeMap {
|
||||
if oldNode, exists := oldNodeMap[nodeID]; exists {
|
||||
// Check if user changed
|
||||
if oldNode.User().ID != newNode.User().ID {
|
||||
affectedUsers[oldNode.User().ID] = struct{}{}
|
||||
affectedUsers[newNode.User().ID] = struct{}{}
|
||||
}
|
||||
|
||||
// Check if tag status changed
|
||||
if oldNode.IsTagged() != newNode.IsTagged() {
|
||||
affectedUsers[newNode.User().ID] = struct{}{}
|
||||
}
|
||||
|
||||
// Check if IPs changed (simple check - could be more sophisticated)
|
||||
oldIPs := oldNode.IPs()
|
||||
newIPs := newNode.IPs()
|
||||
if len(oldIPs) != len(newIPs) {
|
||||
affectedUsers[newNode.User().ID] = struct{}{}
|
||||
} else {
|
||||
// Check if any IPs are different
|
||||
for i, oldIP := range oldIPs {
|
||||
if i >= len(newIPs) || oldIP != newIPs[i] {
|
||||
affectedUsers[newNode.User().ID] = struct{}{}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clear cache entries for affected users only
|
||||
// For autogroup:self, we need to clear all nodes belonging to affected users
|
||||
// because autogroup:self rules depend on the entire user's device set
|
||||
for nodeID := range pm.filterRulesMap {
|
||||
// Find the user for this cached node
|
||||
var nodeUserID uint
|
||||
found := false
|
||||
|
||||
// Check in new nodes first
|
||||
for _, node := range newNodes.All() {
|
||||
if node.ID() == nodeID {
|
||||
nodeUserID = node.User().ID
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// If not found in new nodes, check old nodes
|
||||
if !found {
|
||||
for _, node := range oldNodes.All() {
|
||||
if node.ID() == nodeID {
|
||||
nodeUserID = node.User().ID
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we found the user and they're affected, clear this cache entry
|
||||
if found {
|
||||
if _, affected := affectedUsers[nodeUserID]; affected {
|
||||
delete(pm.compiledFilterRulesMap, nodeID)
|
||||
delete(pm.filterRulesMap, nodeID)
|
||||
}
|
||||
} else {
|
||||
// Node not found in either old or new list, clear it
|
||||
delete(pm.compiledFilterRulesMap, nodeID)
|
||||
delete(pm.filterRulesMap, nodeID)
|
||||
}
|
||||
}
|
||||
|
||||
if len(affectedUsers) > 0 {
|
||||
log.Debug().
|
||||
Int("affected_users", len(affectedUsers)).
|
||||
Int("remaining_cache_entries", len(pm.filterRulesMap)).
|
||||
Msg("Selectively cleared autogroup:self cache for affected users")
|
||||
}
|
||||
}
|
||||
|
||||
// invalidateNodeCache invalidates cache entries based on what changed.
func (pm *PolicyManager) invalidateNodeCache(newNodes views.Slice[types.NodeView]) {
|
||||
if pm.usesAutogroupSelf {
|
||||
// For autogroup:self, a node's filter depends on its peers (same user).
|
||||
// When any node in a user changes, all nodes for that user need invalidation.
|
||||
pm.invalidateAutogroupSelfCache(pm.nodes, newNodes)
|
||||
} else {
|
||||
// For global policies, a node's filter depends only on its own properties.
|
||||
// Only invalidate nodes whose properties actually changed.
|
||||
pm.invalidateGlobalPolicyCache(newNodes)
|
||||
}
|
||||
}
|
||||
|
||||
// invalidateGlobalPolicyCache invalidates only nodes whose properties affecting
// ReduceFilterRules changed. For global policies, each node's filter is independent.
func (pm *PolicyManager) invalidateGlobalPolicyCache(newNodes views.Slice[types.NodeView]) {
|
||||
oldNodeMap := make(map[types.NodeID]types.NodeView)
|
||||
for _, node := range pm.nodes.All() {
|
||||
oldNodeMap[node.ID()] = node
|
||||
}
|
||||
|
||||
newNodeMap := make(map[types.NodeID]types.NodeView)
|
||||
for _, node := range newNodes.All() {
|
||||
newNodeMap[node.ID()] = node
|
||||
}
|
||||
|
||||
// Invalidate nodes whose properties changed
|
||||
for nodeID, newNode := range newNodeMap {
|
||||
oldNode, existed := oldNodeMap[nodeID]
|
||||
if !existed {
|
||||
// New node - no cache entry yet, will be lazily calculated
|
||||
continue
|
||||
}
|
||||
|
||||
if newNode.HasNetworkChanges(oldNode) {
|
||||
delete(pm.filterRulesMap, nodeID)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove deleted nodes from cache
|
||||
for nodeID := range pm.filterRulesMap {
|
||||
if _, exists := newNodeMap[nodeID]; !exists {
|
||||
delete(pm.filterRulesMap, nodeID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
@@ -66,3 +67,375 @@ func TestPolicyManager(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidateAutogroupSelfCache(t *testing.T) {
|
||||
users := types.Users{
|
||||
{Model: gorm.Model{ID: 1}, Name: "user1", Email: "user1@headscale.net"},
|
||||
{Model: gorm.Model{ID: 2}, Name: "user2", Email: "user2@headscale.net"},
|
||||
{Model: gorm.Model{ID: 3}, Name: "user3", Email: "user3@headscale.net"},
|
||||
}
|
||||
|
||||
policy := `{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["autogroup:member"],
|
||||
"dst": ["autogroup:self:*"]
|
||||
}
|
||||
]
|
||||
}`
|
||||
|
||||
initialNodes := types.Nodes{
|
||||
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
|
||||
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
|
||||
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
|
||||
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
|
||||
}
|
||||
|
||||
for i, n := range initialNodes {
|
||||
n.ID = types.NodeID(i + 1)
|
||||
}
|
||||
|
||||
pm, err := NewPolicyManager([]byte(policy), users, initialNodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Add to cache by calling FilterForNode for each node
|
||||
for _, n := range initialNodes {
|
||||
_, err := pm.FilterForNode(n.View())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.Equal(t, len(initialNodes), len(pm.filterRulesMap))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
newNodes types.Nodes
|
||||
expectedCleared int
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "no_changes",
|
||||
newNodes: types.Nodes{
|
||||
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
|
||||
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
|
||||
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
|
||||
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
|
||||
},
|
||||
expectedCleared: 0,
|
||||
description: "No changes should clear no cache entries",
|
||||
},
|
||||
{
|
||||
name: "node_added",
|
||||
newNodes: types.Nodes{
|
||||
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
|
||||
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
|
||||
node("user1-node3", "100.64.0.5", "fd7a:115c:a1e0::5", users[0], nil), // New node
|
||||
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
|
||||
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
|
||||
},
|
||||
expectedCleared: 2, // user1's existing nodes should be cleared
|
||||
description: "Adding a node should clear cache for that user's existing nodes",
|
||||
},
|
||||
{
|
||||
name: "node_removed",
|
||||
newNodes: types.Nodes{
|
||||
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
|
||||
// user1-node2 removed
|
||||
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
|
||||
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
|
||||
},
|
||||
expectedCleared: 2, // user1's remaining node + removed node should be cleared
|
||||
description: "Removing a node should clear cache for that user's remaining nodes",
|
||||
},
|
||||
{
|
||||
name: "user_changed",
|
||||
newNodes: types.Nodes{
|
||||
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
|
||||
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[2], nil), // Changed to user3
|
||||
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
|
||||
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
|
||||
},
|
||||
expectedCleared: 3, // user1's two nodes (old user) + user3's node (new user) should be cleared
|
||||
description: "Changing a node's user should clear cache for both old and new users",
|
||||
},
|
||||
{
|
||||
name: "ip_changed",
|
||||
newNodes: types.Nodes{
|
||||
node("user1-node1", "100.64.0.10", "fd7a:115c:a1e0::10", users[0], nil), // IP changed
|
||||
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
|
||||
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
|
||||
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
|
||||
},
|
||||
expectedCleared: 2, // user1's nodes should be cleared
|
||||
description: "Changing a node's IP should clear cache for that user's nodes",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
for i, n := range tt.newNodes {
|
||||
found := false
|
||||
for _, origNode := range initialNodes {
|
||||
if n.Hostname == origNode.Hostname {
|
||||
n.ID = origNode.ID
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
n.ID = types.NodeID(len(initialNodes) + i + 1)
|
||||
}
|
||||
}
|
||||
|
||||
pm.filterRulesMap = make(map[types.NodeID][]tailcfg.FilterRule)
|
||||
for _, n := range initialNodes {
|
||||
_, err := pm.FilterForNode(n.View())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
initialCacheSize := len(pm.filterRulesMap)
|
||||
require.Equal(t, len(initialNodes), initialCacheSize)
|
||||
|
||||
pm.invalidateAutogroupSelfCache(initialNodes.ViewSlice(), tt.newNodes.ViewSlice())
|
||||
|
||||
// Verify the expected number of cache entries were cleared
|
||||
finalCacheSize := len(pm.filterRulesMap)
|
||||
clearedEntries := initialCacheSize - finalCacheSize
|
||||
require.Equal(t, tt.expectedCleared, clearedEntries, tt.description)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestInvalidateGlobalPolicyCache tests the cache invalidation logic for global policies.
func TestInvalidateGlobalPolicyCache(t *testing.T) {
|
||||
mustIPPtr := func(s string) *netip.Addr {
|
||||
ip := netip.MustParseAddr(s)
|
||||
return &ip
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
oldNodes types.Nodes
|
||||
newNodes types.Nodes
|
||||
initialCache map[types.NodeID][]tailcfg.FilterRule
|
||||
expectedCacheAfter map[types.NodeID]bool // true = should exist, false = should not exist
|
||||
}{
|
||||
{
|
||||
name: "node property changed - invalidates only that node",
|
||||
oldNodes: types.Nodes{
|
||||
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
|
||||
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
|
||||
},
|
||||
newNodes: types.Nodes{
|
||||
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.99")}, // Changed
|
||||
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")}, // Unchanged
|
||||
},
|
||||
initialCache: map[types.NodeID][]tailcfg.FilterRule{
|
||||
1: {},
|
||||
2: {},
|
||||
},
|
||||
expectedCacheAfter: map[types.NodeID]bool{
|
||||
1: false, // Invalidated
|
||||
2: true, // Preserved
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple nodes changed",
|
||||
oldNodes: types.Nodes{
|
||||
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
|
||||
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
|
||||
&types.Node{ID: 3, IPv4: mustIPPtr("100.64.0.3")},
|
||||
},
|
||||
newNodes: types.Nodes{
|
||||
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.99")}, // Changed
|
||||
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")}, // Unchanged
|
||||
&types.Node{ID: 3, IPv4: mustIPPtr("100.64.0.88")}, // Changed
|
||||
},
|
||||
initialCache: map[types.NodeID][]tailcfg.FilterRule{
|
||||
1: {},
|
||||
2: {},
|
||||
3: {},
|
||||
},
|
||||
expectedCacheAfter: map[types.NodeID]bool{
|
||||
1: false, // Invalidated
|
||||
2: true, // Preserved
|
||||
3: false, // Invalidated
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "node deleted - removes from cache",
|
||||
oldNodes: types.Nodes{
|
||||
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
|
||||
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
|
||||
},
|
||||
newNodes: types.Nodes{
|
||||
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
|
||||
},
|
||||
initialCache: map[types.NodeID][]tailcfg.FilterRule{
|
||||
1: {},
|
||||
2: {},
|
||||
},
|
||||
expectedCacheAfter: map[types.NodeID]bool{
|
||||
1: false, // Deleted
|
||||
2: true, // Preserved
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "node added - no cache invalidation needed",
|
||||
oldNodes: types.Nodes{
|
||||
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
|
||||
},
|
||||
newNodes: types.Nodes{
|
||||
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
|
||||
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")}, // New
|
||||
},
|
||||
initialCache: map[types.NodeID][]tailcfg.FilterRule{
|
||||
1: {},
|
||||
},
|
||||
expectedCacheAfter: map[types.NodeID]bool{
|
||||
1: true, // Preserved
|
||||
2: false, // Not in cache (new node)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no changes - preserves all cache",
|
||||
oldNodes: types.Nodes{
|
||||
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
|
||||
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
|
||||
},
|
||||
newNodes: types.Nodes{
|
||||
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
|
||||
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
|
||||
},
|
||||
initialCache: map[types.NodeID][]tailcfg.FilterRule{
|
||||
1: {},
|
||||
2: {},
|
||||
},
|
||||
expectedCacheAfter: map[types.NodeID]bool{
|
||||
1: true,
|
||||
2: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "routes changed - invalidates that node only",
|
||||
oldNodes: types.Nodes{
|
||||
&types.Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}},
|
||||
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")},
|
||||
},
|
||||
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
|
||||
},
|
||||
newNodes: types.Nodes{
|
||||
&types.Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}},
|
||||
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}, // Changed
|
||||
},
|
||||
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
|
||||
},
|
||||
initialCache: map[types.NodeID][]tailcfg.FilterRule{
|
||||
1: {},
|
||||
2: {},
|
||||
},
|
||||
expectedCacheAfter: map[types.NodeID]bool{
|
||||
1: false, // Invalidated
|
||||
2: true, // Preserved
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
pm := &PolicyManager{
|
||||
nodes: tt.oldNodes.ViewSlice(),
|
||||
filterRulesMap: tt.initialCache,
|
||||
usesAutogroupSelf: false,
|
||||
}
|
||||
|
||||
pm.invalidateGlobalPolicyCache(tt.newNodes.ViewSlice())
|
||||
|
||||
// Verify cache state
|
||||
for nodeID, shouldExist := range tt.expectedCacheAfter {
|
||||
_, exists := pm.filterRulesMap[nodeID]
|
||||
require.Equal(t, shouldExist, exists, "node %d cache existence mismatch", nodeID)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestAutogroupSelfReducedVsUnreducedRules verifies that:
// 1. BuildPeerMap uses unreduced compiled rules for determining peer relationships
// 2. FilterForNode returns reduced compiled rules for packet filters
func TestAutogroupSelfReducedVsUnreducedRules(t *testing.T) {
|
||||
user1 := types.User{Model: gorm.Model{ID: 1}, Name: "user1", Email: "user1@headscale.net"}
|
||||
user2 := types.User{Model: gorm.Model{ID: 2}, Name: "user2", Email: "user2@headscale.net"}
|
||||
users := types.Users{user1, user2}
|
||||
|
||||
// Create two nodes
|
||||
node1 := node("node1", "100.64.0.1", "fd7a:115c:a1e0::1", user1, nil)
|
||||
node1.ID = 1
|
||||
node2 := node("node2", "100.64.0.2", "fd7a:115c:a1e0::2", user2, nil)
|
||||
node2.ID = 2
|
||||
nodes := types.Nodes{node1, node2}
|
||||
|
||||
// Policy with autogroup:self - all members can reach their own devices
|
||||
policyStr := `{
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["autogroup:member"],
|
||||
"dst": ["autogroup:self:*"]
|
||||
}
|
||||
]
|
||||
}`
|
||||
|
||||
pm, err := NewPolicyManager([]byte(policyStr), users, nodes.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
require.True(t, pm.usesAutogroupSelf, "policy should use autogroup:self")
|
||||
|
||||
// Test FilterForNode returns reduced rules
|
||||
// For node1: should have rules where node1 is in destinations (its own IP)
|
||||
filterNode1, err := pm.FilterForNode(nodes[0].View())
|
||||
require.NoError(t, err)
|
||||
|
||||
// For node2: should have rules where node2 is in destinations (its own IP)
|
||||
filterNode2, err := pm.FilterForNode(nodes[1].View())
|
||||
require.NoError(t, err)
|
||||
|
||||
// FilterForNode should return reduced rules - verify they only contain the node's own IPs as destinations
|
||||
// For node1, destinations should only be node1's IPs
|
||||
node1IPs := []string{"100.64.0.1/32", "100.64.0.1", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::1"}
|
||||
for _, rule := range filterNode1 {
|
||||
for _, dst := range rule.DstPorts {
|
||||
require.Contains(t, node1IPs, dst.IP,
|
||||
"node1 filter should only contain node1's IPs as destinations")
|
||||
}
|
||||
}
|
||||
|
||||
// For node2, destinations should only be node2's IPs
|
||||
node2IPs := []string{"100.64.0.2/32", "100.64.0.2", "fd7a:115c:a1e0::2/128", "fd7a:115c:a1e0::2"}
|
||||
for _, rule := range filterNode2 {
|
||||
for _, dst := range rule.DstPorts {
|
||||
require.Contains(t, node2IPs, dst.IP,
|
||||
"node2 filter should only contain node2's IPs as destinations")
|
||||
}
|
||||
}
|
||||
|
||||
// Test BuildPeerMap uses unreduced rules
|
||||
peerMap := pm.BuildPeerMap(nodes.ViewSlice())
|
||||
|
||||
// According to the policy, user1 can reach autogroup:self (which expands to node1's own IPs for node1)
|
||||
// So node1 should be able to reach itself, but since we're looking at peer relationships,
|
||||
// node1 should NOT have itself in the peer map (nodes don't peer with themselves)
|
||||
// node2 should also not have any peers since user2 has no rules allowing it to reach anyone
|
||||
|
||||
// Verify peer relationships based on unreduced rules
|
||||
// With unreduced rules, BuildPeerMap can properly determine that:
|
||||
// - node1 can access autogroup:self (its own IPs)
|
||||
// - node2 cannot access node1
|
||||
require.Empty(t, peerMap[node1.ID], "node1 should have no peers (can only reach itself)")
|
||||
require.Empty(t, peerMap[node2.ID], "node2 should have no peers")
|
||||
}
|
||||
|
||||
@@ -32,6 +32,8 @@ var policyJSONOpts = []json.Options{
|
||||
|
||||
const Wildcard = Asterix(0)
|
||||
|
||||
var ErrAutogroupSelfRequiresPerNodeResolution = errors.New("autogroup:self requires per-node resolution and cannot be resolved in this context")
|
||||
|
||||
type Asterix int
|
||||
|
||||
func (a Asterix) Validate() error {
|
||||
@@ -485,9 +487,7 @@ const (
|
||||
AutoGroupMember AutoGroup = "autogroup:member"
|
||||
AutoGroupNonRoot AutoGroup = "autogroup:nonroot"
|
||||
AutoGroupTagged AutoGroup = "autogroup:tagged"
|
||||
|
||||
-// These are not yet implemented.
-AutoGroupSelf AutoGroup = "autogroup:self"
+AutoGroupSelf AutoGroup = "autogroup:self"
|
||||
)
|
||||
|
||||
var autogroups = []AutoGroup{
|
||||
@@ -495,6 +495,7 @@ var autogroups = []AutoGroup{
|
||||
AutoGroupMember,
|
||||
AutoGroupNonRoot,
|
||||
AutoGroupTagged,
|
||||
AutoGroupSelf,
|
||||
}
|
||||
|
||||
func (ag AutoGroup) Validate() error {
|
||||
@@ -590,6 +591,12 @@ func (ag AutoGroup) Resolve(p *Policy, users types.Users, nodes views.Slice[type
|
||||
|
||||
return build.IPSet()
|
||||
|
||||
case AutoGroupSelf:
|
||||
// autogroup:self represents all devices owned by the same user.
|
||||
// This cannot be resolved in the general context and should be handled
|
||||
// specially during policy compilation per-node for security.
|
||||
return nil, ErrAutogroupSelfRequiresPerNodeResolution
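// Sketch (not part of this change): generic callers of Resolve are expected to treat
// this sentinel as "defer to per-node compilation" rather than as a hard failure,
// roughly:
//
//	ips, err := ag.Resolve(pol, users, nodes)
//	if errors.Is(err, ErrAutogroupSelfRequiresPerNodeResolution) {
//		// handled later by the per-node filter/SSH compilation
//	} else if err != nil {
//		return nil, err
//	}
//	_ = ips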
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown autogroup %q", ag)
|
||||
}
|
||||
@@ -1586,11 +1593,11 @@ type Policy struct {
|
||||
var (
|
||||
// TODO(kradalby): Add these checks for tagOwners and autoApprovers.
autogroupForSrc = []AutoGroup{AutoGroupMember, AutoGroupTagged}
-autogroupForDst = []AutoGroup{AutoGroupInternet, AutoGroupMember, AutoGroupTagged}
+autogroupForDst = []AutoGroup{AutoGroupInternet, AutoGroupMember, AutoGroupTagged, AutoGroupSelf}
autogroupForSSHSrc = []AutoGroup{AutoGroupMember, AutoGroupTagged}
-autogroupForSSHDst = []AutoGroup{AutoGroupMember, AutoGroupTagged}
+autogroupForSSHDst = []AutoGroup{AutoGroupMember, AutoGroupTagged, AutoGroupSelf}
autogroupForSSHUser = []AutoGroup{AutoGroupNonRoot}
-autogroupNotSupported = []AutoGroup{AutoGroupSelf}
+autogroupNotSupported = []AutoGroup{}
|
||||
)
|
||||
|
||||
func validateAutogroupSupported(ag *AutoGroup) error {
|
||||
@@ -1614,6 +1621,10 @@ func validateAutogroupForSrc(src *AutoGroup) error {
|
||||
return errors.New(`"autogroup:internet" used in source, it can only be used in ACL destinations`)
|
||||
}
|
||||
|
||||
if src.Is(AutoGroupSelf) {
|
||||
return errors.New(`"autogroup:self" used in source, it can only be used in ACL destinations`)
|
||||
}
|
||||
|
||||
if !slices.Contains(autogroupForSrc, *src) {
|
||||
return fmt.Errorf("autogroup %q is not supported for ACL sources, can be %v", *src, autogroupForSrc)
|
||||
}
|
||||
@@ -2112,3 +2123,40 @@ func validateProtocolPortCompatibility(protocol Protocol, destinations []AliasWi
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// usesAutogroupSelf checks if the policy uses autogroup:self in any ACL or SSH rules.
func (p *Policy) usesAutogroupSelf() bool {
|
||||
if p == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check ACL rules
|
||||
for _, acl := range p.ACLs {
|
||||
for _, src := range acl.Sources {
|
||||
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, dest := range acl.Destinations {
|
||||
if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check SSH rules
|
||||
for _, ssh := range p.SSHs {
|
||||
for _, src := range ssh.Sources {
|
||||
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, dest := range ssh.Destinations {
|
||||
if ag, ok := dest.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
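// Example (not part of this change): a policy of the shape used in the tests, e.g.
//
//	{"acls": [{"action": "accept", "src": ["autogroup:member"], "dst": ["autogroup:self:*"]}]}
//
// makes usesAutogroupSelf return true, which switches the PolicyManager to per-node
// filter compilation instead of a single global filter.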
|
||||
|
||||
@@ -459,7 +459,7 @@ func TestUnmarshalPolicy(t *testing.T) {
|
||||
],
|
||||
}
|
||||
`,
|
||||
-wantErr: `AutoGroup is invalid, got: "autogroup:invalid", must be one of [autogroup:internet autogroup:member autogroup:nonroot autogroup:tagged]`,
+wantErr: `AutoGroup is invalid, got: "autogroup:invalid", must be one of [autogroup:internet autogroup:member autogroup:nonroot autogroup:tagged autogroup:self]`,
|
||||
},
|
||||
{
|
||||
name: "undefined-hostname-errors-2490",
|
||||
@@ -1881,6 +1881,38 @@ func TestResolvePolicy(t *testing.T) {
|
||||
mp("100.100.101.7/32"), // Multiple forced tags
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "autogroup-self",
|
||||
toResolve: ptr.To(AutoGroupSelf),
|
||||
nodes: types.Nodes{
|
||||
{
|
||||
User: users["testuser"],
|
||||
IPv4: ap("100.100.101.1"),
|
||||
},
|
||||
{
|
||||
User: users["testuser2"],
|
||||
IPv4: ap("100.100.101.2"),
|
||||
},
|
||||
{
|
||||
User: users["testuser"],
|
||||
ForcedTags: []string{"tag:test"},
|
||||
IPv4: ap("100.100.101.3"),
|
||||
},
|
||||
{
|
||||
User: users["testuser2"],
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RequestTags: []string{"tag:test"},
|
||||
},
|
||||
IPv4: ap("100.100.101.4"),
|
||||
},
|
||||
},
|
||||
pol: &Policy{
|
||||
TagOwners: TagOwners{
|
||||
Tag("tag:test"): Owners{ptr.To(Username("testuser@"))},
|
||||
},
|
||||
},
|
||||
wantErr: "autogroup:self requires per-node resolution",
|
||||
},
|
||||
{
|
||||
name: "autogroup-invalid",
|
||||
toResolve: ptr.To(AutoGroup("autogroup:invalid")),
|
||||
|
||||
@@ -197,11 +197,12 @@ func (m *mapSession) serveLongPoll() {
|
||||
m.keepAliveTicker = time.NewTicker(m.keepAlive)
|
||||
|
||||
// Process the initial MapRequest to update node state (endpoints, hostinfo, etc.)
|
||||
-// CRITICAL: This must be done BEFORE calling Connect() to ensure routes are properly
-// synchronized. When nodes reconnect, they send their hostinfo with announced routes
-// in the MapRequest. We need this data in NodeStore before Connect() sets up the
-// primary routes, otherwise SubnetRoutes() returns empty and the node is removed
-// from AvailableRoutes.
+// This must be done BEFORE calling Connect() to ensure routes are properly synchronized.
+// When nodes reconnect, they send their hostinfo with announced routes in the MapRequest.
+// We need this data in NodeStore before Connect() sets up the primary routes, because
+// SubnetRoutes() calculates the intersection of announced and approved routes. If we
+// call Connect() first, SubnetRoutes() returns empty (no announced routes yet), causing
+// the node to be incorrectly removed from AvailableRoutes.
|
||||
mapReqChange, err := m.h.state.UpdateNodeFromMapRequest(m.node.ID, m.req)
|
||||
if err != nil {
|
||||
m.errf(err, "failed to update node from initial MapRequest")
|
||||
|
||||
@@ -60,9 +60,6 @@ type DebugStringInfo struct {
|
||||
|
||||
// DebugOverview returns a comprehensive overview of the current state for debugging.
|
||||
func (s *State) DebugOverview() string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
allNodes := s.nodeStore.ListNodes()
|
||||
users, _ := s.ListAllUsers()
|
||||
|
||||
@@ -270,9 +267,6 @@ func (s *State) PolicyDebugString() string {
|
||||
|
||||
// DebugOverviewJSON returns a structured overview of the current state for debugging.
|
||||
func (s *State) DebugOverviewJSON() DebugOverviewInfo {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
allNodes := s.nodeStore.ListNodes()
|
||||
users, _ := s.ListAllUsers()
|
||||
|
||||
|
||||
@@ -33,8 +33,8 @@ func TestNodeStoreDebugString(t *testing.T) {
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
|
||||
-store.PutNode(node1)
-store.PutNode(node2)
+_ = store.PutNode(node1)
+_ = store.PutNode(node2)
|
||||
|
||||
return store
|
||||
},
|
||||
|
||||
460
hscontrol/state/ephemeral_test.go
Normal file
@@ -0,0 +1,460 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
// TestEphemeralNodeDeleteWithConcurrentUpdate tests the race condition where UpdateNode and DeleteNode
// are called concurrently and may be batched together. This reproduces the issue where ephemeral nodes
// are not properly deleted during logout because UpdateNodeFromMapRequest returns a stale node view
// after the node has been deleted from the NodeStore.
func TestEphemeralNodeDeleteWithConcurrentUpdate(t *testing.T) {
|
||||
// Create a simple test node
|
||||
node := createTestNode(1, 1, "test-user", "test-node")
|
||||
|
||||
// Create NodeStore
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
// Put the node in the store
|
||||
resultNode := store.PutNode(node)
|
||||
require.True(t, resultNode.Valid(), "initial PutNode should return valid node")
|
||||
|
||||
// Verify node exists
|
||||
retrievedNode, found := store.GetNode(node.ID)
|
||||
require.True(t, found)
|
||||
require.Equal(t, node.ID, retrievedNode.ID())
|
||||
|
||||
// Test scenario: UpdateNode is called, returns a node view from the batch,
|
||||
// but in the same batch a DeleteNode removes the node.
|
||||
// This simulates what happens when:
|
||||
// 1. UpdateNodeFromMapRequest calls UpdateNode and gets back updatedNode
|
||||
// 2. At the same time, handleLogout calls DeleteNode
|
||||
// 3. They get batched together: [UPDATE, DELETE]
|
||||
// 4. UPDATE modifies the node, DELETE removes it
|
||||
// 5. UpdateNode returns a node view based on the state AFTER both operations
|
||||
// 6. If DELETE came after UPDATE, the returned node should be invalid
|
||||
|
||||
done := make(chan bool, 2)
|
||||
var updatedNode types.NodeView
|
||||
var updateOk bool
|
||||
|
||||
// Goroutine 1: UpdateNode (simulates UpdateNodeFromMapRequest)
|
||||
go func() {
|
||||
updatedNode, updateOk = store.UpdateNode(node.ID, func(n *types.Node) {
|
||||
n.LastSeen = ptr.To(time.Now())
|
||||
})
|
||||
done <- true
|
||||
}()
|
||||
|
||||
// Goroutine 2: DeleteNode (simulates handleLogout for ephemeral node)
|
||||
go func() {
|
||||
// Small delay to increase chance of batching together
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
store.DeleteNode(node.ID)
|
||||
done <- true
|
||||
}()
|
||||
|
||||
// Wait for both operations
|
||||
<-done
|
||||
<-done
|
||||
|
||||
// Give batching time to complete
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// The key assertion: if UpdateNode and DeleteNode were batched together
|
||||
// with DELETE after UPDATE, then UpdateNode should return an invalid node
|
||||
// OR it should return a valid node but the node should no longer exist in the store
|
||||
|
||||
_, found = store.GetNode(node.ID)
|
||||
assert.False(t, found, "node should be deleted from NodeStore")
|
||||
|
||||
// If the update happened before delete in the batch, the returned node might be invalid
|
||||
if updateOk {
|
||||
t.Logf("UpdateNode returned ok=true, valid=%v", updatedNode.Valid())
|
||||
// This is the bug scenario - UpdateNode thinks it succeeded but node is gone
|
||||
if updatedNode.Valid() {
|
||||
t.Logf("WARNING: UpdateNode returned valid node but node was deleted - this indicates the race condition bug")
|
||||
}
|
||||
} else {
|
||||
t.Logf("UpdateNode correctly returned ok=false (node deleted in same batch)")
|
||||
}
|
||||
}
|
||||
|
||||
// TestUpdateNodeReturnsInvalidWhenDeletedInSameBatch specifically tests that when
// UpdateNode and DeleteNode are in the same batch with DELETE after UPDATE,
// the UpdateNode should return an invalid node view.
func TestUpdateNodeReturnsInvalidWhenDeletedInSameBatch(t *testing.T) {
|
||||
node := createTestNode(2, 1, "test-user", "test-node-2")
|
||||
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
// Put node in store
|
||||
_ = store.PutNode(node)
|
||||
|
||||
// Simulate the exact sequence: UpdateNode gets queued, then DeleteNode gets queued,
|
||||
// they batch together, and we check what UpdateNode returns
|
||||
|
||||
resultChan := make(chan struct {
|
||||
node types.NodeView
|
||||
ok bool
|
||||
})
|
||||
|
||||
// Start UpdateNode - it will block until batch is applied
|
||||
go func() {
|
||||
node, ok := store.UpdateNode(node.ID, func(n *types.Node) {
|
||||
n.LastSeen = ptr.To(time.Now())
|
||||
})
|
||||
resultChan <- struct {
|
||||
node types.NodeView
|
||||
ok bool
|
||||
}{node, ok}
|
||||
}()
|
||||
|
||||
// Give UpdateNode a moment to queue its work
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
|
||||
// Now queue DeleteNode - should batch with the UPDATE
|
||||
store.DeleteNode(node.ID)
|
||||
|
||||
// Get the result from UpdateNode
|
||||
result := <-resultChan
|
||||
|
||||
// Wait for batch to complete
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Node should be deleted
|
||||
_, found := store.GetNode(node.ID)
|
||||
assert.False(t, found, "node should be deleted")
|
||||
|
||||
// The critical check: what did UpdateNode return?
|
||||
// After the commit c6b09289988f34398eb3157e31ba092eb8721a9f,
|
||||
// UpdateNode returns the node state from the batch.
|
||||
// If DELETE came after UPDATE in the batch, the node doesn't exist anymore,
|
||||
// so UpdateNode should return (invalid, false)
|
||||
t.Logf("UpdateNode returned: ok=%v, valid=%v", result.ok, result.node.Valid())
|
||||
|
||||
// This is the expected behavior - if node was deleted in same batch,
|
||||
// UpdateNode should return invalid node
|
||||
if result.ok && result.node.Valid() {
|
||||
t.Error("BUG: UpdateNode returned valid node even though it was deleted in same batch")
|
||||
}
|
||||
}
|
||||
|
||||
// TestPersistNodeToDBPreventsRaceCondition tests that persistNodeToDB correctly handles
|
||||
// the race condition where a node is deleted after UpdateNode returns but before
|
||||
// persistNodeToDB is called. This reproduces the ephemeral node deletion bug.
|
||||
func TestPersistNodeToDBPreventsRaceCondition(t *testing.T) {
|
||||
node := createTestNode(3, 1, "test-user", "test-node-3")
|
||||
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
// Put node in store
|
||||
_ = store.PutNode(node)
|
||||
|
||||
// Simulate UpdateNode being called
|
||||
updatedNode, ok := store.UpdateNode(node.ID, func(n *types.Node) {
|
||||
n.LastSeen = ptr.To(time.Now())
|
||||
})
|
||||
require.True(t, ok, "UpdateNode should succeed")
|
||||
require.True(t, updatedNode.Valid(), "UpdateNode should return valid node")
|
||||
|
||||
// Now delete the node (simulating ephemeral logout happening concurrently)
|
||||
store.DeleteNode(node.ID)
|
||||
|
||||
// Wait for deletion to complete
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Verify node is deleted
|
||||
_, found := store.GetNode(node.ID)
|
||||
require.False(t, found, "node should be deleted")
|
||||
|
||||
// Now try to use the updatedNode from before the deletion
|
||||
// In the old code, this would re-insert the node into the database
|
||||
// With our fix, GetNode check in persistNodeToDB should prevent this
|
||||
|
||||
// Simulate what persistNodeToDB does - check if node still exists
|
||||
_, exists := store.GetNode(updatedNode.ID())
|
||||
if !exists {
|
||||
t.Log("SUCCESS: persistNodeToDB check would prevent re-insertion of deleted node")
|
||||
} else {
|
||||
t.Error("BUG: Node still exists in NodeStore after deletion")
|
||||
}
|
||||
|
||||
// The key assertion: after deletion, attempting to persist the old updatedNode
|
||||
// should fail because the node no longer exists in NodeStore
|
||||
assert.False(t, exists, "persistNodeToDB should detect node was deleted and refuse to persist")
|
||||
}
|
||||
|
||||
// TestEphemeralNodeLogoutRaceCondition tests the specific race condition that occurs
|
||||
// when an ephemeral node logs out. This reproduces the bug where:
|
||||
// 1. UpdateNodeFromMapRequest calls UpdateNode and receives a node view
|
||||
// 2. Concurrently, handleLogout is called for the ephemeral node and calls DeleteNode
|
||||
// 3. UpdateNode and DeleteNode get batched together
|
||||
// 4. If UpdateNode's result is used to call persistNodeToDB after the deletion,
|
||||
// the node could be re-inserted into the database even though it was deleted
|
||||
func TestEphemeralNodeLogoutRaceCondition(t *testing.T) {
|
||||
ephemeralNode := createTestNode(4, 1, "test-user", "ephemeral-node")
|
||||
ephemeralNode.AuthKey = &types.PreAuthKey{
|
||||
ID: 1,
|
||||
Key: "test-key",
|
||||
Ephemeral: true,
|
||||
}
|
||||
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
// Put ephemeral node in store
|
||||
_ = store.PutNode(ephemeralNode)
|
||||
|
||||
// Simulate concurrent operations:
|
||||
// 1. UpdateNode (from UpdateNodeFromMapRequest during polling)
|
||||
// 2. DeleteNode (from handleLogout when client sends logout request)
|
||||
|
||||
var updatedNode types.NodeView
|
||||
var updateOk bool
|
||||
done := make(chan bool, 2)
|
||||
|
||||
// Goroutine 1: UpdateNode (simulates UpdateNodeFromMapRequest)
|
||||
go func() {
|
||||
updatedNode, updateOk = store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {
|
||||
n.LastSeen = ptr.To(time.Now())
|
||||
})
|
||||
done <- true
|
||||
}()
|
||||
|
||||
// Goroutine 2: DeleteNode (simulates handleLogout for ephemeral node)
|
||||
go func() {
|
||||
time.Sleep(1 * time.Millisecond) // Slight delay to batch operations
|
||||
store.DeleteNode(ephemeralNode.ID)
|
||||
done <- true
|
||||
}()
|
||||
|
||||
// Wait for both operations
|
||||
<-done
|
||||
<-done
|
||||
|
||||
// Give batching time to complete
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Node should be deleted from store
|
||||
_, found := store.GetNode(ephemeralNode.ID)
|
||||
assert.False(t, found, "ephemeral node should be deleted from NodeStore")
|
||||
|
||||
// Critical assertion: if UpdateNode returned before DeleteNode completed,
|
||||
// the updatedNode might be valid but the node is actually deleted.
|
||||
// This is the bug - UpdateNodeFromMapRequest would get a valid node,
|
||||
// then try to persist it, re-inserting the deleted ephemeral node.
|
||||
if updateOk && updatedNode.Valid() {
|
||||
t.Log("UpdateNode returned valid node, but node is deleted - this is the race condition")
|
||||
|
||||
// In the real code, this would cause persistNodeToDB to be called with updatedNode
|
||||
// The fix in persistNodeToDB checks if the node still exists:
|
||||
_, stillExists := store.GetNode(updatedNode.ID())
|
||||
assert.False(t, stillExists, "persistNodeToDB should check NodeStore and find node deleted")
|
||||
} else if !updateOk || !updatedNode.Valid() {
|
||||
t.Log("UpdateNode correctly returned invalid/not-ok result (delete happened in same batch)")
|
||||
}
|
||||
}
|
||||
|
||||
// TestUpdateNodeFromMapRequestEphemeralLogoutSequence tests the exact sequence
|
||||
// that causes ephemeral node logout failures:
|
||||
// 1. Client sends MapRequest with updated endpoint info
|
||||
// 2. UpdateNodeFromMapRequest starts processing, calls UpdateNode
|
||||
// 3. Client sends logout request (past expiry)
|
||||
// 4. handleLogout calls DeleteNode for ephemeral node
|
||||
// 5. UpdateNode and DeleteNode batch together
|
||||
// 6. UpdateNode returns a valid node (from before delete in batch)
|
||||
// 7. persistNodeToDB is called with the stale valid node
|
||||
// 8. Node gets re-inserted into database instead of staying deleted
|
||||
func TestUpdateNodeFromMapRequestEphemeralLogoutSequence(t *testing.T) {
|
||||
ephemeralNode := createTestNode(5, 1, "test-user", "ephemeral-node-5")
|
||||
ephemeralNode.AuthKey = &types.PreAuthKey{
|
||||
ID: 2,
|
||||
Key: "test-key-2",
|
||||
Ephemeral: true,
|
||||
}
|
||||
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
// Initial state: ephemeral node exists
|
||||
_ = store.PutNode(ephemeralNode)
|
||||
|
||||
// Step 1: UpdateNodeFromMapRequest calls UpdateNode
|
||||
// (simulating client sending MapRequest with endpoint updates)
|
||||
updateStarted := make(chan bool)
|
||||
var updatedNode types.NodeView
|
||||
var updateOk bool
|
||||
|
||||
go func() {
|
||||
updateStarted <- true
|
||||
updatedNode, updateOk = store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {
|
||||
n.LastSeen = ptr.To(time.Now())
|
||||
endpoint := netip.MustParseAddrPort("10.0.0.1:41641")
|
||||
n.Endpoints = []netip.AddrPort{endpoint}
|
||||
})
|
||||
}()
|
||||
|
||||
<-updateStarted
|
||||
// Small delay to ensure UpdateNode is queued
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
|
||||
// Step 2: Logout happens - handleLogout calls DeleteNode
|
||||
// (simulating client sending logout with past expiry)
|
||||
store.DeleteNode(ephemeralNode.ID)
|
||||
|
||||
// Wait for batching to complete
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Step 3: Check results
|
||||
_, nodeExists := store.GetNode(ephemeralNode.ID)
|
||||
assert.False(t, nodeExists, "ephemeral node must be deleted after logout")
|
||||
|
||||
// Step 4: Simulate what happens if we try to persist the updatedNode
|
||||
if updateOk && updatedNode.Valid() {
|
||||
// This is the problematic path - UpdateNode returned a valid node
|
||||
// but the node was deleted in the same batch
|
||||
t.Log("UpdateNode returned valid node even though node was deleted")
|
||||
|
||||
// The fix: persistNodeToDB must check NodeStore before persisting
|
||||
_, checkExists := store.GetNode(updatedNode.ID())
|
||||
if checkExists {
|
||||
t.Error("BUG: Node still exists in NodeStore after deletion - should be impossible")
|
||||
} else {
|
||||
t.Log("SUCCESS: persistNodeToDB would detect node is deleted and refuse to persist")
|
||||
}
|
||||
} else {
|
||||
t.Log("UpdateNode correctly indicated node was deleted (returned invalid or not-ok)")
|
||||
}
|
||||
|
||||
// Final assertion: node must not exist
|
||||
_, finalExists := store.GetNode(ephemeralNode.ID)
|
||||
assert.False(t, finalExists, "ephemeral node must remain deleted")
|
||||
}
|
||||
|
||||
// TestUpdateNodeDeletedInSameBatchReturnsInvalid specifically tests that when
|
||||
// UpdateNode and DeleteNode are batched together with DELETE after UPDATE,
|
||||
// UpdateNode returns ok=false to indicate the node was deleted.
|
||||
func TestUpdateNodeDeletedInSameBatchReturnsInvalid(t *testing.T) {
|
||||
node := createTestNode(6, 1, "test-user", "test-node-6")
|
||||
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
// Put node in store
|
||||
_ = store.PutNode(node)
|
||||
|
||||
// Queue UpdateNode
|
||||
updateDone := make(chan struct {
|
||||
node types.NodeView
|
||||
ok bool
|
||||
})
|
||||
|
||||
go func() {
|
||||
updatedNode, ok := store.UpdateNode(node.ID, func(n *types.Node) {
|
||||
n.LastSeen = ptr.To(time.Now())
|
||||
})
|
||||
updateDone <- struct {
|
||||
node types.NodeView
|
||||
ok bool
|
||||
}{updatedNode, ok}
|
||||
}()
|
||||
|
||||
// Small delay to ensure UpdateNode is queued
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
|
||||
// Queue DeleteNode - should batch with UpdateNode
|
||||
store.DeleteNode(node.ID)
|
||||
|
||||
// Get UpdateNode result
|
||||
result := <-updateDone
|
||||
|
||||
// Wait for batch to complete
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Node should be deleted
|
||||
_, exists := store.GetNode(node.ID)
|
||||
assert.False(t, exists, "node should be deleted from store")
|
||||
|
||||
// UpdateNode should indicate the node was deleted
|
||||
// After c6b09289988f34398eb3157e31ba092eb8721a9f, when UPDATE and DELETE
|
||||
// are in the same batch with DELETE after UPDATE, UpdateNode returns
|
||||
// the state after the batch is applied - which means the node doesn't exist
|
||||
assert.False(t, result.ok, "UpdateNode should return ok=false when node deleted in same batch")
|
||||
assert.False(t, result.node.Valid(), "UpdateNode should return invalid node when node deleted in same batch")
|
||||
}
|
||||
|
||||
// TestPersistNodeToDBChecksNodeStoreBeforePersist verifies that persistNodeToDB
|
||||
// checks if the node still exists in NodeStore before persisting to database.
|
||||
// This prevents the race condition where:
|
||||
// 1. UpdateNodeFromMapRequest calls UpdateNode and gets a valid node
|
||||
// 2. Ephemeral node logout calls DeleteNode
|
||||
// 3. UpdateNode and DeleteNode batch together
|
||||
// 4. UpdateNode returns a valid node (from before delete in batch)
|
||||
// 5. UpdateNodeFromMapRequest calls persistNodeToDB with the stale node
|
||||
// 6. persistNodeToDB must detect the node is deleted and refuse to persist
|
||||
func TestPersistNodeToDBChecksNodeStoreBeforePersist(t *testing.T) {
|
||||
ephemeralNode := createTestNode(7, 1, "test-user", "ephemeral-node-7")
|
||||
ephemeralNode.AuthKey = &types.PreAuthKey{
|
||||
ID: 3,
|
||||
Key: "test-key-3",
|
||||
Ephemeral: true,
|
||||
}
|
||||
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
// Put node in store
|
||||
_ = store.PutNode(ephemeralNode)
|
||||
|
||||
// Simulate the race:
|
||||
// 1. UpdateNode is called (from UpdateNodeFromMapRequest)
|
||||
updatedNode, ok := store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {
|
||||
n.LastSeen = ptr.To(time.Now())
|
||||
})
|
||||
require.True(t, ok, "UpdateNode should succeed")
|
||||
require.True(t, updatedNode.Valid(), "UpdateNode should return valid node")
|
||||
|
||||
// 2. Node is deleted (from handleLogout for ephemeral node)
|
||||
store.DeleteNode(ephemeralNode.ID)
|
||||
|
||||
// Wait for deletion
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// 3. Verify node is deleted from store
|
||||
_, exists := store.GetNode(ephemeralNode.ID)
|
||||
require.False(t, exists, "node should be deleted from NodeStore")
|
||||
|
||||
// 4. Simulate what persistNodeToDB does - check if node still exists
|
||||
// The fix in persistNodeToDB checks NodeStore before persisting:
|
||||
// if !exists { return error }
|
||||
// This prevents re-inserting the deleted node into the database
|
||||
|
||||
// Verify the node from UpdateNode is valid but node is gone from store
|
||||
assert.True(t, updatedNode.Valid(), "UpdateNode returned a valid node view")
|
||||
_, stillExists := store.GetNode(updatedNode.ID())
|
||||
assert.False(t, stillExists, "but node should be deleted from NodeStore")
|
||||
|
||||
// This is the critical test: persistNodeToDB must check NodeStore
|
||||
// and refuse to persist if the node doesn't exist anymore
|
||||
// The actual persistNodeToDB implementation does:
|
||||
// _, exists := s.nodeStore.GetNode(node.ID())
|
||||
// if !exists { return error }
|
||||
}
|
||||
@@ -10,9 +10,9 @@ import (
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
// NetInfoFromMapRequest determines the correct NetInfo to use.
|
||||
// netInfoFromMapRequest determines the correct NetInfo to use.
|
||||
// Returns the NetInfo that should be used for this request.
|
||||
func NetInfoFromMapRequest(
|
||||
func netInfoFromMapRequest(
|
||||
nodeID types.NodeID,
|
||||
currentHostinfo *tailcfg.Hostinfo,
|
||||
reqHostinfo *tailcfg.Hostinfo,
|
||||
|
||||
@@ -61,7 +61,7 @@ func TestNetInfoFromMapRequest(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := NetInfoFromMapRequest(nodeID, tt.currentHostinfo, tt.reqHostinfo)
|
||||
result := netInfoFromMapRequest(nodeID, tt.currentHostinfo, tt.reqHostinfo)
|
||||
|
||||
if tt.expectNetInfo == nil {
|
||||
assert.Nil(t, result, "expected nil NetInfo")
|
||||
@@ -100,14 +100,40 @@ func TestNetInfoPreservationInRegistrationFlow(t *testing.T) {
|
||||
}
|
||||
|
||||
// BUG: Using the node being modified (no NetInfo) instead of existing node (has NetInfo)
|
||||
buggyResult := NetInfoFromMapRequest(nodeID, nodeBeingModifiedHostinfo, newRegistrationHostinfo)
|
||||
buggyResult := netInfoFromMapRequest(nodeID, nodeBeingModifiedHostinfo, newRegistrationHostinfo)
|
||||
assert.Nil(t, buggyResult, "Bug: Should return nil when using wrong hostinfo reference")
|
||||
|
||||
// CORRECT: Using the existing node's hostinfo (has NetInfo)
|
||||
correctResult := NetInfoFromMapRequest(nodeID, existingNodeHostinfo, newRegistrationHostinfo)
|
||||
correctResult := netInfoFromMapRequest(nodeID, existingNodeHostinfo, newRegistrationHostinfo)
|
||||
assert.NotNil(t, correctResult, "Fix: Should preserve NetInfo when using correct hostinfo reference")
|
||||
assert.Equal(t, 5, correctResult.PreferredDERP, "Should preserve the DERP region from existing node")
|
||||
})
|
||||
|
||||
t.Run("new_node_creation_for_different_user_should_preserve_netinfo", func(t *testing.T) {
|
||||
// This test covers the scenario where:
|
||||
// 1. A node exists for user1 with NetInfo
|
||||
// 2. The same machine logs in as user2 (different user)
|
||||
// 3. A NEW node is created for user2 (pre-auth key flow)
|
||||
// 4. The new node should preserve NetInfo from the old node
|
||||
|
||||
// Existing node for user1 with NetInfo
|
||||
existingNodeUser1Hostinfo := &tailcfg.Hostinfo{
|
||||
Hostname: "test-node",
|
||||
NetInfo: &tailcfg.NetInfo{PreferredDERP: 7},
|
||||
}
|
||||
|
||||
// New registration request for user2 (no NetInfo yet)
|
||||
newNodeUser2Hostinfo := &tailcfg.Hostinfo{
|
||||
Hostname: "test-node",
|
||||
OS: "linux",
|
||||
// NetInfo is nil - registration request doesn't include it
|
||||
}
|
||||
|
||||
// When creating a new node for user2, we should preserve NetInfo from user1's node
|
||||
result := netInfoFromMapRequest(types.NodeID(2), existingNodeUser1Hostinfo, newNodeUser2Hostinfo)
|
||||
assert.NotNil(t, result, "New node for user2 should preserve NetInfo from user1's node")
|
||||
assert.Equal(t, 7, result.PreferredDERP, "Should preserve DERP region from existing node")
|
||||
})
|
||||
}
|
||||
|
||||
// Simple helper function for tests
|
||||
|
||||
@@ -15,14 +15,15 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
batchSize = 10
|
||||
batchSize = 100
|
||||
batchTimeout = 500 * time.Millisecond
|
||||
)
|
||||
|
||||
const (
|
||||
put = 1
|
||||
del = 2
|
||||
update = 3
|
||||
put = 1
|
||||
del = 2
|
||||
update = 3
|
||||
rebuildPeerMaps = 4
|
||||
)
|
||||
|
||||
const prometheusNamespace = "headscale"
|
||||
@@ -121,10 +122,11 @@ type Snapshot struct {
|
||||
nodesByID map[types.NodeID]types.Node
|
||||
|
||||
// calculated from nodesByID
|
||||
nodesByNodeKey map[key.NodePublic]types.NodeView
|
||||
peersByNode map[types.NodeID][]types.NodeView
|
||||
nodesByUser map[types.UserID][]types.NodeView
|
||||
allNodes []types.NodeView
|
||||
nodesByNodeKey map[key.NodePublic]types.NodeView
|
||||
nodesByMachineKey map[key.MachinePublic]map[types.UserID]types.NodeView
|
||||
peersByNode map[types.NodeID][]types.NodeView
|
||||
nodesByUser map[types.UserID][]types.NodeView
|
||||
allNodes []types.NodeView
|
||||
}
|
||||
|
||||
// PeersFunc is a function that takes a list of nodes and returns a map
|
||||
@@ -135,26 +137,31 @@ type PeersFunc func(nodes []types.NodeView) map[types.NodeID][]types.NodeView
|
||||
|
||||
// work represents a single operation to be performed on the NodeStore.
|
||||
type work struct {
|
||||
op int
|
||||
nodeID types.NodeID
|
||||
node types.Node
|
||||
updateFn UpdateNodeFunc
|
||||
result chan struct{}
|
||||
op int
|
||||
nodeID types.NodeID
|
||||
node types.Node
|
||||
updateFn UpdateNodeFunc
|
||||
result chan struct{}
|
||||
nodeResult chan types.NodeView // Channel to return the resulting node after batch application
|
||||
// For rebuildPeerMaps operation
|
||||
rebuildResult chan struct{}
|
||||
}
|
||||
|
||||
// PutNode adds or updates a node in the store.
|
||||
// If the node already exists, it will be replaced.
|
||||
// If the node does not exist, it will be added.
|
||||
// This is a blocking operation that waits for the write to complete.
|
||||
func (s *NodeStore) PutNode(n types.Node) {
|
||||
// Returns the resulting node after all modifications in the batch have been applied.
|
||||
func (s *NodeStore) PutNode(n types.Node) types.NodeView {
|
||||
timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("put"))
|
||||
defer timer.ObserveDuration()
|
||||
|
||||
work := work{
|
||||
op: put,
|
||||
nodeID: n.ID,
|
||||
node: n,
|
||||
result: make(chan struct{}),
|
||||
op: put,
|
||||
nodeID: n.ID,
|
||||
node: n,
|
||||
result: make(chan struct{}),
|
||||
nodeResult: make(chan types.NodeView, 1),
|
||||
}
|
||||
|
||||
nodeStoreQueueDepth.Inc()
|
||||
@@ -162,7 +169,10 @@ func (s *NodeStore) PutNode(n types.Node) {
|
||||
<-work.result
|
||||
nodeStoreQueueDepth.Dec()
|
||||
|
||||
resultNode := <-work.nodeResult
|
||||
nodeStoreOperations.WithLabelValues("put").Inc()
|
||||
|
||||
return resultNode
|
||||
}
|
||||
|
||||
// UpdateNodeFunc is a function type that takes a pointer to a Node and modifies it.
|
||||
@@ -173,6 +183,7 @@ type UpdateNodeFunc func(n *types.Node)
|
||||
// This is analogous to a database "transaction", or, the caller should
|
||||
// rather collect all data they want to change, and then call this function.
|
||||
// Fewer calls are better.
|
||||
// Returns the resulting node after all modifications in the batch have been applied.
|
||||
//
|
||||
// TODO(kradalby): Technically we could have a version of this that modifies the node
|
||||
// in the current snapshot if _we know_ that the change will not affect the peer relationships.
|
||||
@@ -181,15 +192,16 @@ type UpdateNodeFunc func(n *types.Node)
|
||||
// a lock around the nodesByID map to ensure that no other writes are happening
|
||||
// while we are modifying the node. Which mean we would need to implement read-write locks
|
||||
// on all read operations.
|
||||
func (s *NodeStore) UpdateNode(nodeID types.NodeID, updateFn func(n *types.Node)) {
|
||||
func (s *NodeStore) UpdateNode(nodeID types.NodeID, updateFn func(n *types.Node)) (types.NodeView, bool) {
|
||||
timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("update"))
|
||||
defer timer.ObserveDuration()
|
||||
|
||||
work := work{
|
||||
op: update,
|
||||
nodeID: nodeID,
|
||||
updateFn: updateFn,
|
||||
result: make(chan struct{}),
|
||||
op: update,
|
||||
nodeID: nodeID,
|
||||
updateFn: updateFn,
|
||||
result: make(chan struct{}),
|
||||
nodeResult: make(chan types.NodeView, 1),
|
||||
}
|
||||
|
||||
nodeStoreQueueDepth.Inc()
|
||||
@@ -197,7 +209,11 @@ func (s *NodeStore) UpdateNode(nodeID types.NodeID, updateFn func(n *types.Node)
|
||||
<-work.result
|
||||
nodeStoreQueueDepth.Dec()
|
||||
|
||||
resultNode := <-work.nodeResult
|
||||
nodeStoreOperations.WithLabelValues("update").Inc()
|
||||
|
||||
// Return the node and whether it exists (is valid)
|
||||
return resultNode, resultNode.Valid()
|
||||
}
|
||||
|
||||
// DeleteNode removes a node from the store by its ID.
|
||||
@@ -282,18 +298,39 @@ func (s *NodeStore) applyBatch(batch []work) {
|
||||
nodes := make(map[types.NodeID]types.Node)
|
||||
maps.Copy(nodes, s.data.Load().nodesByID)
|
||||
|
||||
for _, w := range batch {
|
||||
// Track which work items need node results
|
||||
nodeResultRequests := make(map[types.NodeID][]*work)
|
||||
|
||||
// Track rebuildPeerMaps operations
|
||||
var rebuildOps []*work
|
||||
|
||||
for i := range batch {
|
||||
w := &batch[i]
|
||||
switch w.op {
|
||||
case put:
|
||||
nodes[w.nodeID] = w.node
|
||||
if w.nodeResult != nil {
|
||||
nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
|
||||
}
|
||||
case update:
|
||||
// Update the specific node identified by nodeID
|
||||
if n, exists := nodes[w.nodeID]; exists {
|
||||
w.updateFn(&n)
|
||||
nodes[w.nodeID] = n
|
||||
}
|
||||
if w.nodeResult != nil {
|
||||
nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
|
||||
}
|
||||
case del:
|
||||
delete(nodes, w.nodeID)
|
||||
// For delete operations, send an invalid NodeView if requested
|
||||
if w.nodeResult != nil {
|
||||
nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
|
||||
}
|
||||
case rebuildPeerMaps:
|
||||
// rebuildPeerMaps doesn't modify nodes, it just forces the snapshot rebuild
|
||||
// below to recalculate peer relationships using the current peersFunc
|
||||
rebuildOps = append(rebuildOps, w)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -303,8 +340,33 @@ func (s *NodeStore) applyBatch(batch []work) {
|
||||
// Update node count gauge
|
||||
nodeStoreNodesCount.Set(float64(len(nodes)))
|
||||
|
||||
// Send the resulting nodes to all work items that requested them
|
||||
for nodeID, workItems := range nodeResultRequests {
|
||||
if node, exists := nodes[nodeID]; exists {
|
||||
nodeView := node.View()
|
||||
for _, w := range workItems {
|
||||
w.nodeResult <- nodeView
|
||||
close(w.nodeResult)
|
||||
}
|
||||
} else {
|
||||
// Node was deleted or doesn't exist
|
||||
for _, w := range workItems {
|
||||
w.nodeResult <- types.NodeView{} // Send invalid view
|
||||
close(w.nodeResult)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Signal completion for rebuildPeerMaps operations
|
||||
for _, w := range rebuildOps {
|
||||
close(w.rebuildResult)
|
||||
}
|
||||
|
||||
// Signal completion for all other work items
|
||||
for _, w := range batch {
|
||||
close(w.result)
|
||||
if w.op != rebuildPeerMaps {
|
||||
close(w.result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -323,9 +385,10 @@ func snapshotFromNodes(nodes map[types.NodeID]types.Node, peersFunc PeersFunc) S
|
||||
}
|
||||
|
||||
newSnap := Snapshot{
|
||||
nodesByID: nodes,
|
||||
allNodes: allNodes,
|
||||
nodesByNodeKey: make(map[key.NodePublic]types.NodeView),
|
||||
nodesByID: nodes,
|
||||
allNodes: allNodes,
|
||||
nodesByNodeKey: make(map[key.NodePublic]types.NodeView),
|
||||
nodesByMachineKey: make(map[key.MachinePublic]map[types.UserID]types.NodeView),
|
||||
|
||||
// peersByNode is most likely the most expensive operation,
|
||||
// it will use the list of all nodes, combined with the
|
||||
@@ -339,11 +402,19 @@ func snapshotFromNodes(nodes map[types.NodeID]types.Node, peersFunc PeersFunc) S
|
||||
nodesByUser: make(map[types.UserID][]types.NodeView),
|
||||
}
|
||||
|
||||
// Build nodesByUser and nodesByNodeKey maps
|
||||
// Build nodesByUser, nodesByNodeKey, and nodesByMachineKey maps
|
||||
for _, n := range nodes {
|
||||
nodeView := n.View()
|
||||
newSnap.nodesByUser[types.UserID(n.UserID)] = append(newSnap.nodesByUser[types.UserID(n.UserID)], nodeView)
|
||||
userID := types.UserID(n.UserID)
|
||||
|
||||
newSnap.nodesByUser[userID] = append(newSnap.nodesByUser[userID], nodeView)
|
||||
newSnap.nodesByNodeKey[n.NodeKey] = nodeView
|
||||
|
||||
// Build machine key index
|
||||
if newSnap.nodesByMachineKey[n.MachineKey] == nil {
|
||||
newSnap.nodesByMachineKey[n.MachineKey] = make(map[types.UserID]types.NodeView)
|
||||
}
|
||||
newSnap.nodesByMachineKey[n.MachineKey][userID] = nodeView
|
||||
}
|
||||
|
||||
return newSnap
|
||||
@@ -382,19 +453,40 @@ func (s *NodeStore) GetNodeByNodeKey(nodeKey key.NodePublic) (types.NodeView, bo
|
||||
return nodeView, exists
|
||||
}
|
||||
|
||||
// GetNodeByMachineKey returns a node by its machine key. The bool indicates if the node exists.
|
||||
func (s *NodeStore) GetNodeByMachineKey(machineKey key.MachinePublic) (types.NodeView, bool) {
|
||||
// GetNodeByMachineKey returns a node by its machine key and user ID. The bool indicates if the node exists.
|
||||
func (s *NodeStore) GetNodeByMachineKey(machineKey key.MachinePublic, userID types.UserID) (types.NodeView, bool) {
|
||||
timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("get_by_machine_key"))
|
||||
defer timer.ObserveDuration()
|
||||
|
||||
nodeStoreOperations.WithLabelValues("get_by_machine_key").Inc()
|
||||
|
||||
snapshot := s.data.Load()
|
||||
// We don't have a byMachineKey map, so we need to iterate
|
||||
// This could be optimized by adding a byMachineKey map if this becomes a hot path
|
||||
for _, node := range snapshot.nodesByID {
|
||||
if node.MachineKey == machineKey {
|
||||
return node.View(), true
|
||||
if userMap, exists := snapshot.nodesByMachineKey[machineKey]; exists {
|
||||
if node, exists := userMap[userID]; exists {
|
||||
return node, true
|
||||
}
|
||||
}
|
||||
|
||||
return types.NodeView{}, false
|
||||
}
|
||||
|
||||
// GetNodeByMachineKeyAnyUser returns the first node with the given machine key,
|
||||
// regardless of which user it belongs to. This is useful for scenarios like
|
||||
// transferring a node to a different user when re-authenticating with a
|
||||
// different user's auth key.
|
||||
// If multiple nodes exist with the same machine key (different users), the
|
||||
// first one found is returned (order is not guaranteed).
|
||||
func (s *NodeStore) GetNodeByMachineKeyAnyUser(machineKey key.MachinePublic) (types.NodeView, bool) {
|
||||
timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("get_by_machine_key_any_user"))
|
||||
defer timer.ObserveDuration()
|
||||
|
||||
nodeStoreOperations.WithLabelValues("get_by_machine_key_any_user").Inc()
|
||||
|
||||
snapshot := s.data.Load()
|
||||
if userMap, exists := snapshot.nodesByMachineKey[machineKey]; exists {
|
||||
// Return the first node found (order not guaranteed due to map iteration)
|
||||
for _, node := range userMap {
|
||||
return node, true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -471,6 +563,22 @@ func (s *NodeStore) ListPeers(id types.NodeID) views.Slice[types.NodeView] {
|
||||
return views.SliceOf(s.data.Load().peersByNode[id])
|
||||
}
|
||||
|
||||
// RebuildPeerMaps rebuilds the peer relationship map using the current peersFunc.
|
||||
// This must be called after policy changes because peersFunc uses PolicyManager's
|
||||
// filters to determine which nodes can see each other. Without rebuilding, the
|
||||
// peer map would use stale filter data until the next node add/delete.
|
||||
func (s *NodeStore) RebuildPeerMaps() {
|
||||
result := make(chan struct{})
|
||||
|
||||
w := work{
|
||||
op: rebuildPeerMaps,
|
||||
rebuildResult: result,
|
||||
}
|
||||
|
||||
s.writeQueue <- w
|
||||
<-result
|
||||
}
|
||||
|
||||
// ListNodesByUser returns a slice of all nodes for a given user ID.
|
||||
func (s *NodeStore) ListNodesByUser(uid types.UserID) views.Slice[types.NodeView] {
|
||||
timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("list_by_user"))
|
||||
|
||||
@@ -1,7 +1,11 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"runtime"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -249,7 +253,9 @@ func TestNodeStoreOperations(t *testing.T) {
|
||||
name: "add first node",
|
||||
action: func(store *NodeStore) {
|
||||
node := createTestNode(1, 1, "user1", "node1")
|
||||
store.PutNode(node)
|
||||
resultNode := store.PutNode(node)
|
||||
assert.True(t, resultNode.Valid(), "PutNode should return valid node")
|
||||
assert.Equal(t, node.ID, resultNode.ID())
|
||||
|
||||
snapshot := store.data.Load()
|
||||
assert.Len(t, snapshot.nodesByID, 1)
|
||||
@@ -288,7 +294,9 @@ func TestNodeStoreOperations(t *testing.T) {
|
||||
name: "add second node same user",
|
||||
action: func(store *NodeStore) {
|
||||
node2 := createTestNode(2, 1, "user1", "node2")
|
||||
store.PutNode(node2)
|
||||
resultNode := store.PutNode(node2)
|
||||
assert.True(t, resultNode.Valid(), "PutNode should return valid node")
|
||||
assert.Equal(t, types.NodeID(2), resultNode.ID())
|
||||
|
||||
snapshot := store.data.Load()
|
||||
assert.Len(t, snapshot.nodesByID, 2)
|
||||
@@ -308,7 +316,9 @@ func TestNodeStoreOperations(t *testing.T) {
|
||||
name: "add third node different user",
|
||||
action: func(store *NodeStore) {
|
||||
node3 := createTestNode(3, 2, "user2", "node3")
|
||||
store.PutNode(node3)
|
||||
resultNode := store.PutNode(node3)
|
||||
assert.True(t, resultNode.Valid(), "PutNode should return valid node")
|
||||
assert.Equal(t, types.NodeID(3), resultNode.ID())
|
||||
|
||||
snapshot := store.data.Load()
|
||||
assert.Len(t, snapshot.nodesByID, 3)
|
||||
@@ -409,10 +419,14 @@ func TestNodeStoreOperations(t *testing.T) {
|
||||
{
|
||||
name: "update node hostname",
|
||||
action: func(store *NodeStore) {
|
||||
store.UpdateNode(1, func(n *types.Node) {
|
||||
resultNode, ok := store.UpdateNode(1, func(n *types.Node) {
|
||||
n.Hostname = "updated-node1"
|
||||
n.GivenName = "updated-node1"
|
||||
})
|
||||
assert.True(t, ok, "UpdateNode should return true for existing node")
|
||||
assert.True(t, resultNode.Valid(), "Result node should be valid")
|
||||
assert.Equal(t, "updated-node1", resultNode.Hostname())
|
||||
assert.Equal(t, "updated-node1", resultNode.GivenName())
|
||||
|
||||
snapshot := store.data.Load()
|
||||
assert.Equal(t, "updated-node1", snapshot.nodesByID[1].Hostname)
|
||||
@@ -436,10 +450,14 @@ func TestNodeStoreOperations(t *testing.T) {
|
||||
name: "add nodes with odd-even filtering",
|
||||
action: func(store *NodeStore) {
|
||||
// Add nodes in sequence
|
||||
store.PutNode(createTestNode(1, 1, "user1", "node1"))
|
||||
store.PutNode(createTestNode(2, 2, "user2", "node2"))
|
||||
store.PutNode(createTestNode(3, 3, "user3", "node3"))
|
||||
store.PutNode(createTestNode(4, 4, "user4", "node4"))
|
||||
n1 := store.PutNode(createTestNode(1, 1, "user1", "node1"))
|
||||
assert.True(t, n1.Valid())
|
||||
n2 := store.PutNode(createTestNode(2, 2, "user2", "node2"))
|
||||
assert.True(t, n2.Valid())
|
||||
n3 := store.PutNode(createTestNode(3, 3, "user3", "node3"))
|
||||
assert.True(t, n3.Valid())
|
||||
n4 := store.PutNode(createTestNode(4, 4, "user4", "node4"))
|
||||
assert.True(t, n4.Valid())
|
||||
|
||||
snapshot := store.data.Load()
|
||||
assert.Len(t, snapshot.nodesByID, 4)
|
||||
@@ -478,6 +496,328 @@ func TestNodeStoreOperations(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "test batch modifications return correct node state",
|
||||
setupFunc: func(t *testing.T) *NodeStore {
|
||||
node1 := createTestNode(1, 1, "user1", "node1")
|
||||
node2 := createTestNode(2, 1, "user1", "node2")
|
||||
initialNodes := types.Nodes{&node1, &node2}
|
||||
return NewNodeStore(initialNodes, allowAllPeersFunc)
|
||||
},
|
||||
steps: []testStep{
|
||||
{
|
||||
name: "verify initial state",
|
||||
action: func(store *NodeStore) {
|
||||
snapshot := store.data.Load()
|
||||
assert.Len(t, snapshot.nodesByID, 2)
|
||||
assert.Equal(t, "node1", snapshot.nodesByID[1].Hostname)
|
||||
assert.Equal(t, "node2", snapshot.nodesByID[2].Hostname)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "concurrent updates should reflect all batch changes",
|
||||
action: func(store *NodeStore) {
|
||||
// Start multiple updates that will be batched together
|
||||
done1 := make(chan struct{})
|
||||
done2 := make(chan struct{})
|
||||
done3 := make(chan struct{})
|
||||
|
||||
var resultNode1, resultNode2 types.NodeView
|
||||
var newNode3 types.NodeView
|
||||
var ok1, ok2 bool
|
||||
|
||||
// These should all be processed in the same batch
|
||||
go func() {
|
||||
resultNode1, ok1 = store.UpdateNode(1, func(n *types.Node) {
|
||||
n.Hostname = "batch-updated-node1"
|
||||
n.GivenName = "batch-given-1"
|
||||
})
|
||||
close(done1)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
resultNode2, ok2 = store.UpdateNode(2, func(n *types.Node) {
|
||||
n.Hostname = "batch-updated-node2"
|
||||
n.GivenName = "batch-given-2"
|
||||
})
|
||||
close(done2)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
node3 := createTestNode(3, 1, "user1", "node3")
|
||||
newNode3 = store.PutNode(node3)
|
||||
close(done3)
|
||||
}()
|
||||
|
||||
// Wait for all operations to complete
|
||||
<-done1
|
||||
<-done2
|
||||
<-done3
|
||||
|
||||
// Verify the returned nodes reflect the batch state
|
||||
assert.True(t, ok1, "UpdateNode should succeed for node 1")
|
||||
assert.True(t, ok2, "UpdateNode should succeed for node 2")
|
||||
assert.True(t, resultNode1.Valid())
|
||||
assert.True(t, resultNode2.Valid())
|
||||
assert.True(t, newNode3.Valid())
|
||||
|
||||
// Check that returned nodes have the updated values
|
||||
assert.Equal(t, "batch-updated-node1", resultNode1.Hostname())
|
||||
assert.Equal(t, "batch-given-1", resultNode1.GivenName())
|
||||
assert.Equal(t, "batch-updated-node2", resultNode2.Hostname())
|
||||
assert.Equal(t, "batch-given-2", resultNode2.GivenName())
|
||||
assert.Equal(t, "node3", newNode3.Hostname())
|
||||
|
||||
// Verify the snapshot also reflects all changes
|
||||
snapshot := store.data.Load()
|
||||
assert.Len(t, snapshot.nodesByID, 3)
|
||||
assert.Equal(t, "batch-updated-node1", snapshot.nodesByID[1].Hostname)
|
||||
assert.Equal(t, "batch-updated-node2", snapshot.nodesByID[2].Hostname)
|
||||
assert.Equal(t, "node3", snapshot.nodesByID[3].Hostname)
|
||||
|
||||
// Verify peer relationships are updated correctly with new node
|
||||
assert.Len(t, snapshot.peersByNode[1], 2) // sees nodes 2 and 3
|
||||
assert.Len(t, snapshot.peersByNode[2], 2) // sees nodes 1 and 3
|
||||
assert.Len(t, snapshot.peersByNode[3], 2) // sees nodes 1 and 2
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "update non-existent node returns invalid view",
|
||||
action: func(store *NodeStore) {
|
||||
resultNode, ok := store.UpdateNode(999, func(n *types.Node) {
|
||||
n.Hostname = "should-not-exist"
|
||||
})
|
||||
|
||||
assert.False(t, ok, "UpdateNode should return false for non-existent node")
|
||||
assert.False(t, resultNode.Valid(), "Result should be invalid NodeView")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple updates to same node in batch all see final state",
|
||||
action: func(store *NodeStore) {
|
||||
// This test verifies that when multiple updates to the same node
|
||||
// are batched together, each returned node reflects ALL changes
|
||||
// in the batch, not just the individual update's changes.
|
||||
|
||||
done1 := make(chan struct{})
|
||||
done2 := make(chan struct{})
|
||||
done3 := make(chan struct{})
|
||||
|
||||
var resultNode1, resultNode2, resultNode3 types.NodeView
|
||||
var ok1, ok2, ok3 bool
|
||||
|
||||
// These updates all modify node 1 and should be batched together
|
||||
// The final state should have all three modifications applied
|
||||
go func() {
|
||||
resultNode1, ok1 = store.UpdateNode(1, func(n *types.Node) {
|
||||
n.Hostname = "multi-update-hostname"
|
||||
})
|
||||
close(done1)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
resultNode2, ok2 = store.UpdateNode(1, func(n *types.Node) {
|
||||
n.GivenName = "multi-update-givenname"
|
||||
})
|
||||
close(done2)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
resultNode3, ok3 = store.UpdateNode(1, func(n *types.Node) {
|
||||
n.ForcedTags = []string{"tag1", "tag2"}
|
||||
})
|
||||
close(done3)
|
||||
}()
|
||||
|
||||
// Wait for all operations to complete
|
||||
<-done1
|
||||
<-done2
|
||||
<-done3
|
||||
|
||||
// All updates should succeed
|
||||
assert.True(t, ok1, "First update should succeed")
|
||||
assert.True(t, ok2, "Second update should succeed")
|
||||
assert.True(t, ok3, "Third update should succeed")
|
||||
|
||||
// CRITICAL: Each returned node should reflect ALL changes from the batch
|
||||
// not just the change from its specific update call
|
||||
|
||||
// resultNode1 (from hostname update) should also have the givenname and tags changes
|
||||
assert.Equal(t, "multi-update-hostname", resultNode1.Hostname())
|
||||
assert.Equal(t, "multi-update-givenname", resultNode1.GivenName())
|
||||
assert.Equal(t, []string{"tag1", "tag2"}, resultNode1.ForcedTags().AsSlice())
|
||||
|
||||
// resultNode2 (from givenname update) should also have the hostname and tags changes
|
||||
assert.Equal(t, "multi-update-hostname", resultNode2.Hostname())
|
||||
assert.Equal(t, "multi-update-givenname", resultNode2.GivenName())
|
||||
assert.Equal(t, []string{"tag1", "tag2"}, resultNode2.ForcedTags().AsSlice())
|
||||
|
||||
// resultNode3 (from tags update) should also have the hostname and givenname changes
|
||||
assert.Equal(t, "multi-update-hostname", resultNode3.Hostname())
|
||||
assert.Equal(t, "multi-update-givenname", resultNode3.GivenName())
|
||||
assert.Equal(t, []string{"tag1", "tag2"}, resultNode3.ForcedTags().AsSlice())
|
||||
|
||||
// Verify the snapshot also has all changes
|
||||
snapshot := store.data.Load()
|
||||
finalNode := snapshot.nodesByID[1]
|
||||
assert.Equal(t, "multi-update-hostname", finalNode.Hostname)
|
||||
assert.Equal(t, "multi-update-givenname", finalNode.GivenName)
|
||||
assert.Equal(t, []string{"tag1", "tag2"}, finalNode.ForcedTags)
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "test UpdateNode result is immutable for database save",
|
||||
setupFunc: func(t *testing.T) *NodeStore {
|
||||
node1 := createTestNode(1, 1, "user1", "node1")
|
||||
node2 := createTestNode(2, 1, "user1", "node2")
|
||||
initialNodes := types.Nodes{&node1, &node2}
|
||||
return NewNodeStore(initialNodes, allowAllPeersFunc)
|
||||
},
|
||||
steps: []testStep{
|
||||
{
|
||||
name: "verify returned node is complete and consistent",
|
||||
action: func(store *NodeStore) {
|
||||
// Update a node and verify the returned view is complete
|
||||
resultNode, ok := store.UpdateNode(1, func(n *types.Node) {
|
||||
n.Hostname = "db-save-hostname"
|
||||
n.GivenName = "db-save-given"
|
||||
n.ForcedTags = []string{"db-tag1", "db-tag2"}
|
||||
})
|
||||
|
||||
assert.True(t, ok, "UpdateNode should succeed")
|
||||
assert.True(t, resultNode.Valid(), "Result should be valid")
|
||||
|
||||
// Verify the returned node has all expected values
|
||||
assert.Equal(t, "db-save-hostname", resultNode.Hostname())
|
||||
assert.Equal(t, "db-save-given", resultNode.GivenName())
|
||||
assert.Equal(t, []string{"db-tag1", "db-tag2"}, resultNode.ForcedTags().AsSlice())
|
||||
|
||||
// Convert to struct as would be done for database save
|
||||
nodePtr := resultNode.AsStruct()
|
||||
assert.NotNil(t, nodePtr)
|
||||
assert.Equal(t, "db-save-hostname", nodePtr.Hostname)
|
||||
assert.Equal(t, "db-save-given", nodePtr.GivenName)
|
||||
assert.Equal(t, []string{"db-tag1", "db-tag2"}, nodePtr.ForcedTags)
|
||||
|
||||
// Verify the snapshot also reflects the same state
|
||||
snapshot := store.data.Load()
|
||||
storedNode := snapshot.nodesByID[1]
|
||||
assert.Equal(t, "db-save-hostname", storedNode.Hostname)
|
||||
assert.Equal(t, "db-save-given", storedNode.GivenName)
|
||||
assert.Equal(t, []string{"db-tag1", "db-tag2"}, storedNode.ForcedTags)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "concurrent updates all return consistent final state for DB save",
|
||||
action: func(store *NodeStore) {
|
||||
// Multiple goroutines updating the same node
|
||||
// All should receive the final batch state suitable for DB save
|
||||
done1 := make(chan struct{})
|
||||
done2 := make(chan struct{})
|
||||
done3 := make(chan struct{})
|
||||
|
||||
var result1, result2, result3 types.NodeView
|
||||
var ok1, ok2, ok3 bool
|
||||
|
||||
// Start concurrent updates
|
||||
go func() {
|
||||
result1, ok1 = store.UpdateNode(1, func(n *types.Node) {
|
||||
n.Hostname = "concurrent-db-hostname"
|
||||
})
|
||||
close(done1)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
result2, ok2 = store.UpdateNode(1, func(n *types.Node) {
|
||||
n.GivenName = "concurrent-db-given"
|
||||
})
|
||||
close(done2)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
result3, ok3 = store.UpdateNode(1, func(n *types.Node) {
|
||||
n.ForcedTags = []string{"concurrent-tag"}
|
||||
})
|
||||
close(done3)
|
||||
}()
|
||||
|
||||
// Wait for all to complete
|
||||
<-done1
|
||||
<-done2
|
||||
<-done3
|
||||
|
||||
assert.True(t, ok1 && ok2 && ok3, "All updates should succeed")
|
||||
|
||||
// All results should be valid and suitable for database save
|
||||
assert.True(t, result1.Valid())
|
||||
assert.True(t, result2.Valid())
|
||||
assert.True(t, result3.Valid())
|
||||
|
||||
// Convert each to struct as would be done for DB save
|
||||
nodePtr1 := result1.AsStruct()
|
||||
nodePtr2 := result2.AsStruct()
|
||||
nodePtr3 := result3.AsStruct()
|
||||
|
||||
// All should have the complete final state
|
||||
assert.Equal(t, "concurrent-db-hostname", nodePtr1.Hostname)
|
||||
assert.Equal(t, "concurrent-db-given", nodePtr1.GivenName)
|
||||
assert.Equal(t, []string{"concurrent-tag"}, nodePtr1.ForcedTags)
|
||||
|
||||
assert.Equal(t, "concurrent-db-hostname", nodePtr2.Hostname)
|
||||
assert.Equal(t, "concurrent-db-given", nodePtr2.GivenName)
|
||||
assert.Equal(t, []string{"concurrent-tag"}, nodePtr2.ForcedTags)
|
||||
|
||||
assert.Equal(t, "concurrent-db-hostname", nodePtr3.Hostname)
|
||||
assert.Equal(t, "concurrent-db-given", nodePtr3.GivenName)
|
||||
assert.Equal(t, []string{"concurrent-tag"}, nodePtr3.ForcedTags)
|
||||
|
||||
// Verify consistency with stored state
|
||||
snapshot := store.data.Load()
|
||||
storedNode := snapshot.nodesByID[1]
|
||||
assert.Equal(t, nodePtr1.Hostname, storedNode.Hostname)
|
||||
assert.Equal(t, nodePtr1.GivenName, storedNode.GivenName)
|
||||
assert.Equal(t, nodePtr1.ForcedTags, storedNode.ForcedTags)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "verify returned node preserves all fields for DB save",
|
||||
action: func(store *NodeStore) {
|
||||
// Get initial state
|
||||
snapshot := store.data.Load()
|
||||
originalNode := snapshot.nodesByID[2]
|
||||
originalIPv4 := originalNode.IPv4
|
||||
originalIPv6 := originalNode.IPv6
|
||||
originalCreatedAt := originalNode.CreatedAt
|
||||
originalUser := originalNode.User
|
||||
|
||||
// Update only hostname
|
||||
resultNode, ok := store.UpdateNode(2, func(n *types.Node) {
|
||||
n.Hostname = "preserve-test-hostname"
|
||||
})
|
||||
|
||||
assert.True(t, ok, "Update should succeed")
|
||||
|
||||
// Convert to struct for DB save
|
||||
nodeForDB := resultNode.AsStruct()
|
||||
|
||||
// Verify all fields are preserved
|
||||
assert.Equal(t, "preserve-test-hostname", nodeForDB.Hostname)
|
||||
assert.Equal(t, originalIPv4, nodeForDB.IPv4)
|
||||
assert.Equal(t, originalIPv6, nodeForDB.IPv6)
|
||||
assert.Equal(t, originalCreatedAt, nodeForDB.CreatedAt)
|
||||
assert.Equal(t, originalUser.Name, nodeForDB.User.Name)
|
||||
assert.Equal(t, types.NodeID(2), nodeForDB.ID)
|
||||
|
||||
// These fields should be suitable for direct database save
|
||||
assert.NotNil(t, nodeForDB.IPv4)
|
||||
assert.NotNil(t, nodeForDB.IPv6)
|
||||
assert.False(t, nodeForDB.CreatedAt.IsZero())
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
@@ -499,3 +839,302 @@ type testStep struct {
|
||||
name string
|
||||
action func(store *NodeStore)
|
||||
}
|
||||
|
||||
// --- Additional NodeStore concurrency, batching, race, resource, timeout, and allocation tests ---
|
||||
|
||||
// Helper for concurrent test nodes
|
||||
func createConcurrentTestNode(id types.NodeID, hostname string) types.Node {
|
||||
machineKey := key.NewMachine()
|
||||
nodeKey := key.NewNode()
|
||||
return types.Node{
|
||||
ID: id,
|
||||
Hostname: hostname,
|
||||
MachineKey: machineKey.Public(),
|
||||
NodeKey: nodeKey.Public(),
|
||||
UserID: 1,
|
||||
User: types.User{
|
||||
Name: "concurrent-test-user",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// --- Concurrency: concurrent PutNode operations ---
|
||||
func TestNodeStoreConcurrentPutNode(t *testing.T) {
|
||||
const concurrentOps = 20
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
results := make(chan bool, concurrentOps)
|
||||
for i := 0; i < concurrentOps; i++ {
|
||||
wg.Add(1)
|
||||
go func(nodeID int) {
|
||||
defer wg.Done()
|
||||
node := createConcurrentTestNode(types.NodeID(nodeID), "concurrent-node")
|
||||
resultNode := store.PutNode(node)
|
||||
results <- resultNode.Valid()
|
||||
}(i + 1)
|
||||
}
|
||||
wg.Wait()
|
||||
close(results)
|
||||
|
||||
successCount := 0
|
||||
for success := range results {
|
||||
if success {
|
||||
successCount++
|
||||
}
|
||||
}
|
||||
require.Equal(t, concurrentOps, successCount, "All concurrent PutNode operations should succeed")
|
||||
}
|
||||
|
||||
// --- Batching: concurrent ops fit in one batch ---
|
||||
func TestNodeStoreBatchingEfficiency(t *testing.T) {
|
||||
const batchSize = 10
|
||||
const ops = 15 // more than batchSize
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
results := make(chan bool, ops)
|
||||
for i := 0; i < ops; i++ {
|
||||
wg.Add(1)
|
||||
go func(nodeID int) {
|
||||
defer wg.Done()
|
||||
node := createConcurrentTestNode(types.NodeID(nodeID), "batch-node")
|
||||
resultNode := store.PutNode(node)
|
||||
results <- resultNode.Valid()
|
||||
}(i + 1)
|
||||
}
|
||||
wg.Wait()
|
||||
close(results)
|
||||
|
||||
successCount := 0
|
||||
for success := range results {
|
||||
if success {
|
||||
successCount++
|
||||
}
|
||||
}
|
||||
require.Equal(t, ops, successCount, "All batch PutNode operations should succeed")
|
||||
}
|
||||
|
||||
// --- Race conditions: many goroutines on same node ---
|
||||
func TestNodeStoreRaceConditions(t *testing.T) {
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
nodeID := types.NodeID(1)
|
||||
node := createConcurrentTestNode(nodeID, "race-node")
|
||||
resultNode := store.PutNode(node)
|
||||
require.True(t, resultNode.Valid())
|
||||
|
||||
const numGoroutines = 30
|
||||
const opsPerGoroutine = 10
|
||||
var wg sync.WaitGroup
|
||||
errors := make(chan error, numGoroutines*opsPerGoroutine)
|
||||
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
wg.Add(1)
|
||||
go func(gid int) {
|
||||
defer wg.Done()
|
||||
for j := 0; j < opsPerGoroutine; j++ {
|
||||
switch j % 3 {
|
||||
case 0:
|
||||
resultNode, _ := store.UpdateNode(nodeID, func(n *types.Node) {
|
||||
n.Hostname = "race-updated"
|
||||
})
|
||||
if !resultNode.Valid() {
|
||||
errors <- fmt.Errorf("UpdateNode failed in goroutine %d, op %d", gid, j)
|
||||
}
|
||||
case 1:
|
||||
retrieved, found := store.GetNode(nodeID)
|
||||
if !found || !retrieved.Valid() {
|
||||
errors <- fmt.Errorf("GetNode failed in goroutine %d, op %d", gid, j)
|
||||
}
|
||||
case 2:
|
||||
newNode := createConcurrentTestNode(nodeID, "race-put")
|
||||
resultNode := store.PutNode(newNode)
|
||||
if !resultNode.Valid() {
|
||||
errors <- fmt.Errorf("PutNode failed in goroutine %d, op %d", gid, j)
|
||||
}
|
||||
}
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
wg.Wait()
|
||||
close(errors)
|
||||
|
||||
errorCount := 0
|
||||
for err := range errors {
|
||||
t.Error(err)
|
||||
errorCount++
|
||||
}
|
||||
if errorCount > 0 {
|
||||
t.Fatalf("Race condition test failed with %d errors", errorCount)
|
||||
}
|
||||
}
|
||||
|
||||
// --- Resource cleanup: goroutine leak detection ---
|
||||
func TestNodeStoreResourceCleanup(t *testing.T) {
|
||||
// initialGoroutines := runtime.NumGoroutine()
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
afterStartGoroutines := runtime.NumGoroutine()
|
||||
|
||||
const ops = 100
|
||||
for i := 0; i < ops; i++ {
|
||||
nodeID := types.NodeID(i + 1)
|
||||
node := createConcurrentTestNode(nodeID, "cleanup-node")
|
||||
resultNode := store.PutNode(node)
|
||||
assert.True(t, resultNode.Valid())
|
||||
store.UpdateNode(nodeID, func(n *types.Node) {
|
||||
n.Hostname = "cleanup-updated"
|
||||
})
|
||||
retrieved, found := store.GetNode(nodeID)
|
||||
assert.True(t, found && retrieved.Valid())
|
||||
if i%10 == 9 {
|
||||
store.DeleteNode(nodeID)
|
||||
}
|
||||
}
|
||||
runtime.GC()
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
finalGoroutines := runtime.NumGoroutine()
|
||||
if finalGoroutines > afterStartGoroutines+2 {
|
||||
t.Errorf("Potential goroutine leak: started with %d, ended with %d", afterStartGoroutines, finalGoroutines)
|
||||
}
|
||||
}
|
||||
|
||||
// --- Timeout/deadlock: operations complete within reasonable time ---
|
||||
func TestNodeStoreOperationTimeout(t *testing.T) {
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
|
||||
const ops = 30
|
||||
var wg sync.WaitGroup
|
||||
putResults := make([]error, ops)
|
||||
updateResults := make([]error, ops)
|
||||
|
||||
// Launch all PutNode operations concurrently
|
||||
for i := 1; i <= ops; i++ {
|
||||
nodeID := types.NodeID(i)
|
||||
wg.Add(1)
|
||||
go func(idx int, id types.NodeID) {
|
||||
defer wg.Done()
|
||||
startPut := time.Now()
|
||||
fmt.Printf("[TestNodeStoreOperationTimeout] %s: PutNode(%d) starting\n", startPut.Format("15:04:05.000"), id)
|
||||
node := createConcurrentTestNode(id, "timeout-node")
|
||||
resultNode := store.PutNode(node)
|
||||
endPut := time.Now()
|
||||
fmt.Printf("[TestNodeStoreOperationTimeout] %s: PutNode(%d) finished, valid=%v, duration=%v\n", endPut.Format("15:04:05.000"), id, resultNode.Valid(), endPut.Sub(startPut))
|
||||
if !resultNode.Valid() {
|
||||
putResults[idx-1] = fmt.Errorf("PutNode failed for node %d", id)
|
||||
}
|
||||
}(i, nodeID)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Launch all UpdateNode operations concurrently
|
||||
wg = sync.WaitGroup{}
|
||||
for i := 1; i <= ops; i++ {
|
||||
nodeID := types.NodeID(i)
|
||||
wg.Add(1)
|
||||
go func(idx int, id types.NodeID) {
|
||||
defer wg.Done()
|
||||
startUpdate := time.Now()
|
||||
fmt.Printf("[TestNodeStoreOperationTimeout] %s: UpdateNode(%d) starting\n", startUpdate.Format("15:04:05.000"), id)
|
||||
resultNode, ok := store.UpdateNode(id, func(n *types.Node) {
|
||||
n.Hostname = "timeout-updated"
|
||||
})
|
||||
endUpdate := time.Now()
|
||||
fmt.Printf("[TestNodeStoreOperationTimeout] %s: UpdateNode(%d) finished, valid=%v, ok=%v, duration=%v\n", endUpdate.Format("15:04:05.000"), id, resultNode.Valid(), ok, endUpdate.Sub(startUpdate))
|
||||
if !ok || !resultNode.Valid() {
|
||||
updateResults[idx-1] = fmt.Errorf("UpdateNode failed for node %d", id)
|
||||
}
|
||||
}(i, nodeID)
|
||||
}
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(done)
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
errorCount := 0
|
||||
for _, err := range putResults {
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
errorCount++
|
||||
}
|
||||
}
|
||||
for _, err := range updateResults {
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
errorCount++
|
||||
}
|
||||
}
|
||||
if errorCount == 0 {
|
||||
t.Log("All concurrent operations completed successfully within timeout")
|
||||
} else {
|
||||
t.Fatalf("Some concurrent operations failed: %d errors", errorCount)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
fmt.Println("[TestNodeStoreOperationTimeout] Timeout reached, test failed")
|
||||
t.Fatal("Operations timed out - potential deadlock or resource issue")
|
||||
}
|
||||
}
|
||||
|
||||
// --- Edge case: update non-existent node ---
|
||||
func TestNodeStoreUpdateNonExistentNode(t *testing.T) {
|
||||
for i := 0; i < 10; i++ {
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
nonExistentID := types.NodeID(999 + i)
|
||||
updateCallCount := 0
|
||||
fmt.Printf("[TestNodeStoreUpdateNonExistentNode] UpdateNode(%d) starting\n", nonExistentID)
|
||||
resultNode, ok := store.UpdateNode(nonExistentID, func(n *types.Node) {
|
||||
updateCallCount++
|
||||
n.Hostname = "should-never-be-called"
|
||||
})
|
||||
fmt.Printf("[TestNodeStoreUpdateNonExistentNode] UpdateNode(%d) finished, valid=%v, ok=%v, updateCallCount=%d\n", nonExistentID, resultNode.Valid(), ok, updateCallCount)
|
||||
assert.False(t, ok, "UpdateNode should return false for non-existent node")
|
||||
assert.False(t, resultNode.Valid(), "UpdateNode should return invalid node for non-existent node")
|
||||
assert.Equal(t, 0, updateCallCount, "UpdateFn should not be called for non-existent node")
|
||||
store.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
// --- Allocation benchmark ---
|
||||
func BenchmarkNodeStoreAllocations(b *testing.B) {
|
||||
store := NewNodeStore(nil, allowAllPeersFunc)
|
||||
store.Start()
|
||||
defer store.Stop()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
nodeID := types.NodeID(i + 1)
|
||||
node := createConcurrentTestNode(nodeID, "bench-node")
|
||||
store.PutNode(node)
|
||||
store.UpdateNode(nodeID, func(n *types.Node) {
|
||||
n.Hostname = "bench-updated"
|
||||
})
|
||||
store.GetNode(nodeID)
|
||||
if i%10 == 9 {
|
||||
store.DeleteNode(nodeID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeStoreAllocationStats(t *testing.T) {
|
||||
res := testing.Benchmark(BenchmarkNodeStoreAllocations)
|
||||
allocs := res.AllocsPerOp()
|
||||
t.Logf("NodeStore allocations per op: %.2f", float64(allocs))
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -3,6 +3,7 @@ package change
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
)
|
||||
@@ -68,6 +69,9 @@ type ChangeSet struct {
|
||||
|
||||
// IsSubnetRouter indicates whether the node is a subnet router.
|
||||
IsSubnetRouter bool
|
||||
|
||||
// NodeExpiry is set if the change is NodeKeyExpiry.
|
||||
NodeExpiry *time.Time
|
||||
}
|
||||
|
||||
func (c *ChangeSet) Validate() error {
|
||||
@@ -126,6 +130,11 @@ func RemoveUpdatesForSelf(id types.NodeID, cs []ChangeSet) (ret []ChangeSet) {
|
||||
return ret
|
||||
}
|
||||
|
||||
// IsSelfUpdate reports whether this ChangeSet represents an update to the given node itself.
|
||||
func (c ChangeSet) IsSelfUpdate(nodeID types.NodeID) bool {
|
||||
return c.NodeID == nodeID
|
||||
}
|
||||
|
||||
func (c ChangeSet) AlsoSelf() bool {
|
||||
// If NodeID is 0, it means this ChangeSet is not related to a specific node,
|
||||
// so we consider it as a change that should be sent to all nodes.
|
||||
@@ -179,10 +188,11 @@ func NodeOffline(id types.NodeID) ChangeSet {
|
||||
}
|
||||
}
|
||||
|
||||
-func KeyExpiry(id types.NodeID) ChangeSet {
+func KeyExpiry(id types.NodeID, expiry time.Time) ChangeSet {
	return ChangeSet{
-		Change: NodeKeyExpiry,
-		NodeID: id,
+		Change:     NodeKeyExpiry,
+		NodeID:     id,
+		NodeExpiry: &expiry,
|
||||
}
|
||||
}
|
||||
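KeyExpiry change sets now carry the expiry timestamp itself, so consumers of the change stream can tell the affected client when its key runs out instead of only that it changed. A minimal sketch of constructing one with the new signature (the surrounding batcher plumbing is assumed, and the node ID and duration are illustrative values):

	// Sketch only: emit a key-expiry change carrying the expiry time.
	// types.NodeID and change.KeyExpiry are from this repository.
	expiry := time.Now().Add(24 * time.Hour)
	cs := change.KeyExpiry(types.NodeID(1), expiry)
	// cs.NodeExpiry now points at the expiry and travels with the ChangeSet.
	_ = cs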
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
@@ -186,6 +187,28 @@ func (r RegistrationID) String() string {
|
||||
type RegisterNode struct {
|
||||
Node Node
|
||||
Registered chan *Node
|
||||
closed *atomic.Bool
|
||||
}
|
||||
|
||||
func NewRegisterNode(node Node) RegisterNode {
|
||||
return RegisterNode{
|
||||
Node: node,
|
||||
Registered: make(chan *Node),
|
||||
closed: &atomic.Bool{},
|
||||
}
|
||||
}
|
||||
|
||||
func (rn *RegisterNode) SendAndClose(node *Node) {
|
||||
if rn.closed.Swap(true) {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case rn.Registered <- node:
|
||||
default:
|
||||
}
|
||||
|
||||
close(rn.Registered)
|
||||
}
|
||||
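SendAndClose is safe to call more than once: the atomic swap lets only the first call proceed, the non-blocking send avoids hanging when no receiver is waiting, and the channel is closed afterwards either way. The receiving side therefore has to treat a closed-but-empty channel as a failed registration, roughly like this (sketch only; the caller-side error handling is assumed):

	// Sketch only: receiving from RegisterNode.Registered.
	// A nil node or a closed channel means the registration never completed.
	node, ok := <-rn.Registered
	if !ok || node == nil {
		return fmt.Errorf("node registration was not completed")
	}
	// continue with the registered node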
|
||||
// DefaultBatcherWorkers returns the default number of batcher workers.
|
||||
|
||||
@@ -235,6 +235,8 @@ type Tuning struct {
|
||||
BatchChangeDelay time.Duration
|
||||
NodeMapSessionBufferedChanSize int
|
||||
BatcherWorkers int
|
||||
RegisterCacheCleanup time.Duration
|
||||
RegisterCacheExpiration time.Duration
|
||||
}
|
||||
|
||||
func validatePKCEMethod(method string) error {
|
||||
@@ -338,7 +340,7 @@ func LoadConfig(path string, isFile bool) error {
|
||||
viper.SetDefault("prefixes.allocation", string(IPAllocationStrategySequential))
|
||||
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
-		if errors.Is(err, fs.ErrNotExist) {
+		if _, ok := err.(viper.ConfigFileNotFoundError); ok {
|
||||
log.Warn().Msg("No config file found, using defaults")
|
||||
return nil
|
||||
}
|
||||
@@ -1002,6 +1004,8 @@ func LoadServerConfig() (*Config, error) {
|
||||
}
|
||||
return DefaultBatcherWorkers()
|
||||
}(),
|
||||
RegisterCacheCleanup: viper.GetDuration("tuning.register_cache_cleanup"),
|
||||
RegisterCacheExpiration: viper.GetDuration("tuning.register_cache_expiration"),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"regexp"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
@@ -27,6 +28,8 @@ var (
|
||||
ErrHostnameTooLong = errors.New("hostname too long, cannot except 255 ASCII chars")
|
||||
ErrNodeHasNoGivenName = errors.New("node has no given name")
|
||||
ErrNodeUserHasNoName = errors.New("node user has no name")
|
||||
|
||||
invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+")
|
||||
)
|
||||
|
||||
type (
|
||||
@@ -144,7 +147,10 @@ func (ns Nodes) ViewSlice() views.Slice[NodeView] {
|
||||
|
||||
// GivenNameHasBeenChanged returns whether the `givenName` can be automatically changed based on the `Hostname` of the node.
|
||||
func (node *Node) GivenNameHasBeenChanged() bool {
|
||||
-	return node.GivenName == util.ConvertWithFQDNRules(node.Hostname)
+	// Strip invalid DNS characters for givenName comparison
+	normalised := strings.ToLower(node.Hostname)
+	normalised = invalidDNSRegex.ReplaceAllString(normalised, "")
+	return node.GivenName == normalised
|
||||
}
|
||||
|
||||
// IsExpired returns whether the node registration has expired.
|
||||
@@ -531,20 +537,34 @@ func (node *Node) ApplyHostnameFromHostInfo(hostInfo *tailcfg.Hostinfo) {
|
||||
return
|
||||
}
|
||||
|
||||
-	if node.Hostname != hostInfo.Hostname {
+	newHostname := strings.ToLower(hostInfo.Hostname)
+	if err := util.ValidateHostname(newHostname); err != nil {
+		log.Warn().
+			Str("node.id", node.ID.String()).
+			Str("current_hostname", node.Hostname).
+			Str("rejected_hostname", hostInfo.Hostname).
+			Err(err).
+			Msg("Rejecting invalid hostname update from hostinfo")
+		return
+	}
+
+	if node.Hostname != newHostname {
		log.Trace().
			Str("node.id", node.ID.String()).
			Str("old_hostname", node.Hostname).
-			Str("new_hostname", hostInfo.Hostname).
+			Str("new_hostname", newHostname).
			Str("old_given_name", node.GivenName).
			Bool("given_name_changed", node.GivenNameHasBeenChanged()).
			Msg("Updating hostname from hostinfo")

		if node.GivenNameHasBeenChanged() {
-			node.GivenName = util.ConvertWithFQDNRules(hostInfo.Hostname)
+			// Strip invalid DNS characters for givenName display
+			givenName := strings.ToLower(newHostname)
+			givenName = invalidDNSRegex.ReplaceAllString(givenName, "")
+			node.GivenName = givenName
		}

-		node.Hostname = hostInfo.Hostname
+		node.Hostname = newHostname
|
||||
|
||||
log.Trace().
|
||||
Str("node.id", node.ID.String()).
|
||||
@@ -638,6 +658,11 @@ func (node Node) DebugString() string {
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func (v NodeView) UserView() UserView {
|
||||
u := v.User()
|
||||
return u.View()
|
||||
}
|
||||
|
||||
func (v NodeView) IPs() []netip.Addr {
|
||||
if !v.Valid() {
|
||||
return nil
|
||||
@@ -830,3 +855,22 @@ func (v NodeView) IPsAsString() []string {
|
||||
}
|
||||
return v.ж.IPsAsString()
|
||||
}
|
||||
|
||||
// HasNetworkChanges checks if the node has network-related changes.
|
||||
// Returns true if IPs, announced routes, or approved routes changed.
|
||||
// This is primarily used for policy cache invalidation.
|
||||
func (v NodeView) HasNetworkChanges(other NodeView) bool {
|
||||
if !slices.Equal(v.IPs(), other.IPs()) {
|
||||
return true
|
||||
}
|
||||
|
||||
if !slices.Equal(v.AnnouncedRoutes(), other.AnnouncedRoutes()) {
|
||||
return true
|
||||
}
|
||||
|
||||
if !slices.Equal(v.SubnetRoutes(), other.SubnetRoutes()) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
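HasNetworkChanges deliberately ignores everything except addresses and routes, so a caller can use it as a cheap gate before doing the expensive policy work. A rough sketch of such a call site (invalidatePolicyCacheFor is a hypothetical helper for illustration, not an API in this repository):

	// Sketch only: skip policy recomputation when nothing network-related changed.
	oldView := oldNode.View()
	newView := newNode.View()
	if newView.HasNetworkChanges(oldView) {
		invalidatePolicyCacheFor(newNode.ID) // hypothetical helper
	}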
@@ -369,7 +369,7 @@ func TestApplyHostnameFromHostInfo(t *testing.T) {
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "manual-test.local",
|
||||
-				Hostname:  "NewHostName.Local",
+				Hostname:  "newhostname.local",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -383,7 +383,245 @@ func TestApplyHostnameFromHostInfo(t *testing.T) {
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "newhostname.local",
|
||||
-				Hostname:  "NewHostName.Local",
+				Hostname:  "newhostname.local",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid-hostname-with-emoji-rejected",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "hostname-with-💩",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname", // Should reject and keep old hostname
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid-hostname-with-unicode-rejected",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "我的电脑",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname", // Should keep old hostname
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid-hostname-with-special-chars-rejected",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "node-with-special!@#$%",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname", // Should reject and keep old hostname
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid-hostname-too-short-rejected",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "a",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname", // Should keep old hostname
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid-hostname-uppercase-accepted-lowercased",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "ValidHostName",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "validhostname", // GivenName follows hostname when it changes
|
||||
Hostname: "validhostname", // Uppercase is lowercased, not rejected
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "uppercase_to_lowercase_accepted",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "User2-Host",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "user2-host",
|
||||
Hostname: "user2-host",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "at_sign_rejected",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "Test@Host",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "chinese_chars_with_dash_rejected",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "server-北京-01",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "chinese_only_rejected",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "我的电脑",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "emoji_with_text_rejected",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "laptop-🚀",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mixed_chinese_emoji_rejected",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "测试💻机器",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only_emojis_rejected",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "🎉🎊",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only_at_signs_rejected",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "@@@",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "starts_with_dash_rejected",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "-test",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ends_with_dash_rejected",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "test-",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "too_long_hostname_rejected",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: strings.Repeat("t", 65),
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "underscore_rejected",
|
||||
nodeBefore: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "test_node",
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
Hostname: "valid-hostname",
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -555,3 +793,179 @@ func TestNodeRegisterMethodToV1Enum(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestHasNetworkChanges tests the NodeView method for detecting
|
||||
// when a node's network properties have changed.
|
||||
func TestHasNetworkChanges(t *testing.T) {
|
||||
mustIPPtr := func(s string) *netip.Addr {
|
||||
ip := netip.MustParseAddr(s)
|
||||
return &ip
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
old *Node
|
||||
new *Node
|
||||
changed bool
|
||||
}{
|
||||
{
|
||||
name: "no changes",
|
||||
old: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
IPv6: mustIPPtr("fd7a:115c:a1e0::1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}},
|
||||
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
|
||||
},
|
||||
new: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
IPv6: mustIPPtr("fd7a:115c:a1e0::1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}},
|
||||
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
|
||||
},
|
||||
changed: false,
|
||||
},
|
||||
{
|
||||
name: "IPv4 changed",
|
||||
old: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
IPv6: mustIPPtr("fd7a:115c:a1e0::1"),
|
||||
},
|
||||
new: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.2"),
|
||||
IPv6: mustIPPtr("fd7a:115c:a1e0::1"),
|
||||
},
|
||||
changed: true,
|
||||
},
|
||||
{
|
||||
name: "IPv6 changed",
|
||||
old: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
IPv6: mustIPPtr("fd7a:115c:a1e0::1"),
|
||||
},
|
||||
new: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
IPv6: mustIPPtr("fd7a:115c:a1e0::2"),
|
||||
},
|
||||
changed: true,
|
||||
},
|
||||
{
|
||||
name: "RoutableIPs added",
|
||||
old: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
},
|
||||
new: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}},
|
||||
},
|
||||
changed: true,
|
||||
},
|
||||
{
|
||||
name: "RoutableIPs removed",
|
||||
old: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}},
|
||||
},
|
||||
new: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
},
|
||||
changed: true,
|
||||
},
|
||||
{
|
||||
name: "RoutableIPs changed",
|
||||
old: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}},
|
||||
},
|
||||
new: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}},
|
||||
},
|
||||
changed: true,
|
||||
},
|
||||
{
|
||||
name: "SubnetRoutes added",
|
||||
old: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}},
|
||||
ApprovedRoutes: []netip.Prefix{},
|
||||
},
|
||||
new: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}},
|
||||
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
|
||||
},
|
||||
changed: true,
|
||||
},
|
||||
{
|
||||
name: "SubnetRoutes removed",
|
||||
old: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}},
|
||||
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
|
||||
},
|
||||
new: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}},
|
||||
ApprovedRoutes: []netip.Prefix{},
|
||||
},
|
||||
changed: true,
|
||||
},
|
||||
{
|
||||
name: "SubnetRoutes changed",
|
||||
old: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}},
|
||||
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")},
|
||||
},
|
||||
new: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}},
|
||||
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
|
||||
},
|
||||
changed: true,
|
||||
},
|
||||
{
|
||||
name: "irrelevant property changed (Hostname)",
|
||||
old: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostname: "old-name",
|
||||
},
|
||||
new: &Node{
|
||||
ID: 1,
|
||||
IPv4: mustIPPtr("100.64.0.1"),
|
||||
Hostname: "new-name",
|
||||
},
|
||||
changed: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := tt.new.View().HasNetworkChanges(tt.old.View())
|
||||
if got != tt.changed {
|
||||
t.Errorf("HasNetworkChanges() = %v, want %v", got, tt.changed)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -104,27 +104,31 @@ func (u *User) profilePicURL() string {
|
||||
return u.ProfilePicURL
|
||||
}
|
||||
|
||||
func (u *User) TailscaleUser() *tailcfg.User {
|
||||
user := tailcfg.User{
|
||||
func (u *User) TailscaleUser() tailcfg.User {
|
||||
return tailcfg.User{
|
||||
ID: tailcfg.UserID(u.ID),
|
||||
DisplayName: u.Display(),
|
||||
ProfilePicURL: u.profilePicURL(),
|
||||
Created: u.CreatedAt,
|
||||
}
|
||||
|
||||
return &user
|
||||
}
|
||||
|
||||
func (u *User) TailscaleLogin() *tailcfg.Login {
|
||||
login := tailcfg.Login{
|
||||
func (u UserView) TailscaleUser() tailcfg.User {
|
||||
return u.ж.TailscaleUser()
|
||||
}
|
||||
|
||||
func (u *User) TailscaleLogin() tailcfg.Login {
|
||||
return tailcfg.Login{
|
||||
ID: tailcfg.LoginID(u.ID),
|
||||
Provider: u.Provider,
|
||||
LoginName: u.Username(),
|
||||
DisplayName: u.Display(),
|
||||
ProfilePicURL: u.profilePicURL(),
|
||||
}
|
||||
}
|
||||
|
||||
return &login
|
||||
func (u UserView) TailscaleLogin() tailcfg.Login {
|
||||
return u.ж.TailscaleLogin()
|
||||
}
|
||||
|
||||
func (u *User) TailscaleUserProfile() tailcfg.UserProfile {
|
||||
@@ -136,6 +140,10 @@ func (u *User) TailscaleUserProfile() tailcfg.UserProfile {
|
||||
}
|
||||
}
|
||||
|
||||
func (u UserView) TailscaleUserProfile() tailcfg.UserProfile {
|
||||
return u.ж.TailscaleUserProfile()
|
||||
}
|
||||
|
||||
func (u *User) Proto() *v1.User {
|
||||
return &v1.User{
|
||||
Id: uint64(u.ID),
|
||||
|
||||
@@ -27,7 +27,7 @@ var (
|
||||
invalidCharsInUserRegex = regexp.MustCompile("[^a-z0-9-.]+")
|
||||
)
|
||||
|
||||
var ErrInvalidUserName = errors.New("invalid user name")
|
||||
var ErrInvalidHostName = errors.New("invalid hostname")
|
||||
|
||||
// ValidateUsername checks if a username is valid.
|
||||
// It must be at least 2 characters long, start with a letter, and contain
|
||||
@@ -67,42 +67,86 @@ func ValidateUsername(username string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func CheckForFQDNRules(name string) error {
|
||||
// Ensure the username meets the minimum length requirement
|
||||
// ValidateHostname checks if a hostname meets DNS requirements.
|
||||
// This function does NOT modify the input - it only validates.
|
||||
// The hostname must already be lowercase and contain only valid characters.
|
||||
func ValidateHostname(name string) error {
|
||||
if len(name) < 2 {
|
||||
return errors.New("name must be at least 2 characters long")
|
||||
return fmt.Errorf(
|
||||
"hostname %q is too short, must be at least 2 characters",
|
||||
name,
|
||||
)
|
||||
}
|
||||
|
||||
if len(name) > LabelHostnameLength {
|
||||
return fmt.Errorf(
|
||||
"DNS segment must not be over 63 chars. %v doesn't comply with this rule: %w",
|
||||
"hostname %q is too long, must not exceed 63 characters",
|
||||
name,
|
||||
ErrInvalidUserName,
|
||||
)
|
||||
}
|
||||
if strings.ToLower(name) != name {
|
||||
return fmt.Errorf(
|
||||
"DNS segment should be lowercase. %v doesn't comply with this rule: %w",
|
||||
"hostname %q must be lowercase (try %q)",
|
||||
name,
|
||||
strings.ToLower(name),
|
||||
)
|
||||
}
|
||||
if strings.HasPrefix(name, "-") || strings.HasSuffix(name, "-") {
|
||||
return fmt.Errorf(
|
||||
"hostname %q cannot start or end with a hyphen",
|
||||
name,
|
||||
)
|
||||
}
|
||||
if strings.HasPrefix(name, ".") || strings.HasSuffix(name, ".") {
|
||||
return fmt.Errorf(
|
||||
"hostname %q cannot start or end with a dot",
|
||||
name,
|
||||
ErrInvalidUserName,
|
||||
)
|
||||
}
|
||||
if invalidDNSRegex.MatchString(name) {
|
||||
return fmt.Errorf(
|
||||
"DNS segment should only be composed of lowercase ASCII letters numbers, hyphen and dots. %v doesn't comply with these rules: %w",
|
||||
"hostname %q contains invalid characters, only lowercase letters, numbers, hyphens and dots are allowed",
|
||||
name,
|
||||
ErrInvalidUserName,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ConvertWithFQDNRules(name string) string {
|
||||
// NormaliseHostname transforms a string into a valid DNS hostname.
|
||||
// Returns error if the transformation results in an invalid hostname.
|
||||
//
|
||||
// Transformations applied:
|
||||
// - Converts to lowercase
|
||||
// - Removes invalid DNS characters
|
||||
// - Truncates to 63 characters if needed
|
||||
//
|
||||
// After transformation, validates the result.
|
||||
func NormaliseHostname(name string) (string, error) {
|
||||
// Early return if already valid
|
||||
if err := ValidateHostname(name); err == nil {
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// Transform to lowercase
|
||||
name = strings.ToLower(name)
|
||||
|
||||
// Strip invalid DNS characters
|
||||
name = invalidDNSRegex.ReplaceAllString(name, "")
|
||||
|
||||
return name
|
||||
// Truncate to DNS label limit
|
||||
if len(name) > LabelHostnameLength {
|
||||
name = name[:LabelHostnameLength]
|
||||
}
|
||||
|
||||
// Validate result after transformation
|
||||
if err := ValidateHostname(name); err != nil {
|
||||
return "", fmt.Errorf(
|
||||
"hostname invalid after normalisation: %w",
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
return name, nil
|
||||
}
|
||||
|
||||
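With this split, ValidateHostname only reports problems while NormaliseHostname tries to repair them and re-validates the result. A small usage sketch from outside the util package, using one of the inputs exercised in the tests below:

	// Sketch only: turning arbitrary client input into a usable DNS label.
	hostname, err := util.NormaliseHostname("Invalid-CapItaLIzed-user")
	if err != nil {
		// e.g. everything was stripped, or the result starts/ends with a hyphen
		return err
	}
	// hostname == "invalid-capitalized-user"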
// generateMagicDNSRootDomains generates a list of DNS entries to be included in `Routes` in `MapResponse`.
|
||||
|
||||
@@ -2,6 +2,7 @@ package util
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -9,94 +10,173 @@ import (
|
||||
"tailscale.com/util/must"
|
||||
)
|
||||
|
||||
func TestCheckForFQDNRules(t *testing.T) {
|
||||
func TestNormaliseHostname(t *testing.T) {
|
||||
type args struct {
|
||||
name string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "valid: user",
|
||||
name: "valid: lowercase user",
|
||||
args: args{name: "valid-user"},
|
||||
want: "valid-user",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid: capitalized user",
|
||||
name: "normalise: capitalized user",
|
||||
args: args{name: "Invalid-CapItaLIzed-user"},
|
||||
wantErr: true,
|
||||
want: "invalid-capitalized-user",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid: email as user",
|
||||
name: "normalise: email as user",
|
||||
args: args{name: "foo.bar@example.com"},
|
||||
wantErr: true,
|
||||
want: "foo.barexample.com",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid: chars in user name",
|
||||
name: "normalise: chars in user name",
|
||||
args: args{name: "super-user+name"},
|
||||
wantErr: true,
|
||||
want: "super-username",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid: too long name for user",
|
||||
name: "invalid: too long name truncated leaves trailing hyphen",
|
||||
args: args{
|
||||
name: "super-long-useruseruser-name-that-should-be-a-little-more-than-63-chars",
|
||||
},
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid: emoji stripped leaves trailing hyphen",
|
||||
args: args{name: "hostname-with-💩"},
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "normalise: multiple emojis stripped",
|
||||
args: args{name: "node-🎉-🚀-test"},
|
||||
want: "node---test",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid: only emoji becomes empty",
|
||||
args: args{name: "💩"},
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid: emoji at start leaves leading hyphen",
|
||||
args: args{name: "🚀-rocket-node"},
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid: emoji at end leaves trailing hyphen",
|
||||
args: args{name: "node-test-🎉"},
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if err := CheckForFQDNRules(tt.args.name); (err != nil) != tt.wantErr {
|
||||
t.Errorf("CheckForFQDNRules() error = %v, wantErr %v", err, tt.wantErr)
|
||||
got, err := NormaliseHostname(tt.args.name)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("NormaliseHostname() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !tt.wantErr && got != tt.want {
|
||||
t.Errorf("NormaliseHostname() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertWithFQDNRules(t *testing.T) {
|
||||
func TestValidateHostname(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
hostname string
|
||||
dnsHostName string
|
||||
name string
|
||||
hostname string
|
||||
wantErr bool
|
||||
errorContains string
|
||||
}{
|
||||
{
|
||||
name: "User1.test",
|
||||
hostname: "User1.Test",
|
||||
dnsHostName: "user1.test",
|
||||
name: "valid lowercase",
|
||||
hostname: "valid-hostname",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "User'1$2.test",
|
||||
hostname: "User'1$2.Test",
|
||||
dnsHostName: "user12.test",
|
||||
name: "uppercase rejected",
|
||||
hostname: "MyHostname",
|
||||
wantErr: true,
|
||||
errorContains: "must be lowercase",
|
||||
},
|
||||
{
|
||||
name: "User-^_12.local.test",
|
||||
hostname: "User-^_12.local.Test",
|
||||
dnsHostName: "user-12.local.test",
|
||||
name: "too short",
|
||||
hostname: "a",
|
||||
wantErr: true,
|
||||
errorContains: "too short",
|
||||
},
|
||||
{
|
||||
name: "User-MacBook-Pro",
|
||||
hostname: "User-MacBook-Pro",
|
||||
dnsHostName: "user-macbook-pro",
|
||||
name: "too long",
|
||||
hostname: "a" + strings.Repeat("b", 63),
|
||||
wantErr: true,
|
||||
errorContains: "too long",
|
||||
},
|
||||
{
|
||||
name: "User-Linux-Ubuntu/Fedora",
|
||||
hostname: "User-Linux-Ubuntu/Fedora",
|
||||
dnsHostName: "user-linux-ubuntufedora",
|
||||
name: "emoji rejected",
|
||||
hostname: "hostname-💩",
|
||||
wantErr: true,
|
||||
errorContains: "invalid characters",
|
||||
},
|
||||
{
|
||||
name: "User-[Space]123",
|
||||
hostname: "User-[ ]123",
|
||||
dnsHostName: "user-123",
|
||||
name: "starts with hyphen",
|
||||
hostname: "-hostname",
|
||||
wantErr: true,
|
||||
errorContains: "cannot start or end with a hyphen",
|
||||
},
|
||||
{
|
||||
name: "ends with hyphen",
|
||||
hostname: "hostname-",
|
||||
wantErr: true,
|
||||
errorContains: "cannot start or end with a hyphen",
|
||||
},
|
||||
{
|
||||
name: "starts with dot",
|
||||
hostname: ".hostname",
|
||||
wantErr: true,
|
||||
errorContains: "cannot start or end with a dot",
|
||||
},
|
||||
{
|
||||
name: "ends with dot",
|
||||
hostname: "hostname.",
|
||||
wantErr: true,
|
||||
errorContains: "cannot start or end with a dot",
|
||||
},
|
||||
{
|
||||
name: "special characters",
|
||||
hostname: "host!@#$name",
|
||||
wantErr: true,
|
||||
errorContains: "invalid characters",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fqdnHostName := ConvertWithFQDNRules(tt.hostname)
|
||||
assert.Equal(t, tt.dnsHostName, fqdnHostName)
|
||||
err := ValidateHostname(tt.hostname)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ValidateHostname() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if tt.wantErr && tt.errorContains != "" {
|
||||
if err == nil || !strings.Contains(err.Error(), tt.errorContains) {
|
||||
t.Errorf("ValidateHostname() error = %v, should contain %q", err, tt.errorContains)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,6 +66,11 @@ func MustGenerateRandomStringDNSSafe(size int) string {
|
||||
return hash
|
||||
}
|
||||
|
||||
func InvalidString() string {
|
||||
hash, _ := GenerateRandomStringDNSSafe(8)
|
||||
return "invalid-" + hash
|
||||
}
|
||||
|
||||
func TailNodesToString(nodes []*tailcfg.Node) string {
|
||||
temp := make([]string, len(nodes))
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
@@ -11,6 +12,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/cmpver"
|
||||
)
|
||||
|
||||
@@ -258,3 +260,37 @@ func IsCI() bool {
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// SafeHostname extracts a hostname from Hostinfo, providing sensible defaults
|
||||
// if Hostinfo is nil or Hostname is empty. This prevents nil pointer dereferences
|
||||
// and ensures nodes always have a valid hostname.
|
||||
// The hostname is truncated to 63 characters to comply with DNS label length limits (RFC 1123).
|
||||
// EnsureHostname guarantees a valid hostname for node registration.
|
||||
// This function never fails - it always returns a valid hostname.
|
||||
//
|
||||
// Strategy:
|
||||
// 1. If hostinfo is nil/empty → generate default from keys
|
||||
// 2. If hostname is provided → normalise it
|
||||
// 3. If normalisation fails → generate invalid-<random> replacement
|
||||
//
|
||||
// Returns the guaranteed-valid hostname to use.
|
||||
func EnsureHostname(hostinfo *tailcfg.Hostinfo, machineKey, nodeKey string) string {
|
||||
if hostinfo == nil || hostinfo.Hostname == "" {
|
||||
key := cmp.Or(machineKey, nodeKey)
|
||||
if key == "" {
|
||||
return "unknown-node"
|
||||
}
|
||||
keyPrefix := key
|
||||
if len(key) > 8 {
|
||||
keyPrefix = key[:8]
|
||||
}
|
||||
return fmt.Sprintf("node-%s", keyPrefix)
|
||||
}
|
||||
|
||||
lowercased := strings.ToLower(hostinfo.Hostname)
|
||||
if err := ValidateHostname(lowercased); err == nil {
|
||||
return lowercased
|
||||
}
|
||||
|
||||
return InvalidString()
|
||||
}
|
||||
|
||||
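EnsureHostname is the registration-time wrapper over the validation helpers: it never returns an error and instead falls back to a generated name. A sketch of the three outcomes, with values matching the tests below:

	// Sketch only: the three EnsureHostname outcomes.
	h1 := util.EnsureHostname(&tailcfg.Hostinfo{Hostname: "User2-Host"}, "mkey12345678", "nkey12345678")
	// h1 == "user2-host" (lowercased, otherwise already valid)

	h2 := util.EnsureHostname(nil, "mkey12345678", "nkey12345678")
	// h2 == "node-mkey1234" (derived from the first 8 characters of the machine key)

	h3 := util.EnsureHostname(&tailcfg.Hostinfo{Hostname: "@@@"}, "mkey12345678", "nkey12345678")
	// h3 has the form "invalid-<random>" because "@@@" cannot become a valid label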
@@ -3,10 +3,12 @@ package util
|
||||
import (
|
||||
"errors"
|
||||
"net/netip"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
func TestTailscaleVersionNewerOrEqual(t *testing.T) {
|
||||
@@ -793,3 +795,496 @@ over a maximum of 30 hops:
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureHostname(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
hostinfo *tailcfg.Hostinfo
|
||||
machineKey string
|
||||
nodeKey string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "valid_hostname",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "test-node",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "test-node",
|
||||
},
|
||||
{
|
||||
name: "nil_hostinfo_with_machine_key",
|
||||
hostinfo: nil,
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "node-mkey1234",
|
||||
},
|
||||
{
|
||||
name: "nil_hostinfo_with_node_key_only",
|
||||
hostinfo: nil,
|
||||
machineKey: "",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "node-nkey1234",
|
||||
},
|
||||
{
|
||||
name: "nil_hostinfo_no_keys",
|
||||
hostinfo: nil,
|
||||
machineKey: "",
|
||||
nodeKey: "",
|
||||
want: "unknown-node",
|
||||
},
|
||||
{
|
||||
name: "empty_hostname_with_machine_key",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "node-mkey1234",
|
||||
},
|
||||
{
|
||||
name: "empty_hostname_with_node_key_only",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "",
|
||||
},
|
||||
machineKey: "",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "node-nkey1234",
|
||||
},
|
||||
{
|
||||
name: "empty_hostname_no_keys",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "",
|
||||
},
|
||||
machineKey: "",
|
||||
nodeKey: "",
|
||||
want: "unknown-node",
|
||||
},
|
||||
{
|
||||
name: "hostname_exactly_63_chars",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "123456789012345678901234567890123456789012345678901234567890123",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "123456789012345678901234567890123456789012345678901234567890123",
|
||||
},
|
||||
{
|
||||
name: "hostname_64_chars_truncated",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "1234567890123456789012345678901234567890123456789012345678901234",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "hostname_very_long_truncated",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "test-node-with-very-long-hostname-that-exceeds-dns-label-limits-of-63-characters-and-should-be-truncated",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "hostname_with_special_chars",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "node-with-special!@#$%",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "hostname_with_unicode",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "node-ñoño-测试",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "short_machine_key",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "",
|
||||
},
|
||||
machineKey: "short",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "node-short",
|
||||
},
|
||||
{
|
||||
name: "short_node_key",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "",
|
||||
},
|
||||
machineKey: "",
|
||||
nodeKey: "short",
|
||||
want: "node-short",
|
||||
},
|
||||
{
|
||||
name: "hostname_with_emoji_replaced",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "hostname-with-💩",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "hostname_only_emoji_replaced",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "🚀",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "hostname_with_multiple_emojis_replaced",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "node-🎉-🚀-test",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "uppercase_to_lowercase",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "User2-Host",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "user2-host",
|
||||
},
|
||||
{
|
||||
name: "underscore_removed",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "test_node",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "at_sign_invalid",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "Test@Host",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "chinese_chars_with_dash_invalid",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "server-北京-01",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "chinese_only_invalid",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "我的电脑",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "emoji_with_text_invalid",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "laptop-🚀",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "mixed_chinese_emoji_invalid",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "测试💻机器",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "only_emojis_invalid",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "🎉🎊",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "only_at_signs_invalid",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "@@@",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "starts_with_dash_invalid",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "-test",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "ends_with_dash_invalid",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "test-",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "very_long_hostname_truncated",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: strings.Repeat("t", 70),
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
want: "invalid-",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got := EnsureHostname(tt.hostinfo, tt.machineKey, tt.nodeKey)
|
||||
// For invalid hostnames, we just check the prefix since the random part varies
|
||||
if strings.HasPrefix(tt.want, "invalid-") {
|
||||
if !strings.HasPrefix(got, "invalid-") {
|
||||
t.Errorf("EnsureHostname() = %v, want prefix %v", got, tt.want)
|
||||
}
|
||||
} else if got != tt.want {
|
||||
t.Errorf("EnsureHostname() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureHostnameWithHostinfo(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
hostinfo *tailcfg.Hostinfo
|
||||
machineKey string
|
||||
nodeKey string
|
||||
wantHostname string
|
||||
checkHostinfo func(*testing.T, *tailcfg.Hostinfo)
|
||||
}{
|
||||
{
|
||||
name: "valid_hostinfo_unchanged",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "test-node",
|
||||
OS: "linux",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
wantHostname: "test-node",
|
||||
checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) {
|
||||
if hi == nil {
|
||||
t.Error("hostinfo should not be nil")
|
||||
}
|
||||
if hi.Hostname != "test-node" {
|
||||
t.Errorf("hostname = %v, want test-node", hi.Hostname)
|
||||
}
|
||||
if hi.OS != "linux" {
|
||||
t.Errorf("OS = %v, want linux", hi.OS)
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "nil_hostinfo_creates_default",
|
||||
hostinfo: nil,
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
wantHostname: "node-mkey1234",
|
||||
},
|
||||
{
|
||||
name: "empty_hostname_updated",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "",
|
||||
OS: "darwin",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
wantHostname: "node-mkey1234",
|
||||
},
|
||||
{
|
||||
name: "long_hostname_rejected",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "test-node-with-very-long-hostname-that-exceeds-dns-label-limits-of-63-characters",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
wantHostname: "invalid-",
|
||||
},
|
||||
{
|
||||
name: "nil_hostinfo_node_key_only",
|
||||
hostinfo: nil,
|
||||
machineKey: "",
|
||||
nodeKey: "nkey12345678",
|
||||
wantHostname: "node-nkey1234",
|
||||
checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) {
|
||||
if hi == nil {
|
||||
t.Error("hostinfo should not be nil")
|
||||
}
|
||||
if hi.Hostname != "node-nkey1234" {
|
||||
t.Errorf("hostname = %v, want node-nkey1234", hi.Hostname)
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "nil_hostinfo_no_keys",
|
||||
hostinfo: nil,
|
||||
machineKey: "",
|
||||
nodeKey: "",
|
||||
wantHostname: "unknown-node",
|
||||
checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) {
|
||||
if hi == nil {
|
||||
t.Error("hostinfo should not be nil")
|
||||
}
|
||||
if hi.Hostname != "unknown-node" {
|
||||
t.Errorf("hostname = %v, want unknown-node", hi.Hostname)
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty_hostname_no_keys",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "",
|
||||
},
|
||||
machineKey: "",
|
||||
nodeKey: "",
|
||||
wantHostname: "unknown-node",
|
||||
checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) {
|
||||
if hi == nil {
|
||||
t.Error("hostinfo should not be nil")
|
||||
}
|
||||
if hi.Hostname != "unknown-node" {
|
||||
t.Errorf("hostname = %v, want unknown-node", hi.Hostname)
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "preserves_other_fields",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "test",
|
||||
OS: "windows",
|
||||
OSVersion: "10.0.19044",
|
||||
DeviceModel: "test-device",
|
||||
BackendLogID: "log123",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
wantHostname: "test",
|
||||
checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) {
|
||||
if hi == nil {
|
||||
t.Error("hostinfo should not be nil")
|
||||
}
|
||||
if hi.Hostname != "test" {
|
||||
t.Errorf("hostname = %v, want test", hi.Hostname)
|
||||
}
|
||||
if hi.OS != "windows" {
|
||||
t.Errorf("OS = %v, want windows", hi.OS)
|
||||
}
|
||||
if hi.OSVersion != "10.0.19044" {
|
||||
t.Errorf("OSVersion = %v, want 10.0.19044", hi.OSVersion)
|
||||
}
|
||||
if hi.DeviceModel != "test-device" {
|
||||
t.Errorf("DeviceModel = %v, want test-device", hi.DeviceModel)
|
||||
}
|
||||
if hi.BackendLogID != "log123" {
|
||||
t.Errorf("BackendLogID = %v, want log123", hi.BackendLogID)
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "exactly_63_chars_unchanged",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "123456789012345678901234567890123456789012345678901234567890123",
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
wantHostname: "123456789012345678901234567890123456789012345678901234567890123",
|
||||
checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) {
|
||||
if hi == nil {
|
||||
t.Error("hostinfo should not be nil")
|
||||
}
|
||||
if len(hi.Hostname) != 63 {
|
||||
t.Errorf("hostname length = %v, want 63", len(hi.Hostname))
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
gotHostname := EnsureHostname(tt.hostinfo, tt.machineKey, tt.nodeKey)
|
||||
// For invalid hostnames, we just check the prefix since the random part varies
|
||||
if strings.HasPrefix(tt.wantHostname, "invalid-") {
|
||||
if !strings.HasPrefix(gotHostname, "invalid-") {
|
||||
t.Errorf("EnsureHostname() = %v, want prefix %v", gotHostname, tt.wantHostname)
|
||||
}
|
||||
} else if gotHostname != tt.wantHostname {
|
||||
t.Errorf("EnsureHostname() hostname = %v, want %v", gotHostname, tt.wantHostname)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureHostname_DNSLabelLimit(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
testCases := []string{
|
||||
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
"cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
|
||||
"dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd",
|
||||
}
|
||||
|
||||
for i, hostname := range testCases {
|
||||
t.Run(cmp.Diff("", ""), func(t *testing.T) {
|
||||
hostinfo := &tailcfg.Hostinfo{Hostname: hostname}
|
||||
result := EnsureHostname(hostinfo, "mkey", "nkey")
|
||||
if len(result) > 63 {
|
||||
t.Errorf("test case %d: hostname length = %d, want <= 63", i, len(result))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureHostname_Idempotent(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
originalHostinfo := &tailcfg.Hostinfo{
|
||||
Hostname: "test-node",
|
||||
OS: "linux",
|
||||
}
|
||||
|
||||
hostname1 := EnsureHostname(originalHostinfo, "mkey", "nkey")
|
||||
hostname2 := EnsureHostname(originalHostinfo, "mkey", "nkey")
|
||||
|
||||
if hostname1 != hostname2 {
|
||||
t.Errorf("hostnames not equal: %v != %v", hostname1, hostname2)
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
657
integration/api_auth_test.go
Normal file
@@ -0,0 +1,657 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
)
|
||||
|
||||
// TestAPIAuthenticationBypass tests that the API authentication middleware
|
||||
// properly blocks unauthorized requests and does not leak sensitive data.
|
||||
// This test reproduces the security issue described in:
|
||||
// - https://github.com/juanfont/headscale/issues/2809
|
||||
// - https://github.com/juanfont/headscale/pull/2810
|
||||
//
|
||||
// The bug: When authentication fails, the middleware writes "Unauthorized"
|
||||
// but doesn't return early, allowing the handler to execute and append
|
||||
// sensitive data to the response.
|
||||
func TestAPIAuthenticationBypass(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
spec := ScenarioSpec{
|
||||
Users: []string{"user1", "user2", "user3"},
|
||||
}
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("apiauthbypass"))
|
||||
require.NoError(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create an API key using the CLI
|
||||
var validAPIKey string
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
apiKeyOutput, err := headscale.Execute(
|
||||
[]string{
|
||||
"headscale",
|
||||
"apikeys",
|
||||
"create",
|
||||
"--expiration",
|
||||
"24h",
|
||||
},
|
||||
)
|
||||
assert.NoError(ct, err)
|
||||
assert.NotEmpty(ct, apiKeyOutput)
|
||||
validAPIKey = strings.TrimSpace(apiKeyOutput)
|
||||
}, 20*time.Second, 1*time.Second)
|
||||
|
||||
// Get the API endpoint
|
||||
endpoint := headscale.GetEndpoint()
|
||||
apiURL := fmt.Sprintf("%s/api/v1/user", endpoint)
|
||||
|
||||
// Create HTTP client
|
||||
client := &http.Client{
|
||||
Timeout: 10 * time.Second,
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec
|
||||
},
|
||||
}
|
||||
|
||||
t.Run("HTTP_NoAuthHeader", func(t *testing.T) {
|
||||
// Test 1: Request without any Authorization header
|
||||
// Expected: Should return 401 with ONLY "Unauthorized" text, no user data
|
||||
req, err := http.NewRequest("GET", apiURL, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
require.NoError(t, err)
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should return 401 Unauthorized
|
||||
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode,
|
||||
"Expected 401 status code for request without auth header")
|
||||
|
||||
bodyStr := string(body)
|
||||
|
||||
// Should contain "Unauthorized" message
|
||||
assert.Contains(t, bodyStr, "Unauthorized",
|
||||
"Response should contain 'Unauthorized' message")
|
||||
|
||||
// Should NOT contain user data after "Unauthorized"
|
||||
// This is the security bypass - if users array is present, auth was bypassed
|
||||
var jsonCheck map[string]interface{}
|
||||
jsonErr := json.Unmarshal(body, &jsonCheck)
|
||||
|
||||
// If we can unmarshal JSON and it contains "users", that's the bypass
|
||||
if jsonErr == nil {
|
||||
assert.NotContains(t, jsonCheck, "users",
|
||||
"SECURITY ISSUE: Response should NOT contain 'users' data when unauthorized")
|
||||
assert.NotContains(t, jsonCheck, "user",
|
||||
"SECURITY ISSUE: Response should NOT contain 'user' data when unauthorized")
|
||||
}
|
||||
|
||||
// Additional check: response should not contain "user1", "user2", "user3"
|
||||
assert.NotContains(t, bodyStr, "user1",
|
||||
"SECURITY ISSUE: Response should NOT leak user 'user1' data")
|
||||
assert.NotContains(t, bodyStr, "user2",
"SECURITY ISSUE: Response should NOT leak user 'user2' data")
assert.NotContains(t, bodyStr, "user3",
"SECURITY ISSUE: Response should NOT leak user 'user3' data")

// Response should be minimal, just "Unauthorized"
// Allow some variation in response format but body should be small
assert.Less(t, len(bodyStr), 100,
"SECURITY ISSUE: Unauthorized response body should be minimal, got: %s", bodyStr)
})

t.Run("HTTP_InvalidAuthHeader", func(t *testing.T) {
// Test 2: Request with invalid Authorization header (missing "Bearer " prefix)
// Expected: Should return 401 with ONLY "Unauthorized" text, no user data
req, err := http.NewRequest("GET", apiURL, nil)
require.NoError(t, err)
req.Header.Set("Authorization", "InvalidToken")

resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
require.NoError(t, err)

assert.Equal(t, http.StatusUnauthorized, resp.StatusCode,
"Expected 401 status code for invalid auth header format")

bodyStr := string(body)
assert.Contains(t, bodyStr, "Unauthorized")

// Should not leak user data
assert.NotContains(t, bodyStr, "user1",
"SECURITY ISSUE: Response should NOT leak user data")
assert.NotContains(t, bodyStr, "user2",
"SECURITY ISSUE: Response should NOT leak user data")
assert.NotContains(t, bodyStr, "user3",
"SECURITY ISSUE: Response should NOT leak user data")

assert.Less(t, len(bodyStr), 100,
"SECURITY ISSUE: Unauthorized response should be minimal")
})

t.Run("HTTP_InvalidBearerToken", func(t *testing.T) {
// Test 3: Request with Bearer prefix but invalid token
// Expected: Should return 401 with ONLY "Unauthorized" text, no user data
// Note: Both malformed and properly formatted invalid tokens should return 401
req, err := http.NewRequest("GET", apiURL, nil)
require.NoError(t, err)
req.Header.Set("Authorization", "Bearer invalid-token-12345")

resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
require.NoError(t, err)

assert.Equal(t, http.StatusUnauthorized, resp.StatusCode,
"Expected 401 status code for invalid bearer token")

bodyStr := string(body)
assert.Contains(t, bodyStr, "Unauthorized")

// Should not leak user data
assert.NotContains(t, bodyStr, "user1",
"SECURITY ISSUE: Response should NOT leak user data")
assert.NotContains(t, bodyStr, "user2",
"SECURITY ISSUE: Response should NOT leak user data")
assert.NotContains(t, bodyStr, "user3",
"SECURITY ISSUE: Response should NOT leak user data")

assert.Less(t, len(bodyStr), 100,
"SECURITY ISSUE: Unauthorized response should be minimal")
})

t.Run("HTTP_ValidAPIKey", func(t *testing.T) {
// Test 4: Request with valid API key
// Expected: Should return 200 with user data (this is the authorized case)
req, err := http.NewRequest("GET", apiURL, nil)
require.NoError(t, err)
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", validAPIKey))

resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
require.NoError(t, err)

// Should succeed with valid auth
assert.Equal(t, http.StatusOK, resp.StatusCode,
"Expected 200 status code with valid API key")

// Should be able to parse as protobuf JSON
var response v1.ListUsersResponse
err = protojson.Unmarshal(body, &response)
assert.NoError(t, err, "Response should be valid protobuf JSON with valid API key")

// Should contain our test users
users := response.GetUsers()
assert.Len(t, users, 3, "Should have 3 users")
userNames := make([]string, len(users))
for i, u := range users {
userNames[i] = u.GetName()
}
assert.Contains(t, userNames, "user1")
assert.Contains(t, userNames, "user2")
assert.Contains(t, userNames, "user3")
})
}
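// The four subtests above probe the HTTP API middleware from every angle:
// no header, malformed header, invalid bearer token, and a valid key. For
// orientation only — this is NOT headscale's actual middleware, and the
// names authMiddlewareSketch/validate are hypothetical — the property being
// tested is roughly:
//
// func authMiddlewareSketch(validate func(key string) bool, next http.Handler) http.Handler {
// 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// 		auth := r.Header.Get("Authorization")
// 		token, ok := strings.CutPrefix(auth, "Bearer ")
// 		if !ok || !validate(token) {
// 			// Reject before any handler runs; the body stays a bare "Unauthorized".
// 			http.Error(w, "Unauthorized", http.StatusUnauthorized)
// 			return
// 		}
// 		next.ServeHTTP(w, r)
// 	})
// }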

// TestAPIAuthenticationBypassCurl tests the same security issue using curl
// from inside a container, which is closer to how the issue was discovered.
func TestAPIAuthenticationBypassCurl(t *testing.T) {
IntegrationSkip(t)

spec := ScenarioSpec{
Users: []string{"testuser1", "testuser2"},
}

scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("apiauthcurl"))
require.NoError(t, err)

headscale, err := scenario.Headscale()
require.NoError(t, err)

// Create a valid API key
apiKeyOutput, err := headscale.Execute(
[]string{
"headscale",
"apikeys",
"create",
"--expiration",
"24h",
},
)
require.NoError(t, err)
validAPIKey := strings.TrimSpace(apiKeyOutput)

endpoint := headscale.GetEndpoint()
apiURL := fmt.Sprintf("%s/api/v1/user", endpoint)

t.Run("Curl_NoAuth", func(t *testing.T) {
// Execute curl from inside the headscale container without auth
curlOutput, err := headscale.Execute(
[]string{
"curl",
"-s",
"-w",
"\nHTTP_CODE:%{http_code}",
apiURL,
},
)
require.NoError(t, err)

// Parse the output
lines := strings.Split(curlOutput, "\n")
var httpCode string
var responseBody string

for _, line := range lines {
if strings.HasPrefix(line, "HTTP_CODE:") {
httpCode = strings.TrimPrefix(line, "HTTP_CODE:")
} else {
responseBody += line
}
}

// Should return 401
assert.Equal(t, "401", httpCode,
"Curl without auth should return 401")

// Should contain Unauthorized
assert.Contains(t, responseBody, "Unauthorized",
"Response should contain 'Unauthorized'")

// Should NOT leak user data
assert.NotContains(t, responseBody, "testuser1",
"SECURITY ISSUE: Should not leak user data")
assert.NotContains(t, responseBody, "testuser2",
"SECURITY ISSUE: Should not leak user data")

// Response should be small (just "Unauthorized")
assert.Less(t, len(responseBody), 100,
"SECURITY ISSUE: Unauthorized response should be minimal, got: %s", responseBody)
})

t.Run("Curl_InvalidAuth", func(t *testing.T) {
// Execute curl with invalid auth header
curlOutput, err := headscale.Execute(
[]string{
"curl",
"-s",
"-H",
"Authorization: InvalidToken",
"-w",
"\nHTTP_CODE:%{http_code}",
apiURL,
},
)
require.NoError(t, err)

lines := strings.Split(curlOutput, "\n")
var httpCode string
var responseBody string

for _, line := range lines {
if strings.HasPrefix(line, "HTTP_CODE:") {
httpCode = strings.TrimPrefix(line, "HTTP_CODE:")
} else {
responseBody += line
}
}

assert.Equal(t, "401", httpCode)
assert.Contains(t, responseBody, "Unauthorized")
assert.NotContains(t, responseBody, "testuser1",
"SECURITY ISSUE: Should not leak user data")
assert.NotContains(t, responseBody, "testuser2",
"SECURITY ISSUE: Should not leak user data")
})

t.Run("Curl_ValidAuth", func(t *testing.T) {
// Execute curl with valid API key
curlOutput, err := headscale.Execute(
[]string{
"curl",
"-s",
"-H",
fmt.Sprintf("Authorization: Bearer %s", validAPIKey),
"-w",
"\nHTTP_CODE:%{http_code}",
apiURL,
},
)
require.NoError(t, err)

lines := strings.Split(curlOutput, "\n")
var httpCode string
var responseBody string

for _, line := range lines {
if strings.HasPrefix(line, "HTTP_CODE:") {
httpCode = strings.TrimPrefix(line, "HTTP_CODE:")
} else {
responseBody += line
}
}

// Should succeed
assert.Equal(t, "200", httpCode,
"Curl with valid API key should return 200")

// Should contain user data
var response v1.ListUsersResponse
err = protojson.Unmarshal([]byte(responseBody), &response)
assert.NoError(t, err, "Response should be valid protobuf JSON")
users := response.GetUsers()
assert.Len(t, users, 2, "Should have 2 users")
})
}
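// Each curl subtest appends "\nHTTP_CODE:%{http_code}" via -w and then splits
// the output back into status code and body. A minimal sketch of that parsing
// factored into a helper (hypothetical name, not part of the repository):
//
// func parseCurlOutput(curlOutput string) (httpCode, responseBody string) {
// 	for _, line := range strings.Split(curlOutput, "\n") {
// 		if strings.HasPrefix(line, "HTTP_CODE:") {
// 			httpCode = strings.TrimPrefix(line, "HTTP_CODE:")
// 		} else {
// 			responseBody += line
// 		}
// 	}
// 	return httpCode, responseBody
// }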

// TestGRPCAuthenticationBypass tests that the gRPC authentication interceptor
// properly blocks unauthorized requests.
// This test verifies that the gRPC API does not have the same bypass issue
// as the HTTP API middleware.
func TestGRPCAuthenticationBypass(t *testing.T) {
IntegrationSkip(t)

spec := ScenarioSpec{
Users: []string{"grpcuser1", "grpcuser2"},
}

scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

// We need TLS for remote gRPC connections
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithTestName("grpcauthtest"),
hsic.WithTLS(),
hsic.WithConfigEnv(map[string]string{
// Enable gRPC on the standard port
"HEADSCALE_GRPC_LISTEN_ADDR": "0.0.0.0:50443",
}),
)
require.NoError(t, err)

headscale, err := scenario.Headscale()
require.NoError(t, err)

// Create a valid API key
apiKeyOutput, err := headscale.Execute(
[]string{
"headscale",
"apikeys",
"create",
"--expiration",
"24h",
},
)
require.NoError(t, err)
validAPIKey := strings.TrimSpace(apiKeyOutput)

// Get the gRPC endpoint
// For gRPC, we need to use the hostname and port 50443
grpcAddress := fmt.Sprintf("%s:50443", headscale.GetHostname())

t.Run("gRPC_NoAPIKey", func(t *testing.T) {
// Test 1: Try to use CLI without API key (should fail)
// When HEADSCALE_CLI_ADDRESS is set but HEADSCALE_CLI_API_KEY is not set,
// the CLI should fail immediately
_, err := headscale.Execute(
[]string{
"sh", "-c",
fmt.Sprintf("HEADSCALE_CLI_ADDRESS=%s HEADSCALE_CLI_INSECURE=true headscale users list --output json 2>&1", grpcAddress),
},
)

// Should fail - CLI exits when API key is missing
assert.Error(t, err,
"gRPC connection without API key should fail")
})

t.Run("gRPC_InvalidAPIKey", func(t *testing.T) {
// Test 2: Try to use CLI with invalid API key (should fail with auth error)
output, err := headscale.Execute(
[]string{
"sh", "-c",
fmt.Sprintf("HEADSCALE_CLI_ADDRESS=%s HEADSCALE_CLI_API_KEY=invalid-key-12345 HEADSCALE_CLI_INSECURE=true headscale users list --output json 2>&1", grpcAddress),
},
)

// Should fail with authentication error
assert.Error(t, err,
"gRPC connection with invalid API key should fail")

// Should contain authentication error message
outputStr := strings.ToLower(output)
assert.True(t,
strings.Contains(outputStr, "unauthenticated") ||
strings.Contains(outputStr, "invalid token") ||
strings.Contains(outputStr, "failed to validate token") ||
strings.Contains(outputStr, "authentication"),
"Error should indicate authentication failure, got: %s", output)

// Should NOT leak user data
assert.NotContains(t, output, "grpcuser1",
"SECURITY ISSUE: gRPC should not leak user data with invalid auth")
assert.NotContains(t, output, "grpcuser2",
"SECURITY ISSUE: gRPC should not leak user data with invalid auth")
})

t.Run("gRPC_ValidAPIKey", func(t *testing.T) {
// Test 3: Use CLI with valid API key (should succeed)
output, err := headscale.Execute(
[]string{
"sh", "-c",
fmt.Sprintf("HEADSCALE_CLI_ADDRESS=%s HEADSCALE_CLI_API_KEY=%s HEADSCALE_CLI_INSECURE=true headscale users list --output json", grpcAddress, validAPIKey),
},
)

// Should succeed
assert.NoError(t, err,
"gRPC connection with valid API key should succeed, output: %s", output)

// CLI outputs the users array directly, not wrapped in ListUsersResponse
// Parse as JSON array (CLI uses json.Marshal, not protojson)
var users []*v1.User
err = json.Unmarshal([]byte(output), &users)
assert.NoError(t, err, "Response should be valid JSON array")
assert.Len(t, users, 2, "Should have 2 users")

userNames := make([]string, len(users))
for i, u := range users {
userNames[i] = u.GetName()
}
assert.Contains(t, userNames, "grpcuser1")
assert.Contains(t, userNames, "grpcuser2")
})
}
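// The three gRPC subtests drive the CLI against the remote gRPC listener with
// no key, an invalid key, and a valid key. For orientation only, a sketch of
// the kind of unary interceptor such tests exercise — an assumed shape, not
// headscale's actual code; it relies on google.golang.org/grpc plus its
// metadata, status and codes packages:
//
// func authInterceptorSketch(validate func(token string) bool) grpc.UnaryServerInterceptor {
// 	return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
// 		md, ok := metadata.FromIncomingContext(ctx)
// 		if !ok {
// 			return nil, status.Error(codes.Unauthenticated, "missing metadata")
// 		}
// 		auth := md.Get("authorization")
// 		if len(auth) == 0 || !strings.HasPrefix(auth[0], "Bearer ") {
// 			return nil, status.Error(codes.Unauthenticated, "missing bearer token")
// 		}
// 		if !validate(strings.TrimPrefix(auth[0], "Bearer ")) {
// 			return nil, status.Error(codes.Unauthenticated, "failed to validate token")
// 		}
// 		return handler(ctx, req)
// 	}
// }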

// TestCLIWithConfigAuthenticationBypass tests that the headscale CLI
// with --config flag does not have authentication bypass issues when
// connecting to a remote server.
// Note: When using --config with local unix socket, no auth is needed.
// This test focuses on remote gRPC connections which require API keys.
func TestCLIWithConfigAuthenticationBypass(t *testing.T) {
IntegrationSkip(t)

spec := ScenarioSpec{
Users: []string{"cliuser1", "cliuser2"},
}

scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithTestName("cliconfigauth"),
hsic.WithTLS(),
hsic.WithConfigEnv(map[string]string{
"HEADSCALE_GRPC_LISTEN_ADDR": "0.0.0.0:50443",
}),
)
require.NoError(t, err)

headscale, err := scenario.Headscale()
require.NoError(t, err)

// Create a valid API key
apiKeyOutput, err := headscale.Execute(
[]string{
"headscale",
"apikeys",
"create",
"--expiration",
"24h",
},
)
require.NoError(t, err)
validAPIKey := strings.TrimSpace(apiKeyOutput)

grpcAddress := fmt.Sprintf("%s:50443", headscale.GetHostname())

// Create a config file for testing
configWithoutKey := fmt.Sprintf(`
cli:
  address: %s
  timeout: 5s
  insecure: true
`, grpcAddress)

configWithInvalidKey := fmt.Sprintf(`
cli:
  address: %s
  api_key: invalid-key-12345
  timeout: 5s
  insecure: true
`, grpcAddress)

configWithValidKey := fmt.Sprintf(`
cli:
  address: %s
  api_key: %s
  timeout: 5s
  insecure: true
`, grpcAddress, validAPIKey)

t.Run("CLI_Config_NoAPIKey", func(t *testing.T) {
// Create config file without API key
err := headscale.WriteFile("/tmp/config_no_key.yaml", []byte(configWithoutKey))
require.NoError(t, err)

// Try to use CLI with config that has no API key
_, err = headscale.Execute(
[]string{
"headscale",
"--config", "/tmp/config_no_key.yaml",
"users", "list",
"--output", "json",
},
)

// Should fail
assert.Error(t, err,
"CLI with config missing API key should fail")
})

t.Run("CLI_Config_InvalidAPIKey", func(t *testing.T) {
// Create config file with invalid API key
err := headscale.WriteFile("/tmp/config_invalid_key.yaml", []byte(configWithInvalidKey))
require.NoError(t, err)

// Try to use CLI with invalid API key
output, err := headscale.Execute(
[]string{
"sh", "-c",
"headscale --config /tmp/config_invalid_key.yaml users list --output json 2>&1",
},
)

// Should fail
assert.Error(t, err,
"CLI with invalid API key should fail")

// Should indicate authentication failure
outputStr := strings.ToLower(output)
assert.True(t,
strings.Contains(outputStr, "unauthenticated") ||
strings.Contains(outputStr, "invalid token") ||
strings.Contains(outputStr, "failed to validate token") ||
strings.Contains(outputStr, "authentication"),
"Error should indicate authentication failure, got: %s", output)

// Should NOT leak user data
assert.NotContains(t, output, "cliuser1",
"SECURITY ISSUE: CLI should not leak user data with invalid auth")
assert.NotContains(t, output, "cliuser2",
"SECURITY ISSUE: CLI should not leak user data with invalid auth")
})

t.Run("CLI_Config_ValidAPIKey", func(t *testing.T) {
// Create config file with valid API key
err := headscale.WriteFile("/tmp/config_valid_key.yaml", []byte(configWithValidKey))
require.NoError(t, err)

// Use CLI with valid API key
output, err := headscale.Execute(
[]string{
"headscale",
"--config", "/tmp/config_valid_key.yaml",
"users", "list",
"--output", "json",
},
)

// Should succeed
assert.NoError(t, err,
"CLI with valid API key should succeed")

// CLI outputs the users array directly, not wrapped in ListUsersResponse
// Parse as JSON array (CLI uses json.Marshal, not protojson)
var users []*v1.User
err = json.Unmarshal([]byte(output), &users)
assert.NoError(t, err, "Response should be valid JSON array")
assert.Len(t, users, 2, "Should have 2 users")

userNames := make([]string, len(users))
for i, u := range users {
userNames[i] = u.GetName()
}
assert.Contains(t, userNames, "cliuser1")
assert.Contains(t, userNames, "cliuser2")
})
}

@@ -28,7 +28,7 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

opts := []hsic.Option{
@@ -43,31 +43,25 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
}

err = scenario.CreateHeadscaleEnv([]tsic.Option{}, opts...)
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

allIps, err := scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
requireNoErrListClientIPs(t, err)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

headscale, err := scenario.Headscale()
assertNoErrGetHeadscale(t, err)
requireNoErrGetHeadscale(t, err)

expectedNodes := make([]types.NodeID, 0, len(allClients))
for _, client := range allClients {
status := client.MustStatus()
nodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)
assertNoErr(t, err)
expectedNodes = append(expectedNodes, types.NodeID(nodeID))
}
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected", 30*time.Second)
expectedNodes := collectExpectedNodeIDs(t, allClients)
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected", 120*time.Second)

// Validate that all nodes have NetInfo and DERP servers before logout
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP before logout", 1*time.Minute)
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP before logout", 3*time.Minute)

// assertClientsState(t, allClients)

@@ -80,14 +74,21 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
clientIPs[client] = ips
}

listNodes, err := headscale.ListNodes()
assert.Len(t, allClients, len(listNodes))
nodeCountBeforeLogout := len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)
var listNodes []*v1.Node
var nodeCountBeforeLogout int
assert.EventuallyWithT(t, func(c *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, listNodes, len(allClients))

for _, node := range listNodes {
assertLastSeenSet(t, node)
}
for _, node := range listNodes {
assertLastSeenSetWithCollect(c, node)
}
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list before logout")

nodeCountBeforeLogout = len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)

for _, client := range allClients {
err := client.Logout()
@@ -97,19 +98,20 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
}

err = scenario.WaitForTailscaleLogout()
assertNoErrLogout(t, err)
requireNoErrLogout(t, err)

// After taking down all nodes, verify all systems show nodes offline
requireAllClientsOnline(t, headscale, expectedNodes, false, "all nodes should have logged out", 120*time.Second)

t.Logf("all clients logged out")

t.Logf("Validating node persistence after logout at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err)
assert.Len(ct, listNodes, nodeCountBeforeLogout, "Node count should match before logout count")
}, 20*time.Second, 1*time.Second)
assert.NoError(ct, err, "Failed to list nodes after logout")
assert.Len(ct, listNodes, nodeCountBeforeLogout, "Node count should match before logout count - expected %d nodes, got %d", nodeCountBeforeLogout, len(listNodes))
}, 30*time.Second, 2*time.Second, "validating node persistence after logout (nodes should remain in database)")

for _, node := range listNodes {
assertLastSeenSet(t, node)
@@ -125,7 +127,7 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
}

userMap, err := headscale.MapUsers()
assertNoErr(t, err)
require.NoError(t, err)

for _, userName := range spec.Users {
key, err := scenario.CreatePreAuthKey(userMap[userName].GetId(), true, false)
@@ -139,12 +141,13 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
}
}

t.Logf("Validating node persistence after relogin at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err)
assert.Len(ct, listNodes, nodeCountBeforeLogout, "Node count should match after HTTPS reconnection")
}, 30*time.Second, 2*time.Second)
assert.NoError(ct, err, "Failed to list nodes after relogin")
assert.Len(ct, listNodes, nodeCountBeforeLogout, "Node count should remain unchanged after relogin - expected %d nodes, got %d", nodeCountBeforeLogout, len(listNodes))
}, 60*time.Second, 2*time.Second, "validating node count stability after same-user auth key relogin")

for _, node := range listNodes {
assertLastSeenSet(t, node)
@@ -152,11 +155,15 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {

requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected to batcher", 120*time.Second)

// Wait for Tailscale sync before validating NetInfo to ensure proper state propagation
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)

// Validate that all nodes have NetInfo and DERP servers after reconnection
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP after reconnection", 1*time.Minute)
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP after reconnection", 3*time.Minute)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
@@ -188,78 +195,24 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
}
}

listNodes, err = headscale.ListNodes()
require.Len(t, listNodes, nodeCountBeforeLogout)
for _, node := range listNodes {
assertLastSeenSet(t, node)
}
assert.EventuallyWithT(t, func(c *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, listNodes, nodeCountBeforeLogout)

for _, node := range listNodes {
assertLastSeenSetWithCollect(c, node)
}
}, 10*time.Second, 200*time.Millisecond, "Waiting for node list after relogin")
})
}
}

// requireAllClientsNetInfoAndDERP validates that all nodes have NetInfo in the database
// and a valid DERP server based on the NetInfo. This function follows the pattern of
// requireAllClientsOnline by using hsic.DebugNodeStore to get the database state.
func requireAllClientsNetInfoAndDERP(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID, message string, timeout time.Duration) {
t.Helper()

startTime := time.Now()
t.Logf("requireAllClientsNetInfoAndDERP: Starting validation at %s - %s", startTime.Format(TimestampFormat), message)

require.EventuallyWithT(t, func(c *assert.CollectT) {
// Get nodestore state
nodeStore, err := headscale.DebugNodeStore()
assert.NoError(c, err, "Failed to get nodestore debug info")
if err != nil {
return
}

// Validate node counts first
expectedCount := len(expectedNodes)
assert.Equal(c, expectedCount, len(nodeStore), "NodeStore total nodes mismatch")

// Check each expected node
for _, nodeID := range expectedNodes {
node, exists := nodeStore[nodeID]
assert.True(c, exists, "Node %d not found in nodestore", nodeID)
if !exists {
continue
}

// Validate that the node has Hostinfo
assert.NotNil(c, node.Hostinfo, "Node %d (%s) should have Hostinfo", nodeID, node.Hostname)
if node.Hostinfo == nil {
continue
}

// Validate that the node has NetInfo
assert.NotNil(c, node.Hostinfo.NetInfo, "Node %d (%s) should have NetInfo in Hostinfo", nodeID, node.Hostname)
if node.Hostinfo.NetInfo == nil {
continue
}

// Validate that the node has a valid DERP server (PreferredDERP should be > 0)
preferredDERP := node.Hostinfo.NetInfo.PreferredDERP
assert.Greater(c, preferredDERP, 0, "Node %d (%s) should have a valid DERP server (PreferredDERP > 0), got %d", nodeID, node.Hostname, preferredDERP)

t.Logf("Node %d (%s) has valid NetInfo with DERP server %d", nodeID, node.Hostname, preferredDERP)
}
}, timeout, 2*time.Second, message)

endTime := time.Now()
duration := endTime.Sub(startTime)
t.Logf("requireAllClientsNetInfoAndDERP: Completed validation at %s - Duration: %v - %s", endTime.Format(TimestampFormat), duration, message)
}

func assertLastSeenSet(t *testing.T, node *v1.Node) {
assert.NotNil(t, node)
assert.NotNil(t, node.GetLastSeen())
}
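// Two helpers referenced by the hunks above are not part of this excerpt.
// Plausible sketches, inferred from the call sites and from the inline code
// they replace (treat both bodies as assumptions, not the committed code):
//
// func assertLastSeenSetWithCollect(c *assert.CollectT, node *v1.Node) {
// 	assert.NotNil(c, node)
// 	assert.NotNil(c, node.GetLastSeen())
// }
//
// func collectExpectedNodeIDs(t *testing.T, clients []TailscaleClient) []types.NodeID {
// 	t.Helper()
// 	ids := make([]types.NodeID, 0, len(clients))
// 	for _, client := range clients {
// 		status := client.MustStatus()
// 		nodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)
// 		require.NoError(t, err)
// 		ids = append(ids, types.NodeID(nodeID))
// 	}
// 	return ids
// }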

// This test will first log in two sets of nodes to two sets of users, then
// it will log out all users from user2 and log them in as user1.
// This should leave us with all nodes connected to user1, while user2
// still has nodes, but they are not connected.
// it will log out all nodes and log them in as user1 using a pre-auth key.
// This should create new nodes for user1 while preserving the original nodes for user2.
// Pre-auth key re-authentication with a different user creates new nodes, not transfers.
func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
IntegrationSkip(t)

@@ -269,7 +222,7 @@ func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv([]tsic.Option{},
@@ -277,22 +230,36 @@ func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
hsic.WithTLS(),
hsic.WithDERPAsIP(),
)
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

// assertClientsState(t, allClients)

headscale, err := scenario.Headscale()
assertNoErrGetHeadscale(t, err)
requireNoErrGetHeadscale(t, err)

listNodes, err := headscale.ListNodes()
assert.Len(t, allClients, len(listNodes))
nodeCountBeforeLogout := len(listNodes)
// Collect expected node IDs for validation
expectedNodes := collectExpectedNodeIDs(t, allClients)

// Validate initial connection state
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected after initial login", 120*time.Second)
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP after initial login", 3*time.Minute)

var listNodes []*v1.Node
var nodeCountBeforeLogout int
assert.EventuallyWithT(t, func(c *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, listNodes, len(allClients))
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list before logout")

nodeCountBeforeLogout = len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)

for _, client := range allClients {
@@ -303,12 +270,15 @@ func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
}

err = scenario.WaitForTailscaleLogout()
assertNoErrLogout(t, err)
requireNoErrLogout(t, err)

// Validate that all nodes are offline after logout
requireAllClientsOnline(t, headscale, expectedNodes, false, "all nodes should be offline after logout", 120*time.Second)

t.Logf("all clients logged out")

userMap, err := headscale.MapUsers()
assertNoErr(t, err)
require.NoError(t, err)

// Create a new authkey for user1, to be used for all clients
key, err := scenario.CreatePreAuthKey(userMap["user1"].GetId(), true, false)
@@ -326,28 +296,43 @@ func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
}

var user1Nodes []*v1.Node
t.Logf("Validating user1 node count after relogin at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
user1Nodes, err = headscale.ListNodes("user1")
assert.NoError(ct, err)
assert.Len(ct, user1Nodes, len(allClients), "User1 should have all clients after re-login")
}, 20*time.Second, 1*time.Second)
assert.NoError(ct, err, "Failed to list nodes for user1 after relogin")
assert.Len(ct, user1Nodes, len(allClients), "User1 should have all %d clients after relogin, got %d nodes", len(allClients), len(user1Nodes))
}, 60*time.Second, 2*time.Second, "validating user1 has all client nodes after auth key relogin")

// Validate that all the old nodes are still present with user2
// Collect expected node IDs for user1 after relogin
expectedUser1Nodes := make([]types.NodeID, 0, len(user1Nodes))
for _, node := range user1Nodes {
expectedUser1Nodes = append(expectedUser1Nodes, types.NodeID(node.GetId()))
}

// Validate connection state after relogin as user1
requireAllClientsOnline(t, headscale, expectedUser1Nodes, true, "all user1 nodes should be connected after relogin", 120*time.Second)
requireAllClientsNetInfoAndDERP(t, headscale, expectedUser1Nodes, "all user1 nodes should have NetInfo and DERP after relogin", 3*time.Minute)

// Validate that user2 still has their original nodes after user1's re-authentication
// When nodes re-authenticate with a different user's pre-auth key, NEW nodes are created
// for the new user. The original nodes remain with the original user.
var user2Nodes []*v1.Node
t.Logf("Validating user2 node persistence after user1 relogin at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
user2Nodes, err = headscale.ListNodes("user2")
assert.NoError(ct, err)
assert.Len(ct, user2Nodes, len(allClients)/2, "User2 should have half the clients")
}, 20*time.Second, 1*time.Second)
assert.NoError(ct, err, "Failed to list nodes for user2 after user1 relogin")
assert.Len(ct, user2Nodes, len(allClients)/2, "User2 should still have %d clients after user1 relogin, got %d nodes", len(allClients)/2, len(user2Nodes))
}, 30*time.Second, 2*time.Second, "validating user2 nodes persist after user1 relogin (should not be affected)")

t.Logf("Validating client login states after user switch at %s", time.Now().Format(TimestampFormat))
for _, client := range allClients {
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())
assert.Equal(ct, "user1@test.no", status.User[status.Self.UserID].LoginName, "Client %s should be logged in as user1", client.Hostname())
}, 30*time.Second, 2*time.Second)
assert.Equal(ct, "user1@test.no", status.User[status.Self.UserID].LoginName, "Client %s should be logged in as user1 after user switch, got %s", client.Hostname(), status.User[status.Self.UserID].LoginName)
}, 30*time.Second, 2*time.Second, fmt.Sprintf("validating %s is logged in as user1 after auth key user switch", client.Hostname()))
}
}

@@ -362,7 +347,7 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

opts := []hsic.Option{
@@ -376,13 +361,13 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
}

err = scenario.CreateHeadscaleEnv([]tsic.Option{}, opts...)
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

// assertClientsState(t, allClients)

@@ -396,11 +381,25 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
}

headscale, err := scenario.Headscale()
assertNoErrGetHeadscale(t, err)
requireNoErrGetHeadscale(t, err)

listNodes, err := headscale.ListNodes()
assert.Len(t, allClients, len(listNodes))
nodeCountBeforeLogout := len(listNodes)
// Collect expected node IDs for validation
expectedNodes := collectExpectedNodeIDs(t, allClients)

// Validate initial connection state
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected after initial login", 120*time.Second)
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP after initial login", 3*time.Minute)

var listNodes []*v1.Node
var nodeCountBeforeLogout int
assert.EventuallyWithT(t, func(c *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, listNodes, len(allClients))
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list before logout")

nodeCountBeforeLogout = len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)

for _, client := range allClients {
@@ -411,7 +410,10 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
}

err = scenario.WaitForTailscaleLogout()
assertNoErrLogout(t, err)
requireNoErrLogout(t, err)

// Validate that all nodes are offline after logout
requireAllClientsOnline(t, headscale, expectedNodes, false, "all nodes should be offline after logout", 120*time.Second)

t.Logf("all clients logged out")

@@ -425,7 +427,7 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
}

userMap, err := headscale.MapUsers()
assertNoErr(t, err)
require.NoError(t, err)

for _, userName := range spec.Users {
key, err := scenario.CreatePreAuthKey(userMap[userName].GetId(), true, false)
@@ -443,7 +445,8 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
"expire",
key.GetKey(),
})
assertNoErr(t, err)
require.NoError(t, err)
require.NoError(t, err)

err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())
assert.ErrorContains(t, err, "authkey expired")

@@ -3,18 +3,22 @@ package integration
import (
"maps"
"net/netip"
"net/url"
"sort"
"strconv"
"testing"
"time"

"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/oauth2-proxy/mockoidc"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestOIDCAuthenticationPingAll(t *testing.T) {
@@ -33,7 +37,7 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)

defer scenario.ShutdownAssertNoPanics(t)

@@ -51,16 +55,16 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
hsic.WithTLS(),
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
)
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

allIps, err := scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
requireNoErrListClientIPs(t, err)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

// assertClientsState(t, allClients)

@@ -72,10 +76,10 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

headscale, err := scenario.Headscale()
assertNoErr(t, err)
require.NoError(t, err)

listUsers, err := headscale.ListUsers()
assertNoErr(t, err)
require.NoError(t, err)

want := []*v1.User{
{
@@ -141,7 +145,7 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

oidcMap := map[string]string{
@@ -156,18 +160,18 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
hsic.WithTestName("oidcexpirenodes"),
hsic.WithConfigEnv(oidcMap),
)
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

allIps, err := scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
requireNoErrListClientIPs(t, err)

// Record when sync completes to better estimate token expiry timing
syncCompleteTime := time.Now()
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)
loginDuration := time.Since(syncCompleteTime)
t.Logf("Login and sync completed in %v", loginDuration)

@@ -348,7 +352,7 @@ func TestOIDC024UserCreation(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

oidcMap := map[string]string{
@@ -366,20 +370,20 @@ func TestOIDC024UserCreation(t *testing.T) {
hsic.WithTLS(),
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
)
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

// Ensure that the nodes have logged in, this is what
// triggers user creation via OIDC.
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

headscale, err := scenario.Headscale()
assertNoErr(t, err)
require.NoError(t, err)

want := tt.want(scenario.mockOIDC.Issuer())

listUsers, err := headscale.ListUsers()
assertNoErr(t, err)
require.NoError(t, err)

sort.Slice(listUsers, func(i, j int) bool {
return listUsers[i].GetId() < listUsers[j].GetId()
@@ -405,7 +409,7 @@ func TestOIDCAuthenticationWithPKCE(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

oidcMap := map[string]string{
@@ -423,17 +427,17 @@ func TestOIDCAuthenticationWithPKCE(t *testing.T) {
hsic.WithTLS(),
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
)
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

// Get all clients and verify they can connect
allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

allIps, err := scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
requireNoErrListClientIPs(t, err)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
@@ -443,6 +447,11 @@ func TestOIDCAuthenticationWithPKCE(t *testing.T) {
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
}

// TestOIDCReloginSameNodeNewUser tests the scenario where:
// 1. A Tailscale client logs in with user1 (creates node1 for user1)
// 2. The same client logs out and logs in with user2 (creates node2 for user2)
// 3. The same client logs out and logs in with user1 again (reuses node1, node2 remains)
// This validates that OIDC relogin properly handles node reuse and cleanup.
func TestOIDCReloginSameNodeNewUser(t *testing.T) {
IntegrationSkip(t)

@@ -457,7 +466,7 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {
oidcMockUser("user1", true),
},
})
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

oidcMap := map[string]string{
@@ -476,24 +485,25 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithDERPAsIP(),
)
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

headscale, err := scenario.Headscale()
assertNoErr(t, err)
require.NoError(t, err)

ts, err := scenario.CreateTailscaleNode("unstable", tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))
assertNoErr(t, err)
require.NoError(t, err)

u, err := ts.LoginWithURL(headscale.GetEndpoint())
assertNoErr(t, err)
require.NoError(t, err)

_, err = doLoginURL(ts.Hostname(), u)
assertNoErr(t, err)
require.NoError(t, err)

t.Logf("Validating initial user creation at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listUsers, err := headscale.ListUsers()
assertNoErr(t, err)
assert.Len(t, listUsers, 1)
assert.NoError(ct, err, "Failed to list users during initial validation")
assert.Len(ct, listUsers, 1, "Expected exactly 1 user after first login, got %d", len(listUsers))
wantUsers := []*v1.User{
{
Id: 1,
@@ -509,44 +519,66 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {
})

if diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
t.Fatalf("unexpected users: %s", diff)
ct.Errorf("User validation failed after first login - unexpected users: %s", diff)
}
}, 30*time.Second, 1*time.Second, "validating users after first login")
}, 30*time.Second, 1*time.Second, "validating user1 creation after initial OIDC login")

listNodes, err := headscale.ListNodes()
assertNoErr(t, err)
assert.Len(t, listNodes, 1)
t.Logf("Validating initial node creation at %s", time.Now().Format(TimestampFormat))
var listNodes []*v1.Node
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes during initial validation")
assert.Len(ct, listNodes, 1, "Expected exactly 1 node after first login, got %d", len(listNodes))
}, 30*time.Second, 1*time.Second, "validating initial node creation for user1 after OIDC login")

// Collect expected node IDs for validation after user1 initial login
expectedNodes := make([]types.NodeID, 0, 1)
var nodeID uint64
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status := ts.MustStatus()
assert.NotEmpty(ct, status.Self.ID, "Node ID should be populated in status")
var err error
nodeID, err = strconv.ParseUint(string(status.Self.ID), 10, 64)
assert.NoError(ct, err, "Failed to parse node ID from status")
}, 30*time.Second, 1*time.Second, "waiting for node ID to be populated in status after initial login")
expectedNodes = append(expectedNodes, types.NodeID(nodeID))

// Validate initial connection state for user1
validateInitialConnection(t, headscale, expectedNodes)

// Log out user1 and log in user2, this should create a new node
// for user2, the node should have the same machine key and
// a new node key.
err = ts.Logout()
assertNoErr(t, err)
require.NoError(t, err)

// TODO(kradalby): Not sure why we need to logout twice, but it fails and
// logs in immediately after the first logout and I cannot reproduce it
// manually.
err = ts.Logout()
assertNoErr(t, err)
require.NoError(t, err)

// Wait for logout to complete and then do second logout
t.Logf("Waiting for user1 logout completion at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Check that the first logout completed
status, err := ts.Status()
assert.NoError(ct, err)
assert.Equal(ct, "NeedsLogin", status.BackendState)
}, 30*time.Second, 1*time.Second)
assert.NoError(ct, err, "Failed to get client status during logout validation")
assert.Equal(ct, "NeedsLogin", status.BackendState, "Expected NeedsLogin state after logout, got %s", status.BackendState)
}, 30*time.Second, 1*time.Second, "waiting for user1 logout to complete before user2 login")

u, err = ts.LoginWithURL(headscale.GetEndpoint())
assertNoErr(t, err)
require.NoError(t, err)

_, err = doLoginURL(ts.Hostname(), u)
assertNoErr(t, err)
require.NoError(t, err)

t.Logf("Validating user2 creation at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listUsers, err := headscale.ListUsers()
assertNoErr(t, err)
assert.Len(t, listUsers, 2)
assert.NoError(ct, err, "Failed to list users after user2 login")
assert.Len(ct, listUsers, 2, "Expected exactly 2 users after user2 login, got %d users", len(listUsers))
wantUsers := []*v1.User{
{
Id: 1,
@@ -569,27 +601,83 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {
})

if diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
ct.Errorf("unexpected users: %s", diff)
ct.Errorf("User validation failed after user2 login - expected both user1 and user2: %s", diff)
}
}, 30*time.Second, 1*time.Second, "validating users after new user login")
}, 30*time.Second, 1*time.Second, "validating both user1 and user2 exist after second OIDC login")

var listNodesAfterNewUserLogin []*v1.Node
// First, wait for the new node to be created
t.Logf("Waiting for user2 node creation at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listNodesAfterNewUserLogin, err = headscale.ListNodes()
assert.NoError(ct, err)
assert.Len(ct, listNodesAfterNewUserLogin, 2)
assert.NoError(ct, err, "Failed to list nodes after user2 login")
// We might temporarily have more than 2 nodes during cleanup, so check for at least 2
assert.GreaterOrEqual(ct, len(listNodesAfterNewUserLogin), 2, "Should have at least 2 nodes after user2 login, got %d (may include temporary nodes during cleanup)", len(listNodesAfterNewUserLogin))
}, 30*time.Second, 1*time.Second, "waiting for user2 node creation (allowing temporary extra nodes during cleanup)")

// Machine key is the same as the "machine" has not changed,
// but Node key is not as it is a new node
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[0].GetMachineKey())
assert.Equal(ct, listNodesAfterNewUserLogin[0].GetMachineKey(), listNodesAfterNewUserLogin[1].GetMachineKey())
assert.NotEqual(ct, listNodesAfterNewUserLogin[0].GetNodeKey(), listNodesAfterNewUserLogin[1].GetNodeKey())
}, 30*time.Second, 1*time.Second, "listing nodes after new user login")
// Then wait for cleanup to stabilize at exactly 2 nodes
t.Logf("Waiting for node cleanup stabilization at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listNodesAfterNewUserLogin, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes during cleanup validation")
assert.Len(ct, listNodesAfterNewUserLogin, 2, "Should have exactly 2 nodes after cleanup (1 for user1, 1 for user2), got %d nodes", len(listNodesAfterNewUserLogin))

// Validate that both nodes have the same machine key but different node keys
if len(listNodesAfterNewUserLogin) >= 2 {
// Machine key is the same as the "machine" has not changed,
// but Node key is not as it is a new node
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[0].GetMachineKey(), "Machine key should be preserved from original node")
assert.Equal(ct, listNodesAfterNewUserLogin[0].GetMachineKey(), listNodesAfterNewUserLogin[1].GetMachineKey(), "Both nodes should share the same machine key")
assert.NotEqual(ct, listNodesAfterNewUserLogin[0].GetNodeKey(), listNodesAfterNewUserLogin[1].GetNodeKey(), "Node keys should be different between user1 and user2 nodes")
}
}, 90*time.Second, 2*time.Second, "waiting for node count stabilization at exactly 2 nodes after user2 login")

// Security validation: Only user2's node should be active after user switch
var activeUser2NodeID types.NodeID
for _, node := range listNodesAfterNewUserLogin {
if node.GetUser().GetId() == 2 { // user2
activeUser2NodeID = types.NodeID(node.GetId())
t.Logf("Active user2 node: %d (User: %s)", node.GetId(), node.GetUser().GetName())
break
}
}

// Validate only user2's node is online (security requirement)
t.Logf("Validating only user2 node is online at %s", time.Now().Format(TimestampFormat))
require.EventuallyWithT(t, func(c *assert.CollectT) {
nodeStore, err := headscale.DebugNodeStore()
assert.NoError(c, err, "Failed to get nodestore debug info")

// Check user2 node is online
if node, exists := nodeStore[activeUser2NodeID]; exists {
assert.NotNil(c, node.IsOnline, "User2 node should have online status")
if node.IsOnline != nil {
assert.True(c, *node.IsOnline, "User2 node should be online after login")
}
} else {
assert.Fail(c, "User2 node not found in nodestore")
}
}, 60*time.Second, 2*time.Second, "validating only user2 node is online after user switch")

// Before logging out user2, validate we have exactly 2 nodes and both are stable
t.Logf("Pre-logout validation: checking node stability at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
currentNodes, err := headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes before user2 logout")
assert.Len(ct, currentNodes, 2, "Should have exactly 2 stable nodes before user2 logout, got %d", len(currentNodes))

// Validate node stability - ensure no phantom nodes
for i, node := range currentNodes {
assert.NotNil(ct, node.GetUser(), "Node %d should have a valid user before logout", i)
assert.NotEmpty(ct, node.GetMachineKey(), "Node %d should have a valid machine key before logout", i)
t.Logf("Pre-logout node %d: User=%s, MachineKey=%s", i, node.GetUser().GetName(), node.GetMachineKey()[:16]+"...")
}
}, 60*time.Second, 2*time.Second, "validating stable node count and integrity before user2 logout")

// Log out user2, and log into user1, no new node should be created,
// the node should now "become" node1 again
err = ts.Logout()
assertNoErr(t, err)
require.NoError(t, err)

t.Logf("Logged out take one")
t.Log("timestamp: " + time.Now().Format(TimestampFormat) + "\n")
@@ -598,41 +686,63 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {
// logs in immediately after the first logout and I cannot reproduce it
// manually.
err = ts.Logout()
assertNoErr(t, err)
require.NoError(t, err)

t.Logf("Logged out take two")
t.Log("timestamp: " + time.Now().Format(TimestampFormat) + "\n")

// Wait for logout to complete and then do second logout
t.Logf("Waiting for user2 logout completion at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Check that the first logout completed
status, err := ts.Status()
assert.NoError(ct, err)
assert.Equal(ct, "NeedsLogin", status.BackendState)
}, 30*time.Second, 1*time.Second)
assert.NoError(ct, err, "Failed to get client status during user2 logout validation")
assert.Equal(ct, "NeedsLogin", status.BackendState, "Expected NeedsLogin state after user2 logout, got %s", status.BackendState)
}, 30*time.Second, 1*time.Second, "waiting for user2 logout to complete before user1 relogin")

// Before logging back in, ensure we still have exactly 2 nodes
// Note: We skip validateLogoutComplete here since it expects all nodes to be offline,
// but in OIDC scenario we maintain both nodes in DB with only active user online

// Additional validation that nodes are properly maintained during logout
t.Logf("Post-logout validation: checking node persistence at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
currentNodes, err := headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes after user2 logout")
assert.Len(ct, currentNodes, 2, "Should still have exactly 2 nodes after user2 logout (nodes should persist), got %d", len(currentNodes))

// Ensure both nodes are still valid (not cleaned up incorrectly)
for i, node := range currentNodes {
assert.NotNil(ct, node.GetUser(), "Node %d should still have a valid user after user2 logout", i)
assert.NotEmpty(ct, node.GetMachineKey(), "Node %d should still have a valid machine key after user2 logout", i)
t.Logf("Post-logout node %d: User=%s, MachineKey=%s", i, node.GetUser().GetName(), node.GetMachineKey()[:16]+"...")
}
}, 60*time.Second, 2*time.Second, "validating node persistence and integrity after user2 logout")

// We do not actually "change" the user here, it is done by logging in again
// as the OIDC mock server is kind of like a stack, and the next user is
// prepared and ready to go.
u, err = ts.LoginWithURL(headscale.GetEndpoint())
assertNoErr(t, err)
require.NoError(t, err)

_, err = doLoginURL(ts.Hostname(), u)
assertNoErr(t, err)
require.NoError(t, err)

t.Logf("Waiting for user1 relogin completion at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := ts.Status()
assert.NoError(ct, err)
assert.Equal(ct, "Running", status.BackendState)
}, 30*time.Second, 1*time.Second)
assert.NoError(ct, err, "Failed to get client status during user1 relogin validation")
assert.Equal(ct, "Running", status.BackendState, "Expected Running state after user1 relogin, got %s", status.BackendState)
}, 30*time.Second, 1*time.Second, "waiting for user1 relogin to complete (final login)")

t.Logf("Logged back in")
t.Log("timestamp: " + time.Now().Format(TimestampFormat) + "\n")

t.Logf("Final validation: checking user persistence at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listUsers, err := headscale.ListUsers()
assert.NoError(ct, err)
assert.Len(ct, listUsers, 2)
assert.NoError(ct, err, "Failed to list users during final validation")
assert.Len(ct, listUsers, 2, "Should still have exactly 2 users after user1 relogin, got %d", len(listUsers))
wantUsers := []*v1.User{
{
Id: 1,
@@ -655,59 +765,419 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {
})

if diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
ct.Errorf("unexpected users: %s", diff)
ct.Errorf("Final user validation failed - both users should persist after relogin cycle: %s", diff)
}
}, 30*time.Second, 1*time.Second, "log out user2, and log into user1, no new node should be created")
}, 30*time.Second, 1*time.Second, "validating user persistence after complete relogin cycle (user1->user2->user1)")

var listNodesAfterLoggingBackIn []*v1.Node
// Wait for login to complete and nodes to stabilize
t.Logf("Final node validation: checking node stability after user1 relogin at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listNodesAfterLoggingBackIn, err := headscale.ListNodes()
assert.NoError(ct, err)
assert.Len(ct, listNodesAfterLoggingBackIn, 2)
listNodesAfterLoggingBackIn, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes during final validation")

// Allow for temporary instability during login process
if len(listNodesAfterLoggingBackIn) < 2 {
ct.Errorf("Not enough nodes yet during final validation, got %d, want at least 2", len(listNodesAfterLoggingBackIn))
return
}

// Final check should have exactly 2 nodes
assert.Len(ct, listNodesAfterLoggingBackIn, 2, "Should have exactly 2 nodes after complete relogin cycle, got %d", len(listNodesAfterLoggingBackIn))

// Validate that the machine we had when we logged in the first time, has the same
// machine key, but a different ID than the newly logged in version of the same
// machine.
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[0].GetMachineKey())
assert.Equal(ct, listNodes[0].GetNodeKey(), listNodesAfterNewUserLogin[0].GetNodeKey())
assert.Equal(ct, listNodes[0].GetId(), listNodesAfterNewUserLogin[0].GetId())
|
||||
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[1].GetMachineKey())
|
||||
assert.NotEqual(ct, listNodes[0].GetId(), listNodesAfterNewUserLogin[1].GetId())
|
||||
assert.NotEqual(ct, listNodes[0].GetUser().GetId(), listNodesAfterNewUserLogin[1].GetUser().GetId())
|
||||
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[0].GetMachineKey(), "Original user1 machine key should match user1 node after user switch")
|
||||
assert.Equal(ct, listNodes[0].GetNodeKey(), listNodesAfterNewUserLogin[0].GetNodeKey(), "Original user1 node key should match user1 node after user switch")
|
||||
assert.Equal(ct, listNodes[0].GetId(), listNodesAfterNewUserLogin[0].GetId(), "Original user1 node ID should match user1 node after user switch")
|
||||
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[1].GetMachineKey(), "User1 and user2 nodes should share the same machine key")
|
||||
assert.NotEqual(ct, listNodes[0].GetId(), listNodesAfterNewUserLogin[1].GetId(), "User1 and user2 nodes should have different node IDs")
|
||||
assert.NotEqual(ct, listNodes[0].GetUser().GetId(), listNodesAfterNewUserLogin[1].GetUser().GetId(), "User1 and user2 nodes should belong to different users")
|
||||
|
||||
// Even though we are logging in again with the same user, the previous key has been expired
// and a new one has been generated. The node entry in the database should be the same
// as the user + machine key still matches.
|
||||
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterLoggingBackIn[0].GetMachineKey())
|
||||
assert.NotEqual(ct, listNodes[0].GetNodeKey(), listNodesAfterLoggingBackIn[0].GetNodeKey())
|
||||
assert.Equal(ct, listNodes[0].GetId(), listNodesAfterLoggingBackIn[0].GetId())
|
||||
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterLoggingBackIn[0].GetMachineKey(), "Machine key should remain consistent after user1 relogin")
|
||||
assert.NotEqual(ct, listNodes[0].GetNodeKey(), listNodesAfterLoggingBackIn[0].GetNodeKey(), "Node key should be regenerated after user1 relogin")
|
||||
assert.Equal(ct, listNodes[0].GetId(), listNodesAfterLoggingBackIn[0].GetId(), "Node ID should be preserved for user1 after relogin")
|
||||
|
||||
// The "logged back in" machine should have the same machinekey but a different nodekey
|
||||
// than the version logged in with a different user.
|
||||
assert.Equal(ct, listNodesAfterLoggingBackIn[0].GetMachineKey(), listNodesAfterLoggingBackIn[1].GetMachineKey())
|
||||
assert.NotEqual(ct, listNodesAfterLoggingBackIn[0].GetNodeKey(), listNodesAfterLoggingBackIn[1].GetNodeKey())
|
||||
}, 30*time.Second, 1*time.Second, "log out user2, and log into user1, no new node should be created")
|
||||
}
|
||||
assert.Equal(ct, listNodesAfterLoggingBackIn[0].GetMachineKey(), listNodesAfterLoggingBackIn[1].GetMachineKey(), "Both final nodes should share the same machine key")
|
||||
assert.NotEqual(ct, listNodesAfterLoggingBackIn[0].GetNodeKey(), listNodesAfterLoggingBackIn[1].GetNodeKey(), "Final nodes should have different node keys for different users")
|
||||
|
||||
// assertTailscaleNodesLogout verifies that all provided Tailscale clients
|
||||
// are in the logged-out state (NeedsLogin).
|
||||
func assertTailscaleNodesLogout(t assert.TestingT, clients []TailscaleClient) {
|
||||
if h, ok := t.(interface{ Helper() }); ok {
|
||||
h.Helper()
|
||||
t.Logf("Final validation complete - node counts and key relationships verified at %s", time.Now().Format(TimestampFormat))
|
||||
}, 60*time.Second, 2*time.Second, "validating final node state after complete user1->user2->user1 relogin cycle with detailed key validation")
|
||||
|
||||
// Security validation: Only user1's node should be active after relogin
|
||||
var activeUser1NodeID types.NodeID
|
||||
for _, node := range listNodesAfterLoggingBackIn {
|
||||
if node.GetUser().GetId() == 1 { // user1
|
||||
activeUser1NodeID = types.NodeID(node.GetId())
|
||||
t.Logf("Active user1 node after relogin: %d (User: %s)", node.GetId(), node.GetUser().GetName())
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for _, client := range clients {
|
||||
status, err := client.Status()
|
||||
assert.NoError(t, err, "failed to get status for client %s", client.Hostname())
|
||||
assert.Equal(t, "NeedsLogin", status.BackendState,
|
||||
"client %s should be logged out", client.Hostname())
|
||||
}
|
||||
// Validate only user1's node is online (security requirement)
|
||||
t.Logf("Validating only user1 node is online after relogin at %s", time.Now().Format(TimestampFormat))
|
||||
require.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodeStore, err := headscale.DebugNodeStore()
|
||||
assert.NoError(c, err, "Failed to get nodestore debug info")
|
||||
|
||||
// Check user1 node is online
|
||||
if node, exists := nodeStore[activeUser1NodeID]; exists {
|
||||
assert.NotNil(c, node.IsOnline, "User1 node should have online status after relogin")
|
||||
if node.IsOnline != nil {
|
||||
assert.True(c, *node.IsOnline, "User1 node should be online after relogin")
|
||||
}
|
||||
} else {
|
||||
assert.Fail(c, "User1 node not found in nodestore after relogin")
|
||||
}
|
||||
}, 60*time.Second, 2*time.Second, "validating only user1 node is online after final relogin")
|
||||
}
|
||||
|
||||
func oidcMockUser(username string, emailVerified bool) mockoidc.MockUser {
|
||||
return mockoidc.MockUser{
|
||||
Subject: username,
|
||||
PreferredUsername: username,
|
||||
Email: username + "@headscale.net",
|
||||
		EmailVerified: emailVerified,
	}
}

// TestOIDCFollowUpUrl validates the follow-up login flow.
// Prerequisites:
//   - a short TTL for the registration cache via HEADSCALE_TUNING_REGISTER_CACHE_EXPIRATION
// Scenario:
//   - the client starts a login and receives an initial AuthURL
//   - the test sleeps slightly longer than HEADSCALE_TUNING_REGISTER_CACHE_EXPIRATION so the cache entry expires
//   - the client checks its status to verify that the AuthURL has changed (follow-up URL)
//   - the client uses the new AuthURL to log in, which should complete successfully
|
||||
func TestOIDCFollowUpUrl(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
// Create no nodes and no users
|
||||
scenario, err := NewScenario(
|
||||
ScenarioSpec{
|
||||
OIDCUsers: []mockoidc.MockUser{
|
||||
oidcMockUser("user1", true),
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
oidcMap := map[string]string{
|
||||
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
|
||||
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
|
||||
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
|
||||
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
|
||||
// smaller cache expiration time to quickly expire AuthURL
|
||||
"HEADSCALE_TUNING_REGISTER_CACHE_CLEANUP": "10s",
|
||||
"HEADSCALE_TUNING_REGISTER_CACHE_EXPIRATION": "1m30s",
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnvWithLoginURL(
|
||||
nil,
|
||||
hsic.WithTestName("oidcauthrelog"),
|
||||
hsic.WithConfigEnv(oidcMap),
|
||||
hsic.WithTLS(),
|
||||
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
|
||||
hsic.WithEmbeddedDERPServerOnly(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
|
||||
listUsers, err := headscale.ListUsers()
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, listUsers)
|
||||
|
||||
ts, err := scenario.CreateTailscaleNode(
|
||||
"unstable",
|
||||
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
u, err := ts.LoginWithURL(headscale.GetEndpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// wait for the registration cache to expire
|
||||
// a little bit more than HEADSCALE_TUNING_REGISTER_CACHE_EXPIRATION
|
||||
time.Sleep(2 * time.Minute)
|
||||
|
||||
var newUrl *url.URL
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
st, err := ts.Status()
|
||||
assert.NoError(c, err)
|
||||
assert.Equal(c, "NeedsLogin", st.BackendState)
|
||||
|
||||
// get new AuthURL from daemon
|
||||
newUrl, err = url.Parse(st.AuthURL)
|
||||
assert.NoError(c, err)
|
||||
|
||||
assert.NotEqual(c, u.String(), st.AuthURL, "AuthURL should change")
|
||||
}, 10*time.Second, 200*time.Millisecond, "Waiting for registration cache to expire and status to reflect NeedsLogin")
|
||||
|
||||
_, err = doLoginURL(ts.Hostname(), newUrl)
|
||||
require.NoError(t, err)
|
||||
|
||||
listUsers, err = headscale.ListUsers()
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, listUsers, 1)
|
||||
|
||||
wantUsers := []*v1.User{
|
||||
{
|
||||
Id: 1,
|
||||
Name: "user1",
|
||||
Email: "user1@headscale.net",
|
||||
Provider: "oidc",
|
||||
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
|
||||
},
|
||||
}
|
||||
|
||||
sort.Slice(
|
||||
listUsers, func(i, j int) bool {
|
||||
return listUsers[i].GetId() < listUsers[j].GetId()
|
||||
},
|
||||
)
|
||||
|
||||
if diff := cmp.Diff(
|
||||
wantUsers,
|
||||
listUsers,
|
||||
cmpopts.IgnoreUnexported(v1.User{}),
|
||||
cmpopts.IgnoreFields(v1.User{}, "CreatedAt"),
|
||||
); diff != "" {
|
||||
t.Fatalf("unexpected users: %s", diff)
|
||||
}
|
||||
|
||||
assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
listNodes, err := headscale.ListNodes()
|
||||
assert.NoError(c, err)
|
||||
assert.Len(c, listNodes, 1)
|
||||
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list after OIDC login")
|
||||
}
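
// The sketch below is illustrative only and not part of the diff above: instead of
// hard-coding the two-minute sleep, the wait could be derived from the configured
// registration cache expiration plus a safety margin. The helper name and the
// 30-second margin are assumptions for illustration.
func waitForRegisterCacheExpiry(t *testing.T, configEnv map[string]string) {
	t.Helper()

	expiration, err := time.ParseDuration(configEnv["HEADSCALE_TUNING_REGISTER_CACHE_EXPIRATION"])
	if err != nil {
		// Fall back to the value TestOIDCFollowUpUrl configures above.
		expiration = 90 * time.Second
	}

	// Sleep slightly longer than the expiration so the cached AuthURL is gone
	// before the client asks for a follow-up URL.
	time.Sleep(expiration + 30*time.Second)
}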
|
||||
|
||||
// TestOIDCReloginSameNodeSameUser tests the scenario where a single Tailscale client
|
||||
// authenticates using OIDC (OpenID Connect), logs out, and then logs back in as the same user.
|
||||
//
|
||||
// OIDC is an authentication layer built on top of OAuth 2.0 that allows users to authenticate
|
||||
// using external identity providers (like Google, Microsoft, etc.) rather than managing
|
||||
// credentials directly in headscale.
|
||||
//
|
||||
// This test validates the "same user relogin" behavior in headscale's OIDC authentication flow:
|
||||
// - A single client authenticates via OIDC as user1
|
||||
// - The client logs out, ending the session
|
||||
// - The same client logs back in via OIDC as the same user (user1)
|
||||
// - The test verifies that the user account persists correctly
|
||||
// - The test verifies that the machine key is preserved (since it's the same physical device)
|
||||
// - The test verifies that the node ID is preserved (since it's the same user on the same device)
|
||||
// - The test verifies that the node key is regenerated (since it's a new session)
|
||||
// - The test verifies that the client comes back online properly
|
||||
//
|
||||
// This scenario is important for normal user workflows where someone might need to restart
|
||||
// their Tailscale client, reboot their computer, or temporarily disconnect and reconnect.
|
||||
// It ensures that headscale properly handles session management while preserving device
|
||||
// identity and user associations.
|
||||
//
|
||||
// The test uses a single node scenario (unlike multi-node tests) to focus specifically on
|
||||
// the authentication and session management aspects rather than network topology changes.
|
||||
// The "same node" in the name refers to the same physical device/client, while "same user"
|
||||
// refers to authenticating with the same OIDC identity.
|
||||
func TestOIDCReloginSameNodeSameUser(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
// Create scenario with same user for both login attempts
|
||||
scenario, err := NewScenario(ScenarioSpec{
|
||||
OIDCUsers: []mockoidc.MockUser{
|
||||
oidcMockUser("user1", true), // Initial login
|
||||
oidcMockUser("user1", true), // Relogin with same user
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
oidcMap := map[string]string{
|
||||
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
|
||||
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
|
||||
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
|
||||
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnvWithLoginURL(
|
||||
nil,
|
||||
hsic.WithTestName("oidcsameuser"),
|
||||
hsic.WithConfigEnv(oidcMap),
|
||||
hsic.WithTLS(),
|
||||
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
|
||||
hsic.WithEmbeddedDERPServerOnly(),
|
||||
hsic.WithDERPAsIP(),
|
||||
)
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
|
||||
ts, err := scenario.CreateTailscaleNode("unstable", tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Initial login as user1
|
||||
u, err := ts.LoginWithURL(headscale.GetEndpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = doLoginURL(ts.Hostname(), u)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Logf("Validating initial user1 creation at %s", time.Now().Format(TimestampFormat))
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
listUsers, err := headscale.ListUsers()
|
||||
assert.NoError(ct, err, "Failed to list users during initial validation")
|
||||
assert.Len(ct, listUsers, 1, "Expected exactly 1 user after first login, got %d", len(listUsers))
|
||||
wantUsers := []*v1.User{
|
||||
{
|
||||
Id: 1,
|
||||
Name: "user1",
|
||||
Email: "user1@headscale.net",
|
||||
Provider: "oidc",
|
||||
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
|
||||
},
|
||||
}
|
||||
|
||||
sort.Slice(listUsers, func(i, j int) bool {
|
||||
return listUsers[i].GetId() < listUsers[j].GetId()
|
||||
})
|
||||
|
||||
if diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
|
||||
ct.Errorf("User validation failed after first login - unexpected users: %s", diff)
|
||||
}
|
||||
}, 30*time.Second, 1*time.Second, "validating user1 creation after initial OIDC login")
|
||||
|
||||
t.Logf("Validating initial node creation at %s", time.Now().Format(TimestampFormat))
|
||||
var initialNodes []*v1.Node
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
var err error
|
||||
initialNodes, err = headscale.ListNodes()
|
||||
assert.NoError(ct, err, "Failed to list nodes during initial validation")
|
||||
assert.Len(ct, initialNodes, 1, "Expected exactly 1 node after first login, got %d", len(initialNodes))
|
||||
}, 30*time.Second, 1*time.Second, "validating initial node creation for user1 after OIDC login")
|
||||
|
||||
// Collect expected node IDs for validation after user1 initial login
|
||||
expectedNodes := make([]types.NodeID, 0, 1)
|
||||
var nodeID uint64
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
status := ts.MustStatus()
|
||||
assert.NotEmpty(ct, status.Self.ID, "Node ID should be populated in status")
|
||||
var err error
|
||||
nodeID, err = strconv.ParseUint(string(status.Self.ID), 10, 64)
|
||||
assert.NoError(ct, err, "Failed to parse node ID from status")
|
||||
}, 30*time.Second, 1*time.Second, "waiting for node ID to be populated in status after initial login")
|
||||
expectedNodes = append(expectedNodes, types.NodeID(nodeID))
|
||||
|
||||
// Validate initial connection state for user1
|
||||
validateInitialConnection(t, headscale, expectedNodes)
|
||||
|
||||
// Store initial node keys for comparison
|
||||
initialMachineKey := initialNodes[0].GetMachineKey()
|
||||
initialNodeKey := initialNodes[0].GetNodeKey()
|
||||
initialNodeID := initialNodes[0].GetId()
|
||||
|
||||
// Logout user1
|
||||
err = ts.Logout()
|
||||
require.NoError(t, err)
|
||||
|
||||
// TODO(kradalby): Not sure why we need to logout twice, but it fails and
|
||||
// logs in immediately after the first logout and I cannot reproduce it
|
||||
// manually.
|
||||
err = ts.Logout()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for logout to complete
|
||||
t.Logf("Waiting for user1 logout completion at %s", time.Now().Format(TimestampFormat))
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
// Check that the logout completed
|
||||
status, err := ts.Status()
|
||||
assert.NoError(ct, err, "Failed to get client status during logout validation")
|
||||
assert.Equal(ct, "NeedsLogin", status.BackendState, "Expected NeedsLogin state after logout, got %s", status.BackendState)
|
||||
}, 30*time.Second, 1*time.Second, "waiting for user1 logout to complete before same-user relogin")
|
||||
|
||||
// Validate node persistence during logout (node should remain in DB)
|
||||
t.Logf("Validating node persistence during logout at %s", time.Now().Format(TimestampFormat))
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
listNodes, err := headscale.ListNodes()
|
||||
assert.NoError(ct, err, "Failed to list nodes during logout validation")
|
||||
assert.Len(ct, listNodes, 1, "Should still have exactly 1 node during logout (node should persist in DB), got %d", len(listNodes))
|
||||
}, 30*time.Second, 1*time.Second, "validating node persistence in database during same-user logout")
|
||||
|
||||
// Login again as the same user (user1)
|
||||
u, err = ts.LoginWithURL(headscale.GetEndpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = doLoginURL(ts.Hostname(), u)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Logf("Waiting for user1 relogin completion at %s", time.Now().Format(TimestampFormat))
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
status, err := ts.Status()
|
||||
assert.NoError(ct, err, "Failed to get client status during relogin validation")
|
||||
assert.Equal(ct, "Running", status.BackendState, "Expected Running state after user1 relogin, got %s", status.BackendState)
|
||||
}, 30*time.Second, 1*time.Second, "waiting for user1 relogin to complete (same user)")
|
||||
|
||||
t.Logf("Final validation: checking user persistence after same-user relogin at %s", time.Now().Format(TimestampFormat))
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
listUsers, err := headscale.ListUsers()
|
||||
assert.NoError(ct, err, "Failed to list users during final validation")
|
||||
assert.Len(ct, listUsers, 1, "Should still have exactly 1 user after same-user relogin, got %d", len(listUsers))
|
||||
wantUsers := []*v1.User{
|
||||
{
|
||||
Id: 1,
|
||||
Name: "user1",
|
||||
Email: "user1@headscale.net",
|
||||
Provider: "oidc",
|
||||
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
|
||||
},
|
||||
}
|
||||
|
||||
sort.Slice(listUsers, func(i, j int) bool {
|
||||
return listUsers[i].GetId() < listUsers[j].GetId()
|
||||
})
|
||||
|
||||
if diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
|
||||
ct.Errorf("Final user validation failed - user1 should persist after same-user relogin: %s", diff)
|
||||
}
|
||||
}, 30*time.Second, 1*time.Second, "validating user1 persistence after same-user OIDC relogin cycle")
|
||||
|
||||
var finalNodes []*v1.Node
|
||||
t.Logf("Final node validation: checking node stability after same-user relogin at %s", time.Now().Format(TimestampFormat))
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
finalNodes, err = headscale.ListNodes()
|
||||
assert.NoError(ct, err, "Failed to list nodes during final validation")
|
||||
assert.Len(ct, finalNodes, 1, "Should have exactly 1 node after same-user relogin, got %d", len(finalNodes))
|
||||
|
||||
// Validate node key behavior for same user relogin
|
||||
finalNode := finalNodes[0]
|
||||
|
||||
// Machine key should be preserved (same physical machine)
|
||||
assert.Equal(ct, initialMachineKey, finalNode.GetMachineKey(), "Machine key should be preserved for same user same node relogin")
|
||||
|
||||
// Node ID should be preserved (same user, same machine)
|
||||
assert.Equal(ct, initialNodeID, finalNode.GetId(), "Node ID should be preserved for same user same node relogin")
|
||||
|
||||
// Node key should be regenerated (new session after logout)
|
||||
assert.NotEqual(ct, initialNodeKey, finalNode.GetNodeKey(), "Node key should be regenerated after logout/relogin even for same user")
|
||||
|
||||
t.Logf("Final validation complete - same user relogin key relationships verified at %s", time.Now().Format(TimestampFormat))
|
||||
}, 60*time.Second, 2*time.Second, "validating final node state after same-user OIDC relogin cycle with key preservation validation")
|
||||
|
||||
// Security validation: user1's node should be active after relogin
|
||||
activeUser1NodeID := types.NodeID(finalNodes[0].GetId())
|
||||
t.Logf("Validating user1 node is online after same-user relogin at %s", time.Now().Format(TimestampFormat))
|
||||
require.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
nodeStore, err := headscale.DebugNodeStore()
|
||||
assert.NoError(c, err, "Failed to get nodestore debug info")
|
||||
|
||||
// Check user1 node is online
|
||||
if node, exists := nodeStore[activeUser1NodeID]; exists {
|
||||
assert.NotNil(c, node.IsOnline, "User1 node should have online status after same-user relogin")
|
||||
if node.IsOnline != nil {
|
||||
assert.True(c, *node.IsOnline, "User1 node should be online after same-user relogin")
|
||||
}
|
||||
} else {
|
||||
assert.Fail(c, "User1 node not found in nodestore after same-user relogin")
|
||||
}
|
||||
}, 60*time.Second, 2*time.Second, "validating user1 node is online after same-user OIDC relogin")
|
||||
}
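
// Hypothetical helper (not defined in this diff) that captures the same-device
// relogin invariants asserted above: the machine key and node ID survive the
// logout, while the node key is rotated for the new session.
func assertSameDeviceRelogin(ct *assert.CollectT, before, after *v1.Node) {
	assert.Equal(ct, before.GetMachineKey(), after.GetMachineKey(), "machine key identifies the physical device and should survive relogin")
	assert.Equal(ct, before.GetId(), after.GetId(), "node ID should be reused when the same user logs the same device back in")
	assert.NotEqual(ct, before.GetNodeKey(), after.GetNodeKey(), "node key is per-session and should be rotated on relogin")
}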
|
||||
|
||||
@@ -1,15 +1,19 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/integrationutil"
|
||||
"github.com/samber/lo"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
|
||||
@@ -33,16 +37,16 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
|
||||
hsic.WithDERPAsIP(),
|
||||
hsic.WithTLS(),
|
||||
)
|
||||
assertNoErrHeadscaleEnv(t, err)
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
assertNoErrListClients(t, err)
|
||||
requireNoErrListClients(t, err)
|
||||
|
||||
allIps, err := scenario.ListTailscaleClientsIPs()
|
||||
assertNoErrListClientIPs(t, err)
|
||||
requireNoErrListClientIPs(t, err)
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
assertNoErrSync(t, err)
|
||||
requireNoErrSync(t, err)
|
||||
|
||||
// assertClientsState(t, allClients)
|
||||
|
||||
@@ -54,7 +58,7 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
|
||||
}
|
||||
|
||||
func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
|
||||
func TestAuthWebFlowLogoutAndReloginSameUser(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
spec := ScenarioSpec{
|
||||
@@ -63,7 +67,7 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
|
||||
}
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
err = scenario.CreateHeadscaleEnvWithLoginURL(
|
||||
@@ -72,16 +76,16 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
|
||||
hsic.WithDERPAsIP(),
|
||||
hsic.WithTLS(),
|
||||
)
|
||||
assertNoErrHeadscaleEnv(t, err)
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
assertNoErrListClients(t, err)
|
||||
requireNoErrListClients(t, err)
|
||||
|
||||
allIps, err := scenario.ListTailscaleClientsIPs()
|
||||
assertNoErrListClientIPs(t, err)
|
||||
requireNoErrListClientIPs(t, err)
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
assertNoErrSync(t, err)
|
||||
requireNoErrSync(t, err)
|
||||
|
||||
// assertClientsState(t, allClients)
|
||||
|
||||
@@ -93,15 +97,22 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
assertNoErrGetHeadscale(t, err)
|
||||
requireNoErrGetHeadscale(t, err)
|
||||
|
||||
// Collect expected node IDs for validation
|
||||
expectedNodes := collectExpectedNodeIDs(t, allClients)
|
||||
|
||||
// Validate initial connection state
|
||||
validateInitialConnection(t, headscale, expectedNodes)
|
||||
|
||||
var listNodes []*v1.Node
|
||||
t.Logf("Validating initial node count after web auth at %s", time.Now().Format(TimestampFormat))
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
var err error
|
||||
listNodes, err = headscale.ListNodes()
|
||||
assert.NoError(ct, err)
|
||||
assert.Len(ct, listNodes, len(allClients), "Node count should match client count after login")
|
||||
}, 20*time.Second, 1*time.Second)
|
||||
assert.NoError(ct, err, "Failed to list nodes after web authentication")
|
||||
assert.Len(ct, listNodes, len(allClients), "Expected %d nodes after web auth, got %d", len(allClients), len(listNodes))
|
||||
}, 30*time.Second, 2*time.Second, "validating node count matches client count after web authentication")
|
||||
nodeCountBeforeLogout := len(listNodes)
|
||||
t.Logf("node count before logout: %d", nodeCountBeforeLogout)
|
||||
|
||||
@@ -122,7 +133,10 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleLogout()
|
||||
assertNoErrLogout(t, err)
|
||||
requireNoErrLogout(t, err)
|
||||
|
||||
// Validate that all nodes are offline after logout
|
||||
validateLogoutComplete(t, headscale, expectedNodes)
|
||||
|
||||
t.Logf("all clients logged out")
|
||||
|
||||
@@ -135,8 +149,20 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
|
||||
|
||||
t.Logf("all clients logged in again")
|
||||
|
||||
t.Logf("Validating node persistence after logout at %s", time.Now().Format(TimestampFormat))
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
var err error
|
||||
listNodes, err = headscale.ListNodes()
|
||||
assert.NoError(ct, err, "Failed to list nodes after web flow logout")
|
||||
assert.Len(ct, listNodes, nodeCountBeforeLogout, "Node count should remain unchanged after logout - expected %d nodes, got %d", nodeCountBeforeLogout, len(listNodes))
|
||||
}, 60*time.Second, 2*time.Second, "validating node persistence in database after web flow logout")
|
||||
t.Logf("node count first login: %d, after relogin: %d", nodeCountBeforeLogout, len(listNodes))
|
||||
|
||||
// Validate connection state after relogin
|
||||
validateReloginComplete(t, headscale, expectedNodes)
|
||||
|
||||
allIps, err = scenario.ListTailscaleClientsIPs()
|
||||
assertNoErrListClientIPs(t, err)
|
||||
requireNoErrListClientIPs(t, err)
|
||||
|
||||
allAddrs = lo.Map(allIps, func(x netip.Addr, index int) string {
|
||||
return x.String()
|
||||
@@ -145,14 +171,6 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
|
||||
success = pingAllHelper(t, allClients, allAddrs)
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
|
||||
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
var err error
|
||||
listNodes, err = headscale.ListNodes()
|
||||
assert.NoError(ct, err)
|
||||
assert.Len(ct, listNodes, nodeCountBeforeLogout, "Node count should match before logout count after re-login")
|
||||
}, 20*time.Second, 1*time.Second)
|
||||
t.Logf("node count first login: %d, after relogin: %d", nodeCountBeforeLogout, len(listNodes))
|
||||
|
||||
for _, client := range allClients {
|
||||
ips, err := client.IPs()
|
||||
if err != nil {
|
||||
@@ -180,3 +198,166 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
|
||||
|
||||
t.Logf("all clients IPs are the same")
|
||||
}
|
||||
|
||||
// TestAuthWebFlowLogoutAndReloginNewUser tests the scenario where multiple Tailscale clients
|
||||
// initially authenticate using the web-based authentication flow (where users visit a URL
|
||||
// in their browser to authenticate), then all clients log out and log back in as a different user.
|
||||
//
|
||||
// This test validates the "user switching" behavior in headscale's web authentication flow:
|
||||
// - Multiple clients authenticate via web flow, each to their respective users (user1, user2)
|
||||
// - All clients log out simultaneously
|
||||
// - All clients log back in via web flow, but this time they all authenticate as user1
|
||||
// - The test verifies that user1 ends up with all the client nodes
|
||||
// - The test verifies that user2's original nodes still exist in the database but are offline
|
||||
// - The test verifies network connectivity works after the user switch
|
||||
//
|
||||
// This scenario is important for organizations that need to reassign devices between users
|
||||
// or when consolidating multiple user accounts. It ensures that headscale properly handles
|
||||
// the security implications of user switching while maintaining node persistence in the database.
|
||||
//
|
||||
// The test uses headscale's web authentication flow, which is the most user-friendly method
|
||||
// where authentication happens through a web browser rather than pre-shared keys or OIDC.
|
||||
func TestAuthWebFlowLogoutAndReloginNewUser(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: len(MustTestVersions),
|
||||
Users: []string{"user1", "user2"},
|
||||
}
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
err = scenario.CreateHeadscaleEnvWithLoginURL(
|
||||
nil,
|
||||
hsic.WithTestName("webflowrelnewuser"),
|
||||
hsic.WithDERPAsIP(),
|
||||
hsic.WithTLS(),
|
||||
)
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
requireNoErrListClients(t, err)
|
||||
|
||||
allIps, err := scenario.ListTailscaleClientsIPs()
|
||||
requireNoErrListClientIPs(t, err)
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
requireNoErrSync(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
requireNoErrGetHeadscale(t, err)
|
||||
|
||||
// Collect expected node IDs for validation
|
||||
expectedNodes := collectExpectedNodeIDs(t, allClients)
|
||||
|
||||
// Validate initial connection state
|
||||
validateInitialConnection(t, headscale, expectedNodes)
|
||||
|
||||
var listNodes []*v1.Node
|
||||
t.Logf("Validating initial node count after web auth at %s", time.Now().Format(TimestampFormat))
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
var err error
|
||||
listNodes, err = headscale.ListNodes()
|
||||
assert.NoError(ct, err, "Failed to list nodes after initial web authentication")
|
||||
assert.Len(ct, listNodes, len(allClients), "Expected %d nodes after web auth, got %d", len(allClients), len(listNodes))
|
||||
}, 30*time.Second, 2*time.Second, "validating node count matches client count after initial web authentication")
|
||||
nodeCountBeforeLogout := len(listNodes)
|
||||
t.Logf("node count before logout: %d", nodeCountBeforeLogout)
|
||||
|
||||
// Log out all clients
|
||||
for _, client := range allClients {
|
||||
err := client.Logout()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to logout client %s: %s", client.Hostname(), err)
|
||||
}
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleLogout()
|
||||
requireNoErrLogout(t, err)
|
||||
|
||||
// Validate that all nodes are offline after logout
|
||||
validateLogoutComplete(t, headscale, expectedNodes)
|
||||
|
||||
t.Logf("all clients logged out")
|
||||
|
||||
// Log all clients back in as user1 using web flow
|
||||
// We manually iterate over all clients and authenticate each one as user1
|
||||
// This tests the cross-user re-authentication behavior where ALL clients
|
||||
// (including those originally from user2) are registered to user1
|
||||
for _, client := range allClients {
|
||||
loginURL, err := client.LoginWithURL(headscale.GetEndpoint())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get login URL for client %s: %s", client.Hostname(), err)
|
||||
}
|
||||
|
||||
body, err := doLoginURL(client.Hostname(), loginURL)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to complete login for client %s: %s", client.Hostname(), err)
|
||||
}
|
||||
|
||||
// Register all clients as user1 (this is where cross-user registration happens)
|
||||
// This simulates: headscale nodes register --user user1 --key <key>
|
||||
scenario.runHeadscaleRegister("user1", body)
|
||||
}
|
||||
|
||||
// Wait for all clients to reach running state
|
||||
for _, client := range allClients {
|
||||
err := client.WaitForRunning(integrationutil.PeerSyncTimeout())
|
||||
if err != nil {
|
||||
t.Fatalf("%s tailscale node has not reached running: %s", client.Hostname(), err)
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("all clients logged back in as user1")
|
||||
|
||||
var user1Nodes []*v1.Node
|
||||
t.Logf("Validating user1 node count after relogin at %s", time.Now().Format(TimestampFormat))
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
var err error
|
||||
user1Nodes, err = headscale.ListNodes("user1")
|
||||
assert.NoError(ct, err, "Failed to list nodes for user1 after web flow relogin")
|
||||
assert.Len(ct, user1Nodes, len(allClients), "User1 should have all %d clients after web flow relogin, got %d nodes", len(allClients), len(user1Nodes))
|
||||
}, 60*time.Second, 2*time.Second, "validating user1 has all client nodes after web flow user switch relogin")
|
||||
|
||||
// Collect expected node IDs for user1 after relogin
|
||||
expectedUser1Nodes := make([]types.NodeID, 0, len(user1Nodes))
|
||||
for _, node := range user1Nodes {
|
||||
expectedUser1Nodes = append(expectedUser1Nodes, types.NodeID(node.GetId()))
|
||||
}
|
||||
|
||||
// Validate connection state after relogin as user1
|
||||
validateReloginComplete(t, headscale, expectedUser1Nodes)
|
||||
|
||||
// Validate that user2's old nodes still exist in database (but are expired/offline)
|
||||
// When CLI registration creates new nodes for user1, user2's old nodes remain
|
||||
var user2Nodes []*v1.Node
|
||||
t.Logf("Validating user2 old nodes remain in database after CLI registration to user1 at %s", time.Now().Format(TimestampFormat))
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
var err error
|
||||
user2Nodes, err = headscale.ListNodes("user2")
|
||||
assert.NoError(ct, err, "Failed to list nodes for user2 after CLI registration to user1")
|
||||
assert.Len(ct, user2Nodes, len(allClients)/2, "User2 should still have %d old nodes (likely expired) after CLI registration to user1, got %d nodes", len(allClients)/2, len(user2Nodes))
|
||||
}, 30*time.Second, 2*time.Second, "validating user2 old nodes remain in database after CLI registration to user1")
|
||||
|
||||
t.Logf("Validating client login states after web flow user switch at %s", time.Now().Format(TimestampFormat))
|
||||
for _, client := range allClients {
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
status, err := client.Status()
|
||||
assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())
|
||||
assert.Equal(ct, "user1@test.no", status.User[status.Self.UserID].LoginName, "Client %s should be logged in as user1 after web flow user switch, got %s", client.Hostname(), status.User[status.Self.UserID].LoginName)
|
||||
}, 30*time.Second, 2*time.Second, fmt.Sprintf("validating %s is logged in as user1 after web flow user switch", client.Hostname()))
|
||||
}
|
||||
|
||||
// Test connectivity after user switch
|
||||
allIps, err = scenario.ListTailscaleClientsIPs()
|
||||
requireNoErrListClientIPs(t, err)
|
||||
|
||||
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
|
||||
return x.String()
|
||||
})
|
||||
|
||||
success := pingAllHelper(t, allClients, allAddrs)
|
||||
t.Logf("%d successful pings out of %d after web flow user switch", success, len(allClients)*len(allIps))
|
||||
}
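
// Illustrative helper (the name is an assumption) built on ControlServer.NodesByUser;
// the user-switch test above could use it to assert the expected split: every client
// registered under user1, with user2 keeping only its original, now logged-out nodes.
func nodeCountByUser(t *testing.T, hs ControlServer) map[string]int {
	t.Helper()

	nodesByUser, err := hs.NodesByUser()
	require.NoError(t, err, "failed to list nodes grouped by user")

	counts := make(map[string]int, len(nodesByUser))
	for user, nodes := range nodesByUser {
		counts[user] = len(nodes)
	}

	return counts
}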
|
||||
|
||||
File diff suppressed because it is too large
@@ -25,6 +25,7 @@ type ControlServer interface {
|
||||
CreateUser(user string) (*v1.User, error)
|
||||
CreateAuthKey(user uint64, reusable bool, ephemeral bool) (*v1.PreAuthKey, error)
|
||||
ListNodes(users ...string) ([]*v1.Node, error)
|
||||
DeleteNode(nodeID uint64) error
|
||||
NodesByUser() (map[string][]*v1.Node, error)
|
||||
NodesByName() (map[string]*v1.Node, error)
|
||||
ListUsers() ([]*v1.User, error)
|
||||
@@ -38,4 +39,5 @@ type ControlServer interface {
|
||||
PrimaryRoutes() (*routes.DebugRoutes, error)
|
||||
DebugBatcher() (*hscontrol.DebugBatcherInfo, error)
|
||||
DebugNodeStore() (map[types.NodeID]types.Node, error)
|
||||
DebugFilter() ([]tailcfg.FilterRule, error)
|
||||
}
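
// Illustrative only (assumes the testify assert package is imported in this file):
// how the DebugNodeStore accessor added above is consumed by the tests in this
// change to check whether a specific node is marked online.
func assertNodeOnlineViaNodeStore(c *assert.CollectT, hs ControlServer, id types.NodeID) {
	nodeStore, err := hs.DebugNodeStore()
	assert.NoError(c, err, "failed to get nodestore debug info")

	node, ok := nodeStore[id]
	if !ok {
		assert.Fail(c, "node not found in nodestore")
		return
	}

	if assert.NotNil(c, node.IsOnline, "node should have an online status") {
		assert.True(c, *node.IsOnline, "node should be online")
	}
}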
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/integrationutil"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/stretchr/testify/require"
|
||||
"tailscale.com/derp"
|
||||
"tailscale.com/derp/derphttp"
|
||||
"tailscale.com/net/netmon"
|
||||
@@ -23,7 +24,7 @@ func TestDERPVerifyEndpoint(t *testing.T) {
|
||||
|
||||
// Generate random hostname for the headscale instance
|
||||
hash, err := util.GenerateRandomStringDNSSafe(6)
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
testName := "derpverify"
|
||||
hostname := fmt.Sprintf("hs-%s-%s", testName, hash)
|
||||
|
||||
@@ -31,7 +32,7 @@ func TestDERPVerifyEndpoint(t *testing.T) {
|
||||
|
||||
// Create cert for headscale
|
||||
certHeadscale, keyHeadscale, err := integrationutil.CreateCertificate(hostname)
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: len(MustTestVersions),
|
||||
@@ -39,14 +40,14 @@ func TestDERPVerifyEndpoint(t *testing.T) {
|
||||
}
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
derper, err := scenario.CreateDERPServer("head",
|
||||
dsic.WithCACert(certHeadscale),
|
||||
dsic.WithVerifyClientURL(fmt.Sprintf("https://%s/verify", net.JoinHostPort(hostname, strconv.Itoa(headscalePort)))),
|
||||
)
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
derpRegion := tailcfg.DERPRegion{
|
||||
RegionCode: "test-derpverify",
|
||||
@@ -74,17 +75,17 @@ func TestDERPVerifyEndpoint(t *testing.T) {
|
||||
hsic.WithPort(headscalePort),
|
||||
hsic.WithCustomTLS(certHeadscale, keyHeadscale),
|
||||
hsic.WithDERPConfig(derpMap))
|
||||
assertNoErrHeadscaleEnv(t, err)
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
assertNoErrListClients(t, err)
|
||||
requireNoErrListClients(t, err)
|
||||
|
||||
fakeKey := key.NewNode()
|
||||
DERPVerify(t, fakeKey, derpRegion, false)
|
||||
|
||||
for _, client := range allClients {
|
||||
nodeKey, err := client.GetNodePrivateKey()
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
DERPVerify(t, *nodeKey, derpRegion, true)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
@@ -22,26 +23,26 @@ func TestResolveMagicDNS(t *testing.T) {
|
||||
}
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("magicdns"))
|
||||
assertNoErrHeadscaleEnv(t, err)
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
assertNoErrListClients(t, err)
|
||||
requireNoErrListClients(t, err)
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
assertNoErrSync(t, err)
|
||||
requireNoErrSync(t, err)
|
||||
|
||||
// assertClientsState(t, allClients)
|
||||
|
||||
// Poor man's cache
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
assertNoErrListFQDN(t, err)
|
||||
requireNoErrListFQDN(t, err)
|
||||
|
||||
_, err = scenario.ListTailscaleClientsIPs()
|
||||
assertNoErrListClientIPs(t, err)
|
||||
requireNoErrListClientIPs(t, err)
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, peer := range allClients {
|
||||
@@ -78,7 +79,7 @@ func TestResolveMagicDNSExtraRecordsPath(t *testing.T) {
|
||||
}
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
const erPath = "/tmp/extra_records.json"
|
||||
@@ -109,29 +110,29 @@ func TestResolveMagicDNSExtraRecordsPath(t *testing.T) {
|
||||
hsic.WithEmbeddedDERPServerOnly(),
|
||||
hsic.WithTLS(),
|
||||
)
|
||||
assertNoErrHeadscaleEnv(t, err)
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
assertNoErrListClients(t, err)
|
||||
requireNoErrListClients(t, err)
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
assertNoErrSync(t, err)
|
||||
requireNoErrSync(t, err)
|
||||
|
||||
// assertClientsState(t, allClients)
|
||||
|
||||
// Poor man's cache
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
assertNoErrListFQDN(t, err)
|
||||
requireNoErrListFQDN(t, err)
|
||||
|
||||
_, err = scenario.ListTailscaleClientsIPs()
|
||||
assertNoErrListClientIPs(t, err)
|
||||
requireNoErrListClientIPs(t, err)
|
||||
|
||||
for _, client := range allClients {
|
||||
assertCommandOutputContains(t, client, []string{"dig", "test.myvpn.example.com"}, "6.6.6.6")
|
||||
}
|
||||
|
||||
hs, err := scenario.Headscale()
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write the file directly into place from the docker API.
|
||||
b0, _ := json.Marshal([]tailcfg.DNSRecord{
|
||||
@@ -143,7 +144,7 @@ func TestResolveMagicDNSExtraRecordsPath(t *testing.T) {
|
||||
})
|
||||
|
||||
err = hs.WriteFile(erPath, b0)
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, client := range allClients {
|
||||
assertCommandOutputContains(t, client, []string{"dig", "docker.myvpn.example.com"}, "2.2.2.2")
|
||||
@@ -159,9 +160,9 @@ func TestResolveMagicDNSExtraRecordsPath(t *testing.T) {
|
||||
b2, _ := json.Marshal(extraRecords)
|
||||
|
||||
err = hs.WriteFile(erPath+"2", b2)
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
_, err = hs.Execute([]string{"mv", erPath + "2", erPath})
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, client := range allClients {
|
||||
assertCommandOutputContains(t, client, []string{"dig", "test.myvpn.example.com"}, "6.6.6.6")
|
||||
@@ -179,9 +180,9 @@ func TestResolveMagicDNSExtraRecordsPath(t *testing.T) {
|
||||
})
|
||||
|
||||
err = hs.WriteFile(erPath+"3", b3)
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
_, err = hs.Execute([]string{"cp", erPath + "3", erPath})
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, client := range allClients {
|
||||
assertCommandOutputContains(t, client, []string{"dig", "copy.myvpn.example.com"}, "8.8.8.8")
|
||||
@@ -197,7 +198,7 @@ func TestResolveMagicDNSExtraRecordsPath(t *testing.T) {
|
||||
})
|
||||
command := []string{"echo", fmt.Sprintf("'%s'", string(b4)), ">", erPath}
|
||||
_, err = hs.Execute([]string{"bash", "-c", strings.Join(command, " ")})
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, client := range allClients {
|
||||
assertCommandOutputContains(t, client, []string{"dig", "docker.myvpn.example.com"}, "9.9.9.9")
|
||||
@@ -205,7 +206,7 @@ func TestResolveMagicDNSExtraRecordsPath(t *testing.T) {
|
||||
|
||||
// Delete the file and create a new one to ensure it is picked up again.
|
||||
_, err = hs.Execute([]string{"rm", erPath})
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
// The same paths should still be available as it is not cleared on delete.
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
@@ -219,7 +220,7 @@ func TestResolveMagicDNSExtraRecordsPath(t *testing.T) {
|
||||
// Write a new file, the backoff mechanism should make the filewatcher pick it up
|
||||
// again.
|
||||
err = hs.WriteFile(erPath, b3)
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, client := range allClients {
|
||||
assertCommandOutputContains(t, client, []string{"dig", "copy.myvpn.example.com"}, "8.8.8.8")
|
||||
|
||||
integration/dockertestutil/build.go (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
package dockertestutil
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// RunDockerBuildForDiagnostics runs docker build manually to get detailed error output.
|
||||
// This is used when a docker build fails to provide more detailed diagnostic information
|
||||
// than what dockertest typically provides.
|
||||
func RunDockerBuildForDiagnostics(contextDir, dockerfile string) string {
|
||||
cmd := exec.Command("docker", "build", "-f", dockerfile, contextDir)
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return string(output)
|
||||
}
|
||||
return ""
|
||||
}
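
// logDockerBuildDiagnostics is a hypothetical convenience wrapper (the name and
// the use of the standard library log package are assumptions): when a container
// build fails, re-run the build manually and log whatever output the Docker CLI
// produced so the failure report contains the full builder log.
func logDockerBuildDiagnostics(contextDir, dockerfile string) {
	if out := RunDockerBuildForDiagnostics(contextDir, dockerfile); out != "" {
		log.Printf("docker build diagnostics for %s:\n%s", dockerfile, out)
	}
}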
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
@@ -29,7 +30,7 @@ func TestDERPServerScenario(t *testing.T) {
|
||||
|
||||
derpServerScenario(t, spec, false, func(scenario *Scenario) {
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
assertNoErrListClients(t, err)
|
||||
requireNoErrListClients(t, err)
|
||||
t.Logf("checking %d clients for websocket connections", len(allClients))
|
||||
|
||||
for _, client := range allClients {
|
||||
@@ -43,7 +44,7 @@ func TestDERPServerScenario(t *testing.T) {
|
||||
}
|
||||
|
||||
hsServer, err := scenario.Headscale()
|
||||
assertNoErrGetHeadscale(t, err)
|
||||
requireNoErrGetHeadscale(t, err)
|
||||
|
||||
derpRegion := tailcfg.DERPRegion{
|
||||
RegionCode: "test-derpverify",
|
||||
@@ -79,7 +80,7 @@ func TestDERPServerWebsocketScenario(t *testing.T) {
|
||||
|
||||
derpServerScenario(t, spec, true, func(scenario *Scenario) {
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
assertNoErrListClients(t, err)
|
||||
requireNoErrListClients(t, err)
|
||||
t.Logf("checking %d clients for websocket connections", len(allClients))
|
||||
|
||||
for _, client := range allClients {
|
||||
@@ -108,7 +109,7 @@ func derpServerScenario(
|
||||
IntegrationSkip(t)
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
@@ -128,16 +129,16 @@ func derpServerScenario(
|
||||
"HEADSCALE_DERP_SERVER_VERIFY_CLIENTS": "true",
|
||||
}),
|
||||
)
|
||||
assertNoErrHeadscaleEnv(t, err)
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
assertNoErrListClients(t, err)
|
||||
requireNoErrListClients(t, err)
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
assertNoErrSync(t, err)
|
||||
requireNoErrSync(t, err)
|
||||
|
||||
allHostnames, err := scenario.ListTailscaleClientsFQDNs()
|
||||
assertNoErrListFQDN(t, err)
|
||||
requireNoErrListFQDN(t, err)
|
||||
|
||||
for _, client := range allClients {
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
|
||||
@@ -10,19 +10,15 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/integrationutil"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/samber/lo"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"tailscale.com/client/tailscale/apitype"
|
||||
"tailscale.com/types/key"
|
||||
@@ -38,7 +34,7 @@ func TestPingAllByIP(t *testing.T) {
|
||||
}
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(
|
||||
@@ -48,16 +44,16 @@ func TestPingAllByIP(t *testing.T) {
|
||||
hsic.WithTLS(),
|
||||
hsic.WithIPAllocationStrategy(types.IPAllocationStrategyRandom),
|
||||
)
|
||||
assertNoErrHeadscaleEnv(t, err)
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
assertNoErrListClients(t, err)
|
||||
requireNoErrListClients(t, err)
|
||||
|
||||
allIps, err := scenario.ListTailscaleClientsIPs()
|
||||
assertNoErrListClientIPs(t, err)
|
||||
requireNoErrListClientIPs(t, err)
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
assertNoErrSync(t, err)
|
||||
requireNoErrSync(t, err)
|
||||
|
||||
hs, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
@@ -80,7 +76,7 @@ func TestPingAllByIP(t *testing.T) {
|
||||
|
||||
// Get headscale instance for batcher debug check
|
||||
headscale, err := scenario.Headscale()
|
||||
assertNoErr(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test our DebugBatcher functionality
|
||||
t.Logf("Testing DebugBatcher functionality...")
|
||||
@@ -90,6 +86,108 @@ func TestPingAllByIP(t *testing.T) {
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
|
||||
}

// TestPingAllByIPRandomClientPort is a variant of TestPingAllByIP that validates
// direct connections between nodes with randomize_client_port enabled. This test
// ensures that nodes can establish direct peer-to-peer connections without relying
// on DERP relay servers, and that the randomize_client_port feature works correctly.
func TestPingAllByIPRandomClientPort(t *testing.T) {
IntegrationSkip(t)

spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
MaxWait: dockertestMaxWait(),
}

scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithTestName("pingdirect"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
hsic.WithIPAllocationStrategy(types.IPAllocationStrategyRandom),
hsic.WithConfigEnv(map[string]string{
"HEADSCALE_RANDOMIZE_CLIENT_PORT": "true",
}),
)
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)

allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)

err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)

hs, err := scenario.Headscale()
require.NoError(t, err)

// Extract node IDs for validation
expectedNodes := make([]types.NodeID, 0, len(allClients))
for _, client := range allClients {
status := client.MustStatus()
nodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)
require.NoError(t, err, "failed to parse node ID")
expectedNodes = append(expectedNodes, types.NodeID(nodeID))
}
requireAllClientsOnline(t, hs, expectedNodes, true, "all clients should be online across all systems", 30*time.Second)

allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})

// Perform pings to establish connections
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

// Validate that connections are direct (not relayed through DERP)
// We check that each client has direct connections to its peers
t.Logf("Validating direct connections...")
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
for _, client := range allClients {
status, err := client.Status()
assert.NoError(ct, err, "failed to get status for client %s", client.Hostname())
if err != nil {
continue
}

// Check each peer to see if we have a direct connection
directCount := 0
relayedCount := 0

for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]

// CurAddr indicates the current address being used to communicate with this peer
// Direct connections have CurAddr set to an actual IP:port
// DERP-relayed connections either have no CurAddr or it contains the DERP magic IP
if peerStatus.CurAddr != "" && !strings.Contains(peerStatus.CurAddr, "127.3.3.40") {
// This is a direct connection - CurAddr contains the actual peer IP:port
directCount++
t.Logf("Client %s -> Peer %s: DIRECT connection via %s (relay: %s)",
client.Hostname(), peerStatus.HostName, peerStatus.CurAddr, peerStatus.Relay)
} else {
// This is a relayed connection through DERP
relayedCount++
t.Logf("Client %s -> Peer %s: RELAYED connection (CurAddr: %s, relay: %s)",
client.Hostname(), peerStatus.HostName, peerStatus.CurAddr, peerStatus.Relay)
}
}

// Assert that we have at least some direct connections
// In a local Docker network, we should be able to establish direct connections
assert.Greater(ct, directCount, 0,
"Client %s should have at least one direct connection, got %d direct and %d relayed",
client.Hostname(), directCount, relayedCount)
}
}, 60*time.Second, 2*time.Second, "validating direct connections between peers")
}
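
The direct-versus-relayed check in the test above comes down to inspecting each peer's CurAddr and Relay fields from the Tailscale status, treating 127.3.3.40 as the DERP magic IP. A minimal standalone sketch of that classification, assuming the tailscale.com/ipn/ipnstate types and hypothetical peer values (not the test's actual data):

package main

import (
	"fmt"
	"strings"

	"tailscale.com/ipn/ipnstate"
)

// isDirectConnection mirrors the check used in the test: a peer counts as
// "direct" when CurAddr holds a concrete ip:port that is not the DERP magic
// IP; otherwise traffic is assumed to flow via a DERP relay.
func isDirectConnection(ps *ipnstate.PeerStatus) bool {
	return ps.CurAddr != "" && !strings.Contains(ps.CurAddr, "127.3.3.40")
}

func main() {
	// Hypothetical peer entries, not taken from a real tailnet.
	peers := []*ipnstate.PeerStatus{
		{HostName: "peer-a", CurAddr: "172.18.0.3:41641", Relay: "lhr"},
		{HostName: "peer-b", CurAddr: "", Relay: "fra"},
	}
	for _, ps := range peers {
		if isDirectConnection(ps) {
			fmt.Printf("%s: DIRECT via %s\n", ps.HostName, ps.CurAddr)
		} else {
			fmt.Printf("%s: RELAYED via DERP %q\n", ps.HostName, ps.Relay)
		}
	}
}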

func TestPingAllByIPPublicDERP(t *testing.T) {
IntegrationSkip(t)

@@ -99,23 +197,23 @@ func TestPingAllByIPPublicDERP(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithTestName("pingallbyippubderp"),
)
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

allIps, err := scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
requireNoErrListClientIPs(t, err)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

// assertClientsState(t, allClients)

@@ -148,11 +246,11 @@ func testEphemeralWithOptions(t *testing.T, opts ...hsic.Option) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

headscale, err := scenario.Headscale(opts...)
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

for _, userName := range spec.Users {
user, err := scenario.CreateUser(userName)
@@ -177,13 +275,13 @@ func testEphemeralWithOptions(t *testing.T, opts ...hsic.Option) {
}

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

allIps, err := scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
requireNoErrListClientIPs(t, err)

allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
@@ -200,7 +298,7 @@ func testEphemeralWithOptions(t *testing.T, opts ...hsic.Option) {

}

err = scenario.WaitForTailscaleLogout()
assertNoErrLogout(t, err)
requireNoErrLogout(t, err)

t.Logf("all clients logged out")

@@ -222,7 +320,7 @@ func TestEphemeral2006DeletedTooQuickly(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

headscale, err := scenario.Headscale(
@@ -231,7 +329,7 @@ func TestEphemeral2006DeletedTooQuickly(t *testing.T) {
"HEADSCALE_EPHEMERAL_NODE_INACTIVITY_TIMEOUT": "1m6s",
}),
)
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

for _, userName := range spec.Users {
user, err := scenario.CreateUser(userName)
@@ -256,13 +354,13 @@ func TestEphemeral2006DeletedTooQuickly(t *testing.T) {
}

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

allIps, err := scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
requireNoErrListClientIPs(t, err)

allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
@@ -344,22 +442,22 @@ func TestPingAllByHostname(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("pingallbyname"))
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

// assertClientsState(t, allClients)

allHostnames, err := scenario.ListTailscaleClientsFQDNs()
assertNoErrListFQDN(t, err)
requireNoErrListFQDN(t, err)

success := pingAllHelper(t, allClients, allHostnames)

@@ -379,7 +477,7 @@ func TestTaildrop(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv([]tsic.Option{},
@@ -387,17 +485,17 @@ func TestTaildrop(t *testing.T) {
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
)
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

// This will essentially fetch and cache all the FQDNs
_, err = scenario.ListTailscaleClientsFQDNs()
assertNoErrListFQDN(t, err)
requireNoErrListFQDN(t, err)

for _, client := range allClients {
if !strings.Contains(client.Hostname(), "head") {
@@ -498,7 +596,7 @@ func TestTaildrop(t *testing.T) {
)

result, _, err := client.Execute(command)
assertNoErrf(t, "failed to execute command to ls taildrop: %s", err)
require.NoErrorf(t, err, "failed to execute command to ls taildrop")

log.Printf("Result for %s: %s\n", peer.Hostname(), result)
if fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()) != result {
@@ -518,7 +616,7 @@ func TestUpdateHostnameFromClient(t *testing.T) {

hostnames := map[string]string{
"1": "user1-host",
"2": "User2-Host",
"2": "user2-host",
"3": "user3-host",
}

@@ -528,25 +626,24 @@ func TestUpdateHostnameFromClient(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErrf(t, "failed to create scenario: %s", err)
require.NoErrorf(t, err, "failed to create scenario")
defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("updatehostname"))
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

headscale, err := scenario.Headscale()
assertNoErrGetHeadscale(t, err)
requireNoErrGetHeadscale(t, err)

// update hostnames using the up command
for _, client := range allClients {
status, err := client.Status()
assertNoErr(t, err)
status := client.MustStatus()

command := []string{
"tailscale",
@@ -554,11 +651,11 @@ func TestUpdateHostnameFromClient(t *testing.T) {
"--hostname=" + hostnames[string(status.Self.ID)],
}
_, _, err = client.Execute(command)
assertNoErrf(t, "failed to set hostname: %s", err)
require.NoErrorf(t, err, "failed to set hostname")
}

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

// Wait for nodestore batch processing to complete
// NodeStore batching timeout is 500ms, so we wait up to 1 second
@@ -581,7 +678,11 @@ func TestUpdateHostnameFromClient(t *testing.T) {
for _, node := range nodes {
hostname := hostnames[strconv.FormatUint(node.GetId(), 10)]
assert.Equal(ct, hostname, node.GetName(), "Node name should match hostname")
assert.Equal(ct, util.ConvertWithFQDNRules(hostname), node.GetGivenName(), "Given name should match FQDN rules")

// GivenName is normalized (lowercase, invalid chars stripped)
normalised, err := util.NormaliseHostname(hostname)
assert.NoError(ct, err)
assert.Equal(ct, normalised, node.GetGivenName(), "Given name should match FQDN rules")
}
}, 20*time.Second, 1*time.Second)

@@ -597,7 +698,7 @@ func TestUpdateHostnameFromClient(t *testing.T) {
"--identifier",
strconv.FormatUint(node.GetId(), 10),
})
assertNoErr(t, err)
require.NoError(t, err)
}

// Verify that the server-side rename is reflected in DNSName while HostName remains unchanged

@@ -642,8 +743,7 @@ func TestUpdateHostnameFromClient(t *testing.T) {
}, 60*time.Second, 2*time.Second)

for _, client := range allClients {
status, err := client.Status()
assertNoErr(t, err)
status := client.MustStatus()

command := []string{
"tailscale",
@@ -651,11 +751,11 @@ func TestUpdateHostnameFromClient(t *testing.T) {
"--hostname=" + hostnames[string(status.Self.ID)] + "NEW",
}
_, _, err = client.Execute(command)
assertNoErrf(t, "failed to set hostname: %s", err)
require.NoErrorf(t, err, "failed to set hostname")
}

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

// Wait for nodestore batch processing to complete
// NodeStore batching timeout is 500ms, so we wait up to 1 second
@@ -679,12 +779,13 @@ func TestUpdateHostnameFromClient(t *testing.T) {
for _, node := range nodes {
hostname := hostnames[strconv.FormatUint(node.GetId(), 10)]
givenName := fmt.Sprintf("%d-givenname", node.GetId())
if node.GetName() != hostname+"NEW" || node.GetGivenName() != givenName {
// Hostnames are lowercased before being stored, so "NEW" becomes "new"
if node.GetName() != hostname+"new" || node.GetGivenName() != givenName {
return false
}
}
return true
}, time.Second, 50*time.Millisecond, "hostname updates should be reflected in node list with NEW suffix")
}, time.Second, 50*time.Millisecond, "hostname updates should be reflected in node list with new suffix")
}
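
The assertions in the hostname test above rely on the given name being a DNS-safe, lowercased form of the client hostname ("User2-Host" is stored as "user2-host", and the "NEW" suffix comes back as "new"). A rough standalone approximation of that kind of normalization, for illustration only and not headscale's actual util.NormaliseHostname implementation:

package main

import (
	"fmt"
	"strings"
)

// normaliseHostname is a simplified, hypothetical stand-in: lowercase the
// name, replace anything outside [a-z0-9-] with a hyphen, then trim
// leading/trailing hyphens so the result is usable as a DNS label.
func normaliseHostname(name string) string {
	lower := strings.ToLower(name)
	mapped := strings.Map(func(r rune) rune {
		switch {
		case r >= 'a' && r <= 'z', r >= '0' && r <= '9', r == '-':
			return r
		default:
			return '-'
		}
	}, lower)
	return strings.Trim(mapped, "-")
}

func main() {
	for _, in := range []string{"User2-Host", "user1-hostNEW"} {
		fmt.Printf("%s -> %s\n", in, normaliseHostname(in))
	}
}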

func TestExpireNode(t *testing.T) {
@@ -696,20 +797,20 @@ func TestExpireNode(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("expirenode"))
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

allIps, err := scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
requireNoErrListClientIPs(t, err)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

// assertClientsState(t, allClients)

@@ -731,22 +832,22 @@ func TestExpireNode(t *testing.T) {
}

headscale, err := scenario.Headscale()
assertNoErr(t, err)
require.NoError(t, err)

// TODO(kradalby): This is Headscale specific and would not play nicely
// with other implementations of the ControlServer interface
result, err := headscale.Execute([]string{
"headscale", "nodes", "expire", "--identifier", "1", "--output", "json",
})
assertNoErr(t, err)
require.NoError(t, err)

var node v1.Node
err = json.Unmarshal([]byte(result), &node)
assertNoErr(t, err)
require.NoError(t, err)

var expiredNodeKey key.NodePublic
err = expiredNodeKey.UnmarshalText([]byte(node.GetNodeKey()))
assertNoErr(t, err)
require.NoError(t, err)

t.Logf("Node %s with node_key %s has been expired", node.GetName(), expiredNodeKey.String())

@@ -772,26 +873,25 @@ func TestExpireNode(t *testing.T) {

// Verify that the expired node has been marked in all peers list.
for _, client := range allClients {
status, err := client.Status()
assertNoErr(t, err)
if client.Hostname() == node.GetName() {
continue
}

if client.Hostname() != node.GetName() {
t.Logf("available peers of %s: %v", client.Hostname(), status.Peers())
assert.EventuallyWithT(t, func(c *assert.CollectT) {
status, err := client.Status()
assert.NoError(c, err)

// Ensures that the node is present, and that it is expired.
if peerStatus, ok := status.Peer[expiredNodeKey]; ok {
assertNotNil(t, peerStatus.Expired)
assert.NotNil(t, peerStatus.KeyExpiry)
peerStatus, ok := status.Peer[expiredNodeKey]
assert.True(c, ok, "expired node key should be present in peer list")

if ok {
assert.NotNil(c, peerStatus.Expired)
assert.NotNil(c, peerStatus.KeyExpiry)

t.Logf(
"node %q should have a key expire before %s, was %s",
peerStatus.HostName,
now.String(),
peerStatus.KeyExpiry,
)
if peerStatus.KeyExpiry != nil {
assert.Truef(
t,
c,
peerStatus.KeyExpiry.Before(now),
"node %q should have a key expire before %s, was %s",
peerStatus.HostName,
@@ -801,7 +901,7 @@ func TestExpireNode(t *testing.T) {
}

assert.Truef(
t,
c,
peerStatus.Expired,
"node %q should be expired, expired is %v",
peerStatus.HostName,
@@ -810,24 +910,112 @@ func TestExpireNode(t *testing.T) {

_, stderr, _ := client.Execute([]string{"tailscale", "ping", node.GetName()})
if !strings.Contains(stderr, "node key has expired") {
t.Errorf(
c.Errorf(
"expected to be unable to ping expired host %q from %q",
node.GetName(),
client.Hostname(),
)
}
} else {
t.Errorf("failed to find node %q with nodekey (%s) in mapresponse, should be present even if it is expired", node.GetName(), expiredNodeKey)
}
} else {
if status.Self.KeyExpiry != nil {
assert.Truef(t, status.Self.KeyExpiry.Before(now), "node %q should have a key expire before %s, was %s", status.Self.HostName, now.String(), status.Self.KeyExpiry)
}
}, 10*time.Second, 200*time.Millisecond, "Waiting for expired node status to propagate")
}
}

// NeedsLogin means that the node has understood that it is no longer
// valid.
assert.Equalf(t, "NeedsLogin", status.BackendState, "checking node %q", status.Self.HostName)
// TestSetNodeExpiryInFuture tests setting arbitrary expiration date
// New expiration date should be stored in the db and propagated to all peers
func TestSetNodeExpiryInFuture(t *testing.T) {
IntegrationSkip(t)

spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1"},
}

scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("expirenodefuture"))
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)

err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)

headscale, err := scenario.Headscale()
require.NoError(t, err)

targetExpiry := time.Now().Add(2 * time.Hour).Round(time.Second).UTC()

result, err := headscale.Execute(
[]string{
"headscale", "nodes", "expire",
"--identifier", "1",
"--output", "json",
"--expiry", targetExpiry.Format(time.RFC3339),
},
)
require.NoError(t, err)

var node v1.Node
err = json.Unmarshal([]byte(result), &node)
require.NoError(t, err)

require.True(t, node.GetExpiry().AsTime().After(time.Now()))
require.WithinDuration(t, targetExpiry, node.GetExpiry().AsTime(), 2*time.Second)

var nodeKey key.NodePublic
err = nodeKey.UnmarshalText([]byte(node.GetNodeKey()))
require.NoError(t, err)

for _, client := range allClients {
if client.Hostname() == node.GetName() {
continue
}

assert.EventuallyWithT(
t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err)

peerStatus, ok := status.Peer[nodeKey]
assert.True(ct, ok, "node key should be present in peer list")

if !ok {
return
}

assert.NotNil(ct, peerStatus.KeyExpiry)
assert.NotNil(ct, peerStatus.Expired)

if peerStatus.KeyExpiry != nil {
assert.WithinDuration(
ct,
targetExpiry,
*peerStatus.KeyExpiry,
5*time.Second,
"node %q should have key expiry near the requested future time",
peerStatus.HostName,
)

assert.Truef(
ct,
peerStatus.KeyExpiry.After(time.Now()),
"node %q should have a key expiry timestamp in the future",
peerStatus.HostName,
)
}

assert.Falsef(
ct,
peerStatus.Expired,
"node %q should not be marked as expired",
peerStatus.HostName,
)
}, 3*time.Minute, 5*time.Second, "Waiting for future expiry to propagate",
)
}
}
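
The future-expiry check above depends on RFC3339 having one-second precision: the target is rounded to a whole second before being passed to --expiry, so the value survives the string round trip and can be compared with a small tolerance. A minimal standalone sketch of that round trip, using hypothetical values rather than the test's:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Round to a whole second so the RFC3339 string loses no precision.
	target := time.Now().Add(2 * time.Hour).Round(time.Second).UTC()
	flag := target.Format(time.RFC3339) // the kind of value passed to --expiry

	parsed, err := time.Parse(time.RFC3339, flag)
	if err != nil {
		panic(err)
	}

	// After the round trip the parsed time matches the target exactly,
	// which is why a tight WithinDuration tolerance is safe.
	fmt.Printf("flag=%s equal=%v\n", flag, parsed.Equal(target))
}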

@@ -840,20 +1028,20 @@ func TestNodeOnlineStatus(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("online"))
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

allIps, err := scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
requireNoErrListClientIPs(t, err)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

// assertClientsState(t, allClients)

@@ -865,15 +1053,17 @@ func TestNodeOnlineStatus(t *testing.T) {
t.Logf("before expire: %d successful pings out of %d", success, len(allClients)*len(allIps))

for _, client := range allClients {
status, err := client.Status()
assertNoErr(t, err)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
status, err := client.Status()
assert.NoError(c, err)

// Assert that we have the original count - self
assert.Len(t, status.Peers(), len(MustTestVersions)-1)
// Assert that we have the original count - self
assert.Len(c, status.Peers(), len(MustTestVersions)-1)
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected peer count")
}

headscale, err := scenario.Headscale()
assertNoErr(t, err)
require.NoError(t, err)

// Duration is chosen arbitrarily, 10m is reported in #1561
testDuration := 12 * time.Minute
@@ -963,7 +1153,7 @@ func TestPingAllByIPManyUpDown(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv(
@@ -973,16 +1163,16 @@ func TestPingAllByIPManyUpDown(t *testing.T) {
hsic.WithDERPAsIP(),
hsic.WithTLS(),
)
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

allIps, err := scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
requireNoErrListClientIPs(t, err)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

// assertClientsState(t, allClients)

@@ -992,7 +1182,7 @@ func TestPingAllByIPManyUpDown(t *testing.T) {

// Get headscale instance for batcher debug checks
headscale, err := scenario.Headscale()
assertNoErr(t, err)
require.NoError(t, err)

// Initial check: all nodes should be connected to batcher
// Extract node IDs for validation
@@ -1000,7 +1190,7 @@ func TestPingAllByIPManyUpDown(t *testing.T) {
for _, client := range allClients {
status := client.MustStatus()
nodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)
assertNoErr(t, err)
require.NoError(t, err)
expectedNodes = append(expectedNodes, types.NodeID(nodeID))
}
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected to batcher", 30*time.Second)
@@ -1072,7 +1262,7 @@ func Test2118DeletingOnlineNodePanics(t *testing.T) {
}

scenario, err := NewScenario(spec)
assertNoErr(t, err)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)

err = scenario.CreateHeadscaleEnv(
@@ -1081,16 +1271,16 @@ func Test2118DeletingOnlineNodePanics(t *testing.T) {
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
)
assertNoErrHeadscaleEnv(t, err)
requireNoErrHeadscaleEnv(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
requireNoErrListClients(t, err)

allIps, err := scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
requireNoErrListClientIPs(t, err)

err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
requireNoErrSync(t, err)

allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
@@ -1100,7 +1290,7 @@ func Test2118DeletingOnlineNodePanics(t *testing.T) {
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

headscale, err := scenario.Headscale()
assertNoErr(t, err)
require.NoError(t, err)

// Test list all nodes after added otherUser
var nodeList []v1.Node
@@ -1170,159 +1360,3 @@ func Test2118DeletingOnlineNodePanics(t *testing.T) {
assert.True(t, nodeListAfter[0].GetOnline())
assert.Equal(t, nodeList[1].GetId(), nodeListAfter[0].GetId())
}

// NodeSystemStatus represents the online status of a node across different systems
type NodeSystemStatus struct {
Batcher bool
BatcherConnCount int
MapResponses bool
NodeStore bool
}

// requireAllSystemsOnline checks that nodes are online/offline across batcher, mapresponses, and nodestore
func requireAllClientsOnline(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID, expectedOnline bool, message string, timeout time.Duration) {
t.Helper()

startTime := time.Now()
t.Logf("requireAllSystemsOnline: Starting validation at %s - %s", startTime.Format(TimestampFormat), message)

var prevReport string
require.EventuallyWithT(t, func(c *assert.CollectT) {
// Get batcher state
debugInfo, err := headscale.DebugBatcher()
assert.NoError(c, err, "Failed to get batcher debug info")
if err != nil {
return
}

// Get map responses
mapResponses, err := headscale.GetAllMapReponses()
assert.NoError(c, err, "Failed to get map responses")
if err != nil {
return
}

// Get nodestore state
nodeStore, err := headscale.DebugNodeStore()
assert.NoError(c, err, "Failed to get nodestore debug info")
if err != nil {
return
}

// Validate node counts first
expectedCount := len(expectedNodes)
assert.Equal(c, expectedCount, debugInfo.TotalNodes, "Batcher total nodes mismatch")
assert.Equal(c, expectedCount, len(nodeStore), "NodeStore total nodes mismatch")

// Check that we have map responses for expected nodes
mapResponseCount := len(mapResponses)
assert.Equal(c, expectedCount, mapResponseCount, "MapResponses total nodes mismatch")

// Build status map for each node
nodeStatus := make(map[types.NodeID]NodeSystemStatus)

// Initialize all expected nodes
for _, nodeID := range expectedNodes {
nodeStatus[nodeID] = NodeSystemStatus{}
}

// Check batcher state
for nodeIDStr, nodeInfo := range debugInfo.ConnectedNodes {
nodeID := types.MustParseNodeID(nodeIDStr)
if status, exists := nodeStatus[nodeID]; exists {
status.Batcher = nodeInfo.Connected
status.BatcherConnCount = nodeInfo.ActiveConnections
nodeStatus[nodeID] = status
}
}

// Check map responses using buildExpectedOnlineMap
onlineFromMaps := make(map[types.NodeID]bool)
onlineMap := integrationutil.BuildExpectedOnlineMap(mapResponses)
for nodeID := range nodeStatus {
NODE_STATUS:
for id, peerMap := range onlineMap {
if id == nodeID {
continue
}

online := peerMap[nodeID]
// If the node is offline in any map response, we consider it offline
if !online {
onlineFromMaps[nodeID] = false
continue NODE_STATUS
}

onlineFromMaps[nodeID] = true
}
}
assert.Lenf(c, onlineFromMaps, expectedCount, "MapResponses missing nodes in status check")

// Update status with map response data
for nodeID, online := range onlineFromMaps {
if status, exists := nodeStatus[nodeID]; exists {
status.MapResponses = online
nodeStatus[nodeID] = status
}
}

// Check nodestore state
for nodeID, node := range nodeStore {
if status, exists := nodeStatus[nodeID]; exists {
// Check if node is online in nodestore
status.NodeStore = node.IsOnline != nil && *node.IsOnline
nodeStatus[nodeID] = status
}
}

// Verify all systems show nodes in expected state and report failures
allMatch := true
var failureReport strings.Builder

ids := types.NodeIDs(maps.Keys(nodeStatus))
slices.Sort(ids)
for _, nodeID := range ids {
status := nodeStatus[nodeID]
systemsMatch := (status.Batcher == expectedOnline) &&
(status.MapResponses == expectedOnline) &&
(status.NodeStore == expectedOnline)

if !systemsMatch {
allMatch = false
stateStr := "offline"
if expectedOnline {
stateStr = "online"
}
failureReport.WriteString(fmt.Sprintf("node:%d is not fully %s:\n", nodeID, stateStr))
failureReport.WriteString(fmt.Sprintf(" - batcher: %t\n", status.Batcher))
failureReport.WriteString(fmt.Sprintf(" - conn count: %d\n", status.BatcherConnCount))
failureReport.WriteString(fmt.Sprintf(" - mapresponses: %t (down with at least one peer)\n", status.MapResponses))
failureReport.WriteString(fmt.Sprintf(" - nodestore: %t\n", status.NodeStore))
}
}

if !allMatch {
if diff := cmp.Diff(prevReport, failureReport.String()); diff != "" {
t.Log("Diff between reports:")
t.Logf("Prev report: \n%s\n", prevReport)
t.Logf("New report: \n%s\n", failureReport.String())
t.Log("timestamp: " + time.Now().Format(TimestampFormat) + "\n")
prevReport = failureReport.String()
}

failureReport.WriteString("timestamp: " + time.Now().Format(TimestampFormat) + "\n")

assert.Fail(c, failureReport.String())
}

stateStr := "offline"
if expectedOnline {
stateStr = "online"
}
assert.True(c, allMatch, fmt.Sprintf("Not all nodes are %s across all systems", stateStr))
}, timeout, 2*time.Second, message)

endTime := time.Now()
duration := endTime.Sub(startTime)
t.Logf("requireAllSystemsOnline: Completed validation at %s - Duration: %v - %s", endTime.Format(TimestampFormat), duration, message)
}
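
The map-response part of the helper above applies a strict rule: a node only counts as online if every other node's map response reports it online; a single stale map response marks it offline. A simplified sketch of that aggregation, using plain uint64 IDs instead of the real types.NodeID and BuildExpectedOnlineMap structures:

package main

import "fmt"

// onlineEverywhere reports whether nodeID is shown as online by every other
// observer's map response. onlineMap[observer][peer] loosely mirrors the map
// produced by BuildExpectedOnlineMap, with plain uint64 IDs for brevity.
func onlineEverywhere(onlineMap map[uint64]map[uint64]bool, nodeID uint64) bool {
	for observer, peers := range onlineMap {
		if observer == nodeID {
			continue // a node does not observe itself
		}
		if !peers[nodeID] {
			return false // offline in any single map response means offline
		}
	}
	return true
}

func main() {
	onlineMap := map[uint64]map[uint64]bool{
		1: {2: true, 3: true},
		2: {1: true, 3: false},
		3: {1: true, 2: true},
	}
	fmt.Println(onlineEverywhere(onlineMap, 1)) // true
	fmt.Println(onlineEverywhere(onlineMap, 3)) // false: node 2 reports it offline
}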
integration/helpers.go — new file, 1049 lines; diff suppressed because it is too large. Some files were not shown because too many files have changed in this diff.